max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
examples/operator_v1.py | dciangot/pymesos | 177 | 6622851 | #!/usr/bin/env python2.7
from __future__ import print_function
import sys
from pymesos import MesosOperatorMasterDriver, OperatorMaster
class MinimalOperator(OperatorMaster):
    """Operator master callback handler that simply logs every event it sees.

    NOTE(review): `logging` is bound at module level by the import inside the
    __main__ guard below; these callbacks assume that binding exists at call
    time (the module is meant to be run as a script).
    """

    def __init__(self):
        pass

    def _log_event(self, label, details):
        # Single shared logging path for all operator callbacks.
        logging.debug(label)
        logging.debug(details)

    def taskAdded(self, task_info):
        self._log_event('Task added', task_info)

    def taskUpdated(self, task_info):
        self._log_event('Task updated', task_info)

    def frameworkAdded(self, framework_info):
        self._log_event('Framework added', framework_info)

    def frameworkUpdated(self, framework_info):
        self._log_event('Framework updated', framework_info)

    def frameworkRemoved(self, framework_info):
        self._log_event('Framework removed', framework_info)

    def agentAdded(self, agent_info):
        self._log_event('Agent added', agent_info)

    def agentRemoved(self, agent_info):
        self._log_event('Agent removed', agent_info)
def main(master):
    # Connect an operator driver to the given Mesos master address, perform a
    # single health check, then block processing operator events.
    driver = MesosOperatorMasterDriver(master, MinimalOperator())
    res = driver.getHealth()  # NOTE(review): presumably a health/status payload -- confirm against pymesos docs
    print(res)
    driver.run()  # blocks until the driver stops
if __name__ == '__main__':
    # logging is imported here (script entry point); the MinimalOperator
    # callbacks above rely on this module-level binding.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    # Require exactly one argument: the Mesos master address.
    if len(sys.argv) != 2:
        print("Usage: {} <mesos_master>".format(sys.argv[0]))
        sys.exit(1)
    else:
        main(sys.argv[1])
| #!/usr/bin/env python2.7
from __future__ import print_function
import sys
from pymesos import MesosOperatorMasterDriver, OperatorMaster
class MinimalOperator(OperatorMaster):
    """Operator master callback handler that simply logs every event it sees.

    NOTE(review): `logging` is bound at module level by the import inside the
    __main__ guard below; these callbacks assume that binding exists at call
    time (the module is meant to be run as a script).
    """

    def __init__(self):
        pass

    def _log_event(self, label, details):
        # Single shared logging path for all operator callbacks.
        logging.debug(label)
        logging.debug(details)

    def taskAdded(self, task_info):
        self._log_event('Task added', task_info)

    def taskUpdated(self, task_info):
        self._log_event('Task updated', task_info)

    def frameworkAdded(self, framework_info):
        self._log_event('Framework added', framework_info)

    def frameworkUpdated(self, framework_info):
        self._log_event('Framework updated', framework_info)

    def frameworkRemoved(self, framework_info):
        self._log_event('Framework removed', framework_info)

    def agentAdded(self, agent_info):
        self._log_event('Agent added', agent_info)

    def agentRemoved(self, agent_info):
        self._log_event('Agent removed', agent_info)
def main(master):
    # Connect an operator driver to the given Mesos master address, perform a
    # single health check, then block processing operator events.
    driver = MesosOperatorMasterDriver(master, MinimalOperator())
    res = driver.getHealth()  # NOTE(review): presumably a health/status payload -- confirm against pymesos docs
    print(res)
    driver.run()  # blocks until the driver stops
if __name__ == '__main__':
    # logging is imported here (script entry point); the MinimalOperator
    # callbacks above rely on this module-level binding.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    # Require exactly one argument: the Mesos master address.
    if len(sys.argv) != 2:
        print("Usage: {} <mesos_master>".format(sys.argv[0]))
        sys.exit(1)
    else:
        main(sys.argv[1])
| ru | 0.174408 | #!/usr/bin/env python2.7 | 2.245847 | 2 |
tests/plugins/twitter.py | alfie-max/Publish | 1 | 6622852 | <filename>tests/plugins/twitter.py
class Twitter(object):
def __init__(self):
self.__fields__ = ['Message']
def VerifyCredentials(self):
return False
def Authorize(self):
pass
def VerifyFields(self, fields):
return True
def SendMsg(self, msg):
return {'':''}
__plugin__ = Twitter
__cname__ = 'twitter'
| <filename>tests/plugins/twitter.py
class Twitter(object):
def __init__(self):
self.__fields__ = ['Message']
def VerifyCredentials(self):
return False
def Authorize(self):
pass
def VerifyFields(self, fields):
return True
def SendMsg(self, msg):
return {'':''}
__plugin__ = Twitter
__cname__ = 'twitter'
| none | 1 | 2.210048 | 2 | |
handyrep/plugins/multi_pgbouncer.py | ubiquitousthey/handyrep | 0 | 6622853 | <reponame>ubiquitousthey/handyrep
# plugin method for failing over connections
# using pgbouncer
# rewrites the list of databases
# plugin for users running multiple pgbouncer servers
# requires that each pgbouncer server be in the servers dictionary
# as role "pgbouncer" and enabled.
# further, this plugin requires that the handyrep user, DB and password be set
# up on pgbouncer as a valid connection string.
from plugins.handyrepplugin import HandyRepPlugin
class multi_pgbouncer(HandyRepPlugin):
    """HandyRep plugin that drives connection failover across multiple
    pgbouncer servers by rewriting each server's database list.

    Every pgbouncer server must appear in self.servers with role "pgbouncer"
    and enabled=True, and handyrep's user/DB/password must be a valid
    pgbouncer connection string.
    """

    def run(self, newmaster=None):
        """Repoint all enabled pgbouncers at the new (or current) master.

        Args:
            newmaster: server name to use as master; defaults to the
                currently registered master.

        Returns:
            Result dict from self.rd(); on failure the message lists the
            bouncers that could not be reconfigured.
        """
        if newmaster:
            master = newmaster
        else:
            master = self.get_master_name()
        blist = self.bouncer_list()
        faillist = []
        for bserv in blist:
            bpush = self.push_config(bserv, master)
            if self.failed(bpush):
                self.set_bouncer_status(bserv, "unavailable", 4, "unable to reconfigure pgbouncer server for failover")
                faillist.append(bserv)
        if faillist:
            # report failure if we couldn't reconfigure any of the servers
            return self.rd(False, "some pgbouncer servers did not change their configuration at failover: %s" % ','.join(faillist))
        else:
            return self.rd(True, "pgbouncer failover successful")

    def init(self, bouncerserver=None):
        """Initialize pgbouncer servers with the correct connections.

        Args:
            bouncerserver: a single bouncer to initialize; when None, all
                enabled bouncers are initialized.
        """
        if bouncerserver:
            blist = [bouncerserver, ]
        else:
            blist = self.bouncer_list()
        master = self.get_master_name()
        faillist = []
        for bserv in blist:
            bpush = self.push_config(bserv, master)
            # if we can't push a config, then add this bouncer server to the
            # list of failed servers and mark it unavailable
            if self.failed(bpush):
                self.set_bouncer_status(bserv, "unavailable", 4, "unable to reconfigure pgbouncer server for failover")
                faillist.append(bserv)
            else:
                try:
                    pgbcn = self.connection(bserv)
                except:
                    self.set_bouncer_status(bserv, "unavailable", 4, "pgbouncer configured, but does not accept connections")
                    faillist.append(bserv)
                else:
                    pgbcn.close()
                    self.set_bouncer_status(bserv, "healthy", 1, "pgbouncer initialized")
        if faillist:
            # report failure if we couldn't reconfigure any of the servers
            return self.rd(False, "some pgbouncer servers could not be initialized: %s" % ','.join(faillist))
        else:
            return self.rd(True, "pgbouncer initialization successful")

    def set_bouncer_status(self, bouncerserver, status, status_no, status_message):
        """Record status fields (and a timestamp) for one bouncer server."""
        self.servers[bouncerserver]["status"] = status
        self.servers[bouncerserver]["status_no"] = status_no
        self.servers[bouncerserver]["status_message"] = status_message
        self.servers[bouncerserver]["status_ts"] = self.now_string()
        return

    def push_config(self, bouncerserver, newmaster=None):
        """Push a regenerated config to one pgbouncer server and restart it."""
        if newmaster:
            master = newmaster
        else:
            master = self.get_master_name()
        # get configuration
        dbsect = {"dbsection": self.dbconnect_list(master), "port": self.servers[bouncerserver]["port"]}
        # push new config
        myconf = self.conf["plugins"]["multi_pgbouncer"]
        writeconf = self.push_template(bouncerserver, myconf["template"], myconf["config_location"], dbsect, myconf["owner"])
        if self.failed(writeconf):
            return self.rd(False, "could not push new pgbouncer configuration to pgbouncer server")
        # restart pgbouncer
        restart_command = "%s -u %s -d -R %s" % (myconf["pgbouncerbin"], myconf["owner"], myconf["config_location"],)
        rsbouncer = self.run_as_root(bouncerserver, [restart_command, ])
        if self.succeeded(rsbouncer):
            return self.rd(True, "pgbouncer configuration updated")
        else:
            return self.rd(False, "unable to restart pgbouncer")

    def bouncer_list(self):
        """Return the names of all enabled servers with role "pgbouncer"."""
        blist = []
        # .items() instead of py2-only .iteritems(), for py2/py3 compatibility
        # and consistency with dbconnect_line below
        for serv, servdeets in self.servers.items():
            if servdeets["role"] == "pgbouncer" and servdeets["enabled"]:
                blist.append(serv)
        return blist

    def test(self):
        """Verify plugin configuration and SSH access to every bouncer."""
        # check that we have all config variables required
        if self.failed(self.test_plugin_conf("multi_pgbouncer", "pgbouncerbin", "template", "owner", "config_location", "readonly_suffix", "all_replicas")):
            return self.rd(False, "multi-pgbouncer failover is not configured")
        if self.failed(self.test_plugin_conf("multi_pgbouncer", "database_list") or self.test_plugin_conf("multi_pgbouncer", "databases")):
            return self.rd(False, "multi-pgbouncer failover has no configured databases")
        # check that we can connect to the pgbouncer servers
        blist = self.bouncer_list()
        if len(blist) == 0:
            return self.rd(False, "No pgbouncer servers defined")
        faillist = []
        for bserv in blist:
            if self.failed(self.run_as_root(bserv, self.conf["handyrep"]["test_ssh_command"])):
                faillist.append(bserv)
        # BUG FIX: was 'failist' -- a NameError whenever any SSH check failed
        if faillist:
            return self.rd(False, "cannot SSH to some pgbouncer servers: %s" % ','.join(faillist))
        return self.rd(True, "pgbouncer setup is correct")

    def poll(self, bouncerserver=None):
        """Check that one (or all) pgbouncers accept connections."""
        if bouncerserver:
            blist = [bouncerserver, ]
        else:
            blist = self.bouncer_list()
        if len(blist) == 0:
            return self.rd(False, "No pgbouncer servers defined")
        faillist = []
        for bserv in blist:
            try:
                pgbcn = self.connection(bserv)
            except:
                self.set_bouncer_status(bserv, "unavailable", 4, "pgbouncer does not accept connections")
                faillist.append(bserv)
            else:
                pgbcn.close()
                self.set_bouncer_status(bserv, "healthy", 1, "pgbouncer responding")
        if faillist:
            # report failure if any previously enabled bouncers are down
            return self.rd(False, "some pgbouncer servers are not responding: %s" % ','.join(faillist))
        else:
            return self.rd(True, "all pgbouncers responding")

    def dbconnect_list(self, master):
        """Build the database alias section for pgbouncer.

        Emits one alias line per database for the master, plus read-only
        aliases (readonly_suffix) pointing at replicas.
        """
        # build master string first
        myconf = self.conf["plugins"]["multi_pgbouncer"]
        dbconfig = {}
        if myconf["databases"]:
            dbconfig.update(myconf["databases"])
        if myconf["database_list"]:
            for dbname in myconf["database_list"]:
                dbconfig[dbname] = myconf["extra_connect_param"]
        # add in the handyrep db if the user has forgotten it
        # ('in' instead of the removed-in-py3 dict.has_key)
        if self.conf["handyrep"]["handyrep_db"] not in dbconfig:
            dbconfig[self.conf["handyrep"]["handyrep_db"]] = myconf["extra_connect_param"]
        constr = self.dbconnect_line(dbconfig, self.servers[master]["hostname"], self.servers[master]["port"], "")
        replicas = self.sorted_replicas()
        if self.is_true(myconf["all_replicas"]):
            # if we're doing all replicas, we need to put them in as
            # _ro1, _ro2, etc.
            # if there's no replicas, set ro1 to go to the master:
            if len(replicas) == 0 or (len(replicas) == 1 and master in replicas):
                rsuff = "%s%d" % (myconf["readonly_suffix"], 1,)
                constr += self.dbconnect_line(dbconfig, self.servers[master]["hostname"], self.servers[master]["port"], rsuff)
            else:
                # BUG FIX: repno was never initialized, causing an
                # UnboundLocalError on the first replica; start at 1 to match
                # the no-replica fallback suffix above
                repno = 1
                for rep in replicas:
                    if not rep == master:
                        rsuff = "%s%d" % (myconf["readonly_suffix"], repno,)
                        constr += self.dbconnect_line(dbconfig, self.servers[rep]["hostname"], self.servers[rep]["port"], rsuff)
                        repno += 1
        else:
            # only one readonly replica, setting it up with _ro
            if len(replicas) > 0:
                if replicas[0] == master:
                    # avoid the master
                    replicas.pop(0)
            if len(replicas) > 0:
                constr += self.dbconnect_line(dbconfig, self.servers[replicas[0]]["hostname"], self.servers[replicas[0]]["port"], myconf["readonly_suffix"])
            else:
                # if no replicas, read-only connections should go to the master
                constr += self.dbconnect_line(dbconfig, self.servers[master]["hostname"], self.servers[master]["port"], myconf["readonly_suffix"])
        return constr

    def dbconnect_line(self, database_list, hostname, portno, suffix):
        """Return pgbouncer alias lines for every database in database_list."""
        confout = ""
        for dbname, nex in database_list.items():
            confout += "%s%s = dbname=%s host=%s port=%s %s \n" % (dbname, suffix, dbname, hostname, portno, nex,)
        return confout
| # plugin method for failing over connections
# using pgbouncer
# rewrites the list of databases
# plugin for users running multiple pgbouncer servers
# requires that each pgbouncer server be in the servers dictionary
# as role "pgbouncer" and enabled.
# further, this plugin requires that the handyrep user, DB and password be set
# up on pgbouncer as a valid connection string.
from plugins.handyrepplugin import HandyRepPlugin
class multi_pgbouncer(HandyRepPlugin):
def run(self, newmaster=None):
# used for failover of all pgbouncer servers
if newmaster:
master = newmaster
else:
master = self.get_master_name()
blist = self.bouncer_list()
faillist = []
for bserv in blist:
bpush = self.push_config(bserv, master)
if self.failed(bpush):
self.set_bouncer_status(bserv, "unavailable", 4, "unable to reconfigure pgbouncer server for failover")
faillist.append(bserv)
if faillist:
# report failure if we couldn't reconfigure any of the servers
return self.rd(False, "some pgbouncer servers did not change their configuration at failover: %s" % ','.join(faillist))
else:
return self.rd(True, "pgbouncer failover successful")
def init(self, bouncerserver=None):
# used to initialize proxy servers with the correct connections
# either for just the supplied bouncer server, or for all of them
if bouncerserver:
blist = [bouncerserver,]
else:
blist = self.bouncer_list()
master = self.get_master_name()
faillist = []
for bserv in blist:
bpush = self.push_config(bserv, master)
# if we can't push a config, then add this bouncer server to the list
# of failed servers and mark it unavailable
if self.failed(bpush):
self.set_bouncer_status(bserv, "unavailable", 4, "unable to reconfigure pgbouncer server for failover")
faillist.append(bserv)
else:
try:
pgbcn = self.connection(bserv)
except:
self.set_bouncer_status(bserv, "unavailable", 4, "pgbouncer configured, but does not accept connections")
faillist.append(bserv)
else:
pgbcn.close()
self.set_bouncer_status(bserv, "healthy", 1, "pgbouncer initialized")
if faillist:
# report failure if we couldn't reconfigure any of the servers
return self.rd(False, "some pgbouncer servers could not be initialized: %s" % ','.join(faillist))
else:
return self.rd(True, "pgbouncer initialization successful")
def set_bouncer_status(self, bouncerserver, status, status_no, status_message):
self.servers[bouncerserver]["status"] = status
self.servers[bouncerserver]["status_no"] = status_no
self.servers[bouncerserver]["status_message"] = status_message
self.servers[bouncerserver]["status_ts"] = self.now_string()
return
def push_config(self, bouncerserver, newmaster=None):
# pushes a new config to the named pgbouncer server
# and restarts it
if newmaster:
master = newmaster
else:
master = self.get_master_name()
# get configuration
dbsect = { "dbsection" : self.dbconnect_list(master), "port" : self.servers[bouncerserver]["port"] }
# push new config
myconf = self.conf["plugins"]["multi_pgbouncer"]
writeconf = self.push_template(bouncerserver,myconf["template"],myconf["config_location"],dbsect,myconf["owner"])
if self.failed(writeconf):
return self.rd(False, "could not push new pgbouncer configuration to pgbouncer server")
# restart pgbouncer
restart_command = "%s -u %s -d -R %s" % (myconf["pgbouncerbin"],myconf["owner"],myconf["config_location"],)
rsbouncer = self.run_as_root(bouncerserver,[restart_command,])
if self.succeeded(rsbouncer):
return self.rd(True, "pgbouncer configuration updated")
else:
return self.rd(False, "unable to restart pgbouncer")
def bouncer_list(self):
# gets a list of currently enabled pgbouncers
blist = []
for serv, servdeets in self.servers.iteritems():
if servdeets["role"] == "pgbouncer" and servdeets["enabled"]:
blist.append(serv)
return blist
def test(self):
#check that we have all config variables required
if self.failed( self.test_plugin_conf("multi_pgbouncer","pgbouncerbin","template","owner","config_location","readonly_suffix","all_replicas")):
return self.rd(False, "multi-pgbouncer failover is not configured" )
if self.failed( self.test_plugin_conf("multi_pgbouncer","database_list") or self.test_plugin_conf("multi_pgbouncer","databases")):
return self.rd(False, "multi-pgbouncer failover has no configured databases" )
#check that we can connect to the pgbouncer servers
blist = self.bouncer_list()
if len(blist) == 0:
return self.rd(False, "No pgbouncer servers defined")
faillist = []
for bserv in blist:
if self.failed(self.run_as_root(bserv,self.conf["handyrep"]["test_ssh_command"])):
faillist.append(bserv)
if failist:
return self.rd(False, "cannot SSH to some pgbouncer servers: %s" % ','.join(faillist))
return self.rd(True, "pgbouncer setup is correct")
def poll(self, bouncerserver=None):
if bouncerserver:
blist = [bouncerserver,]
else:
blist = self.bouncer_list()
if len(blist) == 0:
return self.rd(False, "No pgbouncer servers defined")
faillist = []
for bserv in blist:
try:
pgbcn = self.connection(bserv)
except:
self.set_bouncer_status(bserv, "unavailable", 4, "pgbouncer does not accept connections")
faillist.append(bserv)
else:
pgbcn.close()
self.set_bouncer_status(bserv, "healthy", 1, "pgbouncer responding")
if faillist:
# report failure if any previously enabled bouncers are down
return self.rd(False, "some pgbouncer servers are not responding: %s" % ','.join(faillist))
else:
return self.rd(True, "all pgbouncers responding")
def dbconnect_list(self, master):
# creates the list of database aliases and target
# servers for pgbouncer
# build master string first
myconf = self.conf["plugins"]["multi_pgbouncer"]
dbconfig = {}
if myconf["databases"]:
dbconfig.update(myconf["databases"])
if myconf["database_list"]:
for dbname in myconf["database_list"]:
dbconfig[dbname] = myconf["extra_connect_param"]
# add in the handyrep db if the user has forgotten it
if not dbconfig.has_key(self.conf["handyrep"]["handyrep_db"]):
dbconfig[self.conf["handyrep"]["handyrep_db"]] = myconf["extra_connect_param"]
constr = self.dbconnect_line(dbconfig, self.servers[master]["hostname"], self.servers[master]["port"], "")
replicas = self.sorted_replicas()
if self.is_true(myconf["all_replicas"]):
#if we're doing all replicas, we need to put them in as _ro0, _ro1, etc.
# if there's no replicas, set ro1 to go to the master:
if len(replicas) == 0 or (len(replicas) == 1 and master in replicas):
rsuff = "%s%d" % (myconf["readonly_suffix"],1,)
constr += self.dbconnect_line(dbconfig, self.servers[master]["hostname"], self.servers[master]["port"], rsuff)
else:
for rep in replicas:
if not rep == master:
rsuff = "%s%d" % (myconf["readonly_suffix"],repno,)
constr += self.dbconnect_line(dbconfig, self.servers[rep]["hostname"], self.servers[rep]["port"], rsuff)
repno += 1
else:
# only one readonly replica, setting it up with _ro
if len(replicas) > 0:
if replicas[0] == master:
# avoid the master
replicas.pop(0)
if len(replicas) > 0:
constr += self.dbconnect_line(dbconfig, self.servers[replicas[0]]["hostname"], self.servers[replicas[0]]["port"], myconf["readonly_suffix"])
else:
# if no replicas, read-only connections should go to the master
constr += self.dbconnect_line(dbconfig, self.servers[master]["hostname"], self.servers[master]["port"], myconf["readonly_suffix"])
return constr
def dbconnect_line(self, database_list, hostname, portno, suffix):
confout = ""
for dbname,nex in database_list.items():
confout += "%s%s = dbname=%s host=%s port=%s %s \n" % (dbname, suffix, dbname, hostname, portno, nex,)
return confout | en | 0.867269 | # plugin method for failing over connections # using pgbouncer # rewrites the list of databases # plugin for users running multiple pgbouncer servers # requires that each pgbouncer server be in the servers dictionary # as role "pgbouncer" and enabled. # further, this plugin requires that the handyrep user, DB and password be set # up on pgbouncer as a valid connection string. # used for failover of all pgbouncer servers # report failure if we couldn't reconfigure any of the servers # used to initialize proxy servers with the correct connections # either for just the supplied bouncer server, or for all of them # if we can't push a config, then add this bouncer server to the list # of failed servers and mark it unavailable # report failure if we couldn't reconfigure any of the servers # pushes a new config to the named pgbouncer server # and restarts it # get configuration # push new config # restart pgbouncer # gets a list of currently enabled pgbouncers #check that we have all config variables required #check that we can connect to the pgbouncer servers # report failure if any previously enabled bouncers are down # creates the list of database aliases and target # servers for pgbouncer # build master string first # add in the handyrep db if the user has forgotten it #if we're doing all replicas, we need to put them in as _ro0, _ro1, etc. # if there's no replicas, set ro1 to go to the master: # only one readonly replica, setting it up with _ro # avoid the master # if no replicas, read-only connections should go to the master | 2.541012 | 3 |
script/lib/get_next_obs.py | cyberphantom/Selfie-Drone-Stick | 2 | 6622854 | # ROS import
import future
import rospy
import rospkg
from geometry_msgs.msg import Twist, Vector3Stamped, Pose, PoseWithCovarianceStamped
def nextObs():
    # Block until a Pose is published on /obs/pos, then return its
    # orientation quaternion as [x, y, z, w]; an all-zero quaternion is
    # treated as "no observation" and yields an empty list.
    next_st = None
    n_s = []
    while next_st is None:
        # NOTE(review): rospy.wait_for_message raises on timeout rather than
        # returning None -- confirm this loop's retry behavior is intended.
        next_st = rospy.wait_for_message('/obs/pos', Pose, timeout=1)
    if next_st.orientation.x == 0 and next_st.orientation.y == 0 and next_st.orientation.z == 0 and next_st.orientation.w == 0:
        n_s = []
    else:
        n_s = [next_st.orientation.x, next_st.orientation.y, next_st.orientation.z, next_st.orientation.w]
    return n_s
import future
import rospy
import rospkg
from geometry_msgs.msg import Twist, Vector3Stamped, Pose, PoseWithCovarianceStamped
def nextObs():
    # Block until a Pose is published on /obs/pos, then return its
    # orientation quaternion as [x, y, z, w]; an all-zero quaternion is
    # treated as "no observation" and yields an empty list.
    next_st = None
    n_s = []
    while next_st is None:
        # NOTE(review): rospy.wait_for_message raises on timeout rather than
        # returning None -- confirm this loop's retry behavior is intended.
        next_st = rospy.wait_for_message('/obs/pos', Pose, timeout=1)
    if next_st.orientation.x == 0 and next_st.orientation.y == 0 and next_st.orientation.z == 0 and next_st.orientation.w == 0:
        n_s = []
    else:
        n_s = [next_st.orientation.x, next_st.orientation.y, next_st.orientation.z, next_st.orientation.w]
return n_s | es | 0.271199 | # ROS import | 2.302258 | 2 |
Lander/Timer.py | IrvKalb/pygwidgetsExamples | 1 | 6622855 | # CountDownTimer class
# <NAME> 4/16
import time
class CountUpTimer(object):
    """Stopwatch-style timer that counts seconds upward from mStart()."""

    def __init__(self):
        self.running = False          # True between mStart() and mStop()
        self.savedSecondsElapsed = 0  # elapsed time frozen by mStop()

    def mStart(self):
        """Begin (or restart) timing from the current moment."""
        self.secondsStart = time.time()
        self.running = True

    def mGetTime(self):
        """Return elapsed seconds: live while running, frozen after mStop()."""
        if not self.running:
            return self.savedSecondsElapsed
        return time.time() - self.secondsStart

    def mGetTimeInSeconds(self):
        """Return the elapsed time truncated to a whole number of seconds."""
        return int(self.mGetTime())

    def mGetTimeFloat(self, nDigits=2):
        """Return the elapsed time rounded to nDigits decimal places."""
        return round(self.mGetTime(), nDigits)

    def mStop(self):
        """Freeze the elapsed time and stop the timer."""
        self.running = False
        self.savedSecondsElapsed = time.time() - self.secondsStart
#############################################
class CountDownTimer(object):
    """Timer that counts down from nStartingSeconds toward zero."""

    def __init__(self, nStartingSeconds):
        self.running = False             # True between mStart() and mStop()
        self.secondsSavedRemaining = 0   # remaining time frozen by mStop()
        self.nStartingSeconds = nStartingSeconds

    def mStart(self):
        """Begin (or restart) the countdown from nStartingSeconds."""
        self.secondsEnd = time.time() + self.nStartingSeconds
        self.running = True

    def mGetTime(self):
        """Return seconds remaining: live while running, frozen after mStop()."""
        if not self.running:
            return self.secondsSavedRemaining
        return self.secondsEnd - time.time()

    def mGetTimeInSeconds(self):
        """Return the remaining time truncated to a whole number of seconds."""
        return int(self.mGetTime())

    def mStop(self):
        """Freeze the remaining time and stop the countdown."""
        self.running = False
        self.secondsSavedRemaining = self.secondsEnd - time.time()
| # CountDownTimer class
# <NAME> 4/16
import time
class CountUpTimer(object):
    """Stopwatch-style timer that counts seconds upward from mStart()."""

    def __init__(self):
        self.running = False          # True between mStart() and mStop()
        self.savedSecondsElapsed = 0  # elapsed time frozen by mStop()

    def mStart(self):
        """Begin (or restart) timing from the current moment."""
        self.secondsStart = time.time()
        self.running = True

    def mGetTime(self):
        """Return elapsed seconds: live while running, frozen after mStop()."""
        if not self.running:
            return self.savedSecondsElapsed
        return time.time() - self.secondsStart

    def mGetTimeInSeconds(self):
        """Return the elapsed time truncated to a whole number of seconds."""
        return int(self.mGetTime())

    def mGetTimeFloat(self, nDigits=2):
        """Return the elapsed time rounded to nDigits decimal places."""
        return round(self.mGetTime(), nDigits)

    def mStop(self):
        """Freeze the elapsed time and stop the timer."""
        self.running = False
        self.savedSecondsElapsed = time.time() - self.secondsStart
#############################################
class CountDownTimer(object):
    """Timer that counts down from nStartingSeconds toward zero."""

    def __init__(self, nStartingSeconds):
        self.running = False             # True between mStart() and mStop()
        self.secondsSavedRemaining = 0   # remaining time frozen by mStop()
        self.nStartingSeconds = nStartingSeconds

    def mStart(self):
        """Begin (or restart) the countdown from nStartingSeconds."""
        self.secondsEnd = time.time() + self.nStartingSeconds
        self.running = True

    def mGetTime(self):
        """Return seconds remaining: live while running, frozen after mStop()."""
        if not self.running:
            return self.secondsSavedRemaining
        return self.secondsEnd - time.time()

    def mGetTimeInSeconds(self):
        """Return the remaining time truncated to a whole number of seconds."""
        return int(self.mGetTime())

    def mStop(self):
        """Freeze the remaining time and stop the countdown."""
        self.running = False
        self.secondsSavedRemaining = self.secondsEnd - time.time()
| de | 0.206928 | # CountDownTimer class # <NAME> 4/16 # get the current seconds, and save it away #print 'time start:', self.secondsStart ############################################# | 3.531291 | 4 |
tests/test_output.py | sander76/consoler | 0 | 6622856 | import logging
import pytest
from pyconsoler.output import print_waiting_countdown, prt
_LOGGER = logging.getLogger(__name__)
@pytest.mark.asyncio
async def test_print_waiting_countdown():
    # Smoke test: a 4-step countdown should complete without raising.
    await print_waiting_countdown(4)
@pytest.mark.parametrize("_input", [1, False, None, "test", {"test": 10}])
def test_prt_single(_input, capsys):
    # A scalar argument should print as exactly one line (splitting on the
    # trailing newline yields two items).
    prt(_input)
    _output = capsys.readouterr()
    lines = _output.out.split("\n")
    assert len(lines) == 2
    print(_output)
@pytest.mark.parametrize("_input", [(1, 2, 3), ["abc", 1, 2], ("abs", "abc")])
def test_prt_multiple(_input, capsys):
    # A sequence argument should span multiple printed lines.
    prt(_input)
    _output = capsys.readouterr()
    lines = _output.out.split("\n")
    assert len(lines) > 2
| import logging
import pytest
from pyconsoler.output import print_waiting_countdown, prt
_LOGGER = logging.getLogger(__name__)
@pytest.mark.asyncio
async def test_print_waiting_countdown():
    # Smoke test: a 4-step countdown should complete without raising.
    await print_waiting_countdown(4)
@pytest.mark.parametrize("_input", [1, False, None, "test", {"test": 10}])
def test_prt_single(_input, capsys):
    # A scalar argument should print as exactly one line (splitting on the
    # trailing newline yields two items).
    prt(_input)
    _output = capsys.readouterr()
    lines = _output.out.split("\n")
    assert len(lines) == 2
    print(_output)
@pytest.mark.parametrize("_input", [(1, 2, 3), ["abc", 1, 2], ("abs", "abc")])
def test_prt_multiple(_input, capsys):
    # A sequence argument should span multiple printed lines.
    prt(_input)
    _output = capsys.readouterr()
    lines = _output.out.split("\n")
    assert len(lines) > 2
| none | 1 | 2.560927 | 3 | |
genetic_algo/individual.py | TheRun98/genetic-algo | 3 | 6622857 | import random
# Probability thresholds used by Individual.inherit_gene:
# <= SEX_PARAM[0] -> mother's gene, <= SEX_PARAM[1] -> father's gene,
# <= SEX_PARAM[2] -> average of both, otherwise a fresh random gene.
SEX_PARAM = [0.25, 0.5, .9]


class Individual:
    """Represents one individual in the genetic algorithm.

    Attributes:
        genes (list): 16 floats in [0, 1] representing the individual's
            characteristics
        fit_func (callable): function used to assess fitness of the individual
        fitness: cached fitness value so it doesn't have to be re-evaluated
            every time it is used; None until assess_fit() runs
    """

    def __init__(self, genes, fit_func):
        """
        Args:
            genes (list): 16 floats for this individual's characteristics, or
                None to generate them uniformly at random in [0, 1]
            fit_func (callable): function for assessing fitness
        """
        if genes is None:
            self.genes = [random.uniform(0, 1) for _ in range(16)]
        else:
            self.genes = genes
        self.fit_func = fit_func
        self.fitness = None

    def assess_fit(self):
        """Assess fitness with self.fit_func, cache it in self.fitness,
        and return it."""
        self.fitness = self.fit_func(self.genes)
        return self.fitness

    def reproduce(self, other):
        """Create a child whose genes are drawn from self and other.

        Args:
            other (Individual): mate with which self has a child

        Returns:
            Individual: child individual with parents self and other
        """
        child = Individual(None, self.fit_func)
        for i in range(16):
            child.inherit_gene(i, self, other)
        return child

    def inherit_gene(self, index, mother, father):
        """Set self.genes[index] from the parents per SEX_PARAM.

        Args:
            index (int): gene index to set
            mother (Individual): first parent
            father (Individual): second parent

        Side Effects:
            Assigns a new value to self.genes[index].
        """
        rand = random.uniform(0, 1)
        if rand <= SEX_PARAM[0]:
            self.genes[index] = mother.genes[index]  # inherit from mother
        elif SEX_PARAM[0] < rand <= SEX_PARAM[1]:
            self.genes[index] = father.genes[index]  # inherit from father
        elif SEX_PARAM[1] < rand <= SEX_PARAM[2]:
            # average of both parents' genes
            self.genes[index] = self.mean([father.genes[index], mother.genes[index]])
        else:
            # fresh random gene (mutation)
            self.genes[index] = random.uniform(0, 1)

    def mean(self, lst):
        """Return the arithmetic mean of a list of numbers.

        Args:
            lst (list): non-empty list of numbers

        Returns:
            the mean of the provided list

        Raises:
            ZeroDivisionError: if lst is empty (same as the original code).
        """
        # Use the builtin sum() instead of a manual loop whose accumulator
        # shadowed the builtin name 'sum'.
        return sum(lst) / len(lst)

    def __str__(self):
        return str(self.fitness) + ": " + str(self.genes)
| import random
# Probability thresholds used by Individual.inherit_gene:
# <= SEX_PARAM[0] -> mother's gene, <= SEX_PARAM[1] -> father's gene,
# <= SEX_PARAM[2] -> average of both, otherwise a fresh random gene.
SEX_PARAM = [0.25, 0.5, .9]


class Individual:
    """Represents one individual in the genetic algorithm.

    Attributes:
        genes (list): 16 floats in [0, 1] representing the individual's
            characteristics
        fit_func (callable): function used to assess fitness of the individual
        fitness: cached fitness value so it doesn't have to be re-evaluated
            every time it is used; None until assess_fit() runs
    """

    def __init__(self, genes, fit_func):
        """
        Args:
            genes (list): 16 floats for this individual's characteristics, or
                None to generate them uniformly at random in [0, 1]
            fit_func (callable): function for assessing fitness
        """
        if genes is None:
            self.genes = [random.uniform(0, 1) for _ in range(16)]
        else:
            self.genes = genes
        self.fit_func = fit_func
        self.fitness = None

    def assess_fit(self):
        """Assess fitness with self.fit_func, cache it in self.fitness,
        and return it."""
        self.fitness = self.fit_func(self.genes)
        return self.fitness

    def reproduce(self, other):
        """Create a child whose genes are drawn from self and other.

        Args:
            other (Individual): mate with which self has a child

        Returns:
            Individual: child individual with parents self and other
        """
        child = Individual(None, self.fit_func)
        for i in range(16):
            child.inherit_gene(i, self, other)
        return child

    def inherit_gene(self, index, mother, father):
        """Set self.genes[index] from the parents per SEX_PARAM.

        Args:
            index (int): gene index to set
            mother (Individual): first parent
            father (Individual): second parent

        Side Effects:
            Assigns a new value to self.genes[index].
        """
        rand = random.uniform(0, 1)
        if rand <= SEX_PARAM[0]:
            self.genes[index] = mother.genes[index]  # inherit from mother
        elif SEX_PARAM[0] < rand <= SEX_PARAM[1]:
            self.genes[index] = father.genes[index]  # inherit from father
        elif SEX_PARAM[1] < rand <= SEX_PARAM[2]:
            # average of both parents' genes
            self.genes[index] = self.mean([father.genes[index], mother.genes[index]])
        else:
            # fresh random gene (mutation)
            self.genes[index] = random.uniform(0, 1)

    def mean(self, lst):
        """Return the arithmetic mean of a list of numbers.

        Args:
            lst (list): non-empty list of numbers

        Returns:
            the mean of the provided list

        Raises:
            ZeroDivisionError: if lst is empty (same as the original code).
        """
        # Use the builtin sum() instead of a manual loop whose accumulator
        # shadowed the builtin name 'sum'.
        return sum(lst) / len(lst)

    def __str__(self):
        return str(self.fitness) + ": " + str(self.genes)
| en | 0.786134 | Class that represents an individual Attributes: genes(array): array of 16 floats representing the individual's characteristics fit_func(func): function used to assess fitness of the individual fitness(float): a float representing the individual's fitness, stored so it doesn't have to be evaluated every time the value is used, default None (Ben w/ Charlie) Args: genes(array): an array of 16 floats representing the individual's characteristics fit_func(func): function for assessing fitness Driver: Charlie | Navigator: Ben Assesses fitness based on self.fit_func and stores it it in self.fit Driver: Ben | Navigator: Charlie Creates child with parents 'self' and 'other'. Args: other (individual): Mate with which self has a child Returns: individual: child individual with parents self and other Driver: Charlie | Navigator: Ben Sex inherited gene by self.reproduce Args: index (int): index of genes mother (Individual): mother father (Individual): father Side Effects: Assigns new value to self.genes[index] Driver: Ben | Navigator: Charlie # inherit from mother # inherit from father # average of parents Finds the mean of a list of numbers Args: lst (list): list of numbers Return: (Int): the mean of the provided list Driver: Yazeed | Navigator: Ben | 3.632895 | 4 |
sdk/python/pulumi_google_native/dialogflow/v2/get_participant.py | AaronFriel/pulumi-google-native | 44 | 6622858 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetParticipantResult',
'AwaitableGetParticipantResult',
'get_participant',
'get_participant_output',
]
@pulumi.output_type
class GetParticipantResult:
def __init__(__self__, documents_metadata_filters=None, name=None, role=None, sip_recording_media_label=None):
if documents_metadata_filters and not isinstance(documents_metadata_filters, dict):
raise TypeError("Expected argument 'documents_metadata_filters' to be a dict")
pulumi.set(__self__, "documents_metadata_filters", documents_metadata_filters)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if role and not isinstance(role, str):
raise TypeError("Expected argument 'role' to be a str")
pulumi.set(__self__, "role", role)
if sip_recording_media_label and not isinstance(sip_recording_media_label, str):
raise TypeError("Expected argument 'sip_recording_media_label' to be a str")
pulumi.set(__self__, "sip_recording_media_label", sip_recording_media_label)
@property
@pulumi.getter(name="documentsMetadataFilters")
def documents_metadata_filters(self) -> Mapping[str, str]:
"""
Optional. Key-value filters on the metadata of documents returned by article suggestion. If specified, article suggestion only returns suggested documents that match all filters in their Document.metadata. Multiple values for a metadata key should be concatenated by comma. For example, filters to match all documents that have 'US' or 'CA' in their market metadata values and 'agent' in their user metadata values will be ``` documents_metadata_filters { key: "market" value: "US,CA" } documents_metadata_filters { key: "user" value: "agent" } ```
"""
return pulumi.get(self, "documents_metadata_filters")
@property
@pulumi.getter
def name(self) -> str:
"""
Optional. The unique identifier of this participant. Format: `projects//locations//conversations//participants/`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def role(self) -> str:
"""
Immutable. The role this participant plays in the conversation. This field must be set during participant creation and is then immutable.
"""
return pulumi.get(self, "role")
@property
@pulumi.getter(name="sipRecordingMediaLabel")
def sip_recording_media_label(self) -> str:
"""
Optional. Label applied to streams representing this participant in SIPREC XML metadata and SDP. This is used to assign transcriptions from that media stream to this participant. This field can be updated.
"""
return pulumi.get(self, "sip_recording_media_label")
class AwaitableGetParticipantResult(GetParticipantResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetParticipantResult(
documents_metadata_filters=self.documents_metadata_filters,
name=self.name,
role=self.role,
sip_recording_media_label=self.sip_recording_media_label)
def get_participant(conversation_id: Optional[str] = None,
location: Optional[str] = None,
participant_id: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetParticipantResult:
"""
Retrieves a conversation participant.
"""
__args__ = dict()
__args__['conversationId'] = conversation_id
__args__['location'] = location
__args__['participantId'] = participant_id
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:dialogflow/v2:getParticipant', __args__, opts=opts, typ=GetParticipantResult).value
return AwaitableGetParticipantResult(
documents_metadata_filters=__ret__.documents_metadata_filters,
name=__ret__.name,
role=__ret__.role,
sip_recording_media_label=__ret__.sip_recording_media_label)
@_utilities.lift_output_func(get_participant)
def get_participant_output(conversation_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
participant_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetParticipantResult]:
"""
Retrieves a conversation participant.
"""
...
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetParticipantResult',
'AwaitableGetParticipantResult',
'get_participant',
'get_participant_output',
]
@pulumi.output_type
class GetParticipantResult:
def __init__(__self__, documents_metadata_filters=None, name=None, role=None, sip_recording_media_label=None):
if documents_metadata_filters and not isinstance(documents_metadata_filters, dict):
raise TypeError("Expected argument 'documents_metadata_filters' to be a dict")
pulumi.set(__self__, "documents_metadata_filters", documents_metadata_filters)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if role and not isinstance(role, str):
raise TypeError("Expected argument 'role' to be a str")
pulumi.set(__self__, "role", role)
if sip_recording_media_label and not isinstance(sip_recording_media_label, str):
raise TypeError("Expected argument 'sip_recording_media_label' to be a str")
pulumi.set(__self__, "sip_recording_media_label", sip_recording_media_label)
@property
@pulumi.getter(name="documentsMetadataFilters")
def documents_metadata_filters(self) -> Mapping[str, str]:
"""
Optional. Key-value filters on the metadata of documents returned by article suggestion. If specified, article suggestion only returns suggested documents that match all filters in their Document.metadata. Multiple values for a metadata key should be concatenated by comma. For example, filters to match all documents that have 'US' or 'CA' in their market metadata values and 'agent' in their user metadata values will be ``` documents_metadata_filters { key: "market" value: "US,CA" } documents_metadata_filters { key: "user" value: "agent" } ```
"""
return pulumi.get(self, "documents_metadata_filters")
@property
@pulumi.getter
def name(self) -> str:
"""
Optional. The unique identifier of this participant. Format: `projects//locations//conversations//participants/`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def role(self) -> str:
"""
Immutable. The role this participant plays in the conversation. This field must be set during participant creation and is then immutable.
"""
return pulumi.get(self, "role")
@property
@pulumi.getter(name="sipRecordingMediaLabel")
def sip_recording_media_label(self) -> str:
"""
Optional. Label applied to streams representing this participant in SIPREC XML metadata and SDP. This is used to assign transcriptions from that media stream to this participant. This field can be updated.
"""
return pulumi.get(self, "sip_recording_media_label")
class AwaitableGetParticipantResult(GetParticipantResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetParticipantResult(
documents_metadata_filters=self.documents_metadata_filters,
name=self.name,
role=self.role,
sip_recording_media_label=self.sip_recording_media_label)
def get_participant(conversation_id: Optional[str] = None,
location: Optional[str] = None,
participant_id: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetParticipantResult:
"""
Retrieves a conversation participant.
"""
__args__ = dict()
__args__['conversationId'] = conversation_id
__args__['location'] = location
__args__['participantId'] = participant_id
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:dialogflow/v2:getParticipant', __args__, opts=opts, typ=GetParticipantResult).value
return AwaitableGetParticipantResult(
documents_metadata_filters=__ret__.documents_metadata_filters,
name=__ret__.name,
role=__ret__.role,
sip_recording_media_label=__ret__.sip_recording_media_label)
@_utilities.lift_output_func(get_participant)
def get_participant_output(conversation_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
participant_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetParticipantResult]:
"""
Retrieves a conversation participant.
"""
...
| en | 0.816806 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** Optional. Key-value filters on the metadata of documents returned by article suggestion. If specified, article suggestion only returns suggested documents that match all filters in their Document.metadata. Multiple values for a metadata key should be concatenated by comma. For example, filters to match all documents that have 'US' or 'CA' in their market metadata values and 'agent' in their user metadata values will be ``` documents_metadata_filters { key: "market" value: "US,CA" } documents_metadata_filters { key: "user" value: "agent" } ``` Optional. The unique identifier of this participant. Format: `projects//locations//conversations//participants/`. Immutable. The role this participant plays in the conversation. This field must be set during participant creation and is then immutable. Optional. Label applied to streams representing this participant in SIPREC XML metadata and SDP. This is used to assign transcriptions from that media stream to this participant. This field can be updated. # pylint: disable=using-constant-test Retrieves a conversation participant. Retrieves a conversation participant. | 1.61694 | 2 |
lintcode/Depth First Search & Backtracking/1353. Sum Root to Leaf Numbers.py | yanshengjia/algorithm | 23 | 6622859 | """
Description:
Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.
An example is the root-to-leaf path 1->2->3 which represents the number 123.
Find the total sum of all root-to-leaf numbers.
Example:
Input: {1,2,3}
1
/ \
2 3
Output: 25
Explanation:
The root-to-leaf path 1->2 represents the number 12.
The root-to-leaf path 1->3 represents the number 13.
Therefore, sum = 12 + 13 = 25.
Solution:
DFS
用一个变量 prev 来存储之前遍历到的路径上的和
sum = root.val + prev * 10
"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
import copy
class Solution:
"""
@param root: the root of the tree
@return: the total sum of all root-to-leaf numbers
"""
def sumNumbers(self, root):
# write your code here
res = 0
return self.dfs(root, res)
def dfs(self, root, prev):
if root == None:
return 0
sum = root.val + prev * 10
if root.left == None and root.right == None:
return sum
return self.dfs(root.left, sum) + self.dfs(root.right, sum)
| """
Description:
Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.
An example is the root-to-leaf path 1->2->3 which represents the number 123.
Find the total sum of all root-to-leaf numbers.
Example:
Input: {1,2,3}
1
/ \
2 3
Output: 25
Explanation:
The root-to-leaf path 1->2 represents the number 12.
The root-to-leaf path 1->3 represents the number 13.
Therefore, sum = 12 + 13 = 25.
Solution:
DFS
用一个变量 prev 来存储之前遍历到的路径上的和
sum = root.val + prev * 10
"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
import copy
class Solution:
"""
@param root: the root of the tree
@return: the total sum of all root-to-leaf numbers
"""
def sumNumbers(self, root):
# write your code here
res = 0
return self.dfs(root, res)
def dfs(self, root, prev):
if root == None:
return 0
sum = root.val + prev * 10
if root.left == None and root.right == None:
return sum
return self.dfs(root.left, sum) + self.dfs(root.right, sum)
| en | 0.704434 | Description: Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number. An example is the root-to-leaf path 1->2->3 which represents the number 123. Find the total sum of all root-to-leaf numbers. Example: Input: {1,2,3} 1 / \ 2 3 Output: 25 Explanation: The root-to-leaf path 1->2 represents the number 12. The root-to-leaf path 1->3 represents the number 13. Therefore, sum = 12 + 13 = 25. Solution: DFS 用一个变量 prev 来存储之前遍历到的路径上的和 sum = root.val + prev * 10 Definition of TreeNode: class TreeNode: def __init__(self, val): self.val = val self.left, self.right = None, None @param root: the root of the tree @return: the total sum of all root-to-leaf numbers # write your code here | 4.035043 | 4 |
freshdesk/python-connectors/freshdesk-users-simple/connector.py | gbetegon88/dataiku-contrib | 93 | 6622860 | from dataiku.connector import Connector
from freshdesk_utils import FreshdeskConnector
class FreshDeskUsersConnector(FreshdeskConnector, Connector):
def __init__(self, config, plugin_config):
Connector.__init__(self, config, plugin_config)
FreshdeskConnector.__init__(self, config, plugin_config)
self.path = '/contacts.json?page='
def extract_json_subelement(self,user):
return user['user']
| from dataiku.connector import Connector
from freshdesk_utils import FreshdeskConnector
class FreshDeskUsersConnector(FreshdeskConnector, Connector):
def __init__(self, config, plugin_config):
Connector.__init__(self, config, plugin_config)
FreshdeskConnector.__init__(self, config, plugin_config)
self.path = '/contacts.json?page='
def extract_json_subelement(self,user):
return user['user']
| none | 1 | 2.142267 | 2 | |
asist_nsf_2018/process_level1.py | sustain-lab/asist-nsf-2018 | 1 | 6622861 | """
process_level1.py
"""
from asist.irgason import read_irgason_from_toa5
from asist.hotfilm import read_hotfilm_from_lvm
from asist.pressure import read_pressure_from_toa5
from asist.pitot import pitot_velocity
from asist_nsf_2018.experiments import experiments
from datetime import datetime, timedelta
import glob
from matplotlib.dates import date2num, num2date
from netCDF4 import Dataset
import numpy as np
import os
def remove_drift(p, t):
return p - (p[-1] - p[0]) * (t - t[0]) / (t[-1] - t[0])
def get_data_path(data_name):
"""Gets the data path from the env variable."""
assert data_name in ['LEG', 'HOTFILM', 'IRGASON', 'PRESSURE'],\
data_name + ' is not available'
try:
return os.environ[data_name + '_DATA_PATH']
except KeyError:
raise KeyError('Set ' + data_name + '_DATA_PATH env variable to the path with ' + data_name + ' data')
def get_experiment_time_series(time, data, exp):
"""Returns time and data slice between
experiment start and end times."""
t0, t1 = exp.runs[0].start_time, exp.runs[-1].end_time
mask = (time >= t0) & (time <= t1)
return time[mask], data[mask]
def process_dp_to_level2():
"""Processes pressure gradient into a NetCDF file."""
PRESSURE_DATA_PATH = get_data_path('PRESSURE')
exp_name = 'asist-christian-shadowgraph'
exp = experiments[exp_name]
files = glob.glob(PRESSURE_DATA_PATH + '/TOA5_SUSTAINpresX4X2.pressure_229*.dat')
files.sort()
time, dp1, dp2 = read_pressure_from_toa5(files)
# remove offset from pressure
t0 = exp.runs[0].start_time
t1 = exp.runs[0].end_time - timedelta(seconds=60)
mask = (time >= t0) & (time <= t1)
dp2_offset = np.mean(dp2[mask])
for run in exp.runs:
run_mask = (time >= run.start_time) & (time <= run.end_time)
dp2[run_mask] -= dp2_offset
time, dp = get_experiment_time_series(time, dp2, exp)
# fan frequency
fan = np.zeros(time.size)
for run in exp.runs:
run_mask = (time >= run.start_time) & (time <= run.end_time)
fan[run_mask] = run.fan
print('Writing ' + ncfile)
# distance between air pressure ports
# 14 panels, 0.77 m each, minus 2 cm on each end
dx = 14 * 0.77 - 0.04
seconds = (date2num(time) - int(date2num(t0))) * 86400
ncfile = 'air-pressure_' + exp_name + '.nc'
with Dataset(ncfile, 'w', format='NETCDF4') as nc:
nc.createDimension('Time', size=0)
var = nc.createVariable('Time', 'f8', dimensions=('Time'))
var[:] = seconds
var.setncattr('name', 'Time in seconds of the day')
var.setncattr('units', 's')
var.setncattr('origin', t0.strftime('%Y-%m-%dT%H:%M:%S'))
var.setncattr('dx', dx)
var = nc.createVariable('fan', 'f4', dimensions=('Time'))
var[:] = fan
var.setncattr('name', 'Fan frequency')
var.setncattr('units', 'Hz')
var = nc.createVariable('dp', 'f4', dimensions=('Time'))
var[:] = dp
var.setncattr('name', 'Along-tank air pressure difference')
var.setncattr('units', 'Pa')
var = nc.createVariable('dpdx', 'f4', dimensions=('Time'))
var[:] = dp / dx
var.setncattr('name', 'Along-tank air pressure gradient')
var.setncattr('units', 'Pa / m')
def process_hotfilm_to_level2():
"""Processes Hot film Labview files into NetCDF."""
HOTFILM_DATA_PATH = get_data_path('HOTFILM')
experiments_to_process = [
'asist-windonly-fresh_warmup',
'asist-windonly-fresh',
'asist-windonly-salt'
]
for exp_name in experiments_to_process:
exp = experiments[exp_name]
filename = HOTFILM_DATA_PATH + '/hot_film_'\
+ exp.runs[0].start_time.strftime('%Y%m%d') + '.lvm'
if exp_name == 'asist-windonly-fresh_warmup':
start_time, seconds, ch1, ch2 = read_hotfilm_from_lvm(filename, dt=2e-3)
else:
start_time, seconds, ch1, ch2 = read_hotfilm_from_lvm(filename, dt=1e-3)
origin = datetime(start_time.year, start_time.month, start_time.day)
seconds = np.array(seconds) + (start_time - origin).total_seconds()
ch1 = np.array(ch1)
ch2 = np.array(ch2)
t0 = date2num(exp.runs[0].start_time)
t1 = date2num(exp.runs[-1].end_time)
t0 = (t0 - int(t0)) * 86400
t1 = (t1 - int(t1)) * 86400
mask = (seconds >= t0) & (seconds <= t1)
exp_seconds = seconds[mask]
# fan frequency
fan = np.zeros(exp_seconds.size)
for run in exp.runs:
run_mask = (exp_seconds >= t0) & (exp_seconds <= t1)
fan[run_mask] = run.fan
ncfile = 'hotfilm_' + exp_name + '.nc'
print('Writing ' + ncfile)
nc = Dataset(ncfile, 'w', format='NETCDF4')
nc.createDimension('Time', size=0)
var = nc.createVariable('Time', 'f8', dimensions=('Time'))
var[:] = exp_seconds
var.setncattr('name', 'Time in seconds of the day')
var.setncattr('units', 's')
var.setncattr('origin', origin.strftime('%Y-%m-%dT%H:%M:%S'))
var = nc.createVariable('fan', 'f4', dimensions=('Time'))
var[:] = fan
var.setncattr('name', 'Fan frequency')
var.setncattr('units', 'Hz')
var = nc.createVariable('ch1', 'f4', dimensions=('Time'))
var[:] = ch1[mask]
var.setncattr('name', 'Channel 1 voltage')
var.setncattr('units', 'V')
var = nc.createVariable('ch2', 'f4', dimensions=('Time'))
var[:] = ch2[mask]
var.setncattr('name', 'Channel 2 voltage')
var.setncattr('units', 'V')
nc.close()
def process_irgason_to_level2():
"""Processes IRGASON TOA5 files into NetCDF."""
IRGASON_DATA_PATH = get_data_path('IRGASON')
experiments_to_process = [
'asist-windonly-fresh_warmup',
'asist-windonly-fresh',
'asist-wind-swell-fresh',
'asist-windonly-salt',
'asist-wind-swell-salt',
'asist-flow-distortion'
]
files = glob.glob(IRGASON_DATA_PATH + '/TOA5*.dat')
files.sort()
time, u, v, w, Ts, Tc, Pc, RH = read_irgason_from_toa5(files)
for exp_name in experiments_to_process:
exp = experiments[exp_name]
t0 = exp.runs[0].start_time
t1 = exp.runs[-1].end_time
mask = (time >= t0) & (time <= t1)
exp_time = time[mask]
# time in seconds of the day; save origin in nc attribute
exp_seconds = (date2num(exp_time) - int(date2num(t0))) * 86400
# fan frequency
fan = np.zeros(exp_time.size)
for run in exp.runs:
run_mask = (exp_time >= run.start_time) & (exp_time <= run.end_time)
fan[run_mask] = run.fan
# status flag (0: good; 1: fan spin-up; 2: bad)
flag = np.zeros(exp_time.size)
for run in exp.runs:
run_mask = (exp_time >= run.start_time) & (exp_time < run.start_time + timedelta(seconds=60))
flag[run_mask] = 1
ncfile = 'irgason_' + exp_name + '.nc'
print('Writing ' + ncfile)
nc = Dataset(ncfile, 'w', format='NETCDF4')
nc.createDimension('Time', size=0)
var = nc.createVariable('Time', 'f8', dimensions=('Time'))
var[:] = exp_seconds
var.setncattr('name', 'Time in seconds of the day')
var.setncattr('units', 's')
var.setncattr('origin', num2date(int(date2num(t0))).strftime('%Y-%m-%dT%H:%M:%S'))
var = nc.createVariable('flag', 'i4', dimensions=('Time'))
var[:] = flag
var.setncattr('name', 'Status flag')
var.setncattr('description', '0: good; 1: fan spin-up; 2: bad')
var.setncattr('units', '')
var = nc.createVariable('fan', 'f4', dimensions=('Time'))
var[:] = fan
var.setncattr('name', 'Fan frequency')
var.setncattr('units', 'Hz')
var = nc.createVariable('u', 'f4', dimensions=('Time'))
var[:] = u[mask]
var.setncattr('name', 'x- component of velocity')
var.setncattr('units', 'm/s')
var = nc.createVariable('v', 'f4', dimensions=('Time'))
var[:] = v[mask]
var.setncattr('name', 'y- component of velocity')
var.setncattr('units', 'm/s')
var = nc.createVariable('w', 'f4', dimensions=('Time'))
var[:] = w[mask]
var.setncattr('name', 'z- component of velocity')
var.setncattr('units', 'm/s')
var = nc.createVariable('Ts', 'f4', dimensions=('Time'))
var[:] = Ts[mask]
var.setncattr('name', 'Sonic temperature')
var.setncattr('units', 'deg. C')
var = nc.createVariable('Tc', 'f4', dimensions=('Time'))
var[:] = Tc[mask]
var.setncattr('name', 'Cell temperature')
var.setncattr('units', 'deg. C')
var = nc.createVariable('Pc', 'f4', dimensions=('Time'))
var[:] = Pc[mask]
var.setncattr('name', 'Cell pressure')
var.setncattr('units', 'hPa')
var = nc.createVariable('RH', 'f4', dimensions=('Time'))
var[:] = RH[mask]
var.setncattr('name', 'Relative humidity')
var.setncattr('units', '%')
nc.close()
def process_pitot_to_level2():
"""Processes MKS pressure difference from TOA5 files
into pitot tube velocity and writes it to NetCDF."""
PRESSURE_DATA_PATH = get_data_path('PRESSURE')
experiments_to_process = [
'asist-windonly-fresh',
'asist-wind-swell-fresh',
'asist-windonly-salt',
'asist-wind-swell-salt'
]
files = glob.glob(PRESSURE_DATA_PATH + '/TOA5*.dat')
files.sort()
time, dp1, dp2 = read_pressure_from_toa5(files)
# remove offset from pressure before computing velocity
for exp_name in experiments_to_process:
exp = experiments[exp_name]
t0 = exp.runs[0].start_time + timedelta(seconds=30)
t1 = exp.runs[0].end_time - timedelta(seconds=30)
mask = (time >= t0) & (time <= t1)
dp1_offset = np.mean(dp1[mask])
dp2_offset = np.mean(dp2[mask])
for run in exp.runs:
run_mask = (time >= run.start_time) & (time <= run.end_time)
dp1[run_mask] -= dp1_offset
dp2[run_mask] -= dp2_offset
dp1[dp1 < 0] = 0
# now remove offset due to pressure drift
# (avoid swell experiments in this step)
for exp_name in experiments_to_process:
if 'swell' in exp_name:
continue
exp = experiments[exp_name]
t0 = exp.runs[0].start_time
t1 = exp.runs[-1].end_time
mask = (time >= t0) & (time <= t1)
dp1[mask] = remove_drift(dp1[mask], date2num(time[mask]))
air_density = 1.1554 # at 30 deg. C and 90% RH
u = pitot_velocity(dp1, air_density)
for exp_name in experiments_to_process:
exp = experiments[exp_name]
t0 = exp.runs[0].start_time
t1 = exp.runs[-1].end_time
mask = (time >= t0) & (time <= t1)
exp_time = time[mask]
# time in seconds of the day; save origin in nc attribute
exp_seconds = (date2num(exp_time) - int(date2num(t0))) * 86400
# fan frequency
fan = np.zeros(exp_time.size)
for run in exp.runs:
run_mask = (exp_time >= run.start_time) & (exp_time <= run.end_time)
fan[run_mask] = run.fan
# status flag (0: good; 1: fan spin-up; 2: bad)
flag = np.zeros(exp_time.size)
for run in exp.runs:
run_mask = (exp_time >= run.start_time) & (exp_time < run.start_time + timedelta(seconds=60))
flag[run_mask] = 1
ncfile = 'pitot_' + exp_name + '.nc'
print('Writing ' + ncfile)
nc = Dataset(ncfile, 'w', format='NETCDF4')
nc.createDimension('Time', size=0)
var = nc.createVariable('Time', 'f8', dimensions=('Time'))
var[:] = exp_seconds
var.setncattr('name', 'Time in seconds of the day')
var.setncattr('units', 's')
var.setncattr('origin', num2date(int(date2num(t0))).strftime('%Y-%m-%dT%H:%M:%S'))
var = nc.createVariable('fan', 'f4', dimensions=('Time'))
var[:] = fan
var.setncattr('name', 'Fan frequency')
var.setncattr('units', 'Hz')
var = nc.createVariable('u', 'f4', dimensions=('Time'))
var[:] = u[mask]
var.setncattr('name', 'Pitot velocity')
var.setncattr('units', 'm/s')
var = nc.createVariable('dp_pitot', 'f4', dimensions=('Time'))
var[:] = dp1[mask]
var.setncattr('name', 'Pitot pressure difference')
var.setncattr('units', 'Pa')
var = nc.createVariable('dp_alongtank', 'f4', dimensions=('Time'))
var[:] = dp2[mask]
var.setncattr('name', 'Along-tank pressure difference')
var.setncattr('units', 'Pa')
nc.close()
| """
process_level1.py
"""
from asist.irgason import read_irgason_from_toa5
from asist.hotfilm import read_hotfilm_from_lvm
from asist.pressure import read_pressure_from_toa5
from asist.pitot import pitot_velocity
from asist_nsf_2018.experiments import experiments
from datetime import datetime, timedelta
import glob
from matplotlib.dates import date2num, num2date
from netCDF4 import Dataset
import numpy as np
import os
def remove_drift(p, t):
return p - (p[-1] - p[0]) * (t - t[0]) / (t[-1] - t[0])
def get_data_path(data_name):
"""Gets the data path from the env variable."""
assert data_name in ['LEG', 'HOTFILM', 'IRGASON', 'PRESSURE'],\
data_name + ' is not available'
try:
return os.environ[data_name + '_DATA_PATH']
except KeyError:
raise KeyError('Set ' + data_name + '_DATA_PATH env variable to the path with ' + data_name + ' data')
def get_experiment_time_series(time, data, exp):
"""Returns time and data slice between
experiment start and end times."""
t0, t1 = exp.runs[0].start_time, exp.runs[-1].end_time
mask = (time >= t0) & (time <= t1)
return time[mask], data[mask]
def process_dp_to_level2():
"""Processes pressure gradient into a NetCDF file."""
PRESSURE_DATA_PATH = get_data_path('PRESSURE')
exp_name = 'asist-christian-shadowgraph'
exp = experiments[exp_name]
files = glob.glob(PRESSURE_DATA_PATH + '/TOA5_SUSTAINpresX4X2.pressure_229*.dat')
files.sort()
time, dp1, dp2 = read_pressure_from_toa5(files)
# remove offset from pressure
t0 = exp.runs[0].start_time
t1 = exp.runs[0].end_time - timedelta(seconds=60)
mask = (time >= t0) & (time <= t1)
dp2_offset = np.mean(dp2[mask])
for run in exp.runs:
run_mask = (time >= run.start_time) & (time <= run.end_time)
dp2[run_mask] -= dp2_offset
time, dp = get_experiment_time_series(time, dp2, exp)
# fan frequency
fan = np.zeros(time.size)
for run in exp.runs:
run_mask = (time >= run.start_time) & (time <= run.end_time)
fan[run_mask] = run.fan
print('Writing ' + ncfile)
# distance between air pressure ports
# 14 panels, 0.77 m each, minus 2 cm on each end
dx = 14 * 0.77 - 0.04
seconds = (date2num(time) - int(date2num(t0))) * 86400
ncfile = 'air-pressure_' + exp_name + '.nc'
with Dataset(ncfile, 'w', format='NETCDF4') as nc:
nc.createDimension('Time', size=0)
var = nc.createVariable('Time', 'f8', dimensions=('Time'))
var[:] = seconds
var.setncattr('name', 'Time in seconds of the day')
var.setncattr('units', 's')
var.setncattr('origin', t0.strftime('%Y-%m-%dT%H:%M:%S'))
var.setncattr('dx', dx)
var = nc.createVariable('fan', 'f4', dimensions=('Time'))
var[:] = fan
var.setncattr('name', 'Fan frequency')
var.setncattr('units', 'Hz')
var = nc.createVariable('dp', 'f4', dimensions=('Time'))
var[:] = dp
var.setncattr('name', 'Along-tank air pressure difference')
var.setncattr('units', 'Pa')
var = nc.createVariable('dpdx', 'f4', dimensions=('Time'))
var[:] = dp / dx
var.setncattr('name', 'Along-tank air pressure gradient')
var.setncattr('units', 'Pa / m')
def process_hotfilm_to_level2():
"""Processes Hot film Labview files into NetCDF."""
HOTFILM_DATA_PATH = get_data_path('HOTFILM')
experiments_to_process = [
'asist-windonly-fresh_warmup',
'asist-windonly-fresh',
'asist-windonly-salt'
]
for exp_name in experiments_to_process:
exp = experiments[exp_name]
filename = HOTFILM_DATA_PATH + '/hot_film_'\
+ exp.runs[0].start_time.strftime('%Y%m%d') + '.lvm'
if exp_name == 'asist-windonly-fresh_warmup':
start_time, seconds, ch1, ch2 = read_hotfilm_from_lvm(filename, dt=2e-3)
else:
start_time, seconds, ch1, ch2 = read_hotfilm_from_lvm(filename, dt=1e-3)
origin = datetime(start_time.year, start_time.month, start_time.day)
seconds = np.array(seconds) + (start_time - origin).total_seconds()
ch1 = np.array(ch1)
ch2 = np.array(ch2)
t0 = date2num(exp.runs[0].start_time)
t1 = date2num(exp.runs[-1].end_time)
t0 = (t0 - int(t0)) * 86400
t1 = (t1 - int(t1)) * 86400
mask = (seconds >= t0) & (seconds <= t1)
exp_seconds = seconds[mask]
# fan frequency
fan = np.zeros(exp_seconds.size)
for run in exp.runs:
run_mask = (exp_seconds >= t0) & (exp_seconds <= t1)
fan[run_mask] = run.fan
ncfile = 'hotfilm_' + exp_name + '.nc'
print('Writing ' + ncfile)
nc = Dataset(ncfile, 'w', format='NETCDF4')
nc.createDimension('Time', size=0)
var = nc.createVariable('Time', 'f8', dimensions=('Time'))
var[:] = exp_seconds
var.setncattr('name', 'Time in seconds of the day')
var.setncattr('units', 's')
var.setncattr('origin', origin.strftime('%Y-%m-%dT%H:%M:%S'))
var = nc.createVariable('fan', 'f4', dimensions=('Time'))
var[:] = fan
var.setncattr('name', 'Fan frequency')
var.setncattr('units', 'Hz')
var = nc.createVariable('ch1', 'f4', dimensions=('Time'))
var[:] = ch1[mask]
var.setncattr('name', 'Channel 1 voltage')
var.setncattr('units', 'V')
var = nc.createVariable('ch2', 'f4', dimensions=('Time'))
var[:] = ch2[mask]
var.setncattr('name', 'Channel 2 voltage')
var.setncattr('units', 'V')
nc.close()
def process_irgason_to_level2():
    """Processes IRGASON TOA5 files into NetCDF.

    Reads every TOA5 IRGASON file from the data path, then writes one
    NetCDF4 file per experiment (``irgason_<exp_name>.nc``) with time,
    fan frequency, a status flag, the three sonic velocity components,
    sonic/cell temperature, cell pressure, and relative humidity.

    NOTE(review): indentation was lost in the source dump and has been
    reconstructed here -- verify block structure against the original file.
    """
    IRGASON_DATA_PATH = get_data_path('IRGASON')
    experiments_to_process = [
        'asist-windonly-fresh_warmup',
        'asist-windonly-fresh',
        'asist-wind-swell-fresh',
        'asist-windonly-salt',
        'asist-wind-swell-salt',
        'asist-flow-distortion'
    ]
    files = glob.glob(IRGASON_DATA_PATH + '/TOA5*.dat')
    files.sort()
    time, u, v, w, Ts, Tc, Pc, RH = read_irgason_from_toa5(files)
    for exp_name in experiments_to_process:
        exp = experiments[exp_name]
        # clip the full record to this experiment's [start, end] window
        t0 = exp.runs[0].start_time
        t1 = exp.runs[-1].end_time
        mask = (time >= t0) & (time <= t1)
        exp_time = time[mask]
        # time in seconds of the day; save origin in nc attribute
        exp_seconds = (date2num(exp_time) - int(date2num(t0))) * 86400
        # fan frequency
        fan = np.zeros(exp_time.size)
        for run in exp.runs:
            run_mask = (exp_time >= run.start_time) & (exp_time <= run.end_time)
            fan[run_mask] = run.fan
        # status flag (0: good; 1: fan spin-up; 2: bad)
        # NOTE(review): only flags 0 and 1 are ever assigned here.
        flag = np.zeros(exp_time.size)
        for run in exp.runs:
            # first 60 s of each run are flagged as fan spin-up
            run_mask = (exp_time >= run.start_time) & (exp_time < run.start_time + timedelta(seconds=60))
            flag[run_mask] = 1
        ncfile = 'irgason_' + exp_name + '.nc'
        print('Writing ' + ncfile)
        nc = Dataset(ncfile, 'w', format='NETCDF4')
        nc.createDimension('Time', size=0)  # size=0 -> unlimited dimension
        var = nc.createVariable('Time', 'f8', dimensions=('Time'))
        var[:] = exp_seconds
        var.setncattr('name', 'Time in seconds of the day')
        var.setncattr('units', 's')
        var.setncattr('origin', num2date(int(date2num(t0))).strftime('%Y-%m-%dT%H:%M:%S'))
        var = nc.createVariable('flag', 'i4', dimensions=('Time'))
        var[:] = flag
        var.setncattr('name', 'Status flag')
        var.setncattr('description', '0: good; 1: fan spin-up; 2: bad')
        var.setncattr('units', '')
        var = nc.createVariable('fan', 'f4', dimensions=('Time'))
        var[:] = fan
        var.setncattr('name', 'Fan frequency')
        var.setncattr('units', 'Hz')
        var = nc.createVariable('u', 'f4', dimensions=('Time'))
        var[:] = u[mask]
        var.setncattr('name', 'x- component of velocity')
        var.setncattr('units', 'm/s')
        var = nc.createVariable('v', 'f4', dimensions=('Time'))
        var[:] = v[mask]
        var.setncattr('name', 'y- component of velocity')
        var.setncattr('units', 'm/s')
        var = nc.createVariable('w', 'f4', dimensions=('Time'))
        var[:] = w[mask]
        var.setncattr('name', 'z- component of velocity')
        var.setncattr('units', 'm/s')
        var = nc.createVariable('Ts', 'f4', dimensions=('Time'))
        var[:] = Ts[mask]
        var.setncattr('name', 'Sonic temperature')
        var.setncattr('units', 'deg. C')
        var = nc.createVariable('Tc', 'f4', dimensions=('Time'))
        var[:] = Tc[mask]
        var.setncattr('name', 'Cell temperature')
        var.setncattr('units', 'deg. C')
        var = nc.createVariable('Pc', 'f4', dimensions=('Time'))
        var[:] = Pc[mask]
        var.setncattr('name', 'Cell pressure')
        var.setncattr('units', 'hPa')
        var = nc.createVariable('RH', 'f4', dimensions=('Time'))
        var[:] = RH[mask]
        var.setncattr('name', 'Relative humidity')
        var.setncattr('units', '%')
        nc.close()
def process_pitot_to_level2():
    """Processes MKS pressure difference from TOA5 files
    into pitot tube velocity and writes it to NetCDF.

    Pipeline: (1) remove per-experiment static offset estimated from the
    first (zero-fan) run, (2) remove slow pressure drift for the
    non-swell experiments, (3) convert the pitot pressure difference to
    velocity, (4) write one NetCDF4 file per experiment.

    NOTE(review): indentation was lost in the source dump and has been
    reconstructed here -- verify block structure against the original file.
    """
    PRESSURE_DATA_PATH = get_data_path('PRESSURE')
    experiments_to_process = [
        'asist-windonly-fresh',
        'asist-wind-swell-fresh',
        'asist-windonly-salt',
        'asist-wind-swell-salt'
    ]
    files = glob.glob(PRESSURE_DATA_PATH + '/TOA5*.dat')
    files.sort()
    time, dp1, dp2 = read_pressure_from_toa5(files)
    # remove offset from pressure before computing velocity
    for exp_name in experiments_to_process:
        exp = experiments[exp_name]
        # offset estimated from the first run, trimming 30 s at each end
        t0 = exp.runs[0].start_time + timedelta(seconds=30)
        t1 = exp.runs[0].end_time - timedelta(seconds=30)
        mask = (time >= t0) & (time <= t1)
        dp1_offset = np.mean(dp1[mask])
        dp2_offset = np.mean(dp2[mask])
        for run in exp.runs:
            run_mask = (time >= run.start_time) & (time <= run.end_time)
            dp1[run_mask] -= dp1_offset
            dp2[run_mask] -= dp2_offset
    # clip negative pitot pressure differences to zero
    # NOTE(review): placement reconstructed -- confirm this runs once,
    # after all offsets have been removed.
    dp1[dp1 < 0] = 0
    # now remove offset due to pressure drift
    # (avoid swell experiments in this step)
    for exp_name in experiments_to_process:
        if 'swell' in exp_name:
            continue
        exp = experiments[exp_name]
        t0 = exp.runs[0].start_time
        t1 = exp.runs[-1].end_time
        mask = (time >= t0) & (time <= t1)
        dp1[mask] = remove_drift(dp1[mask], date2num(time[mask]))
    air_density = 1.1554  # at 30 deg. C and 90% RH
    u = pitot_velocity(dp1, air_density)
    for exp_name in experiments_to_process:
        exp = experiments[exp_name]
        t0 = exp.runs[0].start_time
        t1 = exp.runs[-1].end_time
        mask = (time >= t0) & (time <= t1)
        exp_time = time[mask]
        # time in seconds of the day; save origin in nc attribute
        exp_seconds = (date2num(exp_time) - int(date2num(t0))) * 86400
        # fan frequency
        fan = np.zeros(exp_time.size)
        for run in exp.runs:
            run_mask = (exp_time >= run.start_time) & (exp_time <= run.end_time)
            fan[run_mask] = run.fan
        # status flag (0: good; 1: fan spin-up; 2: bad)
        # NOTE(review): 'flag' is computed but never written to the file below.
        flag = np.zeros(exp_time.size)
        for run in exp.runs:
            run_mask = (exp_time >= run.start_time) & (exp_time < run.start_time + timedelta(seconds=60))
            flag[run_mask] = 1
        ncfile = 'pitot_' + exp_name + '.nc'
        print('Writing ' + ncfile)
        nc = Dataset(ncfile, 'w', format='NETCDF4')
        nc.createDimension('Time', size=0)  # size=0 -> unlimited dimension
        var = nc.createVariable('Time', 'f8', dimensions=('Time'))
        var[:] = exp_seconds
        var.setncattr('name', 'Time in seconds of the day')
        var.setncattr('units', 's')
        var.setncattr('origin', num2date(int(date2num(t0))).strftime('%Y-%m-%dT%H:%M:%S'))
        var = nc.createVariable('fan', 'f4', dimensions=('Time'))
        var[:] = fan
        var.setncattr('name', 'Fan frequency')
        var.setncattr('units', 'Hz')
        var = nc.createVariable('u', 'f4', dimensions=('Time'))
        var[:] = u[mask]
        var.setncattr('name', 'Pitot velocity')
        var.setncattr('units', 'm/s')
        var = nc.createVariable('dp_pitot', 'f4', dimensions=('Time'))
        var[:] = dp1[mask]
        var.setncattr('name', 'Pitot pressure difference')
        var.setncattr('units', 'Pa')
        var = nc.createVariable('dp_alongtank', 'f4', dimensions=('Time'))
        var[:] = dp2[mask]
        var.setncattr('name', 'Along-tank pressure difference')
        var.setncattr('units', 'Pa')
        nc.close()
| en | 0.770355 | process_level1.py Gets the data path from the env variable. Returns time and data slice between experiment start and end times. Processes pressure gradient into a NetCDF file. # remove offset from pressure # fan frequency # distance between air pressure ports # 14 panels, 0.77 m each, minus 2 cm on each end Processes Hot film Labview files into NetCDF. # fan frequency Processes IRGASON TOA5 files into NetCDF. # time in seconds of the day; save origin in nc attribute # fan frequency # status flag (0: good; 1: fan spin-up; 2: bad) Processes MKS pressure difference from TOA5 files into pitot tube velocity and writes it to NetCDF. # remove offset from pressure before computing velocity # now remove offset due to pressure drift # (avoid swell experiments in this step) # at 30 deg. C and 90% RH # time in seconds of the day; save origin in nc attribute # fan frequency # status flag (0: good; 1: fan spin-up; 2: bad) | 2.243066 | 2 |
generate.py | mankadronit/Spotify-Playlist-Generator | 0 | 6622862 | import base64
from bs4 import BeautifulSoup
import datetime
import json
from os.path import isfile, getsize
import re
import requests
import six
import sqlite3
import urllib
import webbrowser
from constants import *
def authenticate_client():
    """Run Spotify's authorization-code flow and return a usable access token."""
    code = get_auth_code()
    token = get_access_token(code)
    return token
def get_auth_code():
    """Return the cached auth code from auth.txt, fetching a fresh one if absent."""
    if not (isfile('auth.txt') and getsize('auth.txt') > 0):
        # no cached code yet: run the interactive flow and cache the result
        code = fetch_auth_code()
        save_auth_code(code)
        return code
    with open('auth.txt', 'r') as fh:
        return fh.read()
def get_access_token(auth_code):
    """Return a valid access token: reuse, refresh, or fetch a brand-new one.

    Reads the newest row of the ``tokens`` table (module-level sqlite3
    ``cursor``); refreshes it when it is older than an hour, and runs the
    full code exchange when no token has ever been stored.
    """
    rows = cursor.execute('SELECT access_token, refresh_token, add_time '
                          'from tokens ORDER BY add_time DESC')
    newest = rows.fetchone()
    if newest is None:
        # never authenticated before: exchange the auth code
        access_token, _, _ = fetch_access_token(auth_code)
        return access_token
    access_token, refresh_token, add_time = newest
    if check_token_validity(add_time):
        return access_token
    return fetch_refreshed_token(refresh_token)
def fetch_access_token(auth_code):
    """ Fetches Access Token from Spotify API.

    Exchanges the authorization code for an access/refresh token pair,
    stores the new row in the ``tokens`` table (module-level sqlite3
    ``cursor``), and returns the newest stored
    (access_token, refresh_token, add_time) triple.
    """
    payload = {'grant_type': 'authorization_code',
               'code': str(auth_code), 'redirect_uri': REDIRECT_URI}
    # HTTP Basic auth header: base64("<client_id>:<client_secret>")
    auth_headers = base64.b64encode(six.text_type(CLIENT_ID + ':' + CLIENT_SECRET).encode('ascii'))
    headers = {'Authorization': 'Basic ' + auth_headers.decode('ascii')}
    response = requests.post(OAUTH_TOKENS_URL, headers=headers, data=payload)
    response_data = json.loads(response.content)
    # NOTE(review): assumes the exchange succeeded -- a non-2xx response
    # would raise KeyError below; confirm error handling is acceptable.
    cursor.execute('INSERT INTO tokens('
                   'access_token, '
                   'token_type, scope, '
                   'expires_in, '
                   'refresh_token, '
                   'add_time) '
                   'VALUES (?,?,?,?,?,?)',
                   (response_data['access_token'],
                    response_data['token_type'],
                    response_data['scope'],
                    response_data['expires_in'],
                    response_data['refresh_token'],
                    datetime.datetime.now()))
    # read the newest row back so the caller gets exactly what was stored
    tokens = cursor.execute('SELECT access_token, refresh_token, add_time '
                            'from tokens ORDER BY add_time DESC')
    access_token, refresh_token, add_time = tokens.fetchone()
    return access_token, refresh_token, add_time
def fetch_refreshed_token(refresh_token):
    """Exchange a refresh token for a new access token, persist it, return it."""
    body = {'grant_type': 'refresh_token', 'refresh_token': refresh_token}
    creds = six.text_type(CLIENT_ID + ':' + CLIENT_SECRET).encode('ascii')
    headers = {'Authorization': 'Basic ' + base64.b64encode(creds).decode('ascii')}
    response = requests.post(OAUTH_TOKENS_URL, data=body, headers=headers)
    response_data = json.loads(response.content)
    # the refresh response carries no new refresh token; keep the old one
    row = (response_data['access_token'],
           response_data['token_type'],
           response_data['scope'],
           response_data['expires_in'],
           refresh_token,
           datetime.datetime.now())
    cursor.execute('INSERT INTO tokens('
                   'access_token, '
                   'token_type, scope, '
                   'expires_in, '
                   'refresh_token, '
                   'add_time) '
                   'VALUES (?,?,?,?,?,?)', row)
    return response_data['access_token']
def fetch_auth_code():
    """Open Spotify's consent page and extract the auth code from the pasted redirect URL."""
    params = {'client_id': CLIENT_ID, 'response_type': 'code', 'redirect_uri': REDIRECT_URI,
              'scope': 'playlist-modify-private playlist-modify-public'}
    webbrowser.open(OAUTH_AUTHORIZE_URL + urllib.parse.urlencode(params))
    redirect_url = prompt_user_input()
    # the code is everything after "?code=" in the redirect URL
    return redirect_url.split("?code=")[1]
def save_auth_code(auth_code):
    """Persist the auth code to auth.txt in the working directory."""
    with open('auth.txt', 'w') as fh:
        fh.write(auth_code)
def prompt_user_input():
    """Ask the user to paste the URL Spotify redirected them to."""
    redirect_url = input("Enter the redirect URL: ")
    return redirect_url
def check_token_validity(add_time):
    """Return True if the token stored at *add_time* is still valid.

    Spotify access tokens expire after one hour, so a token is valid
    when less than an hour has elapsed since it was stored. (The old
    docstring said the opposite of what the code returns.)

    :param add_time: timestamp string as produced by ``datetime.now()``,
        i.e. in ``'%Y-%m-%d %H:%M:%S.%f'`` format.
    """
    issued_at = datetime.datetime.strptime(add_time, '%Y-%m-%d %H:%M:%S.%f')
    age = datetime.datetime.now() - issued_at
    return age < datetime.timedelta(hours=1)
def fetch_user_profile(access_token):
    """Return the Spotify user id of the account owning *access_token*."""
    auth = {'Authorization': 'Bearer %s' % access_token}
    response = requests.get(SPOTIFY_PROFILE_URL, headers=auth)
    profile = json.loads(response.content)
    return profile['id']
def fetch_playlist(access_token, name):
    """Return the id of the user's playlist called *name*, or None.

    Scans every playlist in the response before giving up; the previous
    version could return None as soon as the first playlist did not
    match, so any playlist not listed first was never found.

    :param access_token: bearer token for the Spotify Web API.
    :param name: exact playlist name to look for.
    """
    headers = {'Authorization': 'Bearer %s' % access_token}
    response = requests.get(SPOTIFY_PLAYLIST_URL, headers=headers)
    response_data = json.loads(response.content)
    for playlist in response_data['items']:
        if playlist['name'] == name:
            return playlist['id']
    return None
def fetch_hot_songs():
    """ Scrapes a list of top songs from HotNewHipHop's Website.

    Parses the Hot 100 chart page into lower-cased (song, artist) pairs
    and filters them through ``select_desirable_songs``. Tightly coupled
    to the site's current CSS class names -- it will silently return an
    empty list if the markup changes.
    """
    page = requests.get(url=HOT_100_URL)
    soup = BeautifulSoup(page.content, 'lxml')
    divs = soup.find_all('div', class_='chartItem-body-artist')
    song_artist_pair = []
    for div in divs:
        a = div.find('a', class_='cover-title chartItem-artist-trackTitle')
        # collapse runs of spaces and drop trailing whitespace
        song_name = re.sub(' +', ' ', a.text).rstrip()
        div2 = div.findChildren(
            'div', class_='chartItem-artist-info', recursive=True)
        # concatenate all artist-info fragments into one credit string
        artist_name = ""
        for element in div2:
            artist_name += element.text + " "
        artist_name = re.sub(' +', ' ', artist_name).rstrip()
        song_artist_pair.append((song_name.lower(), artist_name.lower()))
    return select_desirable_songs(song_artist_pair)
def select_desirable_songs(song_artist_pair):
    """ Filter (song, artist) pairs down to the desired artists.

    Splits combined credits (non-breaking-space + "feat. " and "&") into
    individual artist names, then keeps only pairs whose artist appears
    in DESIRED_ARTISTS (case-insensitive).

    Fixes: names produced by splitting on '&' carry a *leading* space
    (e.g. ' drake'); the old code only stripped the right-hand side, so
    those artists never matched -- strip both sides. Also lower-cases
    DESIRED_ARTISTS once instead of once per candidate pair.
    """
    desired = {a.lower() for a in DESIRED_ARTISTS}
    cleaned_list = []
    for song, artist in song_artist_pair:
        for credit in artist.split('\xa0feat. '):
            for name in credit.split('&'):
                cleaned_list.append((song, name.strip()))
    return [(song, artist) for (song, artist) in cleaned_list if artist in desired]
def add_to_playlist(user_id, playlist_id, song_artist_list, access_token):
    """ Adds the songs to the playlist.

    Filters out songs already recorded locally, resolves the rest to
    Spotify track URIs, and POSTs them to the playlist in one request.

    Fixes: success was previously reported for *any* status code other
    than 400/403/404, so e.g. 401 or 500 printed a success message. The
    add-items endpoint returns 201 (historically 200) on success, so
    test for those explicitly and include the code in the error message.
    """
    songs_list = remove_already_added_songs(song_artist_list)
    songs_uri_list = fetch_songs_uri(songs_list, access_token)
    payload = {'uris': songs_uri_list}
    spotify_add_to_playlist_url = 'https://api.spotify.com/v1/users/{}/playlists/{}/tracks'.format(
        user_id, playlist_id)
    headers = {'Authorization': 'Bearer %s' % access_token,
               'Content-Type': 'application/json'}
    response = requests.post(spotify_add_to_playlist_url, json=payload, headers=headers)
    if response.status_code in (200, 201):
        print('The following songs have been successfully added to your playlist: \n', songs_list)
    else:
        print('Error adding songs (HTTP %d)' % response.status_code)
def remove_already_added_songs(song_artist_list):
    """ Return only the songs not yet recorded in the local database.

    Records every new (song, artist) pair in the ``songs`` table so it
    is skipped on later runs. Relies on the module-level sqlite3
    ``cursor``.

    Fixes: the old code always returned the *full* input list, so songs
    already in the database were re-added to the playlist on every run;
    return only the new songs instead (same result as before on the
    very first run, when the table is empty).
    """
    cursor.execute("CREATE TABLE if not exists songs(song text not null, artist text not null)")
    already_added = cursor.execute("SELECT * from songs").fetchall()
    new_songs = [(song, artist) for (song, artist) in song_artist_list
                 if (song, artist) not in already_added]
    for song, artist in new_songs:
        cursor.execute("INSERT INTO songs(song, artist) VALUES (?,?)", (song, artist))
    return new_songs
def fetch_songs_uri(songs_list, access_token):
    """Resolve (song, artist) pairs to Spotify track URIs via the search API.

    Pairs that produce no search hits are silently dropped.
    """
    headers = {'Authorization': 'Bearer %s' % access_token}
    song_uris = []
    for song, artist in songs_list:
        params = {'q': song + " " + artist, 'type': 'track', 'limit': 1}
        hits = requests.get(SPOTIFY_SEARCH_URL, headers=headers, params=params).json()
        items = hits['tracks']['items']
        if items:
            song_uris.append(items[0]['uri'])
    return song_uris
if __name__ == '__main__':
    # Tokens are persisted across runs in a local SQLite database;
    # isolation_level=None puts the connection in autocommit mode.
    db = sqlite3.connect('spotify.db', isolation_level=None)
    cursor = db.cursor()
    cursor.execute('CREATE TABLE if not exists tokens('
                   'access_token text not null, '
                   'token_type text not null, '
                   'scope text not null, '
                   'expires_in int not null, '
                   'refresh_token text, '
                   'add_time timestamp)'
                   )
    token = authenticate_client()
    user_id = fetch_user_profile(token)
    # NOTE(review): fetch_playlist returns None when no playlist named
    # 'Automated Playlist' exists, and add_to_playlist would then build
    # a broken URL -- confirm the playlist is created beforehand.
    playlist_id = fetch_playlist(token, name='Automated Playlist')
    song_artist_list = fetch_hot_songs()
    add_to_playlist(user_id, playlist_id, song_artist_list, token)
| import base64
from bs4 import BeautifulSoup
import datetime
import json
from os.path import isfile, getsize
import re
import requests
import six
import sqlite3
import urllib
import webbrowser
from constants import *
def authenticate_client():
""" Spotify's Auth Flow, a three steps process """
auth_code = get_auth_code()
return get_access_token(auth_code)
def get_auth_code():
""" 1st Step of Auth Process """
if isfile('auth.txt') and getsize('auth.txt') > 0:
with open('auth.txt', 'r') as f:
auth_code = f.read()
else:
auth_code = fetch_auth_code()
save_auth_code(auth_code)
return auth_code
def get_access_token(auth_code):
""" 2nd Step of Auth Process """
tokens = cursor.execute('SELECT access_token, refresh_token, add_time '
'from tokens ORDER BY add_time DESC')
token_row = tokens.fetchone()
if token_row is not None:
access_token, refresh_token, add_time = token_row[0], token_row[1], token_row[2]
if not check_token_validity(add_time):
return fetch_refreshed_token(refresh_token)
else:
return access_token
else:
access_token, refresh_token, add_time = fetch_access_token(auth_code)
return access_token
def fetch_access_token(auth_code):
""" Fetches Access Token from Spotify API """
payload = {'grant_type': 'authorization_code',
'code': str(auth_code), 'redirect_uri': REDIRECT_URI}
auth_headers = base64.b64encode(six.text_type(CLIENT_ID + ':' + CLIENT_SECRET).encode('ascii'))
headers = {'Authorization': 'Basic ' + auth_headers.decode('ascii')}
response = requests.post(OAUTH_TOKENS_URL, headers=headers, data=payload)
response_data = json.loads(response.content)
cursor.execute('INSERT INTO tokens('
'access_token, '
'token_type, scope, '
'expires_in, '
'refresh_token, '
'add_time) '
'VALUES (?,?,?,?,?,?)',
(response_data['access_token'],
response_data['token_type'],
response_data['scope'],
response_data['expires_in'],
response_data['refresh_token'],
datetime.datetime.now()))
tokens = cursor.execute('SELECT access_token, refresh_token, add_time '
'from tokens ORDER BY add_time DESC')
access_token, refresh_token, add_time = tokens.fetchone()
return access_token, refresh_token, add_time
def fetch_refreshed_token(refresh_token):
""" Fetches a new access token using refresh token """
payload = {'grant_type': 'refresh_token', 'refresh_token': refresh_token}
auth_headers = base64.b64encode(six.text_type(CLIENT_ID + ':' + CLIENT_SECRET).encode('ascii'))
headers = {'Authorization': 'Basic ' + auth_headers.decode('ascii')}
response = requests.post(OAUTH_TOKENS_URL, data=payload, headers=headers)
response_data = json.loads(response.content)
cursor.execute('INSERT INTO tokens('
'access_token, '
'token_type, scope, '
'expires_in, '
'refresh_token, '
'add_time) '
'VALUES (?,?,?,?,?,?)',
(response_data['access_token'],
response_data['token_type'],
response_data['scope'],
response_data['expires_in'],
refresh_token,
datetime.datetime.now()))
return response_data['access_token']
def fetch_auth_code():
""" Fetches Auth code by making a request to Spotify OAUTH URL """
data = {'client_id': CLIENT_ID, 'response_type': 'code', 'redirect_uri': REDIRECT_URI,
'scope': 'playlist-modify-private playlist-modify-public'}
payload = urllib.parse.urlencode(data)
webbrowser.open(OAUTH_AUTHORIZE_URL + payload)
response = prompt_user_input()
auth_code = response.split("?code=")[1]
return auth_code
def save_auth_code(auth_code):
""" Saves auth code to the disk """
with open('auth.txt', 'w') as f:
f.write(auth_code)
def prompt_user_input():
""" Asks the user to paste the redirect URL through any input mechanism """
return input("Enter the redirect URL: ")
def check_token_validity(add_time):
""" Checks if the token is older than 1 hour """
return datetime.datetime.now() - datetime.datetime.strptime(add_time, '%Y-%m-%d %H:%M:%S.%f') < \
datetime.timedelta(hours=1)
def fetch_user_profile(access_token):
""" Fetches the User's Spotify Profile """
headers = {'Authorization': 'Bearer %s' % access_token}
response = requests.get(SPOTIFY_PROFILE_URL, headers=headers)
response_data = json.loads(response.content)
return response_data['id']
def fetch_playlist(access_token, name):
""" Fetches the Playlist the user wants to be automated """
headers = {'Authorization': 'Bearer %s' % access_token}
response = requests.get(SPOTIFY_PLAYLIST_URL, headers=headers)
response_data = json.loads(response.content)
for playlist in response_data['items']:
if playlist['name'] == name:
return playlist['id']
else:
return None
def fetch_hot_songs():
""" Scrapes a list of top songs from HotNewHipHop's Website """
page = requests.get(url=HOT_100_URL)
soup = BeautifulSoup(page.content, 'lxml')
divs = soup.find_all('div', class_='chartItem-body-artist')
song_artist_pair = []
for div in divs:
a = div.find('a', class_='cover-title chartItem-artist-trackTitle')
song_name = re.sub(' +', ' ', a.text).rstrip()
div2 = div.findChildren(
'div', class_='chartItem-artist-info', recursive=True)
artist_name = ""
for element in div2:
artist_name += element.text + " "
artist_name = re.sub(' +', ' ', artist_name).rstrip()
song_artist_pair.append((song_name.lower(), artist_name.lower()))
return select_desirable_songs(song_artist_pair)
def select_desirable_songs(song_artist_pair):
""" Creates a desirable songs list by picking relevant artists """
cleaned_list = []
for song, artist in song_artist_pair:
featuring_artists = [art.split('&') for art in artist.split('\xa0feat. ')]
for art in featuring_artists:
for a in art:
cleaned_list.append((song, a.rstrip()))
return [(song, artist) for (song, artist) in cleaned_list if artist in [a.lower() for a in DESIRED_ARTISTS]]
def add_to_playlist(user_id, playlist_id, song_artist_list, access_token):
""" Adds the songs to the playlist """
songs_list = remove_already_added_songs(song_artist_list)
songs_uri_list = fetch_songs_uri(songs_list, access_token)
payload = {'uris': songs_uri_list}
spotify_add_to_playlist_url = 'https://api.spotify.com/v1/users/{}/playlists/{}/tracks'.format(
user_id, playlist_id)
headers = {'Authorization': 'Bearer %s' % access_token,
'Content-Type': 'application/json'}
response = requests.post(spotify_add_to_playlist_url, json=payload, headers=headers)
if response.status_code not in (400, 403, 404):
print('The following songs have been successfully added to your playlist: \n', songs_list)
else:
print('Error adding songs')
def remove_already_added_songs(song_artist_list):
""" Check's the database for the list of already added songs and removes
the song from the list if it already exists
"""
cursor.execute("CREATE TABLE if not exists songs(song text not null, artist text not null)")
songs = cursor.execute("SELECT * from songs")
songs_list = songs.fetchall()
if len(songs_list) > 0:
new_songs = [(song, artist) for (song, artist) in song_artist_list if (song, artist) not in songs_list]
for song, artist in new_songs:
cursor.execute("INSERT INTO songs(song, artist) VALUES (?,?)", (song, artist))
else:
for song, artist in song_artist_list:
cursor.execute("INSERT INTO songs(song, artist) VALUES (?,?)", (song, artist))
return song_artist_list
def fetch_songs_uri(songs_list, access_token):
""" Returns a list of song uri's to add to the playlist.
The list it created by searching for each track individually
"""
song_uris = []
for song, artist in songs_list:
query = (song + " " + artist)
payload = {'q': query, 'type': 'track', 'limit': 1}
headers = {'Authorization': 'Bearer %s' % access_token}
response = requests.get(SPOTIFY_SEARCH_URL, headers=headers, params=payload)
response_data = response.json()
if len(response_data['tracks']['items']) != 0:
song_uris.append(response_data['tracks']['items'][0]['uri'])
return song_uris
if __name__ == '__main__':
db = sqlite3.connect('spotify.db', isolation_level=None)
cursor = db.cursor()
cursor.execute('CREATE TABLE if not exists tokens('
'access_token text not null, '
'token_type text not null, '
'scope text not null, '
'expires_in int not null, '
'refresh_token text, '
'add_time timestamp)'
)
token = authenticate_client()
user_id = fetch_user_profile(token)
playlist_id = fetch_playlist(token, name='Automated Playlist')
song_artist_list = fetch_hot_songs()
add_to_playlist(user_id, playlist_id, song_artist_list, token)
| en | 0.897803 | Spotify's Auth Flow, a three steps process 1st Step of Auth Process 2nd Step of Auth Process Fetches Access Token from Spotify API Fetches a new access token using refresh token Fetches Auth code by making a request to Spotify OAUTH URL Saves auth code to the disk Asks the user to paste the redirect URL through any input mechanism Checks if the token is older than 1 hour Fetches the User's Spotify Profile Fetches the Playlist the user wants to be automated Scrapes a list of top songs from HotNewHipHop's Website Creates a desirable songs list by picking relevant artists Adds the songs to the playlist Check's the database for the list of already added songs and removes the song from the list if it already exists Returns a list of song uri's to add to the playlist. The list it created by searching for each track individually | 2.919765 | 3 |
src/tagger_from_scratch/data.py | soldni/tagger-from-scratch | 0 | 6622863 | from typing import Sequence, Dict, Tuple
import dataclasses
import torch
from tagger_from_scratch.config import Config
@dataclasses.dataclass
class ConllCourpusSample:
    """One CoNLL sentence: parallel lists of tokens and their POS/chunk/NER tags."""

    tokens: list = dataclasses.field(default_factory=list)
    pos: list = dataclasses.field(default_factory=list)
    con: list = dataclasses.field(default_factory=list)
    ner: list = dataclasses.field(default_factory=list)

    def append(self, token: str, pos: str, con: str, ner: str):
        """Add one labelled token to every column of the sample."""
        for column, value in ((self.tokens, token), (self.pos, pos),
                              (self.con, con), (self.ner, ner)):
            column.append(value)

    def __len__(self):
        # all four columns are kept in lockstep, so tokens is representative
        return len(self.tokens)
def load_conll_corpus(path: str) -> Sequence[ConllCourpusSample]:
    """Parse a CoNLL-2003 file into a list of sentence samples.

    Blank lines separate sentences; '-DOCSTART-' lines are skipped.
    Every remaining line must be ``token pos con ner``.

    Robustness fixes: the old code raised IndexError on an empty file
    and on files whose first content line is not preceded by a blank
    line; both cases are guarded now.

    :param path: path to the CoNLL file (UTF-8).
    :return: list of ConllCourpusSample, one per sentence.
    """
    raw_data = []
    with open(path, mode='r', encoding='utf-8') as f:
        for ln in f:
            if ln.startswith('-DOCSTART-'):
                continue
            elif ln == '\n':
                if len(raw_data) == 0 or len(raw_data[-1]) > 0:
                    # only append a new sample if the previous sample
                    # was filled up or it's the first sample
                    raw_data.append(ConllCourpusSample())
            else:
                if not raw_data:
                    # file starts with a content line rather than a blank
                    raw_data.append(ConllCourpusSample())
                token, pos, con, ner = ln.strip().split()
                raw_data[-1].append(token=token, pos=pos, con=con, ner=ner)
    if raw_data and len(raw_data[-1]) == 0:
        # drop the trailing empty sample created by a final blank line
        raw_data.pop(-1)
    return raw_data
def load_fasttext_vectors(path: str) -> Sequence[Tuple[str, torch.Tensor]]:
    """Read a fastText ``.vec`` file into (token, embedding) pairs.

    The first line (the "<vocab_size> <dim>" header) is skipped; every
    other line is a token followed by its embedding components.
    """
    pairs = []
    with open(path, mode='r', encoding='utf-8') as fh:
        next(fh)  # skip header line with vocab size and dimension
        for raw_line in fh:
            fields = raw_line.strip().split()
            vector = torch.Tensor([float(value) for value in fields[1:]])
            pairs.append((fields[0], vector))
    return pairs
class ConllTokenizer:
    """Maps CoNLL tokens and tag strings to padded integer id sequences.

    Vocabularies are built by ``train``; every vocabulary ends with the
    special ``__UNK__`` and ``__PAD__`` entries. Sequences are padded
    (or truncated) to the longest training sentence, ``max_length``.
    """

    unk_token = '__UNK__'
    pad_token = '__PAD__'

    def __init__(self):
        # one vocabulary per corpus column
        self.tokens_vocab = {}
        self.pos_vocab = {}
        self.con_vocab = {}
        self.ner_vocab = {}
        self.has_trained = False
        self.max_length = -1

    def train(self, conll_corpus: Sequence[ConllCourpusSample], fasttext_vectors: Dict[str, torch.Tensor] = None):
        """Build the four vocabularies and ``max_length`` from the corpus.

        When *fasttext_vectors* is given, token ids follow the embedding
        order and the corpus contributes no extra tokens.
        """
        if fasttext_vectors:
            self.tokens_vocab.update(
                (token, idx) for idx, (token, _) in enumerate(fasttext_vectors))
        for sample in conll_corpus:
            if not fasttext_vectors:
                for token in sample.tokens:
                    self.tokens_vocab.setdefault(token, len(self.tokens_vocab))
            for ner in sample.ner:
                self.ner_vocab.setdefault(ner, len(self.ner_vocab))
            for con in sample.con:
                self.con_vocab.setdefault(con, len(self.con_vocab))
            for pos in sample.pos:
                self.pos_vocab.setdefault(pos, len(self.pos_vocab))
            if len(sample) > self.max_length:
                self.max_length = len(sample)
        # the special entries always get the last two ids of each vocabulary
        for vocab in (self.tokens_vocab, self.pos_vocab, self.con_vocab, self.ner_vocab):
            vocab[self.unk_token] = len(vocab)
            vocab[self.pad_token] = len(vocab)
        self.has_trained = True

    def _tokenize_field(self, sample, vocab):
        """Map one column to a tuple of ids, padded/truncated to max_length."""
        fallback = vocab[self.unk_token]
        length = max(self.max_length, 0)
        padded = list(sample[:length])
        padded += [self.pad_token] * (length - len(padded))
        return tuple(vocab.get(item, fallback) for item in padded)

    def tokenize_tokens(self, conll_sample: ConllCourpusSample):
        return self._tokenize_field(conll_sample.tokens, self.tokens_vocab)

    def tokenize_pos(self, conll_sample: ConllCourpusSample):
        return self._tokenize_field(conll_sample.pos, self.pos_vocab)

    def tokenize_con(self, conll_sample: ConllCourpusSample):
        return self._tokenize_field(conll_sample.con, self.con_vocab)

    def tokenize_ner(self, conll_sample: ConllCourpusSample):
        return self._tokenize_field(conll_sample.ner, self.ner_vocab)

    @property
    def tokens_pad_id(self):
        return self.tokens_vocab[self.pad_token]

    @property
    def pos_pad_id(self):
        return self.pos_vocab[self.pad_token]

    @property
    def ner_pad_id(self):
        return self.ner_vocab[self.pad_token]

    @property
    def con_pad_id(self):
        return self.con_vocab[self.pad_token]
def make_conll_dataset(config: Config, split: str, tokenizer: ConllTokenizer = None):
    """Load one CoNLL split and tensorise it with a (possibly shared) tokenizer.

    :param config: run configuration with data paths and fasttext flags.
    :param split: one of 'train', 'test', 'valid'.
    :param tokenizer: pass the tokenizer returned for the training split
        so evaluation splits reuse its vocabulary.
    :return: (ConllDataset, ConllTokenizer) pair.

    Fixes: the old code called ``tokenizer.train`` unconditionally, so a
    tokenizer passed in for an evaluation split was re-trained -- which
    re-assigned the ``__UNK__``/``__PAD__`` entries to fresh ids beyond
    the original vocabulary size and silently grew the vocabularies.
    A tokenizer is now trained only once (tracked by ``has_trained``);
    out-of-vocabulary items in later splits map to ``__UNK__``.
    """
    assert split in {'train', 'test', 'valid'}, \
        f"Split should either 'train', 'test', or 'valid', not {split}"
    conll_corpus = load_conll_corpus(f'{config.conll_data_path}/{split}.txt')
    if tokenizer is None:
        tokenizer = ConllTokenizer()
    if not tokenizer.has_trained:
        # fasttext embeddings are only needed when building the vocabulary
        if config.use_fasttext:
            fasttext_vectors = load_fasttext_vectors(f'{config.fasttext_data_path}/{config.fasttext_emb_file}')
        else:
            fasttext_vectors = None
        tokenizer.train(conll_corpus=conll_corpus, fasttext_vectors=fasttext_vectors)
    tokens_tensor = torch.LongTensor(tuple(tokenizer.tokenize_tokens(sample) for sample in conll_corpus))
    ner_tensor = torch.LongTensor(tuple(tokenizer.tokenize_ner(sample) for sample in conll_corpus))
    pos_tensor = torch.LongTensor(tuple(tokenizer.tokenize_pos(sample) for sample in conll_corpus))
    con_tensor = torch.LongTensor(tuple(tokenizer.tokenize_con(sample) for sample in conll_corpus))
    dataset = ConllDataset(tokens_tensor=tokens_tensor,
                           ner_tensor=ner_tensor,
                           pos_tensor=pos_tensor,
                           con_tensor=con_tensor)
    return dataset, tokenizer
@dataclasses.dataclass
class ConllTensorSample:
    """A (possibly batched) tensorised CoNLL sample."""

    tokens: torch.LongTensor
    ner: torch.LongTensor
    pos: torch.LongTensor
    con: torch.LongTensor

    @classmethod
    def collate_tensor_samples(cls, seq: Sequence):
        """Stack a sequence of samples into one batched sample (new batch dim)."""
        batched = {name: torch.stack(tuple(getattr(item, name) for item in seq))
                   for name in ('tokens', 'ner', 'pos', 'con')}
        return cls(**batched)

    def to(self, device: str):
        """Return a copy with every tensor moved to *device*."""
        moved = {name: getattr(self, name).to(device) for name in self.keys()}
        return self.__class__(**moved)

    def keys(self):
        # field names in declaration order: tokens, ner, pos, con
        return (f.name for f in dataclasses.fields(self))

    def __getitem__(self, key):
        return getattr(self, key)
class ConllDataset(torch.utils.data.Dataset):
    """Map-style dataset over pre-tensorised CoNLL sentences.

    Each item is a ConllTensorSample holding one row of every tensor.
    """

    def __init__(self,
                 tokens_tensor: torch.LongTensor,
                 ner_tensor: torch.LongTensor,
                 pos_tensor: torch.LongTensor,
                 con_tensor: torch.LongTensor):
        super().__init__()
        self.tokens_tensor = tokens_tensor
        self.ner_tensor = ner_tensor
        self.pos_tensor = pos_tensor
        self.con_tensor = con_tensor

    def __len__(self):
        # all four tensors share the same leading (sentence) dimension
        return self.tokens_tensor.size(0)

    def __getitem__(self, index: int):
        return ConllTensorSample(tokens=self.tokens_tensor[index],
                                 ner=self.ner_tensor[index],
                                 pos=self.pos_tensor[index],
                                 con=self.con_tensor[index])
| from typing import Sequence, Dict, Tuple
import dataclasses
import torch
from tagger_from_scratch.config import Config
@dataclasses.dataclass
class ConllCourpusSample:
tokens: list = dataclasses.field(default_factory=lambda: [])
pos: list = dataclasses.field(default_factory=lambda: [])
con: list = dataclasses.field(default_factory=lambda: [])
ner: list = dataclasses.field(default_factory=lambda: [])
def append(self, token: str, pos: str, con: str, ner: str):
self.tokens.append(token)
self.pos.append(pos)
self.con.append(con)
self.ner.append(ner)
def __len__(self):
return len(self.tokens)
def load_conll_corpus(path: str) -> Sequence[ConllCourpusSample]:
raw_data = []
with open(path, mode='r', encoding='utf-8') as f:
for ln in f:
if ln.startswith('-DOCSTART-'):
continue
elif ln == '\n':
if len(raw_data) == 0 or len(raw_data[-1]) > 0:
# only append a new sample if the previous sample
# was filled up or it's the first sample
raw_data.append(ConllCourpusSample())
else:
token, pos, con, ner = ln.strip().split()
raw_data[-1].append(token=token, pos=pos, con=con, ner=ner)
if len(raw_data[-1]) == 0:
raw_data.pop(-1)
return raw_data
def load_fasttext_vectors(path: str) -> Sequence[Tuple[str, torch.Tensor]]:
fasttext_dict = []
with open(path, mode='r', encoding='utf-8') as f:
# skip first line, only has shape info about dim
# and size of vocab
next(f)
for ln in f:
token, *embedding_values = ln.strip().split()
embedding_values = torch.Tensor(tuple(float(e) for e in embedding_values))
fasttext_dict.append((token, embedding_values))
return fasttext_dict
class ConllTokenizer:
unk_token = '__UNK__'
pad_token = '__PAD__'
def __init__(self):
self.tokens_vocab = {}
self.pos_vocab = {}
self.con_vocab = {}
self.ner_vocab = {}
self.has_trained = False
self.max_length = -1
def train(self, conll_corpus: Sequence[ConllCourpusSample], fasttext_vectors: Dict[str, torch.Tensor] = None):
if fasttext_vectors:
self.tokens_vocab.update({token: i for i, (token, _) in enumerate(fasttext_vectors)})
for sample in conll_corpus:
if not fasttext_vectors:
for token in sample.tokens:
self.tokens_vocab.setdefault(token, len(self.tokens_vocab))
for ner in sample.ner:
self.ner_vocab.setdefault(ner, len(self.ner_vocab))
for con in sample.con:
self.con_vocab.setdefault(con, len(self.con_vocab))
for pos in sample.pos:
self.pos_vocab.setdefault(pos, len(self.pos_vocab))
self.max_length = max(self.max_length, len(sample))
for vocab in (self.tokens_vocab, self.pos_vocab, self.con_vocab, self.ner_vocab):
vocab[self.unk_token] = len(vocab)
vocab[self.pad_token] = len(vocab)
self.has_trained = True
def _tokenize_field(self, sample, vocab):
unk_token_id = vocab[self.unk_token]
ids = tuple(vocab.get(sample[i] if i < len(sample) else self.pad_token, unk_token_id)
for i in range(self.max_length))
return ids
def tokenize_tokens(self, conll_sample: ConllCourpusSample):
return self._tokenize_field(conll_sample.tokens, self.tokens_vocab)
def tokenize_pos(self, conll_sample: ConllCourpusSample):
return self._tokenize_field(conll_sample.pos, self.pos_vocab)
def tokenize_con(self, conll_sample: ConllCourpusSample):
return self._tokenize_field(conll_sample.con, self.con_vocab)
def tokenize_ner(self, conll_sample: ConllCourpusSample):
return self._tokenize_field(conll_sample.ner, self.ner_vocab)
@property
def tokens_pad_id(self):
return self.tokens_vocab[self.pad_token]
@property
def pos_pad_id(self):
return self.pos_vocab[self.pad_token]
@property
def ner_pad_id(self):
return self.ner_vocab[self.pad_token]
@property
def con_pad_id(self):
return self.con_vocab[self.pad_token]
def make_conll_dataset(config: Config, split: str, tokenizer: ConllTokenizer = None):
assert split in {'train', 'test', 'valid'}, \
f"Split should either 'train', 'test', or 'valid', not {split}"
conll_corpus = load_conll_corpus(f'{config.conll_data_path}/{split}.txt')
if not tokenizer:
tokenizer = ConllTokenizer()
if config.use_fasttext:
fasttext_vectors = load_fasttext_vectors(f'{config.fasttext_data_path}/{config.fasttext_emb_file}')
else:
fasttext_vectors = None
tokenizer.train(conll_corpus=conll_corpus, fasttext_vectors=fasttext_vectors)
tokens_tensor = torch.LongTensor(tuple(tokenizer.tokenize_tokens(sample) for sample in conll_corpus))
ner_tensor = torch.LongTensor(tuple(tokenizer.tokenize_ner(sample) for sample in conll_corpus))
pos_tensor = torch.LongTensor(tuple(tokenizer.tokenize_pos(sample) for sample in conll_corpus))
con_tensor = torch.LongTensor(tuple(tokenizer.tokenize_con(sample) for sample in conll_corpus))
dataset = ConllDataset(tokens_tensor=tokens_tensor,
ner_tensor=ner_tensor,
pos_tensor=pos_tensor,
con_tensor=con_tensor)
return dataset, tokenizer
@dataclasses.dataclass
class ConllTensorSample:
tokens: torch.LongTensor
ner: torch.LongTensor
pos: torch.LongTensor
con: torch.LongTensor
@classmethod
def collate_tensor_samples(cls, seq: Sequence):
return cls(tokens=torch.stack(tuple(elem.tokens for elem in seq)),
ner=torch.stack(tuple(elem.ner for elem in seq)),
pos=torch.stack(tuple(elem.pos for elem in seq)),
con=torch.stack(tuple(elem.con for elem in seq)))
def to(self, device: str):
return self.__class__(tokens=self.tokens.to(device),
ner=self.ner.to(device),
pos=self.pos.to(device),
con=self.con.to(device))
def keys(self):
return (f.name for f in dataclasses.fields(self))
def __getitem__(self, key):
return getattr(self, key)
class ConllDataset(torch.utils.data.Dataset):
def __init__(self,
tokens_tensor: torch.LongTensor,
ner_tensor: torch.LongTensor,
pos_tensor: torch.LongTensor,
con_tensor: torch.LongTensor):
self.tokens_tensor = tokens_tensor
self.ner_tensor = ner_tensor
self.pos_tensor = pos_tensor
self.con_tensor = con_tensor
super().__init__()
def __len__(self):
return self.tokens_tensor.size(0)
def __getitem__(self, index: int):
return ConllTensorSample(tokens=self.tokens_tensor[index],
ner=self.ner_tensor[index],
pos=self.pos_tensor[index],
con=self.con_tensor[index])
| en | 0.970676 | # only append a new sample if the previous sample # was filled up or it's the first sample # skip first line, only has shape info about dim # and size of vocab | 2.417305 | 2 |
PSO2LogReader.pyw | yumimint/PSO2LogReader | 0 | 6622864 | import os
import pathlib
from app.gmain import main
os.chdir(pathlib.Path(__file__).parent)
main()
| import os
import pathlib
from app.gmain import main
os.chdir(pathlib.Path(__file__).parent)
main()
| none | 1 | 1.439104 | 1 | |
code/mcmc.py | mjvakili/alpha-forecast | 0 | 6622865 | from linear_bias import linear_model, shear_extractor
import os
import sys
import numpy as np
import emcee
from numpy.linalg import solve
import h5py
def lnPost(theta, **kwargs):
def lnprior(theta, **kwargs):
'''log prior
'''
obs = kwargs['data']
obs_cov = kwargs['data_cov']
kwargs.pop('data', None)
kwargs.pop('data_cov', None)
prior_min = 0.2
prior_max = 4.0
if (prior_min < theta[0] < prior_max):
return 0.0
else:
return -np.inf
def lnlike(theta, **kwargs):
'''log likelihood
'''
obs = kwargs['data']
obs_cov = kwargs['data_cov']
kwargs.pop('data', None)
kwargs.pop('data_cov', None)
# Likelihood
model_obs = generator(theta)
res = model_obs - obs
neg_chisq = -0.5 * np.sum(np.dot(res , np.linalg.solve(obs_cov , res)))
#print("neg_chi_tot" , neg_chisq*2./len(res))
return neg_chisq
lp = lnprior(theta , **kwargs)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, **kwargs)
def mcmc_mpi(Nwalkers, Niters, zmin, incomp, shape_noise, chain_file_name):
'''
Parameters
-----------
- Nwalker :
Number of walkers
- Nchains :
Number of MCMC chains
'''
shear_data = shear_extractor(zmin, incomp, shape_noise)
Ndim = 1
random_guess = np.array([1.2])
#initializing the positions of the walkers
pos0 = np.repeat(random_guess, Nwalkers).reshape(Ndim, Nwalkers).T + \
1.e-4 * np.random.randn(Ndim * Nwalkers).reshape(Nwalkers, Ndim)
print("initial positions of the walkers = ", pos0)
#setting up the data kwargs to be passed to log-likelihood
data_kwargs = { 'data': shear_data["xi"],
'data_cov': shear_data["total_cov"]}
#setting up the MCMC sampler
sampler = emcee.EnsembleSampler(Nwalkers, Ndim, lnPost, kwargs = data_kwargs)
sampler.sample(pos0, iterations = Niters)
cnt = 0
for result in sampler.sample(pos0, iterations = Niters):
position = list(result)[0]
sample_file = h5py.File(chain_file_name)
sample_file["mcmc"][cnt] = position
sample_file.close()
print(cnt)
cnt += 1
pass
return None
def chain_fname(zmin, incomp):
chain_file_name = 'mcmc_bias_wtheta_zmin_'+str(zmin)+'_incompleteness_'+str(incomp)+'.hdf5'
sample_file = h5py.File(chain_file_name , 'w')
sample_file.create_dataset("mcmc",(Niters, Nwalkers, 1), data = np.zeros((Niters, Nwalkers , 1)))
sample_file.close()
return chain_file_name
if __name__=="__main__":
Nwalkers = int(sys.argv[1])
print('N walkers = ', Nwalkers)
Niters = int(sys.argv[2])
print('N iterations = ', Niters)
zmin = np.float(sys.argv[3])
print('zmin = ', np.float(zmin))
#setting the l-range for integration
lmin, lmax, nl = 10, 10000, 1000
#incompleteness
incomp = True
#shape noise
shape_noise = 0.3
# assumed cosmo model
cosmo_dict = {"Omega_m" : 0.319, "Omega_b" : 0.04,
"sigma8" : 0.83, "h" : 0.67, "n_s" : 0.96}
#linear bias model on top of the nonlinear cosmo model
generator = linear_model(zmin, lmin, lmax, nl, incomp, cosmo_dict)
#MCMC output initialization
chain_file_name = chain_fname(zmin, incomp)
#run the MCMC
mcmc_mpi(Nwalkers, Niters, zmin, incomp, shape_noise, chain_file_name)
| from linear_bias import linear_model, shear_extractor
import os
import sys
import numpy as np
import emcee
from numpy.linalg import solve
import h5py
def lnPost(theta, **kwargs):
def lnprior(theta, **kwargs):
'''log prior
'''
obs = kwargs['data']
obs_cov = kwargs['data_cov']
kwargs.pop('data', None)
kwargs.pop('data_cov', None)
prior_min = 0.2
prior_max = 4.0
if (prior_min < theta[0] < prior_max):
return 0.0
else:
return -np.inf
def lnlike(theta, **kwargs):
'''log likelihood
'''
obs = kwargs['data']
obs_cov = kwargs['data_cov']
kwargs.pop('data', None)
kwargs.pop('data_cov', None)
# Likelihood
model_obs = generator(theta)
res = model_obs - obs
neg_chisq = -0.5 * np.sum(np.dot(res , np.linalg.solve(obs_cov , res)))
#print("neg_chi_tot" , neg_chisq*2./len(res))
return neg_chisq
lp = lnprior(theta , **kwargs)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, **kwargs)
def mcmc_mpi(Nwalkers, Niters, zmin, incomp, shape_noise, chain_file_name):
'''
Parameters
-----------
- Nwalker :
Number of walkers
- Nchains :
Number of MCMC chains
'''
shear_data = shear_extractor(zmin, incomp, shape_noise)
Ndim = 1
random_guess = np.array([1.2])
#initializing the positions of the walkers
pos0 = np.repeat(random_guess, Nwalkers).reshape(Ndim, Nwalkers).T + \
1.e-4 * np.random.randn(Ndim * Nwalkers).reshape(Nwalkers, Ndim)
print("initial positions of the walkers = ", pos0)
#setting up the data kwargs to be passed to log-likelihood
data_kwargs = { 'data': shear_data["xi"],
'data_cov': shear_data["total_cov"]}
#setting up the MCMC sampler
sampler = emcee.EnsembleSampler(Nwalkers, Ndim, lnPost, kwargs = data_kwargs)
sampler.sample(pos0, iterations = Niters)
cnt = 0
for result in sampler.sample(pos0, iterations = Niters):
position = list(result)[0]
sample_file = h5py.File(chain_file_name)
sample_file["mcmc"][cnt] = position
sample_file.close()
print(cnt)
cnt += 1
pass
return None
def chain_fname(zmin, incomp):
chain_file_name = 'mcmc_bias_wtheta_zmin_'+str(zmin)+'_incompleteness_'+str(incomp)+'.hdf5'
sample_file = h5py.File(chain_file_name , 'w')
sample_file.create_dataset("mcmc",(Niters, Nwalkers, 1), data = np.zeros((Niters, Nwalkers , 1)))
sample_file.close()
return chain_file_name
if __name__=="__main__":
Nwalkers = int(sys.argv[1])
print('N walkers = ', Nwalkers)
Niters = int(sys.argv[2])
print('N iterations = ', Niters)
zmin = np.float(sys.argv[3])
print('zmin = ', np.float(zmin))
#setting the l-range for integration
lmin, lmax, nl = 10, 10000, 1000
#incompleteness
incomp = True
#shape noise
shape_noise = 0.3
# assumed cosmo model
cosmo_dict = {"Omega_m" : 0.319, "Omega_b" : 0.04,
"sigma8" : 0.83, "h" : 0.67, "n_s" : 0.96}
#linear bias model on top of the nonlinear cosmo model
generator = linear_model(zmin, lmin, lmax, nl, incomp, cosmo_dict)
#MCMC output initialization
chain_file_name = chain_fname(zmin, incomp)
#run the MCMC
mcmc_mpi(Nwalkers, Niters, zmin, incomp, shape_noise, chain_file_name)
| en | 0.566646 | log prior log likelihood # Likelihood #print("neg_chi_tot" , neg_chisq*2./len(res)) Parameters ----------- - Nwalker : Number of walkers - Nchains : Number of MCMC chains #initializing the positions of the walkers #setting up the data kwargs to be passed to log-likelihood #setting up the MCMC sampler #setting the l-range for integration #incompleteness #shape noise # assumed cosmo model #linear bias model on top of the nonlinear cosmo model #MCMC output initialization #run the MCMC | 1.979949 | 2 |
hexrd/ui/calibration/auto/powder_runner.py | HEXRD/hexrdgui | 13 | 6622866 | import traceback
import numpy as np
from PySide2.QtCore import QObject, QTimer, Qt, Signal
from PySide2.QtWidgets import QCheckBox, QMessageBox
from hexrd.ui.async_runner import AsyncRunner
from hexrd.ui.create_hedm_instrument import create_hedm_instrument
from hexrd.ui.constants import OverlayType
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.overlays import default_overlay_refinements
from hexrd.ui.utils import instr_to_internal_dict
from hexrd.ui.calibration.auto import (
InstrumentCalibrator,
PowderCalibrationDialog,
PowderCalibrator,
)
class PowderRunner(QObject):
finished = Signal()
def __init__(self, parent=None):
super().__init__(parent)
self.parent = parent
self.async_runner = AsyncRunner(parent)
def clear(self):
self.remove_lines()
if hasattr(self, '_ask_if_lines_are_acceptable_box'):
# Remove the box if it is still there...
self._ask_if_lines_are_acceptable_box.reject()
del self._ask_if_lines_are_acceptable_box
def run(self):
try:
self.validate()
self._run()
except Exception as e:
QMessageBox.critical(self.parent, 'HEXRD', f'Error: {e}')
traceback.print_exc()
def validate(self):
overlays = self.visible_powder_overlays
if len(overlays) != 1:
raise Exception('There must be exactly one visible powder overlay')
if np.count_nonzero(self.refinement_flags) == 0:
raise Exception('There are no refinable parameters')
def _run(self):
# First, have the user pick some options
if not PowderCalibrationDialog(self.material, self.parent).exec_():
# User canceled...
return
# The options they chose are saved here
options = HexrdConfig().config['calibration']['powder']
self.instr = create_hedm_instrument()
# Assume there is only one image in each image series for now...
img_dict = {k: x[0] for k, x in HexrdConfig().imageseries_dict.items()}
statuses = self.refinement_flags_without_overlays
self.cf = statuses
self.instr.calibration_flags = statuses
kwargs = {
'instr': self.instr,
'plane_data': self.material.planeData,
'img_dict': img_dict,
'flags': self.refinement_flags,
'eta_tol': options['eta_tol'],
'pktype': options['pk_type'],
}
self.pc = PowderCalibrator(**kwargs)
self.ic = InstrumentCalibrator(self.pc)
self.extract_powder_lines()
def extract_powder_lines(self):
self.async_runner.progress_title = 'Auto picking points...'
self.async_runner.success_callback = self.extract_powder_lines_finished
self.async_runner.run(self.run_extract_powder_lines)
def run_extract_powder_lines(self):
options = HexrdConfig().config['calibration']['powder']
kwargs = {
'fit_tth_tol': options['fit_tth_tol'],
'int_cutoff': options['int_cutoff'],
}
# FIXME: currently coded to handle only a single material
# so grabbing first (only) element
self.data_dict = self.ic.extract_points(**kwargs)[0]
def extract_powder_lines_finished(self):
try:
self.draw_lines()
self.ask_if_lines_are_acceptable()
except Exception:
self.remove_lines()
raise
@property
def data_xys(self):
ret = {}
for k, v in self.data_dict.items():
if len(v) == 0:
v = np.empty((0, 2))
else:
v = np.vstack(v)[:, :2]
ret[k] = v
return ret
def show_lines(self, b):
self.draw_lines() if b else self.remove_lines()
def draw_lines(self):
HexrdConfig().auto_picked_data = self.data_xys
def remove_lines(self):
HexrdConfig().auto_picked_data = None
def ask_if_lines_are_acceptable(self):
msg = 'Perform calibration with the points drawn?'
standard_buttons = QMessageBox.StandardButton
buttons = standard_buttons.Yes | standard_buttons.No
box = QMessageBox(QMessageBox.Question, 'HEXRD', msg, buttons,
self.parent)
box.setWindowModality(Qt.NonModal)
# Add a checkbox
cb = QCheckBox('Show auto picks?')
cb.setStyleSheet('margin-left:50%; margin-right:50%;')
cb.setChecked(True)
cb.toggled.connect(self.show_lines)
box.setCheckBox(cb)
# We must show() in the GUI thread, or on Mac, the dialog
# will appear behind the main window...
QTimer.singleShot(0, lambda: box.show())
box.finished.connect(self.remove_lines)
box.accepted.connect(self.lines_accepted)
self._show_auto_picks_check_box = cb
self._ask_if_lines_are_acceptable_box = box
def lines_accepted(self):
# If accepted, run it
self.async_runner.progress_title = 'Running calibration...'
self.async_runner.success_callback = self.update_config
self.async_runner.run(self.run_calibration)
def run_calibration(self):
options = HexrdConfig().config['calibration']['powder']
x0 = self.ic.reduced_params
kwargs = {
'conv_tol': options['conv_tol'],
'fit_tth_tol': options['fit_tth_tol'],
'int_cutoff': options['int_cutoff'],
'max_iter': options['max_iter'],
'use_robust_optimization': options['use_robust_optimization'],
}
x1 = self.ic.run_calibration(**kwargs)
results_message = 'Calibration Results:\n'
for params in np.vstack([x0, x1]).T:
results_message += f'{params[0]:6.3e}--->{params[1]:6.3e}\n'
print(results_message)
self.results_message = results_message
def update_config(self):
msg = 'Optimization successful!'
msg_box = QMessageBox(QMessageBox.Information, 'HEXRD', msg)
msg_box.setDetailedText(self.results_message)
msg_box.exec_()
output_dict = instr_to_internal_dict(self.instr)
# Save the previous iconfig to restore the statuses
prev_iconfig = HexrdConfig().config['instrument']
# Update the config
HexrdConfig().config['instrument'] = output_dict
# This adds in any missing keys. In particular, it is going to
# add in any "None" detector distortions
HexrdConfig().set_detector_defaults_if_missing()
# Add status values
HexrdConfig().add_status(output_dict)
# Set the previous statuses to be the current statuses
HexrdConfig().set_statuses_from_prev_iconfig(prev_iconfig)
# the other parameters
if np.any(self.ic.flags[self.ic.npi:]):
# this means we asked to refine lattice parameters
# FIXME: currently, there is only 1 phase/calibrator allowed, so
# this array is the reduce lattice parameter set.
refined_lattice_params = self.ic.full_params[self.ic.npi:]
self.material.latticeParameters = refined_lattice_params
# Tell GUI that the overlays need to be re-computed
HexrdConfig().flag_overlay_updates_for_material(self.material.name)
# update the materials panel
if self.material is HexrdConfig().active_material:
HexrdConfig().active_material_modified.emit()
# redraw updated overlays
HexrdConfig().overlay_config_changed.emit()
self.finished.emit()
@property
def overlays(self):
return HexrdConfig().overlays
@property
def visible_overlays(self):
return [x for x in self.overlays if x['visible']]
@property
def visible_powder_overlays(self):
overlays = self.visible_overlays
return [x for x in overlays if x['type'] == OverlayType.powder]
@property
def active_overlay(self):
overlays = self.visible_powder_overlays
return overlays[0] if overlays else None
@property
def material(self):
overlay = self.active_overlay
return HexrdConfig().material(overlay['material']) if overlay else None
@property
def active_overlay_refinements(self):
return [x[1] for x in self.overlay_refinements(self.active_overlay)]
def overlay_refinements(self, overlay):
refinements = overlay.get('refinements')
if refinements is None:
refinements = default_overlay_refinements(overlay)
return refinements
@property
def refinement_flags_without_overlays(self):
return HexrdConfig().get_statuses_instrument_format()
@property
def refinement_flags(self):
return np.hstack([self.refinement_flags_without_overlays,
self.active_overlay_refinements])
| import traceback
import numpy as np
from PySide2.QtCore import QObject, QTimer, Qt, Signal
from PySide2.QtWidgets import QCheckBox, QMessageBox
from hexrd.ui.async_runner import AsyncRunner
from hexrd.ui.create_hedm_instrument import create_hedm_instrument
from hexrd.ui.constants import OverlayType
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.overlays import default_overlay_refinements
from hexrd.ui.utils import instr_to_internal_dict
from hexrd.ui.calibration.auto import (
InstrumentCalibrator,
PowderCalibrationDialog,
PowderCalibrator,
)
class PowderRunner(QObject):
finished = Signal()
def __init__(self, parent=None):
super().__init__(parent)
self.parent = parent
self.async_runner = AsyncRunner(parent)
def clear(self):
self.remove_lines()
if hasattr(self, '_ask_if_lines_are_acceptable_box'):
# Remove the box if it is still there...
self._ask_if_lines_are_acceptable_box.reject()
del self._ask_if_lines_are_acceptable_box
def run(self):
try:
self.validate()
self._run()
except Exception as e:
QMessageBox.critical(self.parent, 'HEXRD', f'Error: {e}')
traceback.print_exc()
def validate(self):
overlays = self.visible_powder_overlays
if len(overlays) != 1:
raise Exception('There must be exactly one visible powder overlay')
if np.count_nonzero(self.refinement_flags) == 0:
raise Exception('There are no refinable parameters')
def _run(self):
# First, have the user pick some options
if not PowderCalibrationDialog(self.material, self.parent).exec_():
# User canceled...
return
# The options they chose are saved here
options = HexrdConfig().config['calibration']['powder']
self.instr = create_hedm_instrument()
# Assume there is only one image in each image series for now...
img_dict = {k: x[0] for k, x in HexrdConfig().imageseries_dict.items()}
statuses = self.refinement_flags_without_overlays
self.cf = statuses
self.instr.calibration_flags = statuses
kwargs = {
'instr': self.instr,
'plane_data': self.material.planeData,
'img_dict': img_dict,
'flags': self.refinement_flags,
'eta_tol': options['eta_tol'],
'pktype': options['pk_type'],
}
self.pc = PowderCalibrator(**kwargs)
self.ic = InstrumentCalibrator(self.pc)
self.extract_powder_lines()
def extract_powder_lines(self):
self.async_runner.progress_title = 'Auto picking points...'
self.async_runner.success_callback = self.extract_powder_lines_finished
self.async_runner.run(self.run_extract_powder_lines)
def run_extract_powder_lines(self):
options = HexrdConfig().config['calibration']['powder']
kwargs = {
'fit_tth_tol': options['fit_tth_tol'],
'int_cutoff': options['int_cutoff'],
}
# FIXME: currently coded to handle only a single material
# so grabbing first (only) element
self.data_dict = self.ic.extract_points(**kwargs)[0]
def extract_powder_lines_finished(self):
try:
self.draw_lines()
self.ask_if_lines_are_acceptable()
except Exception:
self.remove_lines()
raise
@property
def data_xys(self):
ret = {}
for k, v in self.data_dict.items():
if len(v) == 0:
v = np.empty((0, 2))
else:
v = np.vstack(v)[:, :2]
ret[k] = v
return ret
def show_lines(self, b):
self.draw_lines() if b else self.remove_lines()
def draw_lines(self):
HexrdConfig().auto_picked_data = self.data_xys
def remove_lines(self):
HexrdConfig().auto_picked_data = None
def ask_if_lines_are_acceptable(self):
msg = 'Perform calibration with the points drawn?'
standard_buttons = QMessageBox.StandardButton
buttons = standard_buttons.Yes | standard_buttons.No
box = QMessageBox(QMessageBox.Question, 'HEXRD', msg, buttons,
self.parent)
box.setWindowModality(Qt.NonModal)
# Add a checkbox
cb = QCheckBox('Show auto picks?')
cb.setStyleSheet('margin-left:50%; margin-right:50%;')
cb.setChecked(True)
cb.toggled.connect(self.show_lines)
box.setCheckBox(cb)
# We must show() in the GUI thread, or on Mac, the dialog
# will appear behind the main window...
QTimer.singleShot(0, lambda: box.show())
box.finished.connect(self.remove_lines)
box.accepted.connect(self.lines_accepted)
self._show_auto_picks_check_box = cb
self._ask_if_lines_are_acceptable_box = box
def lines_accepted(self):
# If accepted, run it
self.async_runner.progress_title = 'Running calibration...'
self.async_runner.success_callback = self.update_config
self.async_runner.run(self.run_calibration)
def run_calibration(self):
options = HexrdConfig().config['calibration']['powder']
x0 = self.ic.reduced_params
kwargs = {
'conv_tol': options['conv_tol'],
'fit_tth_tol': options['fit_tth_tol'],
'int_cutoff': options['int_cutoff'],
'max_iter': options['max_iter'],
'use_robust_optimization': options['use_robust_optimization'],
}
x1 = self.ic.run_calibration(**kwargs)
results_message = 'Calibration Results:\n'
for params in np.vstack([x0, x1]).T:
results_message += f'{params[0]:6.3e}--->{params[1]:6.3e}\n'
print(results_message)
self.results_message = results_message
def update_config(self):
msg = 'Optimization successful!'
msg_box = QMessageBox(QMessageBox.Information, 'HEXRD', msg)
msg_box.setDetailedText(self.results_message)
msg_box.exec_()
output_dict = instr_to_internal_dict(self.instr)
# Save the previous iconfig to restore the statuses
prev_iconfig = HexrdConfig().config['instrument']
# Update the config
HexrdConfig().config['instrument'] = output_dict
# This adds in any missing keys. In particular, it is going to
# add in any "None" detector distortions
HexrdConfig().set_detector_defaults_if_missing()
# Add status values
HexrdConfig().add_status(output_dict)
# Set the previous statuses to be the current statuses
HexrdConfig().set_statuses_from_prev_iconfig(prev_iconfig)
# the other parameters
if np.any(self.ic.flags[self.ic.npi:]):
# this means we asked to refine lattice parameters
# FIXME: currently, there is only 1 phase/calibrator allowed, so
# this array is the reduce lattice parameter set.
refined_lattice_params = self.ic.full_params[self.ic.npi:]
self.material.latticeParameters = refined_lattice_params
# Tell GUI that the overlays need to be re-computed
HexrdConfig().flag_overlay_updates_for_material(self.material.name)
# update the materials panel
if self.material is HexrdConfig().active_material:
HexrdConfig().active_material_modified.emit()
# redraw updated overlays
HexrdConfig().overlay_config_changed.emit()
self.finished.emit()
@property
def overlays(self):
return HexrdConfig().overlays
@property
def visible_overlays(self):
return [x for x in self.overlays if x['visible']]
@property
def visible_powder_overlays(self):
overlays = self.visible_overlays
return [x for x in overlays if x['type'] == OverlayType.powder]
@property
def active_overlay(self):
overlays = self.visible_powder_overlays
return overlays[0] if overlays else None
@property
def material(self):
overlay = self.active_overlay
return HexrdConfig().material(overlay['material']) if overlay else None
@property
def active_overlay_refinements(self):
return [x[1] for x in self.overlay_refinements(self.active_overlay)]
def overlay_refinements(self, overlay):
refinements = overlay.get('refinements')
if refinements is None:
refinements = default_overlay_refinements(overlay)
return refinements
@property
def refinement_flags_without_overlays(self):
return HexrdConfig().get_statuses_instrument_format()
@property
def refinement_flags(self):
return np.hstack([self.refinement_flags_without_overlays,
self.active_overlay_refinements])
| en | 0.826788 | # Remove the box if it is still there... # First, have the user pick some options # User canceled... # The options they chose are saved here # Assume there is only one image in each image series for now... # FIXME: currently coded to handle only a single material # so grabbing first (only) element # Add a checkbox # We must show() in the GUI thread, or on Mac, the dialog # will appear behind the main window... # If accepted, run it # Save the previous iconfig to restore the statuses # Update the config # This adds in any missing keys. In particular, it is going to # add in any "None" detector distortions # Add status values # Set the previous statuses to be the current statuses # the other parameters # this means we asked to refine lattice parameters # FIXME: currently, there is only 1 phase/calibrator allowed, so # this array is the reduce lattice parameter set. # Tell GUI that the overlays need to be re-computed # update the materials panel # redraw updated overlays | 1.837643 | 2 |
pip_services_runtime/build/DefaultFactory.py | pip-services-archive/pip-services-runtime-python | 0 | 6622867 | # -*- coding: utf-8 -*-
"""
pip_services_runtime.build.DefaultFactory
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Default component factory implementation
:copyright: Digital Living Software Corp. 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from .ComponentFactory import ComponentFactory
from ..logs.NullLogger import NullLogger
from ..logs.ConsoleLogger import ConsoleLogger
from ..counters.NullCounters import NullCounters
from ..counters.LogCounters import LogCounters
from ..cache.NullCache import NullCache
from ..cache.MemoryCache import MemoryCache
from ..boot.FileBootConfig import FileBootConfig
class DefaultFactory(ComponentFactory):
"""
Component factory that contains registrations of standard runtime components.
This factory is typically used as a base for microservice factories.
"""
def __init__(self):
"""
Creates an instance of default factory with standard runtime components
"""
super(DefaultFactory, self).__init__()
self.register(NullLogger.Descriptor, NullLogger)
self.register(ConsoleLogger.Descriptor, ConsoleLogger)
self.register(NullCounters.Descriptor, NullCounters)
self.register(LogCounters.Descriptor, LogCounters)
self.register(NullCache.Descriptor, NullCache)
self.register(MemoryCache.Descriptor, MemoryCache)
self.register(FileBootConfig.Descriptor, FileBootConfig)
DefaultFactory.Instance = DefaultFactory()
"""
The instance of default factory
"""
| # -*- coding: utf-8 -*-
"""
pip_services_runtime.build.DefaultFactory
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Default component factory implementation
:copyright: Digital Living Software Corp. 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from .ComponentFactory import ComponentFactory
from ..logs.NullLogger import NullLogger
from ..logs.ConsoleLogger import ConsoleLogger
from ..counters.NullCounters import NullCounters
from ..counters.LogCounters import LogCounters
from ..cache.NullCache import NullCache
from ..cache.MemoryCache import MemoryCache
from ..boot.FileBootConfig import FileBootConfig
class DefaultFactory(ComponentFactory):
"""
Component factory that contains registrations of standard runtime components.
This factory is typically used as a base for microservice factories.
"""
def __init__(self):
"""
Creates an instance of default factory with standard runtime components
"""
super(DefaultFactory, self).__init__()
self.register(NullLogger.Descriptor, NullLogger)
self.register(ConsoleLogger.Descriptor, ConsoleLogger)
self.register(NullCounters.Descriptor, NullCounters)
self.register(LogCounters.Descriptor, LogCounters)
self.register(NullCache.Descriptor, NullCache)
self.register(MemoryCache.Descriptor, MemoryCache)
self.register(FileBootConfig.Descriptor, FileBootConfig)
DefaultFactory.Instance = DefaultFactory()
"""
The instance of default factory
"""
| en | 0.723415 | # -*- coding: utf-8 -*- pip_services_runtime.build.DefaultFactory ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default component factory implementation :copyright: Digital Living Software Corp. 2015-2016, see AUTHORS for more details. :license: MIT, see LICENSE for more details. Component factory that contains registrations of standard runtime components. This factory is typically used as a base for microservice factories. Creates an instance of default factory with standard runtime components The instance of default factory | 2.203776 | 2 |
src/Tuple/unpackTuple.py | mikeludemann/helperFunctions_Python | 0 | 6622868 | x = 4, 17, 31
print(x)
numberOne, numberTwo, numberThree = x
print(numberOne + numberTwo + numberThree) | x = 4, 17, 31
print(x)
numberOne, numberTwo, numberThree = x
print(numberOne + numberTwo + numberThree) | none | 1 | 2.949842 | 3 | |
Desafios/Desafio102.py | julianascimentosantos/cursoemvideo-python3 | 0 | 6622869 | def fatorial(num, show=False):
"""
-> Calcula o Fatorial de um número.
:param num: O numero a ser calculado.
:param show: (opcional) Mostrar ou não a conta
:return: O valor do fatorial de um numero num.
"""
f = 1
for n in range(num, 0, -1):
f *= n
if show:
print(n, end='')
if n > 1:
print(' x ', end='')
else:
print(' = ', end='')
return f
#Programa Principal
print(fatorial(4, show=True)) | def fatorial(num, show=False):
"""
-> Calcula o Fatorial de um número.
:param num: O numero a ser calculado.
:param show: (opcional) Mostrar ou não a conta
:return: O valor do fatorial de um numero num.
"""
f = 1
for n in range(num, 0, -1):
f *= n
if show:
print(n, end='')
if n > 1:
print(' x ', end='')
else:
print(' = ', end='')
return f
#Programa Principal
print(fatorial(4, show=True)) | pt | 0.92676 | -> Calcula o Fatorial de um número. :param num: O numero a ser calculado. :param show: (opcional) Mostrar ou não a conta :return: O valor do fatorial de um numero num. #Programa Principal | 3.745573 | 4 |
serial_temp/serial_temp.py | nathanwiens/sdk-samples | 44 | 6622870 | import time
import serial
import paho.mqtt.client as mqtt
import json
from csclient import EventingCSClient
cp = EventingCSClient('serial_temp')
broker_address = '127.0.0.01'
port = '/dev/ttyUSB0'
speed = 9600
my_sim = 'mdm-e152d8b2' # Change to your SIM slot UID
class Timeout(Exception):
pass
def has_t1t2(chunks):
return len(chunks) > 2 and '1.' in chunks[0] and '2.' in chunks[1]
def parse_temp(temp_str):
dotpos = temp_str.find('.')
if dotpos:
return float(temp_str[dotpos+1:-1])
else:
return None
def modem_state(cp, state, sim):
# Blocking call that will wait until a given state is shown as the modem's status
timeout_counter = 0
sleep_seconds = 0
conn_path = '%s/%s/status/connection_state' % ('status/wan/devices', sim)
cp.log(f"modem_state waiting sim={sim} state={state}")
while True:
sleep_seconds += 5
conn_state = cp.get(conn_path).get('data', '')
# TODO add checking for error states
cp.log(f'waiting for state={state} on sim={sim} curr state={conn_state}')
if conn_state == state:
break
if timeout_counter > 600:
cp.log(f"timeout waiting on sim={sim}")
raise Timeout(conn_path)
time.sleep(min(sleep_seconds, 45))
timeout_counter += min(sleep_seconds, 45)
cp.log(f"sim={sim} connected")
return True
def data_logger_to_mqtt_reader():
client = mqtt.Client("Datalogger2Mqtt", protocol=mqtt.MQTTv311) # create new instance
try:
client.connect(broker_address, port=9898) # connect to broker
except ConnectionRefusedError:
return
try:
with serial.Serial('/dev/ttyUSB0', 9600, timeout=1) as ser:
while True:
line = ser.readline()
chunks = line.decode("utf-8").split(",")
if chunks and has_t1t2(chunks):
d1temp = parse_temp(chunks[0])
d2temp = parse_temp(chunks[1])
data = {"d1temp": d1temp, "d2temp": d2temp}
client.publish("measurement/", json.dumps(data))
except Exception as e:
cp.log(f"Exception is {e}")
finally:
client.disconnect()
if modem_state(cp, 'connected', my_sim):
data_logger_to_mqtt_reader()
| import time
import serial
import paho.mqtt.client as mqtt
import json
from csclient import EventingCSClient
cp = EventingCSClient('serial_temp')
broker_address = '127.0.0.01'
port = '/dev/ttyUSB0'
speed = 9600
my_sim = 'mdm-e152d8b2' # Change to your SIM slot UID
class Timeout(Exception):
pass
def has_t1t2(chunks):
return len(chunks) > 2 and '1.' in chunks[0] and '2.' in chunks[1]
def parse_temp(temp_str):
dotpos = temp_str.find('.')
if dotpos:
return float(temp_str[dotpos+1:-1])
else:
return None
def modem_state(cp, state, sim):
# Blocking call that will wait until a given state is shown as the modem's status
timeout_counter = 0
sleep_seconds = 0
conn_path = '%s/%s/status/connection_state' % ('status/wan/devices', sim)
cp.log(f"modem_state waiting sim={sim} state={state}")
while True:
sleep_seconds += 5
conn_state = cp.get(conn_path).get('data', '')
# TODO add checking for error states
cp.log(f'waiting for state={state} on sim={sim} curr state={conn_state}')
if conn_state == state:
break
if timeout_counter > 600:
cp.log(f"timeout waiting on sim={sim}")
raise Timeout(conn_path)
time.sleep(min(sleep_seconds, 45))
timeout_counter += min(sleep_seconds, 45)
cp.log(f"sim={sim} connected")
return True
def data_logger_to_mqtt_reader():
client = mqtt.Client("Datalogger2Mqtt", protocol=mqtt.MQTTv311) # create new instance
try:
client.connect(broker_address, port=9898) # connect to broker
except ConnectionRefusedError:
return
try:
with serial.Serial('/dev/ttyUSB0', 9600, timeout=1) as ser:
while True:
line = ser.readline()
chunks = line.decode("utf-8").split(",")
if chunks and has_t1t2(chunks):
d1temp = parse_temp(chunks[0])
d2temp = parse_temp(chunks[1])
data = {"d1temp": d1temp, "d2temp": d2temp}
client.publish("measurement/", json.dumps(data))
except Exception as e:
cp.log(f"Exception is {e}")
finally:
client.disconnect()
if modem_state(cp, 'connected', my_sim):
data_logger_to_mqtt_reader()
| en | 0.841226 | # Change to your SIM slot UID # Blocking call that will wait until a given state is shown as the modem's status # TODO add checking for error states # create new instance # connect to broker | 2.750875 | 3 |
Tools/Jinja2/Jinja2-2.5/build.py | rtobar/askapsoft | 1 | 6622871 | from askapdev.rbuild.builders import Setuptools as Builder
# Build/install the bundled Jinja2 2.5 source archive via the ASKAP
# setuptools builder wrapper.
builder = Builder()
builder.remote_archive = "Jinja2-2.5.tar.gz"
builder.build()
| from askapdev.rbuild.builders import Setuptools as Builder
builder = Builder()
builder.remote_archive = "Jinja2-2.5.tar.gz"
builder.build()
| none | 1 | 1.334567 | 1 | |
scripts/data_manipulation/build_datasets.py | GuyBaele/sars-cov-2-belgium | 0 | 6622872 | <gh_stars>0
"""build_datasets.py
"""
from __future__ import print_function
import datetime as dt
import json
# import multiprocessing as mp
import os
import random
import subprocess
import sys
import numpy as np
import pandas as pd
from argh import dispatch_command # type: ignore
from Bio import SeqIO # type: ignore
from redis_cache import cache_it # type: ignore
from tqdm import tqdm # type: ignore
CACHE_HOURS = 3
def main() -> None:
    """The main process to follow for incorporating metadata files
    """
    # There should be two files that come from GISAID:
    # 1) A dated metadata tsv
    # 2) A dated sequences fasta
    # These files can be found throught:
    # GISAID
    # -> EpiCoV
    # -> Downloads
    # -> Genomic epidemiology
    # -> "FASTA" and "metadata" links
    # After being downloaded and extracted with `gunzip`
    # they can be renamed/relocated to the paths shown below
    gisaid_metadata = "data/metadata.tsv"
    gisaid_fasta = "data/sequences.fasta"
    # We expect to have a directory full of data (both sequence and metadata)
    # which is not on GISAID
    non_gisaid_dir = "data/non_gisaid"
    # Define names of the updated sequence and metadata files
    # that we want at the end of the pipeline
    OUTPUT_FASTA = "data/ALL_SEQUENCES.fasta"
    OUTPUT_META_FNAME = "data/ALL_METADATA.tsv"
    sequence_names = read_all_sequence_lists()
    exclude_names = read_excludes()
    ##################
    #  Main process  #
    ##################
    # First, concatenate all the fasta files into one master fasta
    # This gives us two outputs:
    # recordIDs: set of all the record names (i.e. fasta headers)
    #            that are included in the dataset
    # records: dictionary mapping the recordIDs to their associated sequence
    # TODO: Change this to be just sequence length
    #       since that is all we really need
    (recordIDs, records) = concat_and_write_fasta(
        gisaid_fasta, non_gisaid_dir, OUTPUT_FASTA, sequence_names, exclude_names
    )
    print(f"recordIDs: {len(recordIDs)}")
    print(f"records: {len(records)}")
    # (recordIDs, records) = bypass_fasta_prep(OUTPUT_FASTA)
    # Second, concatenate all the associated metadata
    # This is a bit of a mess
    concat_and_write_metadata(
        gisaid_metadata, non_gisaid_dir, OUTPUT_META_FNAME, recordIDs, records
    )
def bypass_fasta_prep(fastaFile):
    """Re-read an already-built master fasta instead of rebuilding it.

    Debug helper: returns the same (recordIDs, {id: SeqRecord}) pair as
    concat_and_write_fasta, saving a lot of time during testing.
    """
    recIDs = set()
    recDict = {}
    with open(fastaFile, "r") as f:
        for record in tqdm(
            SeqIO.parse(f, "fasta"),
            desc="Reading fasta",
            total=count_lines_in_fasta(fastaFile),
        ):
            recIDs.add(record.id)
            recDict[record.id] = record
    return (recIDs, recDict)
# @cache_it(limit=100000, expire=60*60*CACHE_HOURS)
def concat_and_write_fasta(baseFname, fastaDir, oFname, sequence_set, exclude_set):
    """
    Take a single fasta (containing multiple GISAID records) and add a
    set of other fasta records stored in a given directory to that
    fasta. Write the output to a new file.
    Return both a set of unique record IDs and a dictionary of the records
    """
    # Initialize empty lists for outputs
    records = []
    recordIDs = set()
    duplicates = []

    # NOTE(review): this closure is cached in redis keyed on `id` only, so
    # entries could leak across runs with different sequence/exclude sets
    # until the cache expires — confirm this is acceptable.
    @cache_it(limit=1_000_000, expire=60 * 60 * CACHE_HOURS)
    def check_record_validity(id):
        """A little helper to make check the following:
        1. The record is in our master sequence set
        2. The record is not flagged to be excluded
        """
        if id in sequence_set:
            if id in exclude_set:
                eprint(f"Excluding {id}")
                return False
            else:
                return True
        return False

    # Read the gisaid fasta
    nLines = count_lines_in_fasta(baseFname)
    print(f"Reading the base GISAID fasta: {baseFname}")
    with open(baseFname, "r") as handle:
        for record in tqdm(
            SeqIO.parse(handle, "fasta"), desc="Nextstrain fasta import", total=nLines
        ):
            # Check if a sequence with the same name already exists
            if record.id not in recordIDs:
                if check_record_validity(record.id):
                    records.append(record)
                    # Keep track of the sequence names that have been processed
                    recordIDs.add(record.id)
            else:
                # If it already exists, warn the user
                duplicates.append(f"WARNING: Duplicate record ID {record.id}.")
    print(f"Added {len(recordIDs)} records")
    process_non_gisaid_fastas(fastaDir, records, recordIDs, duplicates)
    print(f"Final dataset size (in sequences): {len(records)}")
    # Write the output fasta
    print(f"Writing {oFname}")
    with open(oFname, "w") as output_handle:
        SeqIO.write(records, output_handle, "fasta")
    # Write the list of duplicates, we care for debugging issues
    print("Writing duplicates to results/duplicate_sequence.txt")
    with open("results/duplicate_sequences.txt", "w") as output_handle:
        for line in duplicates:
            output_handle.write(f"{line}\n")
    # Transform records into a dictionary keyed on id,
    # as that will be easier to handle later
    # NOTE: This fucking sucks.
    new_records = {record.id: record for record in records}
    return recordIDs, new_records
def process_non_gisaid_fastas(fastaDir, records, recordIDs, duplicates):
    """Append every record from each .fasta file in *fastaDir*.

    Mutates *records*, *recordIDs* and *duplicates* in place; already-seen
    IDs are recorded as duplicates instead of being re-added.
    """
    # NOTE: The following logic is more or less deprecated, as we don't really
    #       use additional fastas at this point and just pull things from
    #       GISAID instead. That said, I'm keeping it in for now.
    # TODO: Check if everything works correctly without doing this, as it will
    #       clean up the whole process quite a bit
    # Now, process each of the files in the directory that
    # contains non-gisaid fastas
    for fname in os.listdir(fastaDir):
        # Note: some of the files may be metadata files,
        # we only care about fastas for now
        if fname.endswith(".fasta"):
            print(f"Processing {fname}")
            # Keep track of how many sequences we add from additional files
            added = 0
            with open(f"{fastaDir}/{fname}", "r") as handle:
                for record in tqdm(
                    SeqIO.parse(handle, "fasta"), desc=f"Importing {fname}"
                ):
                    # Use the same logic as we did handling the gisaid fasta
                    if record.id not in recordIDs:
                        records.append(record)
                        recordIDs.add(record.id)
                        added += 1
                    else:
                        duplicates.append(f"Duplicate record ID: {record.id}.")
            print(f"Added {added} records")
def concat_and_write_metadata(baseFname, metaDir, oFname, recordIDs, records):
    """Merge the GISAID metadata tsv with every .xlsx sheet in *metaDir*
    and write the combined, location-corrected table to *oFname*.

    baseFname -- GISAID metadata tsv path
    metaDir   -- directory holding extra .xlsx metadata sheets
    oFname    -- output tsv path
    recordIDs -- set of sequence names present in the master fasta
    records   -- {strain: SeqRecord}, used to fill in sequence lengths

    Belgian rows get their location / division / ZIP columns normalized via
    apply_location_corrections; rows with malformed dates or no matching
    sequence are dropped.  Side effects: writes *oFname*, appends to the
    warnings log, prints progress.
    """
    # There is some inconsistency in how headers are labeled,
    # `renames` maps between those
    renames = {
        "sequence name": "strain",
        "Town": "location",
        "Acc.Number": "gisaid_epi_isl",
        "Sex": "sex",
        "Age": "age",
        "sample date": "date",
    }
    # `drop_cols` is the column names that we will take out of the final merge
    # Note: Maybe include "ZIP"?
    drop_cols = ["#"]
    # First, we read in the GISAID metadata file
    metadata = pd.read_csv(baseFname, sep="\t", header=0)
    print(f"Metadata original rows: {len(metadata)}")

    def reduce_metadata(df, ids):
        """Keep rows whose strain is in *ids* and whose date looks like YYYY-MM-DD."""
        print(f"Length of metada before redution: {len(df)}.")
        newMeta = df[df["strain"].isin(list(ids))]
        # BUG FIX: the date filter used to restart from `df`, silently
        # discarding the strain filter above; chain it off newMeta instead.
        newMeta = newMeta[newMeta["date"].apply(lambda x: len(str(x)) == 10)]
        print(f"Length of metadata after reduction: {len(newMeta)}.")
        return newMeta

    dropRows = []
    # Reduce the metadata dataFrame so that it is more reasonable to work with
    metadata = reduce_metadata(metadata, recordIDs)
    # Second, look at every metadata spreadsheet in metaDir.
    # BUG FIX: this print was missing its f-prefix, so the literal text
    # "{metaDir}" was shown instead of the directory name.
    print(
        f"Completed base metadata file import, now processing all excel spreadsheets in {metaDir}"
    )
    for file in tqdm(os.listdir(metaDir), desc="Reading metadata files"):
        # Only deal with excel spreadsheets for now
        if file.endswith(".xlsx"):
            # Make a new dataframe
            newMeta = pd.read_excel(f"{metaDir}/{file}", engine="openpyxl")
            # Rename the columns appropriately
            newMeta = newMeta.rename(columns=renames)
            # Slam in some "reasonable" assumptions:
            # our belgian sequences are probably european
            newMeta["region"] = "Europe"
            # they are also probably Belgian (some are French)
            newMeta["country"] = "Belgium"
            # our ncov sequences are _hopefully_ ncov
            newMeta["virus"] = "ncov"
            # full genome
            newMeta["segment"] = "genome"
            # they all come from human hosts
            newMeta["host"] = "Human"
            # We are just filling in an empty column for sequence length
            newMeta["length"] = np.nan
            # These aren't from GISAID, but they need a date to avoid
            # gumming up the works. We use today's date
            newMeta["date_submitted"] = dt.date.today().strftime("%Y-%m-%d")
            newMeta = reduce_metadata(newMeta, recordIDs)
            # Some things need to happen to every sequence individually
            # 1) remove year from sequence name (to match fasta)
            # 2) set country (if it isn't Belgium)
            # 3) set sequence length
            for (index, row) in tqdm(
                newMeta.iterrows(), total=len(newMeta), desc=f"Processing {file}"
            ):
                # strain name fix. I know this sucks
                try:
                    newDate = pd.to_datetime(row["date"]).strftime("%Y-%m-%d")
                    newMeta.at[index, "date"] = newDate
                    row["strain"] = fix_strain_name(row["strain"])
                    # fix country
                    row["country"] = fix_country_from_strain_name(row["strain"])
                except Exception as e:
                    with open("logs/build_datasets_warnings.log", "a") as logFile:
                        logFile.write(f"WARNING: {e}.\n")
                    dropRows.append(index)
                # set length for each sequence, if it doesn't have a length
                # for some reason indicate it should be dropped
                if row["strain"] in records.keys():
                    newMeta.at[index, "length"] = int(len(records[row["strain"]].seq))
                else:
                    dropRows.append(index)
                # I don't know why this next line exists but it seems to be
                # necessary for things to work
                newMeta.at[index, "date_submitted"] = newMeta.at[index, "date"]
            # Indicate missing data for columns for which we don't have data
            for item in set(metadata.columns).difference(set(newMeta.columns)):
                newMeta[item] = "?"
            metadata = pd.concat([metadata, newMeta])
            metadata = metadata.reset_index(drop=True)
    print(f"New metadata length: {len(metadata)}")
    # Build the big strain name-zip dictionary
    strainNameToZip = build_strain_to_zip()
    epiIslToZip = build_isl_to_zip()
    # Read the mapping files we need
    munMap = read_muni_map()
    zipCodesToProvinces, zipCodesToMunicipalities = get_zip_location_map()
    myManualFixes = read_manual_fix_map()
    locFixes = fix_location_map()
    lonelyBoys = set()  # locations we could not map to any province
    for (index, row) in tqdm(
        metadata.iterrows(), desc="Applying location fixes", total=len(metadata)
    ):
        # Not a location fix, but while we are looking at the individual rows
        # we also should drop anything that we don't want.
        try:
            if len(row["date"]) <= 9:
                dropRows.append(index)
                continue
        except Exception as e:
            with open("logs/build_datasets_warnings.log", "a") as logFile:
                logFile.write(f"WARNING: {e}.\n")
            dropRows.append(index)
            continue
        metadata = apply_location_corrections(
            metadata,
            index,
            row,
            epiIslToZip,
            strainNameToZip,
            locFixes,
            zipCodesToProvinces,
            zipCodesToMunicipalities,
            munMap,
            myManualFixes,
            lonelyBoys,
        )
    print("Los Lonely Boys:")
    for thing in lonelyBoys:
        print(thing)
    # NOTE(review): dropRows mixes indices collected from newMeta
    # (pre-concat) with post-concat metadata indices — verify these still
    # label the intended rows before relying on this drop.
    metadata = metadata.drop(index=dropRows)
    metadata = metadata.drop(columns=drop_cols)
    # Drop duplicates
    metadata = metadata.drop_duplicates(
        subset="strain", ignore_index=True
    ).reset_index()
    # metadata = coarse_downsample(metadata)
    print(f"Writing {oFname}")
    metadata.to_csv(oFname, sep="\t", index=False)
def spotcheck(df: pd.DataFrame, r: pd.Series, note: str) -> None:
    """Debug helper: print location info for one hard-coded sentinel strain.

    When row *r* is the sentinel strain, print *note* plus the location as
    seen both in the row and in *df*.  Never raises: any lookup problem
    (missing column, odd row type) is deliberately swallowed because this
    is diagnostic-only code.
    """
    try:
        s = "Belgium/rega-4590/2021"
        # Europe / Belgium / Vilvoorde
        if r["strain"] == s:
            print(f"{note}: {r['location']}")
            print(df[df["strain"] == s]["location"])
    except Exception:
        # BUG FIX: was a bare `except:`, which would also trap
        # KeyboardInterrupt/SystemExit; Exception keeps the intended
        # "never crash" behavior without hiding interpreter exits.
        pass
def apply_location_corrections(
    metadata: pd.DataFrame,
    index: int,
    row: pd.Series,
    epiIslToZip: dict,
    strainNameToZip: dict,
    locFixes: dict,
    zipCodesToProvinces: dict,
    zipCodesToMunicipalities: dict,
    munMap: dict,
    myManualFixes: dict,
    lonelyBoys: set,
) -> pd.DataFrame:
    """Fill in and normalize the geography columns of one metadata row.

    Only rows with country == 'Belgium' are touched.  ZIP precedence:
    GISAID EPI_ISL lookup first, then strain-name lookup.  A known ZIP
    then determines province/municipality; otherwise the location name is
    mapped through munMap / myManualFixes.  Location names matching
    nothing are collected into *lonelyBoys* for manual review.

    Mutates *metadata* in place and also returns it.  The numbered
    spotcheck() calls trace one sentinel strain through each step.
    """
    if metadata.at[index, "country"] == "Belgium":
        if metadata.at[index, "country_exposure"] == "?":
            metadata.at[index, "country_exposure"] = "Belgium"
        # This identifies any sequences without a "location"
        if isinstance(row["location"], float):
            if row["division"] != "Belgium":
                # Trickle down
                metadata.at[index, "location"] = metadata.at[index, "division"]
                metadata.at[index, "division"] = "?"
        spotcheck(metadata, row, "1")
        # Set ZIP:
        if metadata.at[index, "gisaid_epi_isl"] in epiIslToZip.keys():
            metadata.at[index, "ZIP"] = epiIslToZip[
                metadata.at[index, "gisaid_epi_isl"]
            ]
        elif metadata.at[index, "strain"] in strainNameToZip.keys():
            metadata.at[index, "ZIP"] = strainNameToZip[metadata.at[index, "strain"]]
        spotcheck(metadata, row, "2")
        zip = str(metadata.at[index, "ZIP"])
        loc = metadata.at[index, "location"]
        # Fix location names
        if loc in locFixes.keys():
            loc = locFixes[loc]
            metadata.at[index, "location"] = loc
        spotcheck(metadata, row, "3")
        metadata.at[index, "location_exposure"] = loc
        metadata.at[index, "region_exposure"] = "Europe"
        if zip in zipCodesToProvinces.keys():
            metadata.at[index, "division"] = zipCodesToProvinces[zip]
            metadata.at[index, "division_exposure"] = zipCodesToProvinces[zip]
            metadata.at[index, "location"] = zipCodesToMunicipalities[zip]
            metadata.at[index, "location_exposure"] = zipCodesToMunicipalities[zip]
            spotcheck(metadata, row, "4")
        elif loc in munMap.keys():
            metadata.at[index, "division"] = munMap[loc]
            metadata.at[index, "division_exposure"] = munMap[loc]
            spotcheck(metadata, row, "5")
        elif loc in myManualFixes.keys():
            metadata.at[index, "division"] = myManualFixes[loc]
            metadata.at[index, "division_exposure"] = myManualFixes[loc]
            spotcheck(metadata, row, "6")
        else:
            lonelyBoys.add(loc)
            spotcheck(metadata, row, "7")
        fix_liege(metadata, index)
        spotcheck(metadata, row, "8")
    return metadata
@cache_it(limit=1000, expire=60 * 60 * CACHE_HOURS)
def read_all_sequence_lists():
    """Read all the .txt files in the sequence list directory
    This creates a sort of "master" list of all sequences that we ever might use
    """
    # NOTE(review): the result is cached in redis for CACHE_HOURS hours, so
    # newly added list files are not picked up until the cache expires.
    # Name of the directory we care about
    seqListDir = "data/sequence_lists/"
    print(f"Creating a master sequence list from {seqListDir}.")
    # Empty set to store our output
    allSeqs = set([])
    for fname in os.listdir(seqListDir):
        if fname.endswith(".txt"):
            with open(f"{seqListDir}{fname}", "r") as f:
                for line in f.readlines():
                    # Remove \n characters
                    line = line.strip()
                    allSeqs.add(line)
    print(f"Sequence list initialized with {len(allSeqs)} sequences.")
    return allSeqs
@cache_it(limit=1000, expire=60 * 60 * CACHE_HOURS)
def read_excludes():
    """Return the set of sequence names that should be ignored (redis-cached)."""
    excludeFile = "defaults/exclude.txt"
    print(f"Creating a master exclude list from {excludeFile}")
    with open(excludeFile, "r") as handle:
        excluded = {entry.strip() for entry in handle.readlines()}
    print(f"Initialized exclude list with {len(excluded)} sequences.")
    return excluded
@cache_it(limit=1000, expire=60 * 60 * CACHE_HOURS)
def count_lines_in_fasta(fname):
    """Count fasta records in *fname* by counting '>' header lines.

    Delegates to grep for speed on multi-GB files; the result is
    redis-cached.  Returns the record count as an int (0 if grep finds
    nothing or fails).
    """
    print(f"Processing {fname} for total fasta entries.")
    # Run grep without a shell: the previous version interpolated *fname*
    # into a shell command string (quoting/injection hazard) and never
    # reaped the child process.  grep exits 1 for zero matches, which is
    # not an error for us.
    result = subprocess.run(
        ["grep", "-c", ">", fname], capture_output=True, text=True
    )
    nLines = int(result.stdout.strip() or 0)
    print(f"Found {nLines} fasta entries.")
    return nLines
def coarse_downsample(df):
    """Randomly thin non-Belgian rows and drop rows missing key fields.

    With all probabilities currently set to 0.0, no random dropping
    actually happens; only rows lacking date/strain/date_submitted are
    removed.  The only visible call site (in concat_and_write_metadata)
    is commented out.
    """
    p = 0.0  # drop European, non-belgian sequences
    p1 = 0.0  # drop DK and UK sequences
    p2 = 0.0  # drop non-european sequences
    force_includes = read_includes()
    print(f"Started downsampling with {len(df.index)} rows.")
    drops = []
    for index, row in df.iterrows():
        if df.at[index, "country"] != "Belgium":
            n = random.random()
            # Sequences on the include list are never randomly dropped.
            if df.at[index, "strain"] not in force_includes:
                if df.at[index, "country"] in ["Denmark", "United Kingdom"]:
                    if n < p1:
                        drops.append(index)
                elif df.at[index, "region"] != "Europe":
                    if n < p2:
                        drops.append(index)
                elif n < p:
                    drops.append(index)
        # Regardless of country, drop rows with empty key fields.
        if not df.at[index, "date"]:
            drops.append(index)
        elif not df.at[index, "strain"]:
            drops.append(index)
        elif not df.at[index, "date_submitted"]:
            drops.append(index)
    print(f"Attempting to remove {len(drops)} rows.")
    df = df.drop(index=drops).reset_index()  # drop the noted sequences
    print(f"Final dataset of {len(df.index)} rows.")
    return df
def read_includes():
    """Return the set of strain names from defaults/include.txt that must always be kept."""
    inclf = "defaults/include.txt"
    with open(inclf, "r") as handle:
        return {entry.strip("\n") for entry in handle.readlines()}
def fix_strain_name(s):
    """Normalize a strain name so it matches the fasta headers.

    Casts to str and removes a trailing '/2019' or '/2020' year component
    (e.g. 'Belgium/xyz/2020' -> 'Belgium/xyz').  Other names pass through
    unchanged.  (The old comment claimed a leading quote was stripped —
    no such code ever existed.)
    """
    # Cast to str
    s = str(s)
    # endswith accepts a tuple: one call instead of an or-chain.
    if s.endswith(("/2020", "/2019")):
        s = s.rsplit("/", 1)[0]
    return s
def fix_location_map():
    """Load municipality-name corrections as a {bad_name: good_name} dict."""
    fixfname = "data/source_files/municipalities_name_fixes.csv"
    mapping = {}
    with open(fixfname, "r") as handle:
        for raw in handle.readlines():
            fields = raw.strip("\n").split(",")
            mapping[fields[0]] = fields[1]
    return mapping
def fix_country_from_strain_name(s):
    """Return the country component (second '/'-separated field) of a strain name."""
    return s.split("/")[1]
def build_strain_to_zip():
    """Map strain names to postal codes from the ULiège sample sheets.

    Rows whose 'ZIP' field is not a clean integer string are skipped.
    Returns {strain_name: zip_string}.
    """
    m = {}
    liege_file = "data/zip_codes/SARS-CoV-2_ULiegeSeq_211220.xlsx"
    liege_file2 = "data/zip_codes/SARS-CoV-2_ULiegeSeq_011220.csv"
    df = pd.read_excel(liege_file, engine="openpyxl").rename(
        columns={"virus name": "strain", "Postal code": "ZIP"}
    )
    df2 = pd.read_csv(liege_file2).rename(columns={"sequence_ID": "strain"})
    # df = pd.concat([df,df2])
    df = pd.concat([df, df2], ignore_index=True, verify_integrity=True)

    # NOTE(review): `sf` strips a leading 'hCoV-19/' prefix but is never
    # called — dead code, presumably a leftover; confirm before removing.
    def sf(s):
        if s.startswith("hCoV-19"):
            s = s[8:]
        return s

    for index, row in df.iterrows():
        strainName = row["strain"].strip()
        zipCode = str(row["ZIP"])
        try:
            # Keep only rows whose ZIP parses as an integer.
            int(zipCode.strip())
            m[strainName] = zipCode
        except Exception as e:
            # print(f"Wah: {e}")
            # print(strainName)
            pass
    return m
def build_isl_to_zip():
    """Map GISAID EPI_ISL accession IDs to postal codes.

    Reads both sheets of the PostCodes workbook; rows whose GISAID_ID does
    not start with 'EPI' are ignored.  Returns {epi_isl: zip_string}.
    """
    r = {}
    # Add other files here
    gfile = "data/zip_codes/PostCodes_2020-12-29.xlsx"
    # NOTE(review): the two sheets are concatenated without ignore_index,
    # so df.at[i, ...] can hit duplicate index labels — confirm the
    # sheets' indices do not overlap.
    df = pd.concat(
        [
            pd.read_excel(gfile, sheet_name=0, engine="openpyxl"),
            pd.read_excel(gfile, sheet_name=1, engine="openpyxl"),
        ]
    )
    for i, row in df.iterrows():
        s = str(df.at[i, "GISAID_ID"])
        if s.startswith("EPI"):
            # print(s)
            # print(df.at[i])
            r[s] = str(df.at[i, "Postcode"]).strip()
    return r
def read_muni_map(case_json="data/epi/COVID19BE_CASES_MUNI_CUM.json"):
    """Parse a set of files mapping municipality names to their province.

    Keyword arguments:
    case_json -- a string that gives the path (relative to project root)
                 of the case json file that is being read

    Output:
    map -- a dictionary keying all named municipalities in Belgium to their
           province. Each municipality will be the key two times:
           once in Dutch and once in French.
           {"Leuven": "VlaamsBrabant",
            "Louvain": "VlaamsBrabant",
            ...}
    """
    print("Creating a map of municipalities to their province.")
    print("This may take several minutes.")
    map = {}  # Initialize the final dictionary to be returned
    # Use the json module to load the full file into memory :(
    with open(case_json, "r") as f:
        data = json.load(f)
    # Add a small function that will clean up municipalities with parentheses
    fixit = lambda x: x.split("(")[0][:-1] if "(" in x else x
    # TODO: Handle all these poorly caught exceptions properly
    # Set both dutch and french names
    for item in tqdm(data, desc="Reading municipalities"):
        # Add the Dutch municipality name
        try:
            map[fixit(item["TX_DESCR_NL"])] = item["PROVINCE"]
        except Exception as e:
            with open("logs/build_datasets_warnings.log", "a") as logFile:
                logFile.write(f"WARNING: {e}.\n")
        # Add the French municipality name
        try:
            map[fixit(item["TX_DESCR_FR"])] = item["PROVINCE"]
        except Exception as e:
            with open("logs/build_datasets_warnings.log", "a") as logFile:
                logFile.write(f"WARNING: {e}.\n")
    # Overlay the manual municipality -> province fixes.
    with open("data/source_files/municipalities_to_provinces.csv", "r") as f:
        for line in f.readlines():
            try:
                line = line.strip("\n").split(",")
                map[line[0]] = line[1]
            except Exception as e:
                with open("logs/build_datasets_warnings.log", "a") as logFile:
                    # BUG FIX: this write was missing the f-prefix, so the
                    # literal text "WARNING: {e}." was logged instead of
                    # the exception message.
                    logFile.write(f"WARNING: {e}.\n")
    return map
def read_manual_fix_map():
    """Load the manual municipality -> province fixes CSV as a dict."""
    fname = "data/source_files/municipalities_to_provinces.csv"
    mapping = {}
    with open(fname, "r") as handle:
        for raw in handle.readlines():
            try:
                fields = raw.strip("\n").split(",")
                mapping[fields[0]] = fields[1]
            except Exception as err:
                with open("logs/build_datasets_warnings.log", "a") as logFile:
                    logFile.write(f"WARNING: {err}.\n")
    return mapping
def fix_liege(df, i):
    """Restore the diacritic in 'Liège' across all geography columns of row *i*."""
    for column in ("location", "location_exposure", "division", "division_exposure"):
        if df.at[i, column] == "Liege":
            df.at[i, column] = "Liège"
def get_zip_location_map():
    """Build {zip: province} and {zip: municipality} dictionaries.

    Reads the Belgian geographic dictionary CSV (checked out next to this
    repo) and keeps the first province/municipality seen for each postal
    code.  Province names are normalized to the spellings used elsewhere
    in this pipeline (e.g. 'Vlaams-Brabant' -> 'VlaamsBrabant').

    Returns (provinces_by_zip, municipalities_by_zip).
    """
    # NOTE(review): `error_bad_lines` is deprecated in newer pandas
    # (replaced by on_bad_lines="skip") — kept for the pinned version.
    bmap = pd.read_csv(
        "../Belgium-Geographic-Data/dist/metadata/be-dictionary.csv",
        error_bad_lines=False,
        encoding="ISO-8859-1",
    )
    bmap["PostCode"] = bmap["PostCode"].astype(int, errors="ignore")
    pro = {}
    mun = {}
    # Normalize province spellings to match the rest of the pipeline.
    fn = {
        "Vlaams-Brabant": "VlaamsBrabant",
        "Brabant Wallon": "BrabantWallon",
        "West-Vlaanderen": "WestVlaanderen",
        "Oost-Vlaanderen": "OostVlaanderen",
        "Liège": "Liège",
    }
    myfix = lambda n: fn[n] if n in fn.keys() else n
    for index, row in bmap.iterrows():
        try:
            zip = str(int(bmap.at[index, "PostCode"]))
        except (TypeError, ValueError):
            # BUG FIX: was a bare `except:` — narrowed to the conversion
            # errors a bad/missing postal code can actually raise.
            continue
        if zip not in pro.keys():
            pro[zip] = myfix(bmap.at[index, "Province"])
            mun[zip] = bmap.at[index, "Municipality"]
    return pro, mun
def eprint(*args, **kwargs):
    """Like print(), but writes to stderr (for diagnostics/progress)."""
    print(*args, file=sys.stderr, **kwargs)
if __name__ == "__main__":
    # print(build_strain_to_zip())
    # sys.exit()
    # argh turns main() into the command-line entry point.
    dispatch_command(main)
| """build_datasets.py
"""
from __future__ import print_function
import datetime as dt
import json
# import multiprocessing as mp
import os
import random
import subprocess
import sys
import numpy as np
import pandas as pd
from argh import dispatch_command # type: ignore
from Bio import SeqIO # type: ignore
from redis_cache import cache_it # type: ignore
from tqdm import tqdm # type: ignore
CACHE_HOURS = 3
def main():
"""The main process to follow for incorporating metadata files
"""
# There should be two files that come from GISAID:
# 1) A dated metadata tsv
# 2) A dated sequences fasta
# These files can be found throught:
# GISAID
# -> EpiCoV
# -> Downloads
# -> Genomic epidemiology
# -> "FASTA" and "metadata" links
# After being downloaded and extracted with `gunzip`
# they can be renamed/relocated to the paths shown below
gisaid_metadata = "data/metadata.tsv"
gisaid_fasta = "data/sequences.fasta"
# We expect to have a directory full of data (both sequence and metadata)
# which is not on GISAID
non_gisaid_dir = "data/non_gisaid"
# Define names of the updated sequence and metadata files
# that we want at the end of the pipeline
OUTPUT_FASTA = "data/ALL_SEQUENCES.fasta"
OUTPUT_META_FNAME = "data/ALL_METADATA.tsv"
sequence_names = read_all_sequence_lists()
exclude_names = read_excludes()
##################
# Main process #
##################
# First, concatenate all the fasta files into one master fasta
# This gives us two outputs:
# recordIDs: set of all the record names (i.e. fasta headers)
# that are included in the dataset
# records: dictionary mapping the recordIDs to their associated sequence
# TODO: Change this to be just sequence length
# since that is all we really need
(recordIDs, records) = concat_and_write_fasta(
gisaid_fasta, non_gisaid_dir, OUTPUT_FASTA, sequence_names, exclude_names
)
print(f"recordIDs: {len(recordIDs)}")
print(f"records: {len(records)}")
# (recordIDs, records) = bypass_fasta_prep(OUTPUT_FASTA)
# Second, concatenate all the associated metadata
# This is a bit of a mess
concat_and_write_metadata(
gisaid_metadata, non_gisaid_dir, OUTPUT_META_FNAME, recordIDs, records
)
def bypass_fasta_prep(fastaFile):
"""Save a bunch of time during teting."""
recIDs = set()
recDict = {}
with open(fastaFile, "r") as f:
for record in tqdm(
SeqIO.parse(f, "fasta"),
desc="Reading fasta",
total=count_lines_in_fasta(fastaFile),
):
recIDs.add(record.id)
recDict[record.id] = record
return (recIDs, recDict)
# @cache_it(limit=100000, expire=60*60*CACHE_HOURS)
def concat_and_write_fasta(baseFname, fastaDir, oFname, sequence_set, exclude_set):
"""
Take a single fasta (containing multiple GISAID records) and add a
set of other fasta records stored in a given directory to that
fasta. Write the output to a new file.
Return both a set of unique record IDs and a dictionary of the records
"""
# Initialize empty lists for outputs
records = []
recordIDs = set()
duplicates = []
@cache_it(limit=1_000_000, expire=60 * 60 * CACHE_HOURS)
def check_record_validity(id):
"""A little helper to make check the following:
1. The record is in our master sequence set
2. The record is not flagged to be excluded
"""
if id in sequence_set:
if id in exclude_set:
eprint(f"Excluding {id}")
return False
else:
return True
return False
# Read the gisaid fasta
nLines = count_lines_in_fasta(baseFname)
print(f"Reading the base GISAID fasta: {baseFname}")
with open(baseFname, "r") as handle:
for record in tqdm(
SeqIO.parse(handle, "fasta"), desc="Nextstrain fasta import", total=nLines
):
# Check if a sequence with the same name already exists
if record.id not in recordIDs:
if check_record_validity(record.id):
records.append(record)
# Keep track of the sequence names that have been processed
recordIDs.add(record.id)
else:
# If it already exists, warn the user
duplicates.append(f"WARNING: Duplicate record ID {record.id}.")
print(f"Added {len(recordIDs)} records")
process_non_gisaid_fastas(fastaDir, records, recordIDs, duplicates)
print(f"Final dataset size (in sequences): {len(records)}")
# Write the output fasta
print(f"Writing {oFname}")
with open(oFname, "w") as output_handle:
SeqIO.write(records, output_handle, "fasta")
# Write the list of duplicates, we care for debugging issues
print("Writing duplicates to results/duplicate_sequence.txt")
with open("results/duplicate_sequences.txt", "w") as output_handle:
for line in duplicates:
output_handle.write(f"{line}\n")
# Transform records into a dictionary keyed on id,
# as that will be easier to handle later
# NOTE: This fucking sucks.
new_records = {record.id: record for record in records}
return recordIDs, new_records
def process_non_gisaid_fastas(fastaDir, records, recordIDs, duplicates):
# NOTE: The following logic is more or less deprecated, as we don't really
# use additional fastas at this point and just pull things from
# GISAID instead. That said, I'm keeping it in for now.
# TODO: Check if everything works correctly without doing this, as it will
# clean up the whole process quite a bit
# Now, process each of the files in the directory that
# contains non-gisaid fastas
for fname in os.listdir(fastaDir):
# Note: some of the files may be metadata files,
# we only care about fastas for now
if fname.endswith(".fasta"):
print(f"Processing {fname}")
# Keep track of how many sequences we add from additional files
added = 0
with open(f"{fastaDir}/{fname}", "r") as handle:
for record in tqdm(
SeqIO.parse(handle, "fasta"), desc=f"Importing {fname}"
):
# Use the same logic as we did handling the gisaid fasta
if record.id not in recordIDs:
records.append(record)
recordIDs.add(record.id)
added += 1
else:
duplicates.append(f"Duplicate record ID: {record.id}.")
print(f"Added {added} records")
def concat_and_write_metadata(baseFname, metaDir, oFname, recordIDs, records):
    """
    Merge the GISAID metadata TSV with every Excel metadata sheet found in
    `metaDir`, normalise the Belgian samples, and write the merged table to
    `oFname` (tab separated).

    Parameters:
        baseFname -- path of the base (GISAID) metadata TSV
        metaDir   -- directory holding the additional .xlsx metadata sheets
        oFname    -- output path for the merged metadata TSV
        recordIDs -- set of strain names that are present in the fasta
        records   -- dict of strain name -> SeqRecord, used to fill `length`

    Side effects: writes `oFname`, appends warnings to
    logs/build_datasets_warnings.log and prints progress/cleanup hints.
    """
    # Define some things that will be used later.
    # There is some inconsistency in how headers are labeled,
    # `renames` maps between those
    renames = {
        "sequence name": "strain",
        "Town": "location",
        "Acc.Number": "gisaid_epi_isl",
        "Sex": "sex",
        "Age": "age",
        "sample date": "date",
    }
    # `drop_cols` is the column names that we will take out of the final merge
    # Note: Maybe include "ZIP"?
    drop_cols = ["#"]
    # First, we read in the GISAID metadata file
    metadata = pd.read_csv(baseFname, sep="\t", header=0)
    print(f"Metadata original rows: {len(metadata)}")

    def reduce_metadata(df, ids):
        """Keep only rows whose strain is in `ids` AND whose date is a full
        YYYY-MM-DD string (length 10)."""
        print(f"Length of metada before redution: {len(df)}.")
        newMeta = df[df["strain"].isin(list(ids))]
        # BUG FIX: this second filter previously restarted from `df`,
        # silently discarding the strain filter applied just above.
        newMeta = newMeta[newMeta["date"].apply(lambda x: len(str(x)) == 10)]
        print(f"Length of metadata after reduction: {len(newMeta)}.")
        return newMeta

    # Row indices collected here are dropped right before writing.
    # NOTE(review): dropRows mixes indices recorded against the per-sheet
    # frames (before concat/reset_index) with indices of the merged frame;
    # colliding labels could drop the wrong rows -- worth auditing.
    dropRows = []
    # Reduce the metadata dataFrame so that it is more reasonable to work with
    metadata = reduce_metadata(metadata, recordIDs)
    # Second, look at every file in the metadata directory.
    # BUG FIX: this print was missing its f-prefix, so the literal text
    # "{metaDir}" was printed instead of the directory name.
    print(
        f"Completed base metadata file import, now processing all excel spreadsheets in {metaDir}"
    )
    for file in tqdm(os.listdir(metaDir), desc="Reading metadata files"):
        # Only deal with excel spreadsheets for now
        if file.endswith(".xlsx"):
            # Make a new dataframe
            newMeta = pd.read_excel(f"{metaDir}/{file}", engine="openpyxl")
            # Rename the columns appropriately
            newMeta = newMeta.rename(columns=renames)
            # Slam in some "reasonable" assumptions:
            # our belgian sequences are probably european
            newMeta["region"] = "Europe"
            # they are also probably Belgian (some are French)
            newMeta["country"] = "Belgium"
            # our ncov sequences are _hopefully_ ncov
            newMeta["virus"] = "ncov"
            # full genome
            newMeta["segment"] = "genome"
            # they all come from human hosts
            newMeta["host"] = "Human"
            # We are just filling in an empty column for sequence length
            newMeta["length"] = np.nan
            # These aren't from GISAID, but they need a date to avoid
            # gumming up the works. We use today's date
            newMeta["date_submitted"] = dt.date.today().strftime("%Y-%m-%d")
            newMeta = reduce_metadata(newMeta, recordIDs)
            # Some things need to happen to every sequence individually
            # 1) remove year from sequence name (to match fasta)
            # 2) set country (if it isn't Belgium)
            # 3) set sequence length
            for (index, row) in tqdm(
                newMeta.iterrows(), total=len(newMeta), desc=f"Processing {file}"
            ):
                # strain name fix. I know this sucks
                try:
                    newDate = pd.to_datetime(row["date"]).strftime("%Y-%m-%d")
                    newMeta.at[index, "date"] = newDate
                    row["strain"] = fix_strain_name(row["strain"])
                    # fix country
                    row["country"] = fix_country_from_strain_name(row["strain"])
                except Exception as e:
                    with open("logs/build_datasets_warnings.log", "a") as logFile:
                        logFile.write(f"WARNING: {e}.\n")
                    dropRows.append(index)
                # set length for each sequence, if it doesn't have a length
                # for some reason indicate it should be dropped
                if row["strain"] in records.keys():
                    newMeta.at[index, "length"] = int(len(records[row["strain"]].seq))
                else:
                    dropRows.append(index)
                # I don't know why this next line exists but it seems to be necessary for things to work
                # I'll try to figure out why later if it becomes an issue
                newMeta.at[index, "date_submitted"] = newMeta.at[index, "date"]
                # determine division
            # Indicate missing data for columns for which we don't have data
            for item in set(metadata.columns).difference(set(newMeta.columns)):
                newMeta[item] = "?"
            metadata = pd.concat([metadata, newMeta])
            metadata = metadata.reset_index(drop=True)
            print(f"New metadata length: {len(metadata)}")
    # Build the big strain name-zip dictionary
    strainNameToZip = build_strain_to_zip()
    epiIslToZip = build_isl_to_zip()
    # Read the mapping files we need
    munMap = read_muni_map()
    zipCodesToProvinces, zipCodesToMunicipalities = get_zip_location_map()
    myManualFixes = read_manual_fix_map()
    locFixes = fix_location_map()
    # Locations that could not be mapped to a province end up here.
    lonelyBoys = set()
    for (index, row) in tqdm(
        metadata.iterrows(), desc="Applying location fixes", total=len(metadata)
    ):
        # Not a location fix, but while we are looking at the individual rows
        # we also should drop anything that we don't want.
        try:
            if len(row["date"]) <= 9:
                dropRows.append(index)
                continue
        except Exception as e:
            with open("logs/build_datasets_warnings.log", "a") as logFile:
                logFile.write(f"WARNING: {e}.\n")
            dropRows.append(index)
            continue
        metadata = apply_location_corrections(
            metadata,
            index,
            row,
            epiIslToZip,
            strainNameToZip,
            locFixes,
            zipCodesToProvinces,
            zipCodesToMunicipalities,
            munMap,
            myManualFixes,
            lonelyBoys,
        )
    print("Los Lonely Boys:")
    for thing in lonelyBoys:
        print(thing)
    # Before we write, drop all the flagged rows and unwanted columns
    metadata = metadata.drop(index=dropRows)
    metadata = metadata.drop(columns=drop_cols)
    # Drop duplicates
    metadata = metadata.drop_duplicates(
        subset="strain", ignore_index=True
    ).reset_index()
    # metadata = coarse_downsample(metadata)
    print(f"Writing {oFname}")
    metadata.to_csv(oFname, sep="\t", index=False)
def spotcheck(df: pd.DataFrame, r: pd.Series, note: str) -> None:
    """Debug helper: print the location info for one hard-coded strain.

    Used as numbered tracing inside apply_location_corrections; prints
    nothing for any other strain.  Best-effort only: lookup errors are
    ignored.
    """
    try:
        s = "Belgium/rega-4590/2021"
        # Europe / Belgium / Vilvoorde
        if r["strain"] == s:
            print(f"{note}: {r['location']}")
            print(df[df["strain"] == s]["location"])
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.
        pass
def apply_location_corrections(
    metadata: pd.DataFrame,
    index: int,
    row: pd.Series,
    epiIslToZip: dict,
    strainNameToZip: dict,
    locFixes: dict,
    zipCodesToProvinces: dict,
    zipCodesToMunicipalities: dict,
    munMap: dict,
    myManualFixes: dict,
    lonelyBoys: set,
) -> pd.DataFrame:
    """Fill in / correct the geographic columns of one metadata row.

    For Belgian rows this defaults country_exposure, trickles a division down
    into a missing location, resolves a postal code from the GISAID id or the
    strain name, applies location-name fixes, and resolves the province and
    municipality from the postal code (falling back to the municipality and
    manual-fix maps).  Locations that cannot be resolved are recorded in
    `lonelyBoys`.  Mutates `metadata` in place and returns it.  The numbered
    spotcheck() calls are debug tracing for one hard-coded strain.
    """
    if metadata.at[index, "country"] == "Belgium":
        if metadata.at[index, "country_exposure"] == "?":
            metadata.at[index, "country_exposure"] = "Belgium"
        # This identifies any sequences without a "location"
        # (a missing cell reads back as float NaN)
        if isinstance(row["location"], float):
            if row["division"] != "Belgium":
                # Trickle down
                metadata.at[index, "location"] = metadata.at[index, "division"]
                metadata.at[index, "division"] = "?"
        spotcheck(metadata, row, "1")
        # Set ZIP, preferring the GISAID-id mapping over the strain-name one:
        if metadata.at[index, "gisaid_epi_isl"] in epiIslToZip:
            metadata.at[index, "ZIP"] = epiIslToZip[
                metadata.at[index, "gisaid_epi_isl"]
            ]
        elif metadata.at[index, "strain"] in strainNameToZip:
            metadata.at[index, "ZIP"] = strainNameToZip[metadata.at[index, "strain"]]
        spotcheck(metadata, row, "2")
        # Renamed from `zip` so we don't shadow the builtin.
        zip_code = str(metadata.at[index, "ZIP"])
        loc = metadata.at[index, "location"]
        # Fix location names
        if loc in locFixes:
            loc = locFixes[loc]
            metadata.at[index, "location"] = loc
        spotcheck(metadata, row, "3")
        metadata.at[index, "location_exposure"] = loc
        metadata.at[index, "region_exposure"] = "Europe"
        # Resolve province/municipality, most reliable source first.
        if zip_code in zipCodesToProvinces:
            metadata.at[index, "division"] = zipCodesToProvinces[zip_code]
            metadata.at[index, "division_exposure"] = zipCodesToProvinces[zip_code]
            metadata.at[index, "location"] = zipCodesToMunicipalities[zip_code]
            metadata.at[index, "location_exposure"] = zipCodesToMunicipalities[zip_code]
            spotcheck(metadata, row, "4")
        elif loc in munMap:
            metadata.at[index, "division"] = munMap[loc]
            metadata.at[index, "division_exposure"] = munMap[loc]
            spotcheck(metadata, row, "5")
        elif loc in myManualFixes:
            metadata.at[index, "division"] = myManualFixes[loc]
            metadata.at[index, "division_exposure"] = myManualFixes[loc]
            spotcheck(metadata, row, "6")
        else:
            lonelyBoys.add(loc)
            spotcheck(metadata, row, "7")
        fix_liege(metadata, index)
        spotcheck(metadata, row, "8")
    return metadata
@cache_it(limit=1000, expire=60 * 60 * CACHE_HOURS)
def read_all_sequence_lists():
    """Collect every sequence name listed in the .txt files under
    data/sequence_lists/ into one master set of sequence names."""
    # Directory that holds the per-source sequence lists.
    seqListDir = "data/sequence_lists/"
    print(f"Creating a master sequence list from {seqListDir}.")
    master = set()
    for entry in os.listdir(seqListDir):
        if not entry.endswith(".txt"):
            continue
        with open(f"{seqListDir}{entry}", "r") as handle:
            # One sequence name per line; strip trailing newlines/whitespace.
            master.update(line.strip() for line in handle)
    print(f"Sequence list initialized with {len(master)} sequences.")
    return master
@cache_it(limit=1000, expire=60 * 60 * CACHE_HOURS)
def read_excludes():
    """Read the exclude list to give us the set of what we should ignore."""
    # File holding one excluded sequence name per line.
    excludeFile = "defaults/exclude.txt"
    print(f"Creating a master exclude list from {excludeFile}")
    with open(excludeFile, "r") as handle:
        excluded = {line.strip() for line in handle}
    print(f"Initialized exclude list with {len(excluded)} sequences.")
    return excluded
@cache_it(limit=1000, expire=60 * 60 * CACHE_HOURS)
def count_lines_in_fasta(fname):
    """Count fasta entries (lines containing '>') in `fname` using grep.

    SECURITY/ROBUSTNESS FIX: the old implementation joined the argv list
    into one string and ran it with shell=True, which breaks on filenames
    containing spaces and allows shell injection through `fname`.  Running
    grep directly with an argument list avoids both problems.
    """
    print(f"Processing {fname} for total fasta entries.")
    # grep exits with status 1 when there are no matches, so no check=True.
    result = subprocess.run(["grep", "-c", ">", fname], stdout=subprocess.PIPE)
    nLines = int(result.stdout.strip())
    print(f"Found {nLines} fasta entries.")
    return nLines
def coarse_downsample(df):
    """Randomly thin non-Belgian rows and drop rows missing key fields.

    With all three probabilities set to 0.0 the random sampling removes
    nothing, so in practice only rows lacking a date, strain or
    date_submitted value are dropped.  Returns a new frame with the index
    reset; force-included strains are never sampled away.
    """
    p = 0.0   # drop European, non-belgian sequences
    p1 = 0.0  # drop DK and UK sequences
    p2 = 0.0  # drop non-european sequences
    force_includes = read_includes()
    print(f"Started downsampling with {len(df.index)} rows.")
    to_drop = []
    for idx, _row in df.iterrows():
        if df.at[idx, "country"] != "Belgium":
            roll = random.random()
            if df.at[idx, "strain"] not in force_includes:
                if df.at[idx, "country"] in ("Denmark", "United Kingdom"):
                    if roll < p1:
                        to_drop.append(idx)
                elif df.at[idx, "region"] != "Europe":
                    if roll < p2:
                        to_drop.append(idx)
                elif roll < p:
                    to_drop.append(idx)
        # Rows missing any of the key fields are always flagged.
        if not df.at[idx, "date"]:
            to_drop.append(idx)
        elif not df.at[idx, "strain"]:
            to_drop.append(idx)
        elif not df.at[idx, "date_submitted"]:
            to_drop.append(idx)
    print(f"Attempting to remove {len(to_drop)} rows.")
    df = df.drop(index=to_drop).reset_index()  # drop the noted sequences
    print(f"Final dataset of {len(df.index)} rows.")
    return df
def read_includes():
    """Return the set of strain names that must always be kept, read from
    defaults/include.txt (one name per line)."""
    include_path = "defaults/include.txt"
    with open(include_path, "r") as handle:
        # Only the trailing newline is stripped, matching the file format.
        return {line.strip("\n") for line in handle}
def fix_strain_name(s):
    """Normalise a strain name: cast to str and strip a trailing '/2019'
    or '/2020' year component so the name matches the fasta headers.
    Can be expanded later if we need it."""
    name = str(s)
    if name.endswith(("/2020", "/2019")):
        # Drop only the final '/'-separated component (the year).
        name = name.rsplit("/", 1)[0]
    return name
def fix_location_map():
    """Load the municipality-name corrections CSV (rows of
    `wrong_name,corrected_name`) into a dict keyed on the wrong name."""
    fixes = {}
    fix_path = "data/source_files/municipalities_name_fixes.csv"
    with open(fix_path, "r") as handle:
        for raw in handle:
            parts = raw.strip("\n").split(",")
            fixes[parts[0]] = parts[1]
    return fixes
def fix_country_from_strain_name(s):
    """Extract the country -- the second '/'-separated token -- from a
    strain name such as 'hCoV-19/Belgium/rega-0001/2020'."""
    return s.split("/")[1]
def build_strain_to_zip():
    """Build a mapping of strain name -> postal code (kept as a string)
    from the two ULiege sample sheets.

    Rows whose postal code does not parse as an integer (NaN, free text,
    ...) are silently skipped.  Fixes: removed the dead inner function `sf`
    (defined but never called) and narrowed the except clause -- `int()` on
    a string can only raise ValueError.
    """
    m = {}
    liege_file = "data/zip_codes/SARS-CoV-2_ULiegeSeq_211220.xlsx"
    liege_file2 = "data/zip_codes/SARS-CoV-2_ULiegeSeq_011220.csv"
    df = pd.read_excel(liege_file, engine="openpyxl").rename(
        columns={"virus name": "strain", "Postal code": "ZIP"}
    )
    df2 = pd.read_csv(liege_file2).rename(columns={"sequence_ID": "strain"})
    df = pd.concat([df, df2], ignore_index=True, verify_integrity=True)
    for _, row in df.iterrows():
        strainName = row["strain"].strip()
        zipCode = str(row["ZIP"])
        try:
            # Keep only entries whose postal code parses as an integer.
            int(zipCode.strip())
        except ValueError:
            continue
        m[strainName] = zipCode
    return m
def build_isl_to_zip():
    """Build a mapping GISAID EPI-ISL id -> postal code (string) from the
    two sheets of the PostCodes workbook.

    BUG FIX: the two sheets both start their index at 0, so concatenating
    without ignore_index left duplicate index labels and made the old
    `df.at[i, ...]` lookups ambiguous.  We now concat with ignore_index and
    read values straight off the iterrows() row.
    """
    r = {}
    # Add other files here
    gfile = "data/zip_codes/PostCodes_2020-12-29.xlsx"
    df = pd.concat(
        [
            pd.read_excel(gfile, sheet_name=0, engine="openpyxl"),
            pd.read_excel(gfile, sheet_name=1, engine="openpyxl"),
        ],
        ignore_index=True,
    )
    for _, row in df.iterrows():
        isl = str(row["GISAID_ID"])
        # Only rows with a real GISAID accession are kept.
        if isl.startswith("EPI"):
            r[isl] = str(row["Postcode"]).strip()
    return r
def read_muni_map(case_json="data/epi/COVID19BE_CASES_MUNI_CUM.json"):
    """Parse a set of files mapping municipality names to their province.

    Keyword arguments:
    case_json -- a string that gives the path (relative to project root)
                 of the case json file that is being read

    Output:
    muni_map -- a dictionary keying all named municipalities in Belgium to
                their province.  Each municipality will be the key two
                times: once in Dutch and once in French.
                {"Leuven" : "VlaamsBrabant",
                 "Louvain" : "VlaamsBrabant",
                 ...
                }
    """
    print("Creating a map of municipalities to their province.")
    print("This may take several minutes.")
    # Renamed from `map` so we don't shadow the builtin.
    muni_map = {}
    # Use the json module to load the full file into memory :(
    with open(case_json, "r") as f:
        data = json.load(f)
    # Strip a trailing " (...)" qualifier from municipality names.
    fixit = lambda x: x.split("(")[0][:-1] if "(" in x else x
    # TODO: Handle all these poorly caught exceptions properly
    # Set both dutch and french names
    for item in tqdm(data, desc="Reading municipalities"):
        # Add the Dutch municipality name
        try:
            muni_map[fixit(item["TX_DESCR_NL"])] = item["PROVINCE"]
        except Exception as e:
            with open("logs/build_datasets_warnings.log", "a") as logFile:
                logFile.write(f"WARNING: {e}.\n")
        # Add the French municipality name
        try:
            muni_map[fixit(item["TX_DESCR_FR"])] = item["PROVINCE"]
        except Exception as e:
            with open("logs/build_datasets_warnings.log", "a") as logFile:
                logFile.write(f"WARNING: {e}.\n")
    # Fold in the manual municipality -> province corrections.
    with open("data/source_files/municipalities_to_provinces.csv", "r") as f:
        for line in f.readlines():
            try:
                line = line.strip("\n").split(",")
                muni_map[line[0]] = line[1]
            except Exception as e:
                with open("logs/build_datasets_warnings.log", "a") as logFile:
                    # BUG FIX: this write was missing its f-prefix, so the
                    # literal text "{e}" was logged instead of the error.
                    logFile.write(f"WARNING: {e}.\n")
    return muni_map
def read_manual_fix_map():
    """Read the manual municipality -> province corrections CSV into a dict
    keyed on the municipality name."""
    fix_path = "data/source_files/municipalities_to_provinces.csv"
    fixes = {}
    with open(fix_path, "r") as handle:
        for raw in handle:
            try:
                fields = raw.strip("\n").split(",")
                fixes[fields[0]] = fields[1]
            except Exception as err:
                # Malformed lines are logged and skipped.
                with open("logs/build_datasets_warnings.log", "a") as logFile:
                    logFile.write(f"WARNING: {err}.\n")
    return fixes
def fix_liege(df, i):
    """Restore the diacritic on 'Liège' in the four geography columns of
    row `i`, mutating `df` in place."""
    for column in ("location", "location_exposure", "division", "division_exposure"):
        if df.at[i, column] == "Liege":
            df.at[i, column] = "Liège"
def get_zip_location_map():
    """Make dictionaries taking a Belgian zip code (string) to its province
    and to its municipality, read from the Belgium-Geographic-Data
    dictionary CSV.

    Returns:
        (pro, mun) -- two dicts keyed on the zip-code string; the first
        occurrence of each zip code wins.
    """
    # NOTE(review): `error_bad_lines` was deprecated in pandas 1.3 and
    # removed in 2.0; switch to on_bad_lines="skip" when upgrading pandas.
    bmap = pd.read_csv(
        "../Belgium-Geographic-Data/dist/metadata/be-dictionary.csv",
        error_bad_lines=False,
        encoding="ISO-8859-1",
    )
    bmap["PostCode"] = bmap["PostCode"].astype(int, errors="ignore")
    pro = {}
    mun = {}
    # Normalise a few province spellings to the names used elsewhere.
    fn = {
        "Vlaams-Brabant": "VlaamsBrabant",
        "Brabant Wallon": "BrabantWallon",
        "West-Vlaanderen": "WestVlaanderen",
        "Oost-Vlaanderen": "OostVlaanderen",
        "Liège": "Liège",
    }
    myfix = lambda n: fn.get(n, n)
    for index, row in bmap.iterrows():
        try:
            # Renamed from `zip` so we don't shadow the builtin; narrowed
            # from a bare `except` -- int(str(...)) raises ValueError, and
            # TypeError is kept as a guard for odd cell types.
            zip_code = str(int(bmap.at[index, "PostCode"]))
        except (ValueError, TypeError):
            continue
        if zip_code not in pro:
            pro[zip_code] = myfix(bmap.at[index, "Province"])
            mun[zip_code] = bmap.at[index, "Municipality"]
    return pro, mun
def eprint(*args, **kwargs):
    """Exactly like print(), but writing to stderr instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
if __name__ == "__main__":
# print(build_strain_to_zip())
# sys.exit()
dispatch_command(main) | en | 0.890006 | build_datasets.py # import multiprocessing as mp # type: ignore # type: ignore # type: ignore # type: ignore The main process to follow for incorporating metadata files # There should be two files that come from GISAID: # 1) A dated metadata tsv # 2) A dated sequences fasta # These files can be found throught: # GISAID # -> EpiCoV # -> Downloads # -> Genomic epidemiology # -> "FASTA" and "metadata" links # After being downloaded and extracted with `gunzip` # they can be renamed/relocated to the paths shown below # We expect to have a directory full of data (both sequence and metadata) # which is not on GISAID # Define names of the updated sequence and metadata files # that we want at the end of the pipeline ################## # Main process # ################## # First, concatenate all the fasta files into one master fasta # This gives us two outputs: # recordIDs: set of all the record names (i.e. fasta headers) # that are included in the dataset # records: dictionary mapping the recordIDs to their associated sequence # TODO: Change this to be just sequence length # since that is all we really need # (recordIDs, records) = bypass_fasta_prep(OUTPUT_FASTA) # Second, concatenate all the associated metadata # This is a bit of a mess Save a bunch of time during teting. # @cache_it(limit=100000, expire=60*60*CACHE_HOURS) Take a single fasta (containing multiple GISAID records) and add a set of other fasta records stored in a given directory to that fasta. Write the output to a new file. Return both a set of unique record IDs and a dictionary of the records # Initialize empty lists for outputs A little helper to make check the following: 1. The record is in our master sequence set 2. 
The record is not flagged to be excluded # Read the gisaid fasta # Check if a sequence with the same name already exists # Keep track of the sequence names that have been processed # If it already exists, warn the user # Write the output fasta # Write the list of duplicates, we care for debugging issues # Transform records into a dictionary keyed on id, # as that will be easier to handle later # NOTE: This fucking sucks. # NOTE: The following logic is more or less deprecated, as we don't really # use additional fastas at this point and just pull things from # GISAID instead. That said, I'm keeping it in for now. # TODO: Check if everything works correctly without doing this, as it will # clean up the whole process quite a bit # Now, process each of the files in the directory that # contains non-gisaid fastas # Note: some of the files may be metadata files, # we only care about fastas for now # Keep track of how many sequences we add from additional files # Use the same logic as we did handling the gisaid fasta IMPORTANT: This function absolutely sucks. I'll try to break it apart into more sub-functions with time This function takes multiple metadata spreadsheets and sticks them together. Then it appropriately fixes Belgian samples so they behave the way that we want them to. It then writes to a new file. Also it prints out a bunch of stuff that needs to be fixed later on # Define some things that will be used later. # There is some inconsistency in how headers are labeled, # `renames` maps between those # `drop_cols` is the column names that we will take out of the final merge # Note: Maybe include "ZIP"? # First, we read in the GISAID metadata file Reduce a potentially massive metadata file to only include listed entries. 
# Reduce the metadata dataFrame so that it is more reasonable to work with # Second, look at every file in the # Only deal with excel spreadsheets for now # Make a new dataframe # Rename the columns appropriately # Slam in some "reasonable" assumptions: # our belgian sequences are probably european # they are also probably Belgian (some are French) # our ncov sequences are _hopefully_ ncov # full genome # they all come from human hosts # We are just filling in an empty column for sequence lenght # These aren't from GISAID, but they need a date to avoid # gumming up the works. We use today's date # Some things need to happen to every sequence individually # 1) remove year from sequence name (to match fasta) # 2) set country (if it isn't Belgium) # 3) set sequence length # strain name fix. I know this sucks # fix country # set length for each sequence, if it doesn't have a length # for some reason indicate it should be dropped # I don't know why this next line exists but it seems to be necessary for things to work # I'll try to figure out why later if it becomes an issue # determine division # Indicate missing data for columns for which we don't have data # Build the big strain name-zip dictionary # Read the mapping fils we need # Not a location fix, but while we are looking at the individual rows # we also should drop anything that we don't want. # Before we write, drop all the filenames # Drop duplicates # metadata = coarse_downsample(metadata) # print(metadata) # Europe / Belgium / Vilvoorde NOTE: this function fucking sucks # This identifies any sequences without a "location" # Trickle down # Set ZIP: # Fix location names Read all the .txt files in the sequence list directory This creates a sort of "master" list of all sequences that we ever might use # Name of the directory we care about # Empty set to store our output # Remove \n characters Read the exclude list to give us the set of what we should ignore. 
# Name of the file we are reading # Empty set to store our outuput # drop European, non-belgian sequences # drop DK and UK sequences # drop non-european sequences # drop the noted sequences This can be expanded later if we need it # Cast to str # Remove trailing dates from strain names # Remove leading "" Pull a country from a strain name # df = pd.concat([df,df2]) # print(f"Wah: {e}") # print(strainName) # Add other files here # print(s) # print(df.at[i]) Parse a set of files mapping municipality names to their province. Keyword arguments: case_json -- a string that gives the path (relative to project root) of the case json file that is being read Output: map -- a dictionary keying all named municipalities in Belgium to their province. Each municipality will be the key two times: once in Dutch and once in French. {"Leuven" : "VlaamsBrabant", "Louvain" : "VlaamsBrabant", ... } # Initialize the final dictionary to be returned # Use the json module to load the full file into memory :( # Add a small function that will clean up municipalities with parentheses # TODO: Handle all these poorly caught exceptions properly # Set both dutch and french names # Add the Dutch municipality name # Add the French municipality name Add diacritic marks to Liège make dictionaries taking zip code to province and municipality # print(build_strain_to_zip()) # sys.exit() | 2.265099 | 2 |
point_registration/compare_results.py | MarvinGravert/utils-computation | 0 | 6622873 |
from typing import Tuple
import numpy as np
from loguru import logger
from config.config import RANSAC_THRESHOLD, RANSAC_CONFIDENCE
from utils.check_cost_function import check_cost_function
from client import run_with_config
from modules.optimization import optimize
def show_Matrix(R, t):
    """Print the 3x4 [R | t] homogeneous transform.

    The trailing Rotation.from_matrix call survives from earlier logging
    code; it validates/normalises R but its result is unused.
    """
    print(np.hstack([R, t]))
    from scipy.spatial.transform import Rotation

    _ = Rotation.from_matrix(R)
def get_data_nadine(filename="Corresparray.txt"):
    """Read Nadine's correspondence-array CSV and return two point sets.

    File layout: row 0 is a header, rows 1-3 hold the coordinates of the
    first point set and rows 4+ those of the second.  Each set is
    transposed to points-as-rows and the final point is dropped before both
    are returned as float arrays.

    Returns:
        (p, q) -- two float ndarrays, one point per row.
    """
    import csv

    with open(filename, "r") as f:
        rows = [x for x in csv.reader(f, delimiter=",")]
    p = np.array(rows[1:4]).T
    q = np.array(rows[4:]).T
    # BUG FIX: np.asfarray was removed in NumPy 2.0;
    # np.asarray(..., dtype=float) is its exact equivalent.
    return (np.asarray(p[:-1], dtype=float), np.asarray(q[:-1], dtype=float))
# Backend configuration consumed by run_with_config():
#   type     -- which point-registration implementation to use
#   optimize -- whether the backend should run its own optimisation pass
#   ransac   -- [threshold, confidence] parameters for RANSAC
algorithm_dict={
    "type": "OPENCV",
    "optimize": False, #boolean, False
    "ransac": [0.15, 0.8] #[threshold, confidence]list of floats
}
if __name__ == "__main__":
    logger.info("Starting Comparison")
    """
    IMPORT DATA
    """
    # Load the two corresponding 3D point sets from Corresparray.txt.
    point_set_1, point_set_2 = get_data_nadine()
    ## data from her 3D experiments
    # Hand-entered reference transform (R, t) from Nadine's 3D experiments.
    R_nadine=np.array([[0.998815,-0.001533,0.010493],
                    [0.020139,1.014782,-0.003107],
                    [0.044471,0.009391, 1.001996 ]])
    t_nadine=np.array([0.001209,0.017572,0.066353]).reshape([3, 1])
    """
    RUN point registration
    """
    # Estimate the transform with the backend selected in algorithm_dict.
    R,t=run_with_config(point_set_1,point_set_2,algorithm_dict)
    """
    Compare results
    """
    temp=check_cost_function(point_set_1,point_set_2, R,t)
    print(f"Pre optimisation Results: {temp}")
    # NOTE(review): the point sets are passed in swapped order for the
    # reference transform -- confirm this is intentional.
    temp=check_cost_function(point_set_2,point_set_1, R_nadine,t_nadine)
    print(f"Nadine: {temp}")
    # Refine the estimated rotation and re-evaluate the cost.
    opt=optimize.Optimizer(point_set_1,point_set_2,R,t)
    R_opt,t_opt=opt.find_optimal_rotation()
    temp=check_cost_function(point_set_1,point_set_2, R_opt,t_opt)
    print(f"Optimisation no correction: {temp}")
    # Project the optimised matrix back onto a proper rotation and re-check.
    R_cor=opt.correct_rotation_matrix(R_opt)
    temp=check_cost_function(point_set_1,point_set_2, R_cor,t_opt)
    print(f"Optimisation with correction: {temp}")
show_Matrix(R_cor,t) |
from typing import Tuple
import numpy as np
from loguru import logger
from config.config import RANSAC_THRESHOLD, RANSAC_CONFIDENCE
from utils.check_cost_function import check_cost_function
from client import run_with_config
from modules.optimization import optimize
def show_Matrix(R, t):
# logger.info(f"""
# Rotationmatrix:
# {R[0,:]}
# {R[1,:]}
# {R[2,:]}
# Translationvektor:
# {t[0]}
# {t[1]}
# {t[2]}
# """)
print(np.hstack([R,t]))
from scipy.spatial.transform import Rotation
R = Rotation.from_matrix(R)
# logger.info(f"\n {R.as_matrix()}")
def get_data_nadine(filename="Corresparray.txt"):
import csv
with open(filename, 'r') as f:
first = [x for x in csv.reader(f, delimiter=',')]
# return len(first), len(second)
# t = np.loadtxt(filename, skiprows=1, delimiter=",")
p = first[1:4]
q = first[4:]
p = np.array(p).T
q = np.array(q).T
return (np.asfarray(p[:-1]), np.asfarray(q[:-1]))
algorithm_dict={
"type": "OPENCV",
"optimize": False, #boolean, False
"ransac": [0.15, 0.8] #[threshold, confidence]list of floats
}
if __name__ == "__main__":
logger.info("Starting Comparison")
"""
IMPORT DATA
"""
point_set_1, point_set_2 = get_data_nadine()
## data from her 3D experiments
R_nadine=np.array([[0.998815,-0.001533,0.010493],
[0.020139,1.014782,-0.003107],
[0.044471,0.009391, 1.001996 ]])
t_nadine=np.array([0.001209,0.017572,0.066353]).reshape([3, 1])
"""
RUN point registration
"""
R,t=run_with_config(point_set_1,point_set_2,algorithm_dict)
"""
Compare results
"""
temp=check_cost_function(point_set_1,point_set_2, R,t)
print(f"Pre optimisation Results: {temp}")
temp=check_cost_function(point_set_2,point_set_1, R_nadine,t_nadine)
print(f"Nadine: {temp}")
opt=optimize.Optimizer(point_set_1,point_set_2,R,t)
R_opt,t_opt=opt.find_optimal_rotation()
temp=check_cost_function(point_set_1,point_set_2, R_opt,t_opt)
print(f"Optimisation no correction: {temp}")
R_cor=opt.correct_rotation_matrix(R_opt)
temp=check_cost_function(point_set_1,point_set_2, R_cor,t_opt)
print(f"Optimisation with correction: {temp}")
show_Matrix(R_cor,t) | en | 0.393734 | # logger.info(f""" # Rotationmatrix: # {R[0,:]} # {R[1,:]} # {R[2,:]} # Translationvektor: # {t[0]} # {t[1]} # {t[2]} # """) # logger.info(f"\n {R.as_matrix()}") # return len(first), len(second) # t = np.loadtxt(filename, skiprows=1, delimiter=",") #boolean, False #[threshold, confidence]list of floats IMPORT DATA ## data from her 3D experiments RUN point registration Compare results | 2.509197 | 3 |
map/models.py | gadkaridarshan/ShpTest | 0 | 6622874 | from django.db import models
class Coordinate(models.Model):
    """A geocoded address record stored by the map app."""
    # External identifier for the record; optional.
    Unique_ID = models.CharField(max_length=128,null=True, blank=True)
    # Free-form street address that was geocoded; optional.
    Address = models.CharField(max_length=500,null=True, blank=True)
    # NOTE(review): coordinates are stored as strings; a DecimalField would
    # be more appropriate but requires a schema migration -- confirm first.
    Latitude = models.CharField(max_length=32,null=True, blank=True)
    Longitude = models.CharField(max_length=32,null=True, blank=True)
    # Timestamp set automatically when the row is first inserted.
    Creation = models.DateTimeField(auto_now_add=True, blank=True)
| from django.db import models
class Coordinate(models.Model):
Unique_ID = models.CharField(max_length=128,null=True, blank=True)
Address = models.CharField(max_length=500,null=True, blank=True)
Latitude = models.CharField(max_length=32,null=True, blank=True)
Longitude = models.CharField(max_length=32,null=True, blank=True)
Creation = models.DateTimeField(auto_now_add=True, blank=True)
| none | 1 | 2.392008 | 2 | |
test/api_test.py | LLK/fastly-py | 4 | 6622875 | import unittest
import fastly
class APITest(unittest.TestCase):
    """Integration-style tests for the fastly.API wrapper.

    NOTE(review): these tests hit whatever endpoint fastly.API() is
    configured for; 'TESTAPIKEY' and the '<EMAIL>'/'password' pair look
    like placeholder/redacted fixtures -- confirm they target a mock or
    sandbox service before running.
    """

    def setUp(self):
        # Fresh API client for every test.
        self.api = fastly.API()

    def test_purge(self):
        # Purging a URL should report success.
        self.assertTrue(self.api.purge_url('test.com', '/'))

    def test_purge_by_key(self):
        # Key-based purge requires API-key authentication.
        self.api.deauthenticate()
        self.api.authenticate_by_key('TESTAPIKEY')
        self.assertTrue(self.api.purge_key('test.com', 'foo'))

    def test_cookie_purge_by_key(self):
        # Session (password) auth must NOT be sufficient for purge-by-key.
        self.api.deauthenticate()
        self.api.authenticate_by_password('<EMAIL>', 'password')
        with self.assertRaises(fastly.AuthenticationError):
            self.api.purge_key('test.com', 'foo')

    def test_auth_error(self):
        # Unauthenticated requests raise AuthenticationError.
        self.api.deauthenticate()
        with self.assertRaises(fastly.AuthenticationError):
            self.api.conn.request('GET', '/current_customer')

    def test_auth_key_success(self):
        # API-key auth allows requests through.
        self.api.deauthenticate()
        self.api.authenticate_by_key('TESTAPIKEY')
        self.api.conn.request('GET', '/current_customer')

    def test_auth_session_success(self):
        # Password/session auth allows requests through.
        self.api.deauthenticate()
        self.api.authenticate_by_password('<EMAIL>', 'password')
        self.api.conn.request('GET', '/current_customer')
if __name__ == '__main__':
unittest.main()
| import unittest
import fastly
class APITest(unittest.TestCase):
def setUp(self):
self.api = fastly.API()
def test_purge(self):
self.assertTrue(self.api.purge_url('test.com', '/'))
def test_purge_by_key(self):
self.api.deauthenticate()
self.api.authenticate_by_key('TESTAPIKEY')
self.assertTrue(self.api.purge_key('test.com', 'foo'))
def test_cookie_purge_by_key(self):
self.api.deauthenticate()
self.api.authenticate_by_password('<EMAIL>', 'password')
with self.assertRaises(fastly.AuthenticationError):
self.api.purge_key('test.com', 'foo')
def test_auth_error(self):
self.api.deauthenticate()
with self.assertRaises(fastly.AuthenticationError):
self.api.conn.request('GET', '/current_customer')
def test_auth_key_success(self):
self.api.deauthenticate()
self.api.authenticate_by_key('TESTAPIKEY')
self.api.conn.request('GET', '/current_customer')
def test_auth_session_success(self):
self.api.deauthenticate()
self.api.authenticate_by_password('<EMAIL>', 'password')
self.api.conn.request('GET', '/current_customer')
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.611288 | 3 | |
fit/utils.py | rembish/f | 10 | 6622876 | <reponame>rembish/f<gh_stars>1-10
from importlib import import_module
from inspect import getmembers, isclass
from pkgutil import iter_modules
def get_known(name, base_cls, key="type"):
    """Scan every module inside package `name` for subclasses of `base_cls`
    that carry a non-None `key` attribute, and return them as a dict keyed
    on that attribute's value."""
    package = import_module(name)
    registry = {}
    for _, module_name, _ in iter_modules(package.__path__, f"{name}."):
        module = import_module(module_name)
        for _, candidate in getmembers(module, isclass):
            marker = getattr(candidate, key, None)
            if marker is not None and issubclass(candidate, base_cls):
                registry[marker] = candidate
    return registry
| from importlib import import_module
from inspect import getmembers, isclass
from pkgutil import iter_modules
def get_known(name, base_cls, key="type"):
main = import_module(name)
known = {}
for _, module_name, _ in iter_modules(main.__path__, "%s." % name):
module = import_module(module_name)
for _, obj in getmembers(module, isclass):
if issubclass(obj, base_cls) \
and getattr(obj, key, None) is not None:
known[getattr(obj, key)] = obj
return known | none | 1 | 2.476661 | 2 | |
src/accounts/serializers.py | NamHDT/Django-HDT | 0 | 6622877 | <filename>src/accounts/serializers.py
from rest_framework import serializers
from .models import Accounts
from django.contrib.auth.models import User
class AccountsSerializer(serializers.ModelSerializer):
class Meta:
model = Accounts
fields = '__all__' | <filename>src/accounts/serializers.py
from rest_framework import serializers
from .models import Accounts
from django.contrib.auth.models import User
class AccountsSerializer(serializers.ModelSerializer):
class Meta:
model = Accounts
fields = '__all__' | none | 1 | 1.692505 | 2 | |
src_pkg/generate.py | KaiyanD/Data_Generator | 0 | 6622878 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 14:39:42 2017
@author: kading
"""
# Import all dependencies.
import csv
import itertools
import json
from src_pkg.core import *
import config
rootpath = os.path.dirname(setup.path()) + "\\testdatageneration\\"
class Generate_Main:
    """Build fake tables (plus per-cell metadata) from a JSON table schema.

    ``run`` generates one column per schema entry via ``Generator``;
    ``mixedpack`` derives a mixed-pack percentage table from two tables
    generated earlier.  Every generated frame is published as
    ``config.data_<table_name>`` so later tables can resolve references.
    """

    def run(self, schema, bottlername_prefix, num_rows, fk_rootpath):
        """Generate ``num_rows`` fake rows for the table described by *schema*.

        Returns ``(table, metadata_DF, table_name, num_rows)`` where
        ``metadata_DF`` mirrors ``table`` with a "P" (positive) marker
        in every cell.
        """
        gen = Generator()
        table = pd.DataFrame()
        metadata_DF = pd.DataFrame()
        table_name = schema["title"]
        col_dics = schema["columns"]
        for col_dic in col_dics:
            # field_name is usually a str; composite columns use a list of names.
            field_name = col_dic["name"]
            meta = ["P"] * num_rows
            # Dispatch on type / sub-type to the matching generator routine.
            if col_dic["type"] == "String":
                if col_dic["sub-type"] == "Code":
                    ls = gen.generate_ls_code(col_dic["max_length"], num_rows, col_dic["partofKey"])
                elif col_dic["sub-type"] == "Sentence":
                    ls = gen.generate_ls_sentence(col_dic["max_length"], num_rows)
                elif col_dic["sub-type"] == "Word":
                    ls = gen.generate_ls_word(col_dic["max_length"], num_rows)
                elif col_dic["sub-type"] == "Address":
                    ls = gen.generate_ls_address(col_dic["max_length"], num_rows)
                elif col_dic["sub-type"] == "Currency":
                    ls = gen.generate_ls_currency(col_dic["max_length"], num_rows)
                elif col_dic["sub-type"] == "City":
                    ls = gen.generate_ls_city(col_dic["max_length"], num_rows)
                elif col_dic["sub-type"] == "State":
                    ls = gen.generate_ls_state(col_dic["max_length"], num_rows)
                elif col_dic["sub-type"] == "Country":
                    ls = gen.generate_ls_country(col_dic["max_length"], num_rows)
                elif col_dic["sub-type"] == "Postal_Code":
                    ls = gen.generate_ls_postalcode(col_dic["max_length"], num_rows)
            elif col_dic["type"] == "Y/N":
                ls = gen.generate_ls_yn(num_rows)
            elif col_dic["type"] == "Datetime":
                ls = gen.generate_ls_datetime(col_dic["format"], num_rows)
            elif col_dic["type"] == "Integer":
                if col_dic["sub-type"] == "Code":
                    ls = gen.generate_ls_digitcode(col_dic["max_length"], num_rows)
            elif col_dic["type"] == "Decimal":
                if col_dic["sub-type"] == "Latitude":
                    ls = gen.generate_ls_latitude(num_rows)
                elif col_dic["sub-type"] == "Longitude":
                    ls = gen.generate_ls_longitude(num_rows)
                elif col_dic["sub-type"] == "Decimal(18,5)":
                    ls = gen.generate_ls_decimal(num_rows)
            elif col_dic["type"] == "Foreign_Key":
                # Fix for NSRP 1110: the "incorrect file name" negative run keeps
                # its mangled bottler prefix when resolving the parent CSV.
                # (Schema "refertotable" values were normalized: no "Header",
                # no "_", all lowercase.)
                if fk_rootpath.endswith("TD_002_Incorrect_Data_File_Name"):
                    getIncorrectFileNames = [x.replace("__channel.csv", "")
                                             for x in os.listdir(fk_rootpath)
                                             if x.endswith("__channel.csv")]
                    for eachIncorrectFileName in getIncorrectFileNames:
                        referToTableCSV = eachIncorrectFileName + "__" + col_dic["refertotable"]
                        ls = gen.generate_ls_foreignkey(col_dic["name"], referToTableCSV,
                                                        col_dic["refertocol"], num_rows, fk_rootpath)
                else:
                    referToTableCSV = bottlername_prefix + "_" + col_dic["refertotable"]
                    ls = gen.generate_ls_foreignkey(col_dic["name"], referToTableCSV,
                                                    col_dic["refertocol"], num_rows, fk_rootpath)
            elif col_dic["type"] == "Categorical":
                ls = gen.generate_ls_categorical(col_dic["options"], num_rows)
            # A plain list fills a single column; a frame-like result (composite
            # foreign keys) fans out into several columns.
            if type(ls) == list:
                table[field_name] = ls
                metadata_DF[field_name] = meta
            else:
                for x in range(len(field_name)):
                    table[field_name[x]] = ls[list(ls)[x]].values
                    metadata_DF[field_name[x]] = meta
        # Publish for later foreign-key lookups.  setattr() replaces the old
        # exec("config.data_<name>=table") hack: same effect, no code injection.
        setattr(config, "data_" + table_name, table)
        return table, metadata_DF, table_name, num_rows

    ## Generate Mixed Pack Table
    def mixedpack(self, schema, bottlername_prefix, fk_rootpath):
        """Generate the mixed-pack table: 2-6 partitions per parent row whose
        percentage shares sum to ~100.

        Returns ``(table, metadata_DF, table_name, numofrows)``.
        """
        table = pd.DataFrame()
        metadata_DF = pd.DataFrame()
        table_name = schema["title"]
        dic1 = schema["columns"][0]
        dic2 = schema["columns"][1]
        # Look up the already-generated parent tables; getattr() replaces the
        # old exec("config.refertable…") hack.  The config.refertable1/2
        # attributes are still set to preserve the original side effect.
        config.refertable1 = getattr(config, "data_" + dic1["refertotable"].replace(".csv", ""))
        refervalue1 = list(config.refertable1.loc[
            config.refertable1[dic1['condition'][0]] == dic1['condition'][1],
            dic1["refertocol"][0]])
        config.refertable2 = getattr(config, "data_" + dic2["refertotable"].replace(".csv", ""))
        refervalue2 = list(config.refertable2.loc[
            config.refertable2[dic2['condition'][0]] == dic2['condition'][1],
            dic2["refertocol"][0]])
        dic3 = schema["columns"][2]
        # 2..6 partitions per parent row.
        numofpartition_ls = [random.choice(range(2, 7)) for i in range(len(refervalue1))]
        numofrows = sum(numofpartition_ls)

        def numsum1(numofpartition):
            # Random percentage split over `numofpartition` parts summing ~100.
            ls = [random.uniform(1, 10) for i in range(numofpartition)]
            ratings_ls_unit = [round(x / sum(ls) * 100, 5) for x in ls]
            return ratings_ls_unit

        table[dic1['name'][0]] = list(itertools.chain(
            *(itertools.repeat(elem, n) for elem, n in zip(refervalue1, numofpartition_ls))))
        metadata_DF[dic1['name'][0]] = ["P"] * numofrows
        table[dic2['name'][0]] = list(itertools.chain(
            *(random.sample(refervalue2, n) for n in numofpartition_ls)))
        metadata_DF[dic2['name'][0]] = ["P"] * numofrows
        table[dic3['name']] = list(itertools.chain(*(numsum1(n) for n in numofpartition_ls)))
        metadata_DF[dic3['name']] = ["P"] * numofrows
        return table, metadata_DF, table_name, numofrows
class FileCreater:
    """Write one generated table out as positive and negative test-data files.

    Each public method copies the pristine table/metadata, optionally corrupts
    the copy via ``Changer``, writes ``<name>.csv`` (or ``.xls``/``.txt``) plus
    a ``<name>_metadata.json`` sidecar, and stamps a human-readable
    "description" key into the JSON.  The shared write path lives in the
    private helpers below; all file names, formats and description strings are
    identical to the original per-method implementations.
    """

    def __init__(self, table, metadata_DF, table_name_all, filerootpath):
        self.table = table
        self.metadata_DF = metadata_DF
        self.table_name_all = table_name_all
        self.changer = Changer()
        self.filerootpath = filerootpath

    # ---- private helpers -------------------------------------------------

    def _copies(self):
        # Work on copies so the pristine table survives for the next variant.
        return self.table.copy(), self.metadata_DF.copy()

    def _data_path(self, name=None, ext="csv"):
        # Path of the data file; `name` overrides the default table name.
        return self.filerootpath + "\\%s.%s" % (name or self.table_name_all, ext)

    def _json_path(self, name=None):
        return self.filerootpath + "\\%s_metadata.json" % (name or self.table_name_all)

    def _write_meta(self, metadata_DF_copy, description, name=None):
        # Metadata sidecar + "description" key, shared by every variant.
        path = self._json_path(name)
        metadata_DF_copy.to_json(path, orient="index")
        self.changer.add_keyvaluetojson(path, "description", description)

    def _write(self, table_copy, metadata_DF_copy, description, name=None,
               ext="csv", sep=",", encoding="utf-8", quoting=csv.QUOTE_ALL):
        # One write path for every variant: data file, then metadata sidecar.
        table_copy.to_csv(self._data_path(name, ext), index=False,
                          encoding=encoding, sep=sep, quoting=quoting)
        self._write_meta(metadata_DF_copy, description, name)

    def _corrupt_and_write(self, transform, dicofcols, description):
        # Apply one Changer corruption to fresh copies, then write both files.
        table_copy, metadata_DF_copy = self._copies()
        table_copy, metadata_DF_copy = transform(dicofcols, table_copy, metadata_DF_copy)
        self._write(table_copy, metadata_DF_copy, description)

    # ---- positive + file-format variants ---------------------------------

    def positive_Data_Files(self):
        table_copy, metadata_DF_copy = self._copies()
        self._write(table_copy, metadata_DF_copy, "positive")

    def incorrect_File_Format_XLS(self):
        table_copy, metadata_DF_copy = self._copies()
        # Excel instead of CSV: written through an XlsxWriter ExcelWriter.
        excelwriter = pd.ExcelWriter(self._data_path(ext="xls"), engine='xlsxwriter')
        table_copy.to_excel(excelwriter, sheet_name='Sheet1', index=False)
        excelwriter.save()
        self._write_meta(metadata_DF_copy, "negative: not csv but in excel")

    def incorrect_File_Format_TXT(self):
        table_copy, metadata_DF_copy = self._copies()
        self._write(table_copy, metadata_DF_copy, "negative: not csv but in txt", ext="txt")

    def incorrect_Format_Not_Comma_Separated(self):
        table_copy, metadata_DF_copy = self._copies()
        self._write(table_copy, metadata_DF_copy, "negative: not commaseparated", sep="|")

    def incorrect_Format_Not_UFT8(self):
        table_copy, metadata_DF_copy = self._copies()
        self._write(table_copy, metadata_DF_copy, "negative: not utf8 but utf16",
                    encoding="utf-16")

    def incorrect_Format_Not_Double_Quoted(self):
        table_copy, metadata_DF_copy = self._copies()
        self._write(table_copy, metadata_DF_copy, "negative: not doublequoted",
                    quoting=csv.QUOTE_MINIMAL)

    # ---- cell-removal variants -------------------------------------------

    def neg_notallpresent(self, num):
        """Write the CSV, then delete 1..num random cells from random rows."""
        table_copy, metadata_DF_copy = self._copies()
        path = self._data_path()
        table_copy.to_csv(path, index=False, encoding="utf-8", sep=",",
                          quoting=csv.QUOTE_ALL)
        with open(path, 'r', encoding="utf8") as infile:
            lines = [row for row in csv.reader(infile)]
        numofnotpresent = random.choice(range(num)) + 1
        # NOTE(review): row index 0 is the CSV header; when it is drawn the
        # metadata update below lands on iloc[-1] — confirm this is intended.
        row_inds = random.sample(range(len(lines)), numofnotpresent)
        for row_ind in row_inds:
            col_ind = random.choice(range(len(lines[row_ind])))
            del lines[row_ind][col_ind]
            metadata_DF_copy.iloc[row_ind - 1, col_ind] = "not present"
        # Rewrite without QUOTE_ALL (csv.writer default), as before.
        with open(path, "w", encoding="utf-8") as outfile:
            csv.writer(outfile, delimiter=",", lineterminator='\n').writerows(lines)
        self._write_meta(metadata_DF_copy, "negative: not allpresent")

    def all_In_One(self, difcols):
        """Combine all Changer corruptions, then drop cells from clean rows."""
        table_copy, metadata_DF_copy = self._copies()
        table_copy, metadata_DF_copy = self.changer.add_alltoone(difcols, table_copy,
                                                                 metadata_DF_copy)
        path = self._data_path()
        table_copy.to_csv(path, index=False, encoding="utf-8", sep=",",
                          quoting=csv.QUOTE_ALL)
        with open(path, 'r', encoding="utf8") as infile:
            lines = [row for row in csv.reader(infile)]
        numofnotpresent = random.choice(range(1, len(table_copy.index)))
        row_inds = random.sample(range(len(lines)), numofnotpresent)
        for row_ind in row_inds:
            # Only drop a cell from rows the Changer left untouched ("P" everywhere).
            if all(metadata_DF_copy.iloc[row_ind - 1] == "P"):
                col_ind = random.choice(range(len(lines[row_ind])))
                del lines[row_ind][col_ind]
                metadata_DF_copy.iloc[row_ind - 1, col_ind] = "not present"
        with open(path, "w", encoding="utf-8") as outfile:
            csv.writer(outfile, delimiter=",", lineterminator='\n',
                       quoting=csv.QUOTE_ALL).writerows(lines)
        self._write_meta(metadata_DF_copy, "negative: All combinations in one file")

    # ---- Changer-corruption variants -------------------------------------

    def incorrect_Format_Thousand_Comma_Separator(self, dicofcols):
        self._corrupt_and_write(self.changer.add_thousandseparatercomma, dicofcols,
                                "negative: thousand separater as comma")

    def incorrect_Format_Thousand_Decimal_Separator(self, dicofcols):
        self._corrupt_and_write(self.changer.add_decimalseparatercomma, dicofcols,
                                "negative: decimal separater as comma")

    def incorrect_Data_Blank_Mandatory_Value(self, dicofcols):
        self._corrupt_and_write(self.changer.add_nulltoMandatory_nothing, dicofcols,
                                "negative: null as ''")

    def incorrect_Data_Space_Mandatory_Value(self, dicofcols):
        # Description string kept exactly as in the original (no "negative:" prefix).
        self._corrupt_and_write(self.changer.add_nulltoMandatory_space, dicofcols,
                                "null as ' '")

    def incorrect_Data_NULL_Mandatory_Value(self, dicofcols):
        self._corrupt_and_write(self.changer.add_nulltoMandatory_Null, dicofcols,
                                "negative: null as 'Null'")

    ## Add "not in option" values to categorical data.
    def incorrect_Optional_Category(self, dicofcols):
        self._corrupt_and_write(self.changer.add_notinoptiontocat, dicofcols,
                                "negative: 'not in option' for this categorical field")

    ## Add special characters to strings; `language` is one of
    ## ["Simplified Chinese", "Traditional Chinese", "Spanish", "German"].
    def neg_addspecialcharactertostring(self, dicofcols, language):
        table_copy, metadata_DF_copy = self._copies()
        table_copy, metadata_DF_copy = self.changer.add_specialcharactertostring(
            dicofcols, table_copy, metadata_DF_copy, language)
        descriptions = {
            "Simplified Chinese": "This string contains simplified Chinese character",
            "Traditional Chinese": "This string contains traditional Chinese character",
            "Spanish": "This string contains Spanish character",
            "German": "This string contains German character",
        }
        # As in the original, nothing is written for an unrecognized language.
        if language in descriptions:
            self._write(table_copy, metadata_DF_copy, descriptions[language])

    def incorrect_Foreign_Key(self, dicofcols):
        self._corrupt_and_write(self.changer.add_IncorFK, dicofcols,
                                "Negative: Foreign key is incorrect")

    def incorrect_Data_File_Name(self, eachNegFileNm):
        # Same data, written under a deliberately malformed file name.
        table_copy, metadata_DF_copy = self._copies()
        self._write(table_copy, metadata_DF_copy,
                    "Negative: Filename format is incorrect", name=eachNegFileNm)

    def incorrect_String_Code(self, dicofcols):
        self._corrupt_and_write(self.changer.add_incorStringCode, dicofcols,
                                "negative: incorrect code")

    def incorrect_String_Language_wPunctuation(self, dicofcols):
        self._corrupt_and_write(self.changer.add_incorstring_language_string, dicofcols,
                                "negative: This contains incorrect address,word,city and address")

    def incorrect_String_Length(self, dicofcols):
        self._corrupt_and_write(self.changer.add_incorstring_length_string, dicofcols,
                                "negative: This contains address,word,city and address more than max length")

    def incorrect_String_Address_Currency_Language(self, dicofcols):
        self._corrupt_and_write(self.changer.add_incorstring_language_addressAndcurrency, dicofcols,
                                "negative: This contains incorrect language for State, Country, Postal Code, Currency")

    def incorrect_String_Address_Currency_Length(self, dicofcols):
        self._corrupt_and_write(self.changer.add_incorstring_length_addressAndcurrency, dicofcols,
                                "negative: This contains State, Country, Postal Code, Currency more than max length")

    def incorrect_Digit_Code(self, dicofcols):
        self._corrupt_and_write(self.changer.add_incorDigitCode, dicofcols,
                                "negative: incorrect code")

    def neg_addduplicateprimarykey(self, dicofcols):
        self._corrupt_and_write(self.changer.add_duplicateprimarykey, dicofcols,
                                "negative: duplicated primary key")

    def neg_blankmultipleprimarykey(self, dicofcols):
        # The original computed an unused list of key-column names here; dropped.
        self._corrupt_and_write(self.changer.add_blankmultipleprimarykey, dicofcols,
                                "negative: Multiple blank primary keys")
"""
Created on Mon Oct 23 14:39:42 2017
@author: kading
"""
# Import all dependencies.
import csv
import itertools
import json
from src_pkg.core import *
import config
rootpath = os.path.dirname(setup.path()) + "\\testdatageneration\\"
class Generate_Main:
    """Build fake tables (plus per-cell metadata) from a JSON table schema.

    ``run`` generates one column per schema entry via ``Generator``;
    ``mixedpack`` derives a mixed-pack percentage table from two tables
    generated earlier.  Generated frames are published via
    ``exec("config.data_<table_name>=table")`` so later tables can
    resolve references.
    """
    def run(self,schema,bottlername_prefix,num_rows,fk_rootpath):
        """Generate ``num_rows`` fake rows for the table described by *schema*.

        Returns ``(table, metadata_DF, table_name, num_rows)`` where
        ``metadata_DF`` mirrors ``table`` with a "P" (positive) marker
        in every cell.
        """
        # Create the generator object.
        gen = Generator()
        # Create data frame to store fake data and metadata
        table = pd.DataFrame()
        metadata_DF = pd.DataFrame()
        # Provide the number of rows
        # Read table title and table columns
        table_name = schema["title"]
        col_dics = schema["columns"]
        # Read through the columns dictionary
        for col_dic in col_dics:
            # Read column name
            field_name = col_dic["name"]
            meta = ["P"]*num_rows
            # Read through each dictionary, for different type use different function to generate a list of data
            if col_dic["type"] == "String":
                if col_dic["sub-type"] == "Code":
                    ls = gen.generate_ls_code(col_dic["max_length"],num_rows,col_dic["partofKey"])
                elif col_dic["sub-type"] == "Sentence":
                    ls = gen.generate_ls_sentence(col_dic["max_length"],num_rows)
                elif col_dic["sub-type"] == "Word":
                    ls = gen.generate_ls_word(col_dic["max_length"],num_rows)
                elif col_dic["sub-type"] == "Address":
                    ls = gen.generate_ls_address(col_dic["max_length"],num_rows)
                elif col_dic["sub-type"] == "Currency":
                    ls = gen.generate_ls_currency(col_dic["max_length"],num_rows)
                elif col_dic["sub-type"] == "City":
                    ls = gen.generate_ls_city(col_dic["max_length"],num_rows)
                elif col_dic["sub-type"] == "State":
                    ls = gen.generate_ls_state(col_dic["max_length"],num_rows)
                elif col_dic["sub-type"] == "Country":
                    ls = gen.generate_ls_country(col_dic["max_length"],num_rows)
                elif col_dic["sub-type"] == "Postal_Code":
                    ls = gen.generate_ls_postalcode(col_dic["max_length"],num_rows)
            elif col_dic["type"] == "Y/N":
                ls = gen.generate_ls_yn(num_rows)
            elif col_dic["type"] == "Datetime":
                ls = gen.generate_ls_datetime(col_dic["format"],num_rows)
            elif col_dic["type"] == "Integer":
                if col_dic["sub-type"] == "Code":
                    ls = gen.generate_ls_digitcode(col_dic["max_length"],num_rows)
            elif col_dic["type"] == "Decimal":
                if col_dic["sub-type"] == "Latitude":
                    ls = gen.generate_ls_latitude(num_rows)
                elif col_dic["sub-type"] == "Longitude":
                    ls = gen.generate_ls_longitude(num_rows)
                elif col_dic["sub-type"] == "Decimal(18,5)":
                    ls = gen.generate_ls_decimal(num_rows)
            elif col_dic["type"] == "Foreign_Key":
                # ***code added for fix NSRP 1110
                # ***updated schema json "refertotable" element by removing "Header" and removing "_" and all text in lowercase
                if (fk_rootpath.endswith("TD_002_Incorrect_Data_File_Name")):
                    getIncorrectFileNames = [x.replace("__channel.csv", "") for x in os.listdir(fk_rootpath) if x.endswith("__channel.csv")]
                    for eachIncorrectFileName in getIncorrectFileNames:
                        referToTableCSV = eachIncorrectFileName + "__" + col_dic["refertotable"]
                        ls = gen.generate_ls_foreignkey(col_dic["name"], referToTableCSV, col_dic["refertocol"],num_rows, fk_rootpath)
                else:
                    referToTableCSV = bottlername_prefix + "_" + col_dic["refertotable"]
                    ls = gen.generate_ls_foreignkey(col_dic["name"], referToTableCSV, col_dic["refertocol"], num_rows, fk_rootpath)
            elif col_dic["type"] == "Categorical":
                ls = gen.generate_ls_categorical(col_dic["options"],num_rows)
            #print(len(ls))
            # Once each list of data is created, read it into the table as a new column.
            # A plain list fills one column; a frame-like result (composite
            # foreign keys) fans out into several columns.
            if type(ls) == list:
                table[field_name] = ls
                metadata_DF[field_name] = meta
            else:
                for x in range(len(field_name)):
                    table[field_name[x]] = ls[list(ls)[x]].values
                    metadata_DF[field_name[x]] = meta
        # NOTE(review): exec() used for dynamic attribute assignment — fragile
        # if table_name is not a valid identifier; setattr would be safer.
        exec("config.data_"+table_name+"=table")
        return table, metadata_DF, table_name, num_rows
    #f.close()
    ## Generate Mixed Pack Table
    def mixedpack(self,schema,bottlername_prefix,fk_rootpath):
        """Generate the mixed-pack table: 2-6 partitions per parent row whose
        percentage shares sum to ~100.

        Returns ``(table, metadata_DF, table_name, numofrows)``.
        """
        table = pd.DataFrame()
        metadata_DF = pd.DataFrame()
        table_name = schema["title"]
        dic1 = schema["columns"][0]
        dic2 = schema["columns"][1]
        # Look up the already-generated parent tables (set earlier by run()).
        exec("config.refertable1=config.data_" + dic1["refertotable"].replace(".csv",""))
        refervalue1 = list(config.refertable1.loc[config.refertable1[dic1['condition'][0]] == dic1['condition'][1], dic1["refertocol"][0]])
        exec("config.refertable2=config.data_" + dic2["refertotable"].replace(".csv",""))
        refervalue2 = list(config.refertable2.loc[config.refertable2[dic2['condition'][0]] == dic2['condition'][1], dic2["refertocol"][0]])
        dic3 = schema["columns"][2]
        # 2..6 partitions per parent row.
        numofpartition_ls = [random.choice(range(2,7)) for i in range(len(refervalue1))]
        numofrows = sum(numofpartition_ls)
        def numsum1(numofpartition):
            # Random percentage split over `numofpartition` parts summing ~100.
            ls = [random.uniform(1,10) for i in range(numofpartition)]
            ratings_ls_unit = [round(x/sum(ls)*100,5) for x in ls]
            return ratings_ls_unit
        table[dic1['name'][0]] = list(itertools.chain(*(itertools.repeat(elem, n) for elem, n in zip(refervalue1, numofpartition_ls))))
        metadata_DF[dic1['name'][0]] = ["P"]*numofrows
        table[dic2['name'][0]] = list(itertools.chain(*(random.sample(refervalue2,n) for n in numofpartition_ls)))
        metadata_DF[dic2['name'][0]] = ["P"]*numofrows
        table[dic3['name']] = list(itertools.chain(*(numsum1(n) for n in numofpartition_ls)))
        metadata_DF[dic3['name']] = ["P"]*numofrows
        return table, metadata_DF, table_name, numofrows
class FileCreater:
    """Writes a generated table to disk as 'positive' (well-formed) and many
    'negative' (deliberately broken) CSV/JSON file pairs for ETL validation tests.

    Each public method produces one CSV (or XLS/TXT) plus a ``*_metadata.json``
    file stamped with a human-readable ``description`` of the injected defect.
    The instance's ``table``/``metadata_DF`` are never mutated; every method
    works on copies.
    """
    def __init__(self, table, metadata_DF, table_name_all, filerootpath):
        """table: generated pandas DataFrame; metadata_DF mirrors it cell-for-cell
        ("P" marks a pristine cell); table_name_all: base file name;
        filerootpath: output directory (Windows-style '\\' separators are used)."""
        self.table = table
        self.metadata_DF = metadata_DF
        self.table_name_all = table_name_all
        self.changer = Changer()
        self.filerootpath = filerootpath
    # ---- internal helpers -------------------------------------------------
    def _csv_path(self, name=None):
        """Path of the output CSV (defaults to the table's base name)."""
        return self.filerootpath + "\\%s.csv" % (name if name is not None else self.table_name_all)
    def _json_path(self, name=None):
        """Path of the metadata JSON written next to the CSV."""
        return self.filerootpath + "\\%s_metadata.json" % (name if name is not None else self.table_name_all)
    def _save_csv(self, table, name=None, encoding="utf-8", sep=",", quoting=csv.QUOTE_ALL):
        """Write *table* as CSV; the defaults are the 'positive' file format."""
        table.to_csv(self._csv_path(name), index=False, encoding=encoding, sep=sep, quoting=quoting)
    def _save_meta(self, metadata, description, name=None):
        """Write the metadata JSON, then stamp it with *description*."""
        metadata.to_json(self._json_path(name), orient="index")
        self.changer.add_keyvaluetojson(self._json_path(name), "description", description)
    def _transform_and_save(self, transform, dicofcols, description):
        """Run a Changer transform over copies of table+metadata, then save both."""
        table_copy, metadata_DF_copy = transform(dicofcols, self.table.copy(), self.metadata_DF.copy())
        self._save_csv(table_copy)
        self._save_meta(metadata_DF_copy, description)
    # ---- file variants ----------------------------------------------------
    def positive_Data_Files(self):
        """Correctly formatted CSV + metadata (the positive control case)."""
        self._save_csv(self.table.copy())
        self._save_meta(self.metadata_DF.copy(), "positive")
    def incorrect_File_Format_XLS(self):
        """Negative case: data delivered as .xls instead of .csv."""
        # Create a Pandas Excel writer using XlsxWriter as the engine.
        excelwriter = pd.ExcelWriter(self.filerootpath + "\\%s.xls" % self.table_name_all, engine='xlsxwriter')
        self.table.copy().to_excel(excelwriter, sheet_name='Sheet1', index=False)
        # Close the Pandas Excel writer and output the Excel file.
        excelwriter.save()
        self._save_meta(self.metadata_DF.copy(), "negative: not csv but in excel")
    def incorrect_File_Format_TXT(self):
        """Negative case: CSV-formatted content saved with a .txt extension."""
        self.table.copy().to_csv(self.filerootpath + "\\%s.txt" % self.table_name_all,
                                 index=False, encoding="utf-8", sep=",", quoting=csv.QUOTE_ALL)
        self._save_meta(self.metadata_DF.copy(), "negative: not csv but in txt")
    def incorrect_Format_Not_Comma_Separated(self):
        """Negative case: pipe-delimited instead of comma-delimited."""
        self._save_csv(self.table.copy(), sep="|")
        self._save_meta(self.metadata_DF.copy(), "negative: not commaseparated")
    def incorrect_Format_Not_UFT8(self):
        """Negative case: UTF-16 encoding instead of UTF-8.

        NOTE(review): the method name's 'UFT8' typo is part of the public
        interface and is kept for callers.
        """
        self._save_csv(self.table.copy(), encoding="utf-16")
        self._save_meta(self.metadata_DF.copy(), "negative: not utf8 but utf16")
    def incorrect_Format_Not_Double_Quoted(self):
        """Negative case: minimal quoting instead of quoting every field."""
        self._save_csv(self.table.copy(), quoting=csv.QUOTE_MINIMAL)
        self._save_meta(self.metadata_DF.copy(), "negative: not doublequoted")
    def neg_notallpresent(self, num):
        """Negative case: randomly delete between 1 and *num* cells so that some
        CSV rows come up short; the affected cells are marked in the metadata."""
        metadata_DF_copy = self.metadata_DF.copy()
        self._save_csv(self.table.copy())
        with open(self._csv_path(), 'r', encoding="utf8") as infile:
            lines = [l for l in csv.reader(infile)]
        numofnotpresent = random.choice(range(num)) + 1
        for row_ind in random.sample(range(len(lines)), numofnotpresent):
            col_ind = random.choice(range(len(lines[row_ind])))
            del lines[row_ind][col_ind]
            # NOTE(review): row_ind may be 0 (the header row), in which case
            # iloc[-1] marks the LAST data row — confirm whether the header
            # should be excluded from the sample.
            metadata_DF_copy.iloc[row_ind - 1, col_ind] = "not present"
        with open(self._csv_path(), "w", encoding="utf-8") as f:
            # No quoting argument: the rewritten file intentionally uses the
            # csv module's default (minimal) quoting, as the original did.
            csv.writer(f, delimiter=",", lineterminator='\n').writerows(lines)
        self._save_meta(metadata_DF_copy, "negative: not allpresent")
    def incorrect_Format_Thousand_Comma_Separator(self, dicofcols):
        """Negative case: numbers written with ',' as thousands separator."""
        self._transform_and_save(self.changer.add_thousandseparatercomma, dicofcols,
                                 "negative: thousand separater as comma")
    def incorrect_Format_Thousand_Decimal_Separator(self, dicofcols):
        """Negative case: numbers written with ',' as decimal separator."""
        self._transform_and_save(self.changer.add_decimalseparatercomma, dicofcols,
                                 "negative: decimal separater as comma")
    def incorrect_Data_Blank_Mandatory_Value(self, dicofcols):
        """Negative case: mandatory cells emptied to ''."""
        self._transform_and_save(self.changer.add_nulltoMandatory_nothing, dicofcols,
                                 "negative: null as ''")
    def incorrect_Data_Space_Mandatory_Value(self, dicofcols):
        """Negative case: mandatory cells replaced with a single space.

        NOTE(review): the original description lacks the 'negative:' prefix used
        everywhere else; kept byte-identical for downstream consumers.
        """
        self._transform_and_save(self.changer.add_nulltoMandatory_space, dicofcols,
                                 "null as ' '")
    def incorrect_Data_NULL_Mandatory_Value(self, dicofcols):
        """Negative case: mandatory cells replaced with the literal string 'Null'."""
        self._transform_and_save(self.changer.add_nulltoMandatory_Null, dicofcols,
                                 "negative: null as 'Null'")
    ## Function to add "not in option" to categorical data.
    def incorrect_Optional_Category(self, dicofcols):
        """Negative case: categorical cells set to a value outside the allowed options."""
        self._transform_and_save(self.changer.add_notinoptiontocat, dicofcols,
                                 "negative: 'not in option' for this categorical field")
    ## Function to add special character to string,language has the following options: ["Simplified Chinese","Traditional Chinese","Spanish","German"]
    def neg_addspecialcharactertostring(self, dicofcols, language):
        """Negative case: inject language-specific special characters into strings.

        Any *language* outside the four supported options transforms the data
        but writes nothing, matching the original behavior.
        """
        descriptions = {
            "Simplified Chinese": "This string contains simplified Chinese character",
            "Traditional Chinese": "This string contains traditional Chinese character",
            "Spanish": "This string contains Spanish character",
            "German": "This string contains German character",
        }
        table_copy, metadata_DF_copy = self.changer.add_specialcharactertostring(
            dicofcols, self.table.copy(), self.metadata_DF.copy(), language)
        if language in descriptions:
            self._save_csv(table_copy)
            self._save_meta(metadata_DF_copy, descriptions[language])
    # Function to add all combinations in one file
    def all_In_One(self, difcols):
        """Negative case: apply every Changer combination at once, then also drop
        random cells from rows the transforms left pristine (all-"P" metadata)."""
        table_copy, metadata_DF_copy = self.changer.add_alltoone(difcols, self.table.copy(), self.metadata_DF.copy())
        self._save_csv(table_copy)
        with open(self._csv_path(), 'r', encoding="utf8") as infile:
            lines = [l for l in csv.reader(infile)]
        numofnotpresent = random.choice(range(1, len(table_copy.index)))
        for row_ind in random.sample(range(len(lines)), numofnotpresent):
            # Only damage rows whose metadata is still entirely "P" (untouched).
            if all(metadata_DF_copy.iloc[row_ind - 1] == "P"):
                col_ind = random.choice(range(len(lines[row_ind])))
                del lines[row_ind][col_ind]
                metadata_DF_copy.iloc[row_ind - 1, col_ind] = "not present"
        with open(self._csv_path(), "w", encoding="utf-8") as f:
            csv.writer(f, delimiter=",", lineterminator='\n', quoting=csv.QUOTE_ALL).writerows(lines)
        self._save_meta(metadata_DF_copy, "negative: All combinations in one file")
    def incorrect_Foreign_Key(self, dicofcols):
        """Negative case: foreign-key values that do not exist in the referenced table."""
        self._transform_and_save(self.changer.add_IncorFK, dicofcols,
                                 "Negative: Foreign key is incorrect")
    def incorrect_Data_File_Name(self, eachNegFileNm):
        """Negative case: correct data saved under a wrongly formatted file name."""
        self._save_csv(self.table.copy(), name=eachNegFileNm)
        self._save_meta(self.metadata_DF.copy(), "Negative: Filename format is incorrect",
                        name=eachNegFileNm)
    def incorrect_String_Code(self, dicofcols):
        """Negative case: string code fields replaced with invalid codes."""
        self._transform_and_save(self.changer.add_incorStringCode, dicofcols,
                                 "negative: incorrect code")
    def incorrect_String_Language_wPunctuation(self, dicofcols):
        """Negative case: wrong-language text injected into string fields."""
        self._transform_and_save(self.changer.add_incorstring_language_string, dicofcols,
                                 "negative: This contains incorrect address,word,city and address")
    def incorrect_String_Length(self, dicofcols):
        """Negative case: string fields longer than their declared max length."""
        self._transform_and_save(self.changer.add_incorstring_length_string, dicofcols,
                                 "negative: This contains address,word,city and address more than max length")
    def incorrect_String_Address_Currency_Language(self, dicofcols):
        """Negative case: wrong-language State/Country/Postal Code/Currency values."""
        self._transform_and_save(self.changer.add_incorstring_language_addressAndcurrency, dicofcols,
                                 "negative: This contains incorrect language for State, Country, Postal Code, Currency")
    def incorrect_String_Address_Currency_Length(self, dicofcols):
        """Negative case: State/Country/Postal Code/Currency values over max length."""
        self._transform_and_save(self.changer.add_incorstring_length_addressAndcurrency, dicofcols,
                                 "negative: This contains State, Country, Postal Code, Currency more than max length")
    def incorrect_Digit_Code(self, dicofcols):
        """Negative case: numeric code fields replaced with invalid codes."""
        self._transform_and_save(self.changer.add_incorDigitCode, dicofcols,
                                 "negative: incorrect code")
    def neg_addduplicateprimarykey(self, dicofcols):
        """Negative case: duplicated primary-key values."""
        self._transform_and_save(self.changer.add_duplicateprimarykey, dicofcols,
                                 "negative: duplicated primary key")
    def neg_blankmultipleprimarykey(self, dicofcols):
        """Negative case: blanks across multiple primary-key columns."""
        self._transform_and_save(self.changer.add_blankmultipleprimarykey, dicofcols,
                                 "negative: Multiple blank primary keys")
## Need to edit metadata_DF ## Function to add special character to string,language has the following options: ["Simplified Chinese","Traditional Chinese","Spanish","German"] #Function to add all combinatins in one file ## Need to edit metadata_DF ## Need to edit metadata_DF ## Need to edit metadata_DF ## Need to edit metadata_DF ## Need to edit metadata_DF ## Need to edit metadata_DF ## Need to edit metadata_DF ## Need to edit metadata_DF #table_copy.to_csv(self.filerootpath + "output\\neg_duplicateprimarykey\\%s.csv" % self.table_name_all,index=False, encoding="utf-8", sep=",", quoting=csv.QUOTE_ALL) #metadata_DF_copy.to_json(self.filerootpath + "output\\neg_duplicateprimarykey\\%s_metadata.json" % self.table_name_all, orient="index") #self.changer.add_keyvaluetojson(self.filerootpath + "output\\neg_duplicateprimarykey\\%s_metadata.json" % self.table_name_all, "description","negative: Duplicate primary keys") # table_copy.to_csv(self.filerootpath + "output\\neg_blankprimarykey\\%s.csv" % self.table_name_all, index=False,encoding="utf-8", sep=",", quoting=csv.QUOTE_ALL) # metadata_DF_copy.to_json(self.filerootpath + "output\\neg_blankprimarykey\\%s_metadata.json" % self.table_name_all,orient="index") # self.changer.add_keyvaluetojson(self.filerootpath + "output\\neg_blankprimarykey\\%s_metadata.json"% self.table_name_all,"description","negative: Multiple blank primary keys") | 2.863192 | 3 |
src/perlin_test.py | jkunimune15/kodi-analysis | 0 | 6622879 | <reponame>jkunimune15/kodi-analysis
import matplotlib.pyplot as plt
import numpy as np
import perlin

# Sample one million points uniformly over the unit disk: taking the square
# root of a uniform variate for the radius makes the density area-uniform.
radius = np.sqrt(np.random.random(1000000))
angle = 2*np.pi*np.random.random(1000000)
px, py = radius*np.cos(angle), radius*np.sin(angle)

# Accumulate a three-octave Perlin displacement field; each octave halves the
# spatial scale (2**-n) and quarters the amplitude (0.1 * 2**-2n).
shift_x, shift_y = np.zeros(px.shape), np.zeros(py.shape)
for octave in range(0, 3):
    shift_x += perlin.perlin(px, py, 2**(-octave), 0.1*2**(-2*octave))
    shift_y += perlin.perlin(px, py, 2**(-octave), 0.1*2**(-2*octave))

# Visualize the displaced point cloud as a 2-D density histogram.
plt.hist2d(px + shift_x, py + shift_y, bins=72, range=[[-1.1, 1.1], [-1.1, 1.1]])
plt.axis('square')
plt.show()
| import matplotlib.pyplot as plt
import numpy as np
import perlin
r = np.sqrt(np.random.random(1000000))
t = 2*np.pi*np.random.random(1000000)
x, y = r*np.cos(t), r*np.sin(t)
dx, dy = np.zeros(x.shape), np.zeros(y.shape)
for n in range(0, 3):
dx += perlin.perlin(x, y, 2**(-n), 0.1*2**(-2*n))
dy += perlin.perlin(x, y, 2**(-n), 0.1*2**(-2*n))
plt.hist2d(x + dx, y + dy, bins=72, range=[[-1.1, 1.1], [-1.1, 1.1]])
plt.axis('square')
plt.show() | none | 1 | 2.959404 | 3 | |
# Greedy approximation to set cover: repeatedly choose the radio station that
# covers the largest number of still-uncovered states.
states_needed = set(['mt', 'wa', 'or', 'id', 'nv', 'ut', 'ca', 'az'])

stations = {}
stations['kone'] = set(['id', 'nv', 'ut'])
stations['ktwo'] = set(['wa', 'id', 'mt'])
stations['kthree'] = set(['or', 'nv', 'ca'])
stations['kfour'] = set(['nv', 'ut'])
stations['kfive'] = set(['ca', 'az'])

final_stations = set()
while states_needed:
    best_station = None
    station_cover = set()
    for station, states in stations.items():
        # States this station would newly cover.
        cover = states_needed & states
        if len(cover) > len(station_cover):
            best_station = station
            station_cover = cover
    if best_station is None:
        # No station covers any remaining state: without this guard the loop
        # would spin forever and add None to the result set.
        break
    states_needed -= station_cover
    final_stations.add(best_station)
print(final_stations)
| states_needed = set(['mt', 'wa', 'or', 'id', 'nv', 'ut', 'ca', 'az'])
stations = {}
stations['kone'] = set(['id', 'nv', 'ut'])
stations['ktwo'] = set(['wa', 'id', 'mt'])
stations['kthree'] = set(['or', 'nv', 'ca'])
stations['kfour'] = set(['nv', 'ut'])
stations['kfive'] = set(['ca', 'az'])
final_stations = set()
while states_needed:
best_station = None
station_cover = set()
for station, states in stations.items():
cover = states_needed & states
if len(cover) > len(station_cover):
best_station = station
station_cover = cover
states_needed -= station_cover
final_stations.add(best_station)
print(final_stations)
| none | 1 | 3.387102 | 3 | |
5.py | prof-paradox/project-euler | 0 | 6622881 | <filename>5.py
# Calculates smallest positive number that is evenly divisible by all of the numbers from 1 to 20
''' Inefficient method - takes about 2.5 mins
---------------------------------------------
min_mult = None
num = 21
while(True):
    for i in range(2, 21):
        if num % i != 0:
            break
    else:
        min_mult = num
    if type(min_mult) == int:
        break
    else:
        num += 1
print(min_mult)
---------------------------------------------
'''
# Optimized version below: build the LCM from prime factorizations.
# -------------------------------------------
import math


def isPrime(n):
    """Trial-division primality test: True when n is prime."""
    if n < 2:  # guard: 0 and 1 are not prime
        return False
    for i in range(2, round(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True


factor_dict = {}
min_mult = 1
# Factorize every i in 2..20. Each prime is recorded once PER POWER so that
# factor_dict[i].count(p) equals the exponent of p in i (the original appended
# each prime only once, which made the final loop compute the product of
# primes, 9699690, instead of the true LCM).
for i in range(2, 21):
    factor_ls = []
    stg = f"{i} = "
    if isPrime(i):
        stg += f"{i}^1, "
        factor_ls.append(i)
    else:
        for j in range(2, i // 2 + 1):
            if isPrime(j) and (i % j) == 0:
                temp = i
                exp = 0
                while temp % j == 0:
                    exp += 1
                    temp //= j  # integer division keeps temp an int (was float /=)
                factor_ls.extend([j] * exp)  # one entry per power of j
                stg += f"{j}^{exp}" + ", "
    print(stg[:len(stg) - 2])
    print(factor_ls)
    factor_dict[i] = factor_ls
# LCM(1..20): multiply each candidate factor at the highest exponent observed
# in any single factorization (composites never appear in the lists, so they
# contribute i**0 == 1).
for i in range(2, 21):
    max_pow = 0
    for num in factor_dict:
        factor_count = factor_dict[num].count(i)
        if factor_count > max_pow:
            max_pow = factor_count
    min_mult *= i ** max_pow
print(min_mult)
| <filename>5.py
# Calculates smallest positive number that is evenly divisible by all of the numbers from 1 to 20
''' Inefficient method - takes about 2.5 mins
---------------------------------------------
min_mult = None
num = 21
while(True):
for i in range(2, 21):
if num % i != 0:
break
else:
min_mult = num
if type(min_mult) == int:
break
else:
num += 1
print(min_mult)
---------------------------------------------
'''
# Optimized version below
# -------------------------------------------
import math
def isPrime(n):
for i in range(2, round(math.sqrt(n)) + 1):
if n % i == 0:
return False
return True
factor_dict = {}
min_mult = 1
for i in range(2, 21):
factor_ls = []
stg = f"{i} = "
if isPrime(i):
stg += f"{i}^1, "
factor_ls.append(i)
else:
for j in range(2, i // 2 + 1):
if isPrime(j) and (i % j) == 0:
temp = i
exp = 0
while(temp % j == 0):
exp += 1
temp /= j
factor_ls.append(j)
stg += f"{j}^{exp}" + ", "
print(stg[:len(stg)-2])
print(factor_ls)
factor_dict[i] = factor_ls
for i in range(2, 21):
max_pow = 0
for num in factor_dict:
factor_count = factor_dict[num].count(i)
if factor_count > max_pow:
max_pow = factor_count
min_mult *= i ** max_pow
print(min_mult)
| en | 0.517029 | # Calculates smallest positive number that is evenly divisible by all of the numbers from 1 to 20 Inefficient method - takes about 2.5 mins
---------------------------------------------
min_mult = None
num = 21
while(True):
for i in range(2, 21):
if num % i != 0:
break
else:
min_mult = num
if type(min_mult) == int:
break
else:
num += 1
print(min_mult)
--------------------------------------------- # Optimized version below # ------------------------------------------- | 3.722107 | 4 |
control_panel/__init__.py | uzum/cran-orchestrator | 0 | 6622882 | <gh_stars>0
from .server import CPServer
| from .server import CPServer | none | 1 | 1.100991 | 1 | |
troupon/payment/api.py | andela/troupon | 14 | 6622883 | """Generic API configuration."""
from django.shortcuts import get_object_or_404
from rest_framework import generics
from rest_framework import permissions
from deals.models import Advertiser
from payment.models import Purchases
from payment.serializers import TransactionSerializer
class TransationsList(generics.ListAPIView):
    """List every transaction containing items the requesting merchant advertises."""

    serializer_class = TransactionSerializer
    permission_classes = (permissions.IsAuthenticated,)

    def get_queryset(self):
        # Resolve the logged-in merchant's advertiser record (404 if missing),
        # then return only that advertiser's purchases.
        merchant = self.request.user.profile.merchant
        advertiser = get_object_or_404(Advertiser, pk=merchant.advertiser_ptr.id)
        return Purchases.objects.filter(advertiser=advertiser)
class TransactionsDetails(generics.ListAPIView):
    """Show one of the merchant's own transactions, selected by the ID in the URL."""

    serializer_class = TransactionSerializer
    permission_classes = (permissions.IsAuthenticated,)

    def get_queryset(self):
        # The transaction primary key comes from the URL kwargs; scoping the
        # filter to the merchant's advertiser prevents reading other
        # merchants' transactions.
        transaction_id = self.kwargs.get('pk')
        merchant = self.request.user.profile.merchant
        advertiser = get_object_or_404(Advertiser, pk=merchant.advertiser_ptr.id)
        return Purchases.objects.filter(advertiser=advertiser, id=transaction_id)
| """Generic API configuration."""
from django.shortcuts import get_object_or_404
from rest_framework import generics
from rest_framework import permissions
from deals.models import Advertiser
from payment.models import Purchases
from payment.serializers import TransactionSerializer
class TransationsList(generics.ListAPIView):
""" A merchant can see a list of all transactions that contained
items he/she is advertising on the site.
"""
serializer_class = TransactionSerializer
permission_classes = (permissions.IsAuthenticated,)
def get_queryset(self):
advertiser_id = self.request.user.profile.merchant.advertiser_ptr.id
advertiser = get_object_or_404(Advertiser, pk=advertiser_id)
return Purchases.objects.filter(advertiser=advertiser)
class TransactionsDetails(generics.ListAPIView):
"""Using a transaction ID, a merchant can see the details of a particular transaction.
"""
serializer_class = TransactionSerializer
permission_classes = (permissions.IsAuthenticated,)
def get_queryset(self):
purchase_id = self.kwargs.get('pk')
advertiser_id = self.request.user.profile.merchant.advertiser_ptr.id
advertiser = get_object_or_404(Advertiser, pk=advertiser_id)
return Purchases.objects.filter(advertiser=advertiser, id=purchase_id)
| en | 0.865413 | Generic API configuration. A merchant can see a list of all transactions that contained items he/she is advertising on the site. Using a transaction ID, a merchant can see the details of a particular transaction. | 2.402708 | 2 |
examples/modules/object_tracker/opencv_object_tracker.py | jagin/dvg-utils | 7 | 6622884 | import cv2
from .centroid_tracker import CentroidTracker
class OpencvObjectTracker:
    """Multi-object tracker pairing per-object OpenCV trackers with a CentroidTracker.

    Frames that come with fresh detections re-seed one OpenCV single-object
    tracker per detection; all other frames advance the existing trackers.
    The resulting bounding boxes feed a CentroidTracker that assigns stable
    object IDs across frames.
    """
    def __init__(self, max_disappeared=20, max_distance=80, tracker_type="kcf"):
        """max_disappeared: frames an object may go unseen before it is dropped;
        max_distance: max centroid distance (pixels) for keeping an ID match;
        tracker_type: key into OPENCV_OBJECT_TRACKERS selecting the algorithm."""
        # Initialize the frame dimensions (we'll set them as soon as we read the first frame from the video)
        self.w = None
        self.h = None
        self.tracker_type = tracker_type
        # Initialize a dictionary that maps strings to their corresponding OpenCV object tracker implementations
        # NOTE(review): these factory names come from the OpenCV 3.x/4.x contrib
        # API; several moved to cv2.legacy.* in OpenCV >= 4.5 — confirm the
        # pinned OpenCV version.
        self.OPENCV_OBJECT_TRACKERS = {
            "csrt": cv2.TrackerCSRT_create,
            "kcf": cv2.TrackerKCF_create,
            "boosting": cv2.TrackerBoosting_create,
            "mil": cv2.TrackerMIL_create,
            "tld": cv2.TrackerTLD_create,
            "medianflow": cv2.TrackerMedianFlow_create,
            "mosse": cv2.TrackerMOSSE_create
        }
        # Instantiate our centroid tracker, then initialize a list to store each of our OpenCV correlation trackers,
        # followed by a dictionary to map each unique object ID to a TrackableObject
        self.centroid_tracker = CentroidTracker(max_disappeared, max_distance)
        self.trackers = []
        self.object_tracks = {}
    def track(self, frame, object_locations):
        """Advance tracking by one frame and return the currently tracked objects.

        frame: current video frame (image array; first two dims are height, width).
        object_locations: detector output for this frame — each entry's first
            four values are (start_x, start_y, end_x, end_y); empty/None means
            "no detections", so existing trackers are updated instead.
        Returns a list of dicts with keys "object_id", "centroids" (history of
        centroids) and "bbox_dims".
        """
        # If the frame dimensions are empty, set them
        if self.w is None or self.h is None:
            (self.h, self.w) = frame.shape[:2]
        # Initialize our list of bounding box rectangles returned by either
        # (1) our object detector or
        # (2) the correlation trackers
        rects = []
        # Check to see if there are detected object locations from object detector to aid our tracker
        if object_locations:
            # Initialize our new set of object trackers
            self.trackers = []
            # Loop over the detections
            # NOTE(review): on detection frames `rects` stays empty, so the
            # centroid tracker sees no boxes and counts every object as
            # disappeared once; this is tolerated by max_disappeared but
            # confirm it is intended.
            for detection in object_locations:
                (start_x, start_y, end_x, end_y) = detection[0:4]
                # Grab the appropriate object tracker using our dictionary of OpenCV object tracker objects
                tracker = self.OPENCV_OBJECT_TRACKERS[self.tracker_type]()
                tracker.init(frame, (start_x, start_y, end_x - start_x, end_y - start_y))
                # Add the tracker to our list of trackers so we can utilize it during skip frames
                self.trackers.append(tracker)
        else:
            # Loop over the trackers
            for tracker in self.trackers:
                # Update the tracker and grab the updated position
                (success, box) = tracker.update(frame)
                # Check to see if the tracking was a success
                if success:
                    (x, y, w, h) = [int(v) for v in box]
                    # Unpack the position object
                    start_x = int(x)
                    start_y = int(y)
                    end_x = int(x + w)
                    end_y = int(y + h)
                    # Add the bounding box coordinates to the rectangles list
                    rects.append((start_x, start_y, end_x, end_y))
        # Use the centroid tracker to associate the
        # (1) old object centroids with
        # (2) the newly computed object centroids
        objects, bbox_dims = self.centroid_tracker.update(rects)
        current_objects = []
        # Loop over the tracked objects
        for (object_id, centroid) in objects.items():
            # Check to see if a trackable object exists for the current object ID
            to = self.object_tracks.get(object_id, None)
            # If there is no existing trackable object, create one
            if to is None:
                to = {
                    "object_id": object_id,
                    "centroids": [centroid],
                    "bbox_dims": bbox_dims[object_id],
                }
            # Otherwise, there is a trackable object so we can update it
            else:
                to["centroids"].append(centroid)
            current_objects.append(to)
            # Store the trackable object in our dictionary
            self.object_tracks[object_id] = to
        return current_objects
| import cv2
from .centroid_tracker import CentroidTracker
class OpencvObjectTracker:
    """Multi-object tracker pairing OpenCV single-object trackers with a
    centroid-based data-association step.

    On frames that carry fresh detections, the per-object OpenCV trackers are
    rebuilt from the detections; on all other frames the existing trackers are
    advanced. The resulting bounding boxes are handed to a CentroidTracker,
    which assigns stable object IDs across frames.
    """

    def __init__(self, max_disappeared=20, max_distance=80, tracker_type="kcf"):
        """
        :param max_disappeared: frames an object may go unseen before the
            centroid tracker drops it
        :param max_distance: maximum centroid distance (pixels) for matching
        :param tracker_type: key into OPENCV_OBJECT_TRACKERS selecting the
            OpenCV tracking algorithm
        """
        # Frame dimensions; filled in from the first frame passed to track().
        self.w = None
        self.h = None
        self.tracker_type = tracker_type
        # Tracker-name -> OpenCV tracker factory (legacy cv2 constructors).
        self.OPENCV_OBJECT_TRACKERS = {
            "csrt": cv2.TrackerCSRT_create,
            "kcf": cv2.TrackerKCF_create,
            "boosting": cv2.TrackerBoosting_create,
            "mil": cv2.TrackerMIL_create,
            "tld": cv2.TrackerTLD_create,
            "medianflow": cv2.TrackerMedianFlow_create,
            "mosse": cv2.TrackerMOSSE_create
        }
        # Centroid matcher, the live per-object OpenCV trackers, and the
        # object_id -> track-state mapping.
        self.centroid_tracker = CentroidTracker(max_disappeared, max_distance)
        self.trackers = []
        self.object_tracks = {}

    def track(self, frame, object_locations):
        """Advance tracking by one frame.

        :param frame: current video frame (array-like with .shape == (h, w, ...))
        :param object_locations: detections for this frame as
            (start_x, start_y, end_x, end_y, ...) tuples, or empty/None on
            tracking-only frames
        :return: list of track-state dicts ({"object_id", "centroids",
            "bbox_dims"}) for objects visible in this frame
        """
        # Lazily record the frame dimensions from the first frame seen.
        if self.w is None or self.h is None:
            (self.h, self.w) = frame.shape[:2]
        # Bounding boxes for this frame, from either the detector or trackers.
        rects = []
        if object_locations:
            # Detection frame: rebuild the OpenCV trackers from scratch.
            self.trackers = []
            for detection in object_locations:
                (start_x, start_y, end_x, end_y) = detection[0:4]
                tracker = self.OPENCV_OBJECT_TRACKERS[self.tracker_type]()
                # OpenCV trackers are initialised with an (x, y, w, h) box.
                tracker.init(frame, (start_x, start_y, end_x - start_x, end_y - start_y))
                self.trackers.append(tracker)
                # BUG FIX: the detection boxes must also reach the centroid
                # tracker. Previously rects stayed empty on detection frames,
                # so centroid_tracker.update([]) counted every object as
                # "disappeared" whenever the detector ran.
                rects.append((start_x, start_y, end_x, end_y))
        else:
            # Tracking-only frame: advance each existing tracker.
            for tracker in self.trackers:
                (success, box) = tracker.update(frame)
                if success:
                    (x, y, w, h) = [int(v) for v in box]
                    # Convert (x, y, w, h) back into corner coordinates.
                    start_x = int(x)
                    start_y = int(y)
                    end_x = int(x + w)
                    end_y = int(y + h)
                    rects.append((start_x, start_y, end_x, end_y))
        # Associate old object centroids with the newly computed ones.
        objects, bbox_dims = self.centroid_tracker.update(rects)
        current_objects = []
        for (object_id, centroid) in objects.items():
            # Fetch (or create) the persistent state for this object ID.
            to = self.object_tracks.get(object_id, None)
            if to is None:
                to = {
                    "object_id": object_id,
                    "centroids": [centroid],
                    "bbox_dims": bbox_dims[object_id],
                }
            else:
                # NOTE: bbox_dims is recorded only at creation time; only the
                # centroid history is extended afterwards.
                to["centroids"].append(centroid)
            current_objects.append(to)
            self.object_tracks[object_id] = to
        return current_objects
| en | 0.797429 | # Initialize the frame dimensions (we'll set them as soon as we read the first frame from the video) # Initialize a dictionary that maps strings to their corresponding OpenCV object tracker implementations # Instantiate our centroid tracker, then initialize a list to store each of our OpenCV correlation trackers, # followed by a dictionary to map each unique object ID to a TrackableObject # If the frame dimensions are empty, set them # Initialize our list of bounding box rectangles returned by either # (1) our object detector or # (2) the correlation trackers # Check to see if there are detected object locations from object detector to aid our tracker # Initialize our new set of object trackers # Loop over the detections # Grab the appropriate object tracker using our dictionary of OpenCV object tracker objects # Add the tracker to our list of trackers so we can utilize it during skip frames # Loop over the trackers # Update the tracker and grab the updated position # Check to see if the tracking was a success # Unpack the position object # Add the bounding box coordinates to the rectangles list # Use the centroid tracker to associate the # (1) old object centroids with # (2) the newly computed object centroids # Loop over the tracked objects # Check to see if a trackable object exists for the current object ID # If there is no existing trackable object, create one # Otherwise, there is a trackable object so we can update it # Store the trackable object in our dictionary | 3.007855 | 3 |
Examples/DebugBase.py | whitfija/ACTUAL_GAME | 0 | 6622885 | #Base Class
class MyP5t:
    """A simple pet that can be petted and can meow.

    The original block did not parse (`definitely __init__( name):`), `pet()`
    was missing `self` and referenced an undefined bare `name`, and the name
    was hard-coded to "Billy". All three defects are fixed here.
    """

    def __init__(self, name="Billy"):
        # Store the pet's name; "Billy" is kept as the default to preserve
        # the old hard-coded value for callers that pass nothing.
        self.name = name

    def pet(self):
        # Use the stored attribute instead of an undefined local.
        print("Oh no, your pet " + self.name + " ran away!")

    def meow(self):
        print("Meow meow")
class MyCat:
    """Cat with a manually-invoked initialiser.

    `init` is deliberately not named __init__, so MyCat() takes no arguments
    and the attributes are filled in by an explicit init() call afterwards.
    """

    def init(self, name, age):  # intentionally does not override __init__
        # Bind both attributes in a single statement.
        self.name, self.age = name, age

    def bark(self):
        # Despite the method's name, this cat meows.
        print("meow")

    def get_dogs_age(self):
        # Side-effect only: prints the stored age, returns None.
        print(self.age)
| #Base Class
class MyP5t:
    """A simple pet that can be petted and can meow.

    The original block did not parse (`definitely __init__( name):`), `pet()`
    was missing `self` and referenced an undefined bare `name`, and the name
    was hard-coded to "Billy". All three defects are fixed here.
    """

    def __init__(self, name="Billy"):
        # Store the pet's name; "Billy" is kept as the default to preserve
        # the old hard-coded value for callers that pass nothing.
        self.name = name

    def pet(self):
        # Use the stored attribute instead of an undefined local.
        print("Oh no, your pet " + self.name + " ran away!")

    def meow(self):
        print("Meow meow")
class MyCat:
    """Cat with a manually-invoked initialiser.

    `init` is deliberately not named __init__, so MyCat() takes no arguments
    and the attributes are filled in by an explicit init() call afterwards.
    """

    def init(self, name, age):  # intentionally does not override __init__
        # Bind both attributes in a single statement.
        self.name, self.age = name, age

    def bark(self):
        # Despite the method's name, this cat meows.
        print("meow")

    def get_dogs_age(self):
        # Side-effect only: prints the stored age, returns None.
        print(self.age)
| en | 0.565854 | #Base Class #Make this not override __init__ | 3.78419 | 4 |
play_with_interface.py | marshallmcdonnell/journals | 0 | 6622886 | #!/usr/bin/env python
import unittest
from journals.databases.icat.sns.interface import SnsICatInterface
if __name__ == "__main__":
    # Manual smoke test: connect to the SNS ICAT service and dump the list of
    # experiments recorded for the NOMAD (NOM) instrument.
    conn = SnsICatInterface()
    experiments = conn.get_experiments('NOM')
    print(experiments)
    # Other queries kept handy for interactive exploration:
    # conn.get_instruments()
    # conn.get_experiments_meta('NOM')
    # conn.get_experiments_id_and_title('NOM')
    # conn.get_experiments_id_and_date('NOM')
    # conn.get_runs_all('NOM', 'IPTS-17210')
    # conn.get_runs('NOM', 'IPTS-17210')
    # conn.get_runs_meta('NOM', 'IPTS-17210')
    # conn.get_run_number_and_title('NOM', 'IPTS-17210')
    # conn.get_user_experiments('ntm')
    # conn.get_runs_meta('NOM', 'IPTS-8814')
| #!/usr/bin/env python
import unittest
from journals.databases.icat.sns.interface import SnsICatInterface
if __name__ == "__main__":
    # Manual smoke test: connect to the SNS ICAT service and dump the list of
    # experiments recorded for the NOMAD (NOM) instrument.
    conn = SnsICatInterface()
    experiments = conn.get_experiments('NOM')
    print(experiments)
    # Other queries kept handy for interactive exploration:
    # conn.get_instruments()
    # conn.get_experiments_meta('NOM')
    # conn.get_experiments_id_and_title('NOM')
    # conn.get_experiments_id_and_date('NOM')
    # conn.get_runs_all('NOM', 'IPTS-17210')
    # conn.get_runs('NOM', 'IPTS-17210')
    # conn.get_runs_meta('NOM', 'IPTS-17210')
    # conn.get_run_number_and_title('NOM', 'IPTS-17210')
    # conn.get_user_experiments('ntm')
    # conn.get_runs_meta('NOM', 'IPTS-8814')
| ru | 0.193945 | #!/usr/bin/env python #print(conn.get_instruments()) #print(conn.get_experiments_meta('NOM')) #print(conn.get_experiments_id_and_title('NOM')) #print(conn.get_experiments_id_and_date('NOM')) #print(conn.get_runs_all('NOM','IPTS-17210')) #print(conn.get_runs('NOM','IPTS-17210')) #print(conn.get_runs_meta('NOM','IPTS-17210')) #print(conn.get_run_number_and_title('NOM','IPTS-17210')) #print(conn.get_user_experiments('ntm')) #print(conn.get_runs_meta('NOM', 'IPTS-8814')) | 1.971396 | 2 |
tests/test_s3_filer.py | Zcowyzrg/tesk-core | 2 | 6622887 | import os
import pytest
import boto3
from tesk_core.filer_s3 import S3Transput
from tesk_core.extract_endpoint import extract_endpoint
from moto import mock_s3
from unittest.mock import patch, mock_open
# Fixture: an in-memory (moto) S3 service pre-populated with bucket "tesk"
# containing "folder/file.txt" and "folder1/folder2/file.txt".
@pytest.fixture()
def moto_boto():
    with mock_s3():
        client = boto3.resource('s3',endpoint_url="http://s3.amazonaws.com")
        client.create_bucket(Bucket='tesk')
        client.Bucket('tesk').put_object(Bucket='tesk', Key='folder/file.txt', Body='')
        client.Bucket('tesk').put_object(Bucket='tesk', Key='folder1/folder2/file.txt', Body='')
        yield
# Every supported URL shape (virtual-hosted, path-style, region-qualified,
# s3:// scheme) must resolve to the same (bucket, key) pair.
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder/file.txt","FILE",
        ("tesk","folder/file.txt")),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file.txt","FILE",
        ("tesk","folder/file.txt")),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder/file.txt","FILE",
        ("tesk","folder/file.txt")),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file.txt","FILE",
        ("tesk","folder/file.txt")),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder/file.txt","FILE",
        ("tesk","folder/file.txt")),
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder1/folder2","DIRECTORY",
        ("tesk","folder1/folder2")),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder1/folder2","DIRECTORY",
        ("tesk","folder1/folder2")),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder1/folder2","DIRECTORY",
        ("tesk","folder1/folder2")),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder1/folder2","DIRECTORY",
        ("tesk","folder1/folder2")),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder1/folder2","DIRECTORY",
        ("tesk","folder1/folder2")),
    ])
def test_get_bucket_name_and_file_path( moto_boto, path, url, ftype,expected):
    """
    Check if the bucket name and path is extracted correctly for file and folders
    """
    trans = S3Transput(path, url, ftype)
    assert trans.get_bucket_name_and_file_path() == expected
# expected: 0 = bucket exists ("tesk", created by the fixture), 1 = missing bucket.
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://mybucket.s3.amazonaws.com/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://mybucket.s3-aws-region.amazonaws.com/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/mybucket/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/mybucket/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "s3://mybucket/folder/file.txt","FILE",1),
    ("/home/user/filer_test/", "http://tesk.s3.amazonaws.com/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test/", "http://tesk.s3-aws-region.amazonaws.com/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test/", "http://s3.amazonaws.com/tesk/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test/", "http://s3-aws-region.amazonaws.com/tesk/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test/", "s3://tesk/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test/", "http://mybucket.s3.amazonaws.com/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test/", "http://mybucket.s3-aws-region.amazonaws.com/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test/", "http://s3.amazonaws.com/mybucket/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test/", "http://s3-aws-region.amazonaws.com/mybucket/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test/", "s3://mybucket/folder1/folder2","DIRECTORY",1)
    ])
def test_check_if_bucket_exists(moto_boto, path, url, ftype, expected):
    """
    Check if the bucket exists
    """
    client = boto3.resource('s3', endpoint_url="http://s3.amazonaws.com")
    trans = S3Transput(path, url, ftype)
    assert trans.check_if_bucket_exists(client) == expected
# @patch('tesk_core.filer.os.makedirs')
# @patch('builtins.open')
# @patch('s3transfer.utils.OSUtils.rename_file')
# NOTE(review): the three commented-out patch decorators above look stale and
# are candidates for deletion.
# expected: 0 = key exists under bucket "tesk", 1 = download fails (404).
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder/file_new.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file_new.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder/file_new.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file_new.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder/file_new.txt","FILE",1),
    ])
def test_s3_download_file( moto_boto, path, url, ftype, expected, fs, caplog):
    """
    Checking for successful/failed file download from Object storage server
    """
    with S3Transput(path, url, ftype) as trans:
        assert trans.download_file() == expected
        if expected:
            assert "Not Found" in caplog.text
        else:
            assert os.path.exists(path) == True
# NOTE(review): @patch arguments are injected bottom-up, so the parameter
# names below are swapped relative to the decorators: `mock_makedirs` actually
# receives the rename_file mock and `mock_rename` receives the os.makedirs
# mock (which is why the assertion at the end uses makedirs' `exist_ok=True`
# kwarg). The test is self-consistent, but the names mislead. `mock_open`
# also shadows the `unittest.mock.mock_open` import — TODO confirm/rename.
@patch('tesk_core.filer.os.makedirs')
@patch('builtins.open')
@patch('s3transfer.utils.OSUtils.rename_file')
@patch("tesk_core.filer_s3.extract_endpoint", return_value="http://s3.amazonaws.com")
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("filer_test/", "http://tesk.s3.amazonaws.com/folder1/","DIRECTORY",0),
    ("filer_test/", "http://tesk.s3-aws-region.amazonaws.com/folder1/","DIRECTORY",0),
    ("filer_test/", "http://s3.amazonaws.com/tesk/folder1/","DIRECTORY",0),
    ("filer_test/", "http://s3-aws-region.amazonaws.com/tesk/folder1/","DIRECTORY",0),
    ("filer_test/", "s3://tesk/folder1/","DIRECTORY",0),
    ("filer_test/", "http://tesk.s3.amazonaws.com/folder10/folder20","DIRECTORY",1),
    ("filer_test/", "http://tesk.s3-aws-region.amazonaws.com/folder10/folder20","DIRECTORY",1),
    ("filer_test/", "http://s3.amazonaws.com/tesk/folder10/folder20","DIRECTORY",1),
    ("filer_test/", "http://s3-aws-region.amazonaws.com/tesk/folder10/folder20","DIRECTORY",1),
    ("filer_test/", "s3://tesk/folder10/folder20","DIRECTORY",1)
    ])
def test_s3_download_directory( mock_extract_endpoint,mock_makedirs, mock_open, mock_rename, path, url, ftype,
                               expected, moto_boto, caplog):
    """
    test case to check directory download from Object storage server
    """
    with S3Transput(path, url, ftype) as trans:
        assert trans.download_dir() == expected
        print(mock_rename.mock_calls)
    if expected:
        assert "Invalid file path" in caplog.text
    else:
        '''
        s3 object path http://tesk.s3.amazonaws.com/folder1/ will contain 'folder2', checking if the 'folder2'
        is present in the download folder.
        '''
        mock_rename.assert_called_once_with('filer_test/folder2', exist_ok=True)
# expected: 0 = local file exists (created via pyfakefs `fs`), 1 = upload fails.
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file_new.txt", "http://tesk.s3.amazonaws.com/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file_new.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file_new.txt", "http://s3.amazonaws.com/tesk/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file_new.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file_new.txt", "s3://tesk/folder/file.txt","FILE",1),
    ])
def test_s3_upload_file( moto_boto, path, url, ftype, expected,fs, caplog):
    """
    Testing successful/failed file upload to object storage server
    """
    fs.create_file("/home/user/filer_test/file.txt")
    client = boto3.resource('s3', endpoint_url="http://s3.amazonaws.com")
    trans = S3Transput(path, url, ftype)
    trans.bucket_obj = client.Bucket(trans.bucket)
    assert trans.upload_file() == expected
    if expected:
        assert "File upload failed for" in caplog.text
    else:
        '''
        Checking if the file was uploaded, if the object is found, load() method will return None
        otherwise an exception will be raised.
        '''
        assert client.Object('tesk', 'folder/file.txt').load() == None
# expected: 0 = real local directory ("tests"), 1 = nonexistent source directory.
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("tests", "http://tesk.s3.amazonaws.com/folder1/folder2","DIRECTORY",0),
    ("tests", "http://tesk.s3-aws-region.amazonaws.com/folder1/folder2","DIRECTORY",0),
    ("tests", "http://s3.amazonaws.com/tesk/folder1/folder2","DIRECTORY",0),
    ("tests", "http://s3-aws-region.amazonaws.com/tesk/folder1/folder2","DIRECTORY",0),
    ("tests", "s3://tesk/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test_new/", "http://tesk.s3.amazonaws.com/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test_new/", "http://tesk.s3-aws-region.amazonaws.com/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test_new/", "http://s3.amazonaws.com/tesk/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test_new/", "http://s3-aws-region.amazonaws.com/tesk/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test_new/", "s3://tesk/folder1/folder2","DIRECTORY",1)
    ])
def test_s3_upload_directory(path, url, ftype, expected, moto_boto, caplog):
    """
    Checking for successful and failed Directory upload to object storage server
    """
    client = boto3.resource('s3', endpoint_url="http://s3.amazonaws.com")
    trans = S3Transput(path, url, ftype)
    trans.bucket_obj = client.Bucket(trans.bucket)
    assert trans.upload_dir() == expected
    if expected:
        assert "File upload failed for" in caplog.text
    else:
        '''
        Checking if the file was uploaded, if the object is found load() method will return None
        otherwise an exception will be raised.
        '''
        assert client.Object('tesk', 'folder1/folder2/test_filer.py').load() == None
def test_upload_directory_for_unknown_file_type(moto_boto, fs, monkeypatch, caplog):
    """
    Checking whether an exception is raised when the object type is neither file or directory
    If the exception is raised, an error message will be logged.
    """
    # Force the "neither file nor directory" branch by making isfile() lie.
    monkeypatch.setattr(os.path, 'isfile', lambda _:False)
    fs.create_file("/home/user/filer_test/text.txt")
    url, ftype = "s3://tesk/folder10/folder20","DIRECTORY"
    path = "/home/user/filer_test/"
    trans = S3Transput(path, url, ftype)
    client = boto3.resource('s3', endpoint_url="http://s3.amazonaws.com")
    trans.bucket_obj = client.Bucket(trans.bucket)
    trans.upload_dir()
    assert "Object is neither file or directory" in caplog.text
@patch("tesk_core.filer.os.path.exists", return_value=1)
def test_extract_url_from_config_file(mock_path_exists):
    """
    Testing extraction of endpoint url from default file location
    """
    # The config file itself is mocked; only the parsing logic is exercised.
    read_data = '\n'.join(["[default]", "endpoint_url = http://s3-aws-region.amazonaws.com"])
    with patch("builtins.open", mock_open(read_data=read_data), create=True) as mock_file:
        mock_file.return_value.__iter__.return_value = read_data.splitlines()
        assert extract_endpoint() == "http://s3-aws-region.amazonaws.com"
        mock_file.assert_called_once_with("~/.aws/config", encoding=None)
@patch.dict(os.environ, {"AWS_CONFIG_FILE": "~/.aws/config"})
def test_extract_url_from_environ_variable():
    """
    Testing successful extraction of endpoint url read from file path saved in the
    AWS_CONFIG_FILE environment variable
    """
    read_data = '\n'.join(["[default]","endpoint_url = http://s3-aws-region.amazonaws.com"])
    with patch("builtins.open", mock_open(read_data=read_data),create=True) as mock_file:
        mock_file.return_value.__iter__.return_value = read_data.splitlines()
        assert (extract_endpoint() == "http://s3-aws-region.amazonaws.com")
        mock_file.assert_called_once_with(os.environ["AWS_CONFIG_FILE"], encoding=None)
| import os
import pytest
import boto3
from tesk_core.filer_s3 import S3Transput
from tesk_core.extract_endpoint import extract_endpoint
from moto import mock_s3
from unittest.mock import patch, mock_open
# Fixture: an in-memory (moto) S3 service pre-populated with bucket "tesk"
# containing "folder/file.txt" and "folder1/folder2/file.txt".
@pytest.fixture()
def moto_boto():
    with mock_s3():
        client = boto3.resource('s3',endpoint_url="http://s3.amazonaws.com")
        client.create_bucket(Bucket='tesk')
        client.Bucket('tesk').put_object(Bucket='tesk', Key='folder/file.txt', Body='')
        client.Bucket('tesk').put_object(Bucket='tesk', Key='folder1/folder2/file.txt', Body='')
        yield
# Every supported URL shape (virtual-hosted, path-style, region-qualified,
# s3:// scheme) must resolve to the same (bucket, key) pair.
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder/file.txt","FILE",
        ("tesk","folder/file.txt")),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file.txt","FILE",
        ("tesk","folder/file.txt")),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder/file.txt","FILE",
        ("tesk","folder/file.txt")),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file.txt","FILE",
        ("tesk","folder/file.txt")),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder/file.txt","FILE",
        ("tesk","folder/file.txt")),
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder1/folder2","DIRECTORY",
        ("tesk","folder1/folder2")),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder1/folder2","DIRECTORY",
        ("tesk","folder1/folder2")),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder1/folder2","DIRECTORY",
        ("tesk","folder1/folder2")),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder1/folder2","DIRECTORY",
        ("tesk","folder1/folder2")),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder1/folder2","DIRECTORY",
        ("tesk","folder1/folder2")),
    ])
def test_get_bucket_name_and_file_path( moto_boto, path, url, ftype,expected):
    """
    Check if the bucket name and path is extracted correctly for file and folders
    """
    trans = S3Transput(path, url, ftype)
    assert trans.get_bucket_name_and_file_path() == expected
# expected: 0 = bucket exists ("tesk", created by the fixture), 1 = missing bucket.
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://mybucket.s3.amazonaws.com/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://mybucket.s3-aws-region.amazonaws.com/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/mybucket/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/mybucket/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "s3://mybucket/folder/file.txt","FILE",1),
    ("/home/user/filer_test/", "http://tesk.s3.amazonaws.com/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test/", "http://tesk.s3-aws-region.amazonaws.com/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test/", "http://s3.amazonaws.com/tesk/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test/", "http://s3-aws-region.amazonaws.com/tesk/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test/", "s3://tesk/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test/", "http://mybucket.s3.amazonaws.com/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test/", "http://mybucket.s3-aws-region.amazonaws.com/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test/", "http://s3.amazonaws.com/mybucket/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test/", "http://s3-aws-region.amazonaws.com/mybucket/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test/", "s3://mybucket/folder1/folder2","DIRECTORY",1)
    ])
def test_check_if_bucket_exists(moto_boto, path, url, ftype, expected):
    """
    Check if the bucket exists
    """
    client = boto3.resource('s3', endpoint_url="http://s3.amazonaws.com")
    trans = S3Transput(path, url, ftype)
    assert trans.check_if_bucket_exists(client) == expected
# @patch('tesk_core.filer.os.makedirs')
# @patch('builtins.open')
# @patch('s3transfer.utils.OSUtils.rename_file')
# NOTE(review): the three commented-out patch decorators above look stale and
# are candidates for deletion.
# expected: 0 = key exists under bucket "tesk", 1 = download fails (404).
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder/file_new.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file_new.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder/file_new.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file_new.txt","FILE",1),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder/file_new.txt","FILE",1),
    ])
def test_s3_download_file( moto_boto, path, url, ftype, expected, fs, caplog):
    """
    Checking for successful/failed file download from Object storage server
    """
    with S3Transput(path, url, ftype) as trans:
        assert trans.download_file() == expected
        if expected:
            assert "Not Found" in caplog.text
        else:
            assert os.path.exists(path) == True
# NOTE(review): @patch arguments are injected bottom-up, so the parameter
# names below are swapped relative to the decorators: `mock_makedirs` actually
# receives the rename_file mock and `mock_rename` receives the os.makedirs
# mock (which is why the assertion at the end uses makedirs' `exist_ok=True`
# kwarg). The test is self-consistent, but the names mislead. `mock_open`
# also shadows the `unittest.mock.mock_open` import — TODO confirm/rename.
@patch('tesk_core.filer.os.makedirs')
@patch('builtins.open')
@patch('s3transfer.utils.OSUtils.rename_file')
@patch("tesk_core.filer_s3.extract_endpoint", return_value="http://s3.amazonaws.com")
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("filer_test/", "http://tesk.s3.amazonaws.com/folder1/","DIRECTORY",0),
    ("filer_test/", "http://tesk.s3-aws-region.amazonaws.com/folder1/","DIRECTORY",0),
    ("filer_test/", "http://s3.amazonaws.com/tesk/folder1/","DIRECTORY",0),
    ("filer_test/", "http://s3-aws-region.amazonaws.com/tesk/folder1/","DIRECTORY",0),
    ("filer_test/", "s3://tesk/folder1/","DIRECTORY",0),
    ("filer_test/", "http://tesk.s3.amazonaws.com/folder10/folder20","DIRECTORY",1),
    ("filer_test/", "http://tesk.s3-aws-region.amazonaws.com/folder10/folder20","DIRECTORY",1),
    ("filer_test/", "http://s3.amazonaws.com/tesk/folder10/folder20","DIRECTORY",1),
    ("filer_test/", "http://s3-aws-region.amazonaws.com/tesk/folder10/folder20","DIRECTORY",1),
    ("filer_test/", "s3://tesk/folder10/folder20","DIRECTORY",1)
    ])
def test_s3_download_directory( mock_extract_endpoint,mock_makedirs, mock_open, mock_rename, path, url, ftype,
                               expected, moto_boto, caplog):
    """
    test case to check directory download from Object storage server
    """
    with S3Transput(path, url, ftype) as trans:
        assert trans.download_dir() == expected
        print(mock_rename.mock_calls)
    if expected:
        assert "Invalid file path" in caplog.text
    else:
        '''
        s3 object path http://tesk.s3.amazonaws.com/folder1/ will contain 'folder2', checking if the 'folder2'
        is present in the download folder.
        '''
        mock_rename.assert_called_once_with('filer_test/folder2', exist_ok=True)
# expected: 0 = local file exists (created via pyfakefs `fs`), 1 = upload fails.
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("/home/user/filer_test/file.txt", "http://tesk.s3.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file.txt", "s3://tesk/folder/file.txt","FILE",0),
    ("/home/user/filer_test/file_new.txt", "http://tesk.s3.amazonaws.com/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file_new.txt", "http://tesk.s3-aws-region.amazonaws.com/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file_new.txt", "http://s3.amazonaws.com/tesk/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file_new.txt", "http://s3-aws-region.amazonaws.com/tesk/folder/file.txt","FILE",1),
    ("/home/user/filer_test/file_new.txt", "s3://tesk/folder/file.txt","FILE",1),
    ])
def test_s3_upload_file( moto_boto, path, url, ftype, expected,fs, caplog):
    """
    Testing successful/failed file upload to object storage server
    """
    fs.create_file("/home/user/filer_test/file.txt")
    client = boto3.resource('s3', endpoint_url="http://s3.amazonaws.com")
    trans = S3Transput(path, url, ftype)
    trans.bucket_obj = client.Bucket(trans.bucket)
    assert trans.upload_file() == expected
    if expected:
        assert "File upload failed for" in caplog.text
    else:
        '''
        Checking if the file was uploaded, if the object is found, load() method will return None
        otherwise an exception will be raised.
        '''
        assert client.Object('tesk', 'folder/file.txt').load() == None
# expected: 0 = real local directory ("tests"), 1 = nonexistent source directory.
@pytest.mark.parametrize("path, url, ftype,expected", [
    ("tests", "http://tesk.s3.amazonaws.com/folder1/folder2","DIRECTORY",0),
    ("tests", "http://tesk.s3-aws-region.amazonaws.com/folder1/folder2","DIRECTORY",0),
    ("tests", "http://s3.amazonaws.com/tesk/folder1/folder2","DIRECTORY",0),
    ("tests", "http://s3-aws-region.amazonaws.com/tesk/folder1/folder2","DIRECTORY",0),
    ("tests", "s3://tesk/folder1/folder2","DIRECTORY",0),
    ("/home/user/filer_test_new/", "http://tesk.s3.amazonaws.com/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test_new/", "http://tesk.s3-aws-region.amazonaws.com/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test_new/", "http://s3.amazonaws.com/tesk/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test_new/", "http://s3-aws-region.amazonaws.com/tesk/folder1/folder2","DIRECTORY",1),
    ("/home/user/filer_test_new/", "s3://tesk/folder1/folder2","DIRECTORY",1)
    ])
def test_s3_upload_directory(path, url, ftype, expected, moto_boto, caplog):
    """
    Checking for successful and failed Directory upload to object storage server
    """
    client = boto3.resource('s3', endpoint_url="http://s3.amazonaws.com")
    trans = S3Transput(path, url, ftype)
    trans.bucket_obj = client.Bucket(trans.bucket)
    assert trans.upload_dir() == expected
    if expected:
        assert "File upload failed for" in caplog.text
    else:
        '''
        Checking if the file was uploaded, if the object is found load() method will return None
        otherwise an exception will be raised.
        '''
        assert client.Object('tesk', 'folder1/folder2/test_filer.py').load() == None
def test_upload_directory_for_unknown_file_type(moto_boto, fs, monkeypatch, caplog):
    """
    Checking whether an exception is raised when the object type is neither file or directory
    If the exception is raised, an error message will be logged.
    """
    # Force the "neither file nor directory" branch by making isfile() lie.
    monkeypatch.setattr(os.path, 'isfile', lambda _:False)
    fs.create_file("/home/user/filer_test/text.txt")
    url, ftype = "s3://tesk/folder10/folder20","DIRECTORY"
    path = "/home/user/filer_test/"
    trans = S3Transput(path, url, ftype)
    client = boto3.resource('s3', endpoint_url="http://s3.amazonaws.com")
    trans.bucket_obj = client.Bucket(trans.bucket)
    trans.upload_dir()
    assert "Object is neither file or directory" in caplog.text
@patch("tesk_core.filer.os.path.exists", return_value=1)
def test_extract_url_from_config_file(mock_path_exists):
    """
    Testing extraction of endpoint url from default file location
    """
    # The config file itself is mocked; only the parsing logic is exercised.
    read_data = '\n'.join(["[default]", "endpoint_url = http://s3-aws-region.amazonaws.com"])
    with patch("builtins.open", mock_open(read_data=read_data), create=True) as mock_file:
        mock_file.return_value.__iter__.return_value = read_data.splitlines()
        assert extract_endpoint() == "http://s3-aws-region.amazonaws.com"
        mock_file.assert_called_once_with("~/.aws/config", encoding=None)
@patch.dict(os.environ, {"AWS_CONFIG_FILE": "~/.aws/config"})
def test_extract_url_from_environ_variable():
    """
    Testing successful extraction of endpoint url read from file path saved in the
    AWS_CONFIG_FILE environment variable
    """
    read_data = '\n'.join(["[default]","endpoint_url = http://s3-aws-region.amazonaws.com"])
    with patch("builtins.open", mock_open(read_data=read_data),create=True) as mock_file:
        mock_file.return_value.__iter__.return_value = read_data.splitlines()
        assert (extract_endpoint() == "http://s3-aws-region.amazonaws.com")
        mock_file.assert_called_once_with(os.environ["AWS_CONFIG_FILE"], encoding=None)
| en | 0.816234 | Check if the bucket name and path is extracted correctly for file and folders Check if the bucket exists # @patch('tesk_core.filer.os.makedirs') # @patch('builtins.open') # @patch('s3transfer.utils.OSUtils.rename_file') Checking for successful/failed file download from Object storage server test case to check directory download from Object storage server s3 object path http://tesk.s3.amazonaws.com/folder1/ will contain 'folder2', checking if the 'folder2' is present in the download folder. Testing successful/failed file upload to object storage server Checking if the file was uploaded, if the object is found, load() method will return None otherwise an exception will be raised. Checking for successful and failed Directory upload to object storage server Checking if the file was uploaded, if the object is found load() method will return None otherwise an exception will be raised. Checking whether an exception is raised when the object type is neither file or directory If the exception is raised, an error message will be logged. Testing extraction of endpoint url from default file location Testing successful extraction of endpoint url read from file path saved on enviornment variable | 1.945114 | 2 |
remedy/scrapers/scraper.py | AllieDeford/radremedy | 0 | 6622888 | <gh_stars>0
"""
scraper.py
Scraper template used throughout the application.
"""
from radrecord import RadRecord
class Scraper(object):
"""
This class is a template or better said interface,
all scrapers should inherit from this class.
Currently it promises that all scrapers will have a source
and a scrape method.
This is going to be used to automate the scraping of multiple websites.
So somewhere there will be some code that looks like this:
for scraper in scrapers:
data = scraper.run()
db.save(data)
print('Finished scraping %s' % scraper.source)
The source is used to categorize records in the database.
All subclasses should override the scrape method.
*DO NOT OVERRIDE THE run METHOD*. The run method makes
sure that the data returned is valid and might add things
like logging and concurrency in the future.
In the future it might provide some other functionality that might help
other scrapers.
"""
def __init__(self, source):
"""
Initialization function
Args:
self: self explaitory
source: The data source. Most of the time this will be
the site's name.d
"""
self.source = source
@staticmethod
def valid(data):
"""
It makes sure that all data returned by the scrapers
is valid. Making it safer and easier to save
them into the database.
In order for the data to be valid, it must follow our
data format:
https://github.com/radremedy/radremedy/wiki/RAD-Record-Format
We assert this by returning a list of RadRecord instances.
Args:
data: data to be checked
Returns:
A boolean indicated whether or not the data is valid
"""
# TODO: this might be iterating too much, might be better
# to check while appending data instead of at the end
for d in data:
if d is not None and not issubclass(d.__class__, RadRecord):
return False
else:
return True
def scrape(self):
"""
Method that should return the data collected from the
source.
Returns:
the data collected, and it should be valid
Raises:
NotImplemented: this method is not yet implemented
"""
raise NotImplemented('%s does not seem to know how to scrape' % self.__class__)
def _validated_scrape(self):
"""
Runs the scraper and validates the data,
throwing an exception if it isn't valid.
Returns:
the data in RAD-Record Format
Raises:
TypeError: the data is not in RAD-Record Format
"""
data = self.scrape()
if Scraper.valid(data):
return data
else:
raise TypeError('All scrapers should return the right data type')
def run(self):
"""
Run the scraper and collect the data. Please do not override.
Returns:
the data collected by the scraper
"""
return self._validated_scrape()
| """
scraper.py
Scraper template used throughout the application.
"""
from radrecord import RadRecord
class Scraper(object):
"""
This class is a template or better said interface,
all scrapers should inherit from this class.
Currently it promises that all scrapers will have a source
and a scrape method.
This is going to be used to automate the scraping of multiple websites.
So somewhere there will be some code that looks like this:
for scraper in scrapers:
data = scraper.run()
db.save(data)
print('Finished scraping %s' % scraper.source)
The source is used to categorize records in the database.
All subclasses should override the scrape method.
*DO NOT OVERRIDE THE run METHOD*. The run method makes
sure that the data returned is valid and might add things
like logging and concurrency in the future.
In the future it might provide some other functionality that might help
other scrapers.
"""
def __init__(self, source):
"""
Initialization function
Args:
self: self explaitory
source: The data source. Most of the time this will be
the site's name.d
"""
self.source = source
@staticmethod
def valid(data):
"""
It makes sure that all data returned by the scrapers
is valid. Making it safer and easier to save
them into the database.
In order for the data to be valid, it must follow our
data format:
https://github.com/radremedy/radremedy/wiki/RAD-Record-Format
We assert this by returning a list of RadRecord instances.
Args:
data: data to be checked
Returns:
A boolean indicated whether or not the data is valid
"""
# TODO: this might be iterating too much, might be better
# to check while appending data instead of at the end
for d in data:
if d is not None and not issubclass(d.__class__, RadRecord):
return False
else:
return True
def scrape(self):
"""
Method that should return the data collected from the
source.
Returns:
the data collected, and it should be valid
Raises:
NotImplemented: this method is not yet implemented
"""
raise NotImplemented('%s does not seem to know how to scrape' % self.__class__)
def _validated_scrape(self):
"""
Runs the scraper and validates the data,
throwing an exception if it isn't valid.
Returns:
the data in RAD-Record Format
Raises:
TypeError: the data is not in RAD-Record Format
"""
data = self.scrape()
if Scraper.valid(data):
return data
else:
raise TypeError('All scrapers should return the right data type')
def run(self):
"""
Run the scraper and collect the data. Please do not override.
Returns:
the data collected by the scraper
"""
return self._validated_scrape() | en | 0.834586 | scraper.py Scraper template used throughout the application. This class is a template or better said interface, all scrapers should inherit from this class. Currently it promises that all scrapers will have a source and a scrape method. This is going to be used to automate the scraping of multiple websites. So somewhere there will be some code that looks like this: for scraper in scrapers: data = scraper.run() db.save(data) print('Finished scraping %s' % scraper.source) The source is used to categorize records in the database. All subclasses should override the scrape method. *DO NOT OVERRIDE THE run METHOD*. The run method makes sure that the data returned is valid and might add things like logging and concurrency in the future. In the future it might provide some other functionality that might help other scrapers. Initialization function Args: self: self explaitory source: The data source. Most of the time this will be the site's name.d It makes sure that all data returned by the scrapers is valid. Making it safer and easier to save them into the database. In order for the data to be valid, it must follow our data format: https://github.com/radremedy/radremedy/wiki/RAD-Record-Format We assert this by returning a list of RadRecord instances. Args: data: data to be checked Returns: A boolean indicated whether or not the data is valid # TODO: this might be iterating too much, might be better # to check while appending data instead of at the end Method that should return the data collected from the source. Returns: the data collected, and it should be valid Raises: NotImplemented: this method is not yet implemented Runs the scraper and validates the data, throwing an exception if it isn't valid. Returns: the data in RAD-Record Format Raises: TypeError: the data is not in RAD-Record Format Run the scraper and collect the data. Please do not override. Returns: the data collected by the scraper | 3.437552 | 3 |
src/analytical_validation/validators/intermediate_precision_validator.py | abxsantos/analytical-validation-backend | 0 | 6622889 | import numpy
import pandas
from statsmodels.formula.api import ols
import statsmodels.api as statsmodels
from analytical_validation.exceptions import IncorrectIntermediatePrecisionData
class IntermediatePrecision(object):
"""
Example:
>>> analytical_data = [0.1, 0.11, 0.12, 0.1, 0.11, 0.12, 0.1, 0.11, 0.12, 0.1, 0.11, 0.12,]
>>> intercept = 0.0001
>>> slope = 20.2
>>> alpha = 0.05
>>> intermediate_precision = IntermediatePrecision(analytical_data, intercept, slope, alpha)
>>> intermediate_precision_is_valid = intermediate_precision.validate_intermediate_precision
"""
def __init__(self, analytical_data, intercept, slope, alpha=0.05):
"""
The intermediate precision is the proximity between the results obtained in an analysis of the same sample, in
the same laboratory, in at least two different days and between two different analysts.
This class is used to validate the precision of an analytical method given the analytical and concentration data
of different days and analysts ordered inside a dictionary containing the data inside a list of lists.
:param analytical_data:
:type analytical_data: list
:param intercept:
:type intercept: float
:param slope:
:type slope: float
:param alpha:
:type alpha: float
"""
self.original_analytical_data = analytical_data
self.intercept = intercept
self.slope = slope
self.alpha = alpha
self.calculated_concentration = []
self.two_way_anova_result = None
self.is_intermediate_precise = False
def calculate_obtained_concentrations(self):
"""
Calculate the concentration given a validated linear regression slope, intercept and intermediate precision
analytical data.
:return: Concentration data calculated with the regression coefficients.
"""
self.calculated_concentration = []
if isinstance(self.original_analytical_data, list) is False:
raise IncorrectIntermediatePrecisionData()
for value in self.original_analytical_data:
if value is None:
self.calculated_concentration.append(value)
elif isinstance(value, float) or isinstance(value, int):
self.calculated_concentration.append(value * self.slope + self.intercept)
else:
raise IncorrectIntermediatePrecisionData()
def two_way_anova(self):
"""
Creates the two-way ANOVA object containing statistical
properties of the intermediate precision given data a set.
"""
data_set_length = len(self.calculated_concentration)
data_frame = pandas.DataFrame({"days": numpy.repeat(["day 1", "day 2"], data_set_length // 2),
"analyst": numpy.tile(
numpy.repeat(["analyst a", "analyst b"], data_set_length // 4), 2),
"concentration": self.calculated_concentration})
model = ols('concentration ~ C(days) + C(analyst) + C(days):C(analyst)', data=data_frame).fit()
self.two_way_anova_result = statsmodels.stats.anova_lm(model, typ=2)
def validate_intermediate_precision(self):
"""
Validates the intermediate precision of given data.
:return: True if the given data is valid. False otherwise.
:rtype: bool
"""
self.calculate_obtained_concentrations()
self.two_way_anova()
# TODO: Check conditionals for intermediate precision acceptance
self.is_intermediate_precise = True
| import numpy
import pandas
from statsmodels.formula.api import ols
import statsmodels.api as statsmodels
from analytical_validation.exceptions import IncorrectIntermediatePrecisionData
class IntermediatePrecision(object):
"""
Example:
>>> analytical_data = [0.1, 0.11, 0.12, 0.1, 0.11, 0.12, 0.1, 0.11, 0.12, 0.1, 0.11, 0.12,]
>>> intercept = 0.0001
>>> slope = 20.2
>>> alpha = 0.05
>>> intermediate_precision = IntermediatePrecision(analytical_data, intercept, slope, alpha)
>>> intermediate_precision_is_valid = intermediate_precision.validate_intermediate_precision
"""
def __init__(self, analytical_data, intercept, slope, alpha=0.05):
"""
The intermediate precision is the proximity between the results obtained in an analysis of the same sample, in
the same laboratory, in at least two different days and between two different analysts.
This class is used to validate the precision of an analytical method given the analytical and concentration data
of different days and analysts ordered inside a dictionary containing the data inside a list of lists.
:param analytical_data:
:type analytical_data: list
:param intercept:
:type intercept: float
:param slope:
:type slope: float
:param alpha:
:type alpha: float
"""
self.original_analytical_data = analytical_data
self.intercept = intercept
self.slope = slope
self.alpha = alpha
self.calculated_concentration = []
self.two_way_anova_result = None
self.is_intermediate_precise = False
def calculate_obtained_concentrations(self):
"""
Calculate the concentration given a validated linear regression slope, intercept and intermediate precision
analytical data.
:return: Concentration data calculated with the regression coefficients.
"""
self.calculated_concentration = []
if isinstance(self.original_analytical_data, list) is False:
raise IncorrectIntermediatePrecisionData()
for value in self.original_analytical_data:
if value is None:
self.calculated_concentration.append(value)
elif isinstance(value, float) or isinstance(value, int):
self.calculated_concentration.append(value * self.slope + self.intercept)
else:
raise IncorrectIntermediatePrecisionData()
def two_way_anova(self):
"""
Creates the two-way ANOVA object containing statistical
properties of the intermediate precision given data a set.
"""
data_set_length = len(self.calculated_concentration)
data_frame = pandas.DataFrame({"days": numpy.repeat(["day 1", "day 2"], data_set_length // 2),
"analyst": numpy.tile(
numpy.repeat(["analyst a", "analyst b"], data_set_length // 4), 2),
"concentration": self.calculated_concentration})
model = ols('concentration ~ C(days) + C(analyst) + C(days):C(analyst)', data=data_frame).fit()
self.two_way_anova_result = statsmodels.stats.anova_lm(model, typ=2)
def validate_intermediate_precision(self):
"""
Validates the intermediate precision of given data.
:return: True if the given data is valid. False otherwise.
:rtype: bool
"""
self.calculate_obtained_concentrations()
self.two_way_anova()
# TODO: Check conditionals for intermediate precision acceptance
self.is_intermediate_precise = True
| en | 0.699843 | Example: >>> analytical_data = [0.1, 0.11, 0.12, 0.1, 0.11, 0.12, 0.1, 0.11, 0.12, 0.1, 0.11, 0.12,] >>> intercept = 0.0001 >>> slope = 20.2 >>> alpha = 0.05 >>> intermediate_precision = IntermediatePrecision(analytical_data, intercept, slope, alpha) >>> intermediate_precision_is_valid = intermediate_precision.validate_intermediate_precision The intermediate precision is the proximity between the results obtained in an analysis of the same sample, in the same laboratory, in at least two different days and between two different analysts. This class is used to validate the precision of an analytical method given the analytical and concentration data of different days and analysts ordered inside a dictionary containing the data inside a list of lists. :param analytical_data: :type analytical_data: list :param intercept: :type intercept: float :param slope: :type slope: float :param alpha: :type alpha: float Calculate the concentration given a validated linear regression slope, intercept and intermediate precision analytical data. :return: Concentration data calculated with the regression coefficients. Creates the two-way ANOVA object containing statistical properties of the intermediate precision given data a set. Validates the intermediate precision of given data. :return: True if the given data is valid. False otherwise. :rtype: bool # TODO: Check conditionals for intermediate precision acceptance | 3.411063 | 3 |
easelenium/ui/generator/page_object_class.py | kirillstrelkov/easyselenium | 1 | 6622890 | <reponame>kirillstrelkov/easyselenium
# coding=utf8
import os
from easelenium.ui.file_utils import safe_create_path, save_file
from easelenium.utils import LINESEP, get_match
from selenium.webdriver.common.by import By
# TODO: move to f string and get rid of u strings
def get_by_as_code_str(by):
if by == By.LINK_TEXT:
return "By.LINK_TEXT"
elif by == By.CLASS_NAME:
return "By.CLASS_NAME"
elif by == By.CSS_SELECTOR:
return "By.CSS_SELECTOR"
elif by == By.XPATH:
return "By.XPATH"
elif by == By.ID:
return "By.ID"
else:
raise NotImplementedError
def get_by_from_code_str(by_as_string):
if by_as_string == "By.LINK_TEXT":
return By.LINK_TEXT
elif by_as_string == "By.CLASS_NAME":
return By.CLASS_NAME
elif by_as_string == "By.CSS_SELECTOR":
return By.CSS_SELECTOR
elif by_as_string == "By.XPATH":
return By.XPATH
elif by_as_string == "By.ID":
return By.ID
else:
raise NotImplementedError
class PageObjectClassField(object):
def __init__(self, name, by, selector, location, dimensions):
self.name = name
self.by = by
self.selector = selector
self.location = location
self.dimensions = dimensions
def __eq__(self, other):
return (other is not None) and (
self.name == other.name
and self.by == other.by
and self.selector == other.selector
)
def __repr__(self):
return str(self)
def __str__(self):
return f"PageObjectClassField({self.__dict__})"
class PageObjectClass(object):
IMAGE_FOLDER = "img"
TEMPLATE = """# coding=utf8
from selenium.webdriver.common.by import By
from easelenium.base_page_object import BasePageObject
class {name}(BasePageObject):
# Please do NOT remove auto-generated comments
# Url: {url}
# Area: {area}
# File path: {file_path}
# Image path: {img_path}
{fields_as_code}
"""
def __init__(
self,
name,
url,
fields,
area=None,
file_path=None,
img_path=None,
img_as_png=None,
):
self.name = name
self.url = url
self.fields = fields
self.area = area
self.file_path = file_path
self.img_as_png = img_as_png
self.img_path = img_path
def save(self, new_folder=None):
if new_folder:
py_filename = os.path.basename(self.file_path)
img_filename = os.path.basename(self.img_path)
self.file_path = os.path.abspath(os.path.join(new_folder, py_filename))
self.img_path = os.path.abspath(
os.path.join(new_folder, self.IMAGE_FOLDER, img_filename)
)
safe_create_path(self.file_path)
safe_create_path(self.img_path)
save_file(self.file_path, self._get_file_content())
save_file(self.img_path, self.img_as_png, False)
def _get_file_content(self):
kwargs = self.__dict__.copy()
fields_as_code = self._get_fields_as_code()
if len(fields_as_code.strip()) == 0:
fields_as_code = " pass" + LINESEP
kwargs["fields_as_code"] = fields_as_code
return self.TEMPLATE.format(**kwargs)
def _get_fields_as_code(self):
single_line = " {name} = ({by_as_code}, u'{selector}') # {comment}"
lines = []
for field in self.fields:
lines.append(
single_line.format(
**{
"name": field.name,
"by_as_code": get_by_as_code_str(field.by),
"selector": field.selector.replace("'", "\\'"),
"comment": "location: %s dimensions: %s"
% (field.location, field.dimensions),
}
)
)
return LINESEP.join(lines)
@classmethod
def parse_string_to_po_class(cls, string):
# class {name}(object):
# Please do NOT remove auto-generated comments
# Url: {url}
# Area: {area}
# Image path: {img_path}
name_regexp = r"class (\w+)\(BasePageObject\):"
url_regexp = r"Url: (.+)"
area_regexp = r"Area: \(?([\w, ]+)\)?"
img_path_regexp = r"Image path: (.+)"
file_path_regexp = r"File path: (.+)"
fields_regexp = r"\s+(\w+) = (.+) # location: (.+) dimensions: (.+)"
name = get_match(name_regexp, string)
url = get_match(url_regexp, string)
area = eval(get_match(area_regexp, string))
img_path = get_match(img_path_regexp, string)
file_path = get_match(file_path_regexp, string)
tmp_fields = get_match(fields_regexp, string, False)
fields = []
if tmp_fields:
for (
field_name,
field_by_and_selector,
field_location,
field_dimensions,
) in tmp_fields:
by, selector = eval(field_by_and_selector)
location = eval(field_location)
dimensions = eval(field_dimensions)
po_class_field = PageObjectClassField(
field_name, by, selector, location, dimensions
)
fields.append(po_class_field)
return PageObjectClass(name, url, fields, area, file_path, img_path)
def __eq__(self, other):
return (
self.name == other.name
and self.url == other.url
and self.fields == other.fields
and self.area == other.area
and self.file_path == other.file_path
and self.img_path == other.img_path
)
def __repr__(self):
return str(self)
def __str__(self):
return f"PageObjectClass({self.__dict__})"
| # coding=utf8
import os
from easelenium.ui.file_utils import safe_create_path, save_file
from easelenium.utils import LINESEP, get_match
from selenium.webdriver.common.by import By
# TODO: move to f string and get rid of u strings
def get_by_as_code_str(by):
if by == By.LINK_TEXT:
return "By.LINK_TEXT"
elif by == By.CLASS_NAME:
return "By.CLASS_NAME"
elif by == By.CSS_SELECTOR:
return "By.CSS_SELECTOR"
elif by == By.XPATH:
return "By.XPATH"
elif by == By.ID:
return "By.ID"
else:
raise NotImplementedError
def get_by_from_code_str(by_as_string):
if by_as_string == "By.LINK_TEXT":
return By.LINK_TEXT
elif by_as_string == "By.CLASS_NAME":
return By.CLASS_NAME
elif by_as_string == "By.CSS_SELECTOR":
return By.CSS_SELECTOR
elif by_as_string == "By.XPATH":
return By.XPATH
elif by_as_string == "By.ID":
return By.ID
else:
raise NotImplementedError
class PageObjectClassField(object):
def __init__(self, name, by, selector, location, dimensions):
self.name = name
self.by = by
self.selector = selector
self.location = location
self.dimensions = dimensions
def __eq__(self, other):
return (other is not None) and (
self.name == other.name
and self.by == other.by
and self.selector == other.selector
)
def __repr__(self):
return str(self)
def __str__(self):
return f"PageObjectClassField({self.__dict__})"
class PageObjectClass(object):
IMAGE_FOLDER = "img"
TEMPLATE = """# coding=utf8
from selenium.webdriver.common.by import By
from easelenium.base_page_object import BasePageObject
class {name}(BasePageObject):
# Please do NOT remove auto-generated comments
# Url: {url}
# Area: {area}
# File path: {file_path}
# Image path: {img_path}
{fields_as_code}
"""
def __init__(
self,
name,
url,
fields,
area=None,
file_path=None,
img_path=None,
img_as_png=None,
):
self.name = name
self.url = url
self.fields = fields
self.area = area
self.file_path = file_path
self.img_as_png = img_as_png
self.img_path = img_path
def save(self, new_folder=None):
if new_folder:
py_filename = os.path.basename(self.file_path)
img_filename = os.path.basename(self.img_path)
self.file_path = os.path.abspath(os.path.join(new_folder, py_filename))
self.img_path = os.path.abspath(
os.path.join(new_folder, self.IMAGE_FOLDER, img_filename)
)
safe_create_path(self.file_path)
safe_create_path(self.img_path)
save_file(self.file_path, self._get_file_content())
save_file(self.img_path, self.img_as_png, False)
def _get_file_content(self):
kwargs = self.__dict__.copy()
fields_as_code = self._get_fields_as_code()
if len(fields_as_code.strip()) == 0:
fields_as_code = " pass" + LINESEP
kwargs["fields_as_code"] = fields_as_code
return self.TEMPLATE.format(**kwargs)
def _get_fields_as_code(self):
single_line = " {name} = ({by_as_code}, u'{selector}') # {comment}"
lines = []
for field in self.fields:
lines.append(
single_line.format(
**{
"name": field.name,
"by_as_code": get_by_as_code_str(field.by),
"selector": field.selector.replace("'", "\\'"),
"comment": "location: %s dimensions: %s"
% (field.location, field.dimensions),
}
)
)
return LINESEP.join(lines)
@classmethod
def parse_string_to_po_class(cls, string):
# class {name}(object):
# Please do NOT remove auto-generated comments
# Url: {url}
# Area: {area}
# Image path: {img_path}
name_regexp = r"class (\w+)\(BasePageObject\):"
url_regexp = r"Url: (.+)"
area_regexp = r"Area: \(?([\w, ]+)\)?"
img_path_regexp = r"Image path: (.+)"
file_path_regexp = r"File path: (.+)"
fields_regexp = r"\s+(\w+) = (.+) # location: (.+) dimensions: (.+)"
name = get_match(name_regexp, string)
url = get_match(url_regexp, string)
area = eval(get_match(area_regexp, string))
img_path = get_match(img_path_regexp, string)
file_path = get_match(file_path_regexp, string)
tmp_fields = get_match(fields_regexp, string, False)
fields = []
if tmp_fields:
for (
field_name,
field_by_and_selector,
field_location,
field_dimensions,
) in tmp_fields:
by, selector = eval(field_by_and_selector)
location = eval(field_location)
dimensions = eval(field_dimensions)
po_class_field = PageObjectClassField(
field_name, by, selector, location, dimensions
)
fields.append(po_class_field)
return PageObjectClass(name, url, fields, area, file_path, img_path)
def __eq__(self, other):
return (
self.name == other.name
and self.url == other.url
and self.fields == other.fields
and self.area == other.area
and self.file_path == other.file_path
and self.img_path == other.img_path
)
def __repr__(self):
return str(self)
def __str__(self):
return f"PageObjectClass({self.__dict__})" | en | 0.517848 | # coding=utf8 # TODO: move to f string and get rid of u strings # coding=utf8 from selenium.webdriver.common.by import By from easelenium.base_page_object import BasePageObject class {name}(BasePageObject): # Please do NOT remove auto-generated comments # Url: {url} # Area: {area} # File path: {file_path} # Image path: {img_path} {fields_as_code} # {comment}" # class {name}(object): # Please do NOT remove auto-generated comments # Url: {url} # Area: {area} # Image path: {img_path} # location: (.+) dimensions: (.+)" | 2.471511 | 2 |
donkeycar/parts/serial_arduino.py | arigadget/donkeycar | 2 | 6622891 | import serial
import time
class Serial_sense():
'''
get sensor information from Arduino via serial USB interface
'''
def __init__(self, dev='/dev/ttyUSB0', baudrate=115200, poll_delay=0.03):
self.status = b'F'
self.dev = dev
self.baudrate = baudrate
self.serial_port = serial.Serial(self.dev, self.baudrate)
time.sleep(1)
self.poll_delay = poll_delay
self.on = True
def update(self):
while self.on:
self.poll()
time.sleep(self.poll_delay)
def poll(self):
try:
self.status = self.serial_port.read()
#print(self.status)
except:
print('failed to read serial USB interface!!')
def run_threaded(self):
return self.status
def run(self):
self.poll()
return self.status
def shutdown(self):
self.serial_port.close()
self.on = False
| import serial
import time
class Serial_sense():
'''
get sensor information from Arduino via serial USB interface
'''
def __init__(self, dev='/dev/ttyUSB0', baudrate=115200, poll_delay=0.03):
self.status = b'F'
self.dev = dev
self.baudrate = baudrate
self.serial_port = serial.Serial(self.dev, self.baudrate)
time.sleep(1)
self.poll_delay = poll_delay
self.on = True
def update(self):
while self.on:
self.poll()
time.sleep(self.poll_delay)
def poll(self):
try:
self.status = self.serial_port.read()
#print(self.status)
except:
print('failed to read serial USB interface!!')
def run_threaded(self):
return self.status
def run(self):
self.poll()
return self.status
def shutdown(self):
self.serial_port.close()
self.on = False
| en | 0.44846 | get sensor information from Arduino via serial USB interface #print(self.status) | 3.104156 | 3 |
import_export_open_civic_data/models.py | adborden/WeVoteBase | 0 | 6622892 | <reponame>adborden/WeVoteBase
# import_export_open_civic_data/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import models
# https://github.com/opencivicdata/python-opencivicdata-django
# There are models for the ocd data types
# Other Open Civic Data identifiers that refer to the same division -- for example, those that refer to other
# political divisions whose boundaries are defined to be coterminous with this one.
# For example, ocd-division/country:us/state:wy will include an alias of ocd-division/country:us/state:wy/cd:1,
# since Wyoming has only one Congressional district.
#
# Division Identifiers here:
# Master CSV files with ocd-division-ids
# https://github.com/opencivicdata/ocd-division-ids/tree/master/identifiers
# https://raw.githubusercontent.com/opencivicdata/ocd-division-ids/master/identifiers/country-us.csv
# id,name,sameAs,sameAsNote,validThrough,census_geoid,census_geoid_12,census_geoid_14,openstates_district,placeholder_id,sch_dist_stateid,state_id
# ocd-division/country:us,United States,,,,,,,,,,
# ocd-division/country:us/court_of_appeals:1,United States Court of Appeals for 1st Circuit,,,,,,,,,,
# ocd-division/country:us/court_of_appeals:1/district_court:maine,United States District Court for District of Maine,,,,,,,,,,
# TODO create importer and table to cache this data
### Pulling out geographic divisions
# country / state /
# cd # congressional district, uses census_geoid ex/ ocd-division/country:us/state:ca/cd:12
# circuit_court
# county
# council_district
# school_district
# precinct
# parish
# council_district
# school_district
# precinct
# ward
# council_district
# school_district
# precinct
# place - uses census_geoid
# sldl # State legislature district, lower
# sldu # State legislature district, upper
# country / territory /
# municipio
# sldl # State legislature district, lower
# sldu # State legislature district, upper
| # import_export_open_civic_data/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import models
# https://github.com/opencivicdata/python-opencivicdata-django
# There are models for the ocd data types
# Other Open Civic Data identifiers that refer to the same division -- for example, those that refer to other
# political divisions whose boundaries are defined to be coterminous with this one.
# For example, ocd-division/country:us/state:wy will include an alias of ocd-division/country:us/state:wy/cd:1,
# since Wyoming has only one Congressional district.
#
# Division Identifiers here:
# Master CSV files with ocd-division-ids
# https://github.com/opencivicdata/ocd-division-ids/tree/master/identifiers
# https://raw.githubusercontent.com/opencivicdata/ocd-division-ids/master/identifiers/country-us.csv
# id,name,sameAs,sameAsNote,validThrough,census_geoid,census_geoid_12,census_geoid_14,openstates_district,placeholder_id,sch_dist_stateid,state_id
# ocd-division/country:us,United States,,,,,,,,,,
# ocd-division/country:us/court_of_appeals:1,United States Court of Appeals for 1st Circuit,,,,,,,,,,
# ocd-division/country:us/court_of_appeals:1/district_court:maine,United States District Court for District of Maine,,,,,,,,,,
# TODO create importer and table to cache this data
### Pulling out geographic divisions
# country / state /
# cd # congressional district, uses census_geoid ex/ ocd-division/country:us/state:ca/cd:12
# circuit_court
# county
# council_district
# school_district
# precinct
# parish
# council_district
# school_district
# precinct
# ward
# council_district
# school_district
# precinct
# place - uses census_geoid
# sldl # State legislature district, lower
# sldu # State legislature district, upper
# country / territory /
# municipio
# sldl # State legislature district, lower
# sldu # State legislature district, upper | en | 0.753975 | # import_export_open_civic_data/models.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- # https://github.com/opencivicdata/python-opencivicdata-django # There are models for the ocd data types # Other Open Civic Data identifiers that refer to the same division -- for example, those that refer to other # political divisions whose boundaries are defined to be coterminous with this one. # For example, ocd-division/country:us/state:wy will include an alias of ocd-division/country:us/state:wy/cd:1, # since Wyoming has only one Congressional district. # # Division Identifiers here: # Master CSV files with ocd-division-ids # https://github.com/opencivicdata/ocd-division-ids/tree/master/identifiers # https://raw.githubusercontent.com/opencivicdata/ocd-division-ids/master/identifiers/country-us.csv # id,name,sameAs,sameAsNote,validThrough,census_geoid,census_geoid_12,census_geoid_14,openstates_district,placeholder_id,sch_dist_stateid,state_id # ocd-division/country:us,United States,,,,,,,,,, # ocd-division/country:us/court_of_appeals:1,United States Court of Appeals for 1st Circuit,,,,,,,,,, # ocd-division/country:us/court_of_appeals:1/district_court:maine,United States District Court for District of Maine,,,,,,,,,, # TODO create importer and table to cache this data ### Pulling out geographic divisions # country / state / # cd # congressional district, uses census_geoid ex/ ocd-division/country:us/state:ca/cd:12 # circuit_court # county # council_district # school_district # precinct # parish # council_district # school_district # precinct # ward # council_district # school_district # precinct # place - uses census_geoid # sldl # State legislature district, lower # sldu # State legislature district, upper # country / territory / # municipio # sldl # State legislature district, lower # sldu # State legislature district, upper | 2.076555 | 2 |
Visualization_Script/discriminator_attention_visual(Single).py | aesrgan/A-ESRGAN | 58 | 6622893 | from basicsr.utils.registry import ARCH_REGISTRY
from torch import nn as nn
from torch.nn import functional as F
from torch.nn.utils import spectral_norm
import torch
class add_attn(nn.Module):
    """Additive attention gate: modulates feature map ``x`` with a
    single-channel attention mask computed from ``x`` and a gating signal ``g``."""

    def __init__(self, x_channels, g_channels=256):
        super(add_attn, self).__init__()
        # 1x1 conv + BN applied to the attended features before returning.
        self.W = nn.Sequential(
            nn.Conv2d(x_channels, x_channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(x_channels))
        # Downsamples x by 2 so it can be fused with the gating signal.
        self.theta = nn.Conv2d(x_channels, x_channels, kernel_size=2, stride=2, padding=0, bias=False)
        # Projects the gating signal into the x feature space.
        self.phi = nn.Conv2d(g_channels, x_channels, kernel_size=1, stride=1, padding=0, bias=True)
        # Collapses fused features into a 1-channel attention logit map.
        self.psi = nn.Conv2d(x_channels, out_channels=1, kernel_size=1, stride=1, padding=0, bias=True)

    def forward(self, x, g):
        """Return ``(attended_x, attention_map)``; the map is (N, 1, H, W) in (0, 1)."""
        assert x.size(0) == g.size(0)
        down_x = self.theta(x)
        # Resize the projected gate to the downsampled resolution of x.
        gate = F.interpolate(self.phi(g), size=down_x.size()[2:], mode='bilinear', align_corners=False)
        fused = F.relu(down_x + gate, inplace=True)
        attn = torch.sigmoid(self.psi(fused))
        # Upsample the mask back to the input resolution before applying it.
        attn = F.interpolate(attn, size=x.size()[2:], mode='bilinear', align_corners=False)
        return self.W(attn.expand_as(x) * x), attn
class unetCat(nn.Module):
    """U-Net skip merge: upsample the deeper map, convolve it, and concatenate
    with the (padded) encoder map along the channel axis."""

    def __init__(self, dim_in, dim_out):
        super(unetCat, self).__init__()
        # Spectrally-normalised 3x3 conv applied to the upsampled branch.
        self.convU = spectral_norm(nn.Conv2d(dim_in, dim_out, 3, 1, 1, bias=False))

    def forward(self, input_1, input_2):
        # Bring the deeper feature map up to (roughly) the skip resolution.
        up = F.interpolate(input_2, scale_factor=2, mode='bilinear', align_corners=False)
        deep = F.leaky_relu(self.convU(up), negative_slope=0.2, inplace=True)
        # Zero-pad the skip map so both spatial sizes agree before concat.
        half = (deep.size()[2] - input_1.size()[2]) // 2
        skip = F.pad(input_1, [half, half, half, half])
        return torch.cat([skip, deep], 1)
class UNetDiscriminatorAesrgan(nn.Module):
    """Defines a U-Net discriminator with spectral normalization (SN).

    Visualization variant: only the encoder and attention gates are run and
    the three 1-channel attention maps are returned. The decoder layers are
    still constructed so that full discriminator checkpoints load with
    ``strict=True`` and no key mismatches.
    """

    def __init__(self, num_in_ch, num_feat=64, skip_connection=True):
        super(UNetDiscriminatorAesrgan, self).__init__()
        norm = spectral_norm
        # Encoder (downsampling) path.
        self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
        self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 3, 2, 1, bias=False))
        self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 3, 2, 1, bias=False))
        # Center / gating signal for the attention blocks.
        self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 3, 2, 1, bias=False))
        self.gating = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 1, 1, 1, bias=False))
        # Attention blocks, one per encoder resolution.
        self.attn_1 = add_attn(x_channels=num_feat * 4, g_channels=num_feat * 4)
        self.attn_2 = add_attn(x_channels=num_feat * 2, g_channels=num_feat * 4)
        self.attn_3 = add_attn(x_channels=num_feat, g_channels=num_feat * 4)
        # Decoder merge blocks (unused by forward; kept for checkpoint loading).
        self.cat_1 = unetCat(dim_in=num_feat * 8, dim_out=num_feat * 4)
        self.cat_2 = unetCat(dim_in=num_feat * 4, dim_out=num_feat * 2)
        self.cat_3 = unetCat(dim_in=num_feat * 2, dim_out=num_feat)
        # Upsampling convs (unused by forward; kept for checkpoint loading).
        self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
        self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
        self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
        # Extra output convs (unused by forward; kept for checkpoint loading).
        self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)

    def forward(self, x):
        def act(t):
            return F.leaky_relu(t, negative_slope=0.2, inplace=True)

        x0 = act(self.conv0(x))
        x1 = act(self.conv1(x0))
        x2 = act(self.conv2(x1))
        x3 = act(self.conv3(x2))
        gated = act(self.gating(x3))
        # Discard the attended features; only return the attention maps.
        _, ly1 = self.attn_1(x2, gated)
        _, ly2 = self.attn_2(x1, gated)
        _, ly3 = self.attn_3(x0, gated)
        return ly1, ly2, ly3
if __name__ == "__main__":
from torchsummary import summary
from PIL import Image
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--img_path', default=r"..\inputs\img_015_SRF_4_HR.png", help='image path')
parser.add_argument(
'--model_path', default=r"..\experiments\pretrained_models\single", help='single model list path')
parser.add_argument('--save_path', default=r".\Visual", help='path to save the heat map')
args = parser.parse_args()
uNet = UNetDiscriminatorAesrgan(3, num_feat=64, skip_connection=True)
import numpy as np
imgpath = args.img_path
modelpath = args.model_path
save_dir = args.save_path
import cv2
import torchvision.transforms as transforms
img = cv2.imread(imgpath)
import os
import shutil
if not os.path.exists(save_dir):
os.mkdir(save_dir)
else:
shutil.rmtree(save_dir)
os.mkdir(save_dir)
for i in range(5000, 400000, 5000):
path = modelpath + r"\net_d_" + str(i) + ".pth"
l = torch.load(path)
p = uNet.state_dict()
n = {}
n["params"] = {}
for k in l["params"]:
k1 = k.replace("layer0.", "")
k2 = k1.replace("layer1.", "")
n["params"][k2] = l["params"][k]
uNet.load_state_dict(n["params"], strict=True)
input = transforms.ToTensor()(img)
input = input.unsqueeze(0)
AttentionLayer1, AttentionLayer2, AttentionLayer3 = uNet(input)
A1 = AttentionLayer1.detach().numpy()
A1 = np.squeeze(A1)
A1 = A1 * 255
A1 = cv2.applyColorMap(np.uint8(A1), cv2.COLORMAP_JET)
save_path = save_dir + "\A1_" + str(i) + ".png"
cv2.imwrite(save_path, A1)
A2 = AttentionLayer2.detach().numpy()
A2 = np.squeeze(A2)
A2 = A2 * 255
A2 = cv2.applyColorMap(np.uint8(A2), cv2.COLORMAP_JET)
save_path = save_dir + "\A2_" + str(i) + ".png"
cv2.imwrite(save_path, A2)
A3 = AttentionLayer3.detach().numpy()
A3 = np.squeeze(A3)
A3 = A3 * 255
A3 = cv2.applyColorMap(np.uint8(A3), cv2.COLORMAP_JET)
save_path = save_dir + "\A3_" + str(i) + ".png"
cv2.imwrite(save_path, A3) | from basicsr.utils.registry import ARCH_REGISTRY
from torch import nn as nn
from torch.nn import functional as F
from torch.nn.utils import spectral_norm
import torch
class add_attn(nn.Module):
    """Additive attention gate: modulates feature map ``x`` with a
    single-channel attention mask computed from ``x`` and a gating signal ``g``."""

    def __init__(self, x_channels, g_channels=256):
        super(add_attn, self).__init__()
        # 1x1 conv + BN applied to the attended features before returning.
        self.W = nn.Sequential(
            nn.Conv2d(x_channels, x_channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(x_channels))
        # Downsamples x by 2 so it can be fused with the gating signal.
        self.theta = nn.Conv2d(x_channels, x_channels, kernel_size=2, stride=2, padding=0, bias=False)
        # Projects the gating signal into the x feature space.
        self.phi = nn.Conv2d(g_channels, x_channels, kernel_size=1, stride=1, padding=0, bias=True)
        # Collapses fused features into a 1-channel attention logit map.
        self.psi = nn.Conv2d(x_channels, out_channels=1, kernel_size=1, stride=1, padding=0, bias=True)

    def forward(self, x, g):
        """Return ``(attended_x, attention_map)``; the map is (N, 1, H, W) in (0, 1)."""
        assert x.size(0) == g.size(0)
        down_x = self.theta(x)
        # Resize the projected gate to the downsampled resolution of x.
        gate = F.interpolate(self.phi(g), size=down_x.size()[2:], mode='bilinear', align_corners=False)
        fused = F.relu(down_x + gate, inplace=True)
        attn = torch.sigmoid(self.psi(fused))
        # Upsample the mask back to the input resolution before applying it.
        attn = F.interpolate(attn, size=x.size()[2:], mode='bilinear', align_corners=False)
        return self.W(attn.expand_as(x) * x), attn
class unetCat(nn.Module):
    """U-Net skip merge: upsample the deeper map, convolve it, and concatenate
    with the (padded) encoder map along the channel axis."""

    def __init__(self, dim_in, dim_out):
        super(unetCat, self).__init__()
        # Spectrally-normalised 3x3 conv applied to the upsampled branch.
        self.convU = spectral_norm(nn.Conv2d(dim_in, dim_out, 3, 1, 1, bias=False))

    def forward(self, input_1, input_2):
        # Bring the deeper feature map up to (roughly) the skip resolution.
        up = F.interpolate(input_2, scale_factor=2, mode='bilinear', align_corners=False)
        deep = F.leaky_relu(self.convU(up), negative_slope=0.2, inplace=True)
        # Zero-pad the skip map so both spatial sizes agree before concat.
        half = (deep.size()[2] - input_1.size()[2]) // 2
        skip = F.pad(input_1, [half, half, half, half])
        return torch.cat([skip, deep], 1)
class UNetDiscriminatorAesrgan(nn.Module):
    """Defines a U-Net discriminator with spectral normalization (SN).

    Visualization variant: only the encoder and attention gates are run and
    the three 1-channel attention maps are returned. The decoder layers are
    still constructed so that full discriminator checkpoints load with
    ``strict=True`` and no key mismatches.
    """

    def __init__(self, num_in_ch, num_feat=64, skip_connection=True):
        super(UNetDiscriminatorAesrgan, self).__init__()
        norm = spectral_norm
        # Encoder (downsampling) path.
        self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
        self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 3, 2, 1, bias=False))
        self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 3, 2, 1, bias=False))
        # Center / gating signal for the attention blocks.
        self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 3, 2, 1, bias=False))
        self.gating = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 1, 1, 1, bias=False))
        # Attention blocks, one per encoder resolution.
        self.attn_1 = add_attn(x_channels=num_feat * 4, g_channels=num_feat * 4)
        self.attn_2 = add_attn(x_channels=num_feat * 2, g_channels=num_feat * 4)
        self.attn_3 = add_attn(x_channels=num_feat, g_channels=num_feat * 4)
        # Decoder merge blocks (unused by forward; kept for checkpoint loading).
        self.cat_1 = unetCat(dim_in=num_feat * 8, dim_out=num_feat * 4)
        self.cat_2 = unetCat(dim_in=num_feat * 4, dim_out=num_feat * 2)
        self.cat_3 = unetCat(dim_in=num_feat * 2, dim_out=num_feat)
        # Upsampling convs (unused by forward; kept for checkpoint loading).
        self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
        self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
        self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
        # Extra output convs (unused by forward; kept for checkpoint loading).
        self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)

    def forward(self, x):
        def act(t):
            return F.leaky_relu(t, negative_slope=0.2, inplace=True)

        x0 = act(self.conv0(x))
        x1 = act(self.conv1(x0))
        x2 = act(self.conv2(x1))
        x3 = act(self.conv3(x2))
        gated = act(self.gating(x3))
        # Discard the attended features; only return the attention maps.
        _, ly1 = self.attn_1(x2, gated)
        _, ly2 = self.attn_2(x1, gated)
        _, ly3 = self.attn_3(x0, gated)
        return ly1, ly2, ly3
if __name__ == "__main__":
from torchsummary import summary
from PIL import Image
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--img_path', default=r"..\inputs\img_015_SRF_4_HR.png", help='image path')
parser.add_argument(
'--model_path', default=r"..\experiments\pretrained_models\single", help='single model list path')
parser.add_argument('--save_path', default=r".\Visual", help='path to save the heat map')
args = parser.parse_args()
uNet = UNetDiscriminatorAesrgan(3, num_feat=64, skip_connection=True)
import numpy as np
imgpath = args.img_path
modelpath = args.model_path
save_dir = args.save_path
import cv2
import torchvision.transforms as transforms
img = cv2.imread(imgpath)
import os
import shutil
if not os.path.exists(save_dir):
os.mkdir(save_dir)
else:
shutil.rmtree(save_dir)
os.mkdir(save_dir)
for i in range(5000, 400000, 5000):
path = modelpath + r"\net_d_" + str(i) + ".pth"
l = torch.load(path)
p = uNet.state_dict()
n = {}
n["params"] = {}
for k in l["params"]:
k1 = k.replace("layer0.", "")
k2 = k1.replace("layer1.", "")
n["params"][k2] = l["params"][k]
uNet.load_state_dict(n["params"], strict=True)
input = transforms.ToTensor()(img)
input = input.unsqueeze(0)
AttentionLayer1, AttentionLayer2, AttentionLayer3 = uNet(input)
A1 = AttentionLayer1.detach().numpy()
A1 = np.squeeze(A1)
A1 = A1 * 255
A1 = cv2.applyColorMap(np.uint8(A1), cv2.COLORMAP_JET)
save_path = save_dir + "\A1_" + str(i) + ".png"
cv2.imwrite(save_path, A1)
A2 = AttentionLayer2.detach().numpy()
A2 = np.squeeze(A2)
A2 = A2 * 255
A2 = cv2.applyColorMap(np.uint8(A2), cv2.COLORMAP_JET)
save_path = save_dir + "\A2_" + str(i) + ".png"
cv2.imwrite(save_path, A2)
A3 = AttentionLayer3.detach().numpy()
A3 = np.squeeze(A3)
A3 = A3 * 255
A3 = cv2.applyColorMap(np.uint8(A3), cv2.COLORMAP_JET)
save_path = save_dir + "\A3_" + str(i) + ".png"
cv2.imwrite(save_path, A3) | en | 0.733094 | # Upsampling Defines a U-Net discriminator with spectral normalization (SN) # Center # attention Blocks # Cat # upsample # extra # Attention | 2.42933 | 2 |
MSquared/SElaser.py | mmsutula/hwserver | 0 | 6622894 | <gh_stars>0
from MSquared.msquared import *

# solstis_addr and emm_addr take ('xxx.xxx.xxx.xxx', port) tuples matching
# the IP and port configured in the web interface; None leaves them unset.
init(__name__, solstis_addr=None, emm_addr=None)
init(__name__, \
solstis_addr = None, \
emm_addr = None)
# add solstis_addr and emm_addr as ('xxx.xxx.xxx.xxx',xxxxx), specifying the IP and port et in the web interface | en | 0.548707 | # add solstis_addr and emm_addr as ('xxx.xxx.xxx.xxx',xxxxx), specifying the IP and port et in the web interface | 1.447101 | 1 |
plot/figure_3.py | aneeshnaik/spam | 0 | 6622895 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: 2018
Author: <NAME>
Description: Figure 3 in Naik et al., 2019.
"""
from matplotlib import rcParams
import matplotlib.pyplot as plt
import spam
import numpy as np
rcParams['text.usetex'] = True
rcParams['font.size'] = 8
green1 = '#EDF8E9'
green2 = '#BAE4B3'
green3 = '#10C476'
green4 = '#31A354'
green5 = '#006D2C'
comp = '#A33159'
def figure_3():
    """Produce the prior figure (Fig. 3 of Naik et al. 2019).

    Top panel: stellar mass vs halo mass prior. Bottom panel: halo
    concentration vs halo mass prior. Shaded bands show the 1 and 2 sigma
    regions after broadening for f(R); dashed grey lines mark the original
    (unbroadened) 1 sigma boundaries.
    """
    # set up figure
    fig = plt.figure(figsize=(3.5, 7))
    ax_shm = fig.add_axes([0.15, 0.45, 0.7, 0.35])
    ax_cmr = fig.add_axes([0.15, 0.1, 0.7, 0.35])

    # halo masses (shared x axis, in solar masses)
    m_sun = 1.989e+30
    halo_mass = np.logspace(9, 14) * m_sun
    log_mass = np.log10(halo_mass / m_sun)

    # top panel: stellar-halo mass relation
    shm = np.log10(spam.fit.prior.SHM(halo_mass) / m_sun)
    shm_sig0 = spam.fit.prior.err_SHM(halo_mass)
    shm_sig = shm_sig0 + 0.2  # f(R) broadening
    ax_shm.fill_between(log_mass, shm - 2 * shm_sig, shm + 2 * shm_sig, color=green2)
    ax_shm.fill_between(log_mass, shm - shm_sig, shm + shm_sig, color=green4)
    ax_shm.plot(log_mass, shm, c='k')
    ax_shm.plot(log_mass, shm - shm_sig0, ls='dashed', c='lightgrey')
    ax_shm.plot(log_mass, shm + shm_sig0, ls='dashed', c='lightgrey')

    # bottom panel: concentration-mass relation
    cmr = spam.fit.prior.CMR(halo_mass)
    cmr_sig0 = 0.11
    cmr_sig = cmr_sig0 + 0.1  # f(R) broadening
    ax_cmr.fill_between(log_mass, cmr - 2 * cmr_sig, cmr + 2 * cmr_sig, color=green2)
    ax_cmr.fill_between(log_mass, cmr - cmr_sig, cmr + cmr_sig, color=green4)
    ax_cmr.plot(log_mass, cmr, c='k')
    ax_cmr.plot(log_mass, cmr + cmr_sig0, ls='dashed', c='lightgrey')
    ax_cmr.plot(log_mass, cmr - cmr_sig0, ls='dashed', c='lightgrey')

    # line up x axis limits in both panels, then label axes
    ax_cmr.set_xlim(ax_shm.get_xlim())
    ax_shm.set_ylabel(r"$\log_{10} M_*\ [M_\odot]$")
    ax_cmr.set_ylabel(r"$\log_{10} c_\mathrm{vir}$")
    ax_cmr.set_xlabel(r"$\log_{10} M_\mathrm{halo}\ [M_\odot]$")
    return fig
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: 2018
Author: <NAME>
Description: Figure 3 in Naik et al., 2019.
"""
from matplotlib import rcParams
import matplotlib.pyplot as plt
import spam
import numpy as np
rcParams['text.usetex'] = True
rcParams['font.size'] = 8
green1 = '#EDF8E9'
green2 = '#BAE4B3'
green3 = '#10C476'
green4 = '#31A354'
green5 = '#006D2C'
comp = '#A33159'
def figure_3():
    """Produce the prior figure (Fig. 3 of Naik et al. 2019).

    Top panel: stellar mass vs halo mass prior. Bottom panel: halo
    concentration vs halo mass prior. Shaded bands show the 1 and 2 sigma
    regions after broadening for f(R); dashed grey lines mark the original
    (unbroadened) 1 sigma boundaries.
    """
    # set up figure
    fig = plt.figure(figsize=(3.5, 7))
    ax_shm = fig.add_axes([0.15, 0.45, 0.7, 0.35])
    ax_cmr = fig.add_axes([0.15, 0.1, 0.7, 0.35])

    # halo masses (shared x axis, in solar masses)
    m_sun = 1.989e+30
    halo_mass = np.logspace(9, 14) * m_sun
    log_mass = np.log10(halo_mass / m_sun)

    # top panel: stellar-halo mass relation
    shm = np.log10(spam.fit.prior.SHM(halo_mass) / m_sun)
    shm_sig0 = spam.fit.prior.err_SHM(halo_mass)
    shm_sig = shm_sig0 + 0.2  # f(R) broadening
    ax_shm.fill_between(log_mass, shm - 2 * shm_sig, shm + 2 * shm_sig, color=green2)
    ax_shm.fill_between(log_mass, shm - shm_sig, shm + shm_sig, color=green4)
    ax_shm.plot(log_mass, shm, c='k')
    ax_shm.plot(log_mass, shm - shm_sig0, ls='dashed', c='lightgrey')
    ax_shm.plot(log_mass, shm + shm_sig0, ls='dashed', c='lightgrey')

    # bottom panel: concentration-mass relation
    cmr = spam.fit.prior.CMR(halo_mass)
    cmr_sig0 = 0.11
    cmr_sig = cmr_sig0 + 0.1  # f(R) broadening
    ax_cmr.fill_between(log_mass, cmr - 2 * cmr_sig, cmr + 2 * cmr_sig, color=green2)
    ax_cmr.fill_between(log_mass, cmr - cmr_sig, cmr + cmr_sig, color=green4)
    ax_cmr.plot(log_mass, cmr, c='k')
    ax_cmr.plot(log_mass, cmr + cmr_sig0, ls='dashed', c='lightgrey')
    ax_cmr.plot(log_mass, cmr - cmr_sig0, ls='dashed', c='lightgrey')

    # line up x axis limits in both panels, then label axes
    ax_cmr.set_xlim(ax_shm.get_xlim())
    ax_shm.set_ylabel(r"$\log_{10} M_*\ [M_\odot]$")
    ax_cmr.set_ylabel(r"$\log_{10} c_\mathrm{vir}$")
    ax_cmr.set_xlabel(r"$\log_{10} M_\mathrm{halo}\ [M_\odot]$")
    return fig
| en | 0.859012 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created: 2018 Author: <NAME> Description: Figure 3 in Naik et al., 2019. Figure showing priors used. Two panels: Upper shows stellar mass / halo mass relation, while lower shows concentration / halo mass relation. 1 sigma and 2 sigma regions are shown as coloured regions, while a dashed line also shows previous 1 sigma region, before broadening to account for f(R). # set up figure # halo masses (x axis in both panels) # calculate stellar masses # f(R) broadening # plot regions and lines in top panel # calculate concentrations # f(R) broadening # plot regions and lines in lower panel # line up x axis limits in both panels # axis labels | 2.243927 | 2 |
train.py | Yonder-OSS/ICLR-CV-Transfer-Learning | 0 | 6622896 | <filename>train.py
# This script can be used to train any deep learning model on the BigEarthNet.
#
# To run the code, you need to provide a json file for configurations of the training.
#
# Original Author: <NAME>, http://www.user.tu-berlin.de/gencersumbul/ <EMAIL>
# Usage: train.py [CONFIG_FILE_PATH]
from __future__ import print_function
# TODO - allow randomness during actual training
SEED = 42
import random as rn
rn.seed(SEED)
import numpy as np
np.random.seed(SEED)
import tensorflow as tf
tf.set_random_seed(SEED)
from src.data import ZindiDataset
from src.model import VggFcnBaseModel, Resnet152FcnBaseModel
import os
import argparse
import json
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def run_model(args):
    """Train the segmentation model described by ``args`` (a config dict).

    Builds the TFRecord input pipeline, assembles the VGG-FCN model with a
    segmentation head, restores pretrained weights, then runs the training
    loop with TensorBoard summaries and periodic checkpoints.
    """
    with tf.Session() as sess:
        # check if GPU is available
        print("GPU is available: {}".format(tf.test.is_gpu_available()))

        batch_iterator = ZindiDataset(
            TFRecord_paths=args['tr_tf_record_files'],
            batch_size=args['batch_size'],
            nb_epoch=args['nb_epoch'],
            shuffle_buffer_size=args['shuffle_buffer_size'],
        ).batch_iterator
        total_iterations = int(np.ceil(
            float(args['training_size'] * args['nb_epoch']) / args['batch_size']))
        next_batch = batch_iterator.get_next()

        # Build the model: segmentation head, loss, optimizer, then restore
        # the pretrained weights on top of the initialized graph.
        model = VggFcnBaseModel()
        model.build_segmentation_head(session=sess)
        model.define_loss(freeze=args['load_frozen'])
        model.define_optimizer(
            sess,
            args['learning_rate'],
            freeze=args['load_frozen'],
            exponential_decay=args['exponential_decay'],
        )
        model.load_pretrained(args['model_file'], sess)

        # Checkpointing and TensorBoard summaries.
        model_saver = tf.train.Saver(max_to_keep=5, var_list=tf.global_variables())
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(
            os.path.join(args['out_dir'], 'logs', 'training'), sess.graph)

        iteration_idx = 0
        progress_bar = tf.contrib.keras.utils.Progbar(
            target=total_iterations, stateful_metrics=['train_loss', 'val_loss'])
        while True:
            try:
                batch_dict = sess.run(next_batch)
            except tf.errors.OutOfRangeError:
                # Input pipeline exhausted: training is done.
                break
            _, train_loss, val_loss, batch_summary = sess.run(
                [model.train_op, model.train_loss, model.val_loss, summary_op],
                feed_dict=model.feed_dict(batch_dict, is_training=True))
            iteration_idx += 1
            summary_writer.add_summary(batch_summary, iteration_idx)
            if (iteration_idx % args['save_checkpoint_per_iteration'] == 0
                    and iteration_idx >= args['save_checkpoint_after_iteration']):
                model_saver.save(
                    sess, os.path.join(args['out_dir'], 'models', 'iteration'), iteration_idx)
            progress_bar.update(
                iteration_idx, values=[('train_loss', train_loss), ('val_loss', val_loss)])
        # Always save a final checkpoint after the loop completes.
        model_saver.save(
            sess, os.path.join(args['out_dir'], 'models', 'iteration'), iteration_idx)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description= 'Training script')
parser.add_argument('configs', help= 'json config file')
parser_args = parser.parse_args()
with open(os.path.realpath(parser_args.configs), 'rb') as f:
model_args = json.load(f)
run_model(model_args)
| <filename>train.py
# This script can be used to train any deep learning model on the BigEarthNet.
#
# To run the code, you need to provide a json file for configurations of the training.
#
# Original Author: <NAME>, http://www.user.tu-berlin.de/gencersumbul/ <EMAIL>
# Usage: train.py [CONFIG_FILE_PATH]
from __future__ import print_function
# TODO - allow randomness during actual training
SEED = 42
import random as rn
rn.seed(SEED)
import numpy as np
np.random.seed(SEED)
import tensorflow as tf
tf.set_random_seed(SEED)
from src.data import ZindiDataset
from src.model import VggFcnBaseModel, Resnet152FcnBaseModel
import os
import argparse
import json
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def run_model(args):
    """Train the segmentation model described by ``args`` (a config dict).

    Builds the TFRecord input pipeline, assembles the VGG-FCN model with a
    segmentation head, restores pretrained weights, then runs the training
    loop with TensorBoard summaries and periodic checkpoints.
    """
    with tf.Session() as sess:
        # check if GPU is available
        print("GPU is available: {}".format(tf.test.is_gpu_available()))

        batch_iterator = ZindiDataset(
            TFRecord_paths=args['tr_tf_record_files'],
            batch_size=args['batch_size'],
            nb_epoch=args['nb_epoch'],
            shuffle_buffer_size=args['shuffle_buffer_size'],
        ).batch_iterator
        total_iterations = int(np.ceil(
            float(args['training_size'] * args['nb_epoch']) / args['batch_size']))
        next_batch = batch_iterator.get_next()

        # Build the model: segmentation head, loss, optimizer, then restore
        # the pretrained weights on top of the initialized graph.
        model = VggFcnBaseModel()
        model.build_segmentation_head(session=sess)
        model.define_loss(freeze=args['load_frozen'])
        model.define_optimizer(
            sess,
            args['learning_rate'],
            freeze=args['load_frozen'],
            exponential_decay=args['exponential_decay'],
        )
        model.load_pretrained(args['model_file'], sess)

        # Checkpointing and TensorBoard summaries.
        model_saver = tf.train.Saver(max_to_keep=5, var_list=tf.global_variables())
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(
            os.path.join(args['out_dir'], 'logs', 'training'), sess.graph)

        iteration_idx = 0
        progress_bar = tf.contrib.keras.utils.Progbar(
            target=total_iterations, stateful_metrics=['train_loss', 'val_loss'])
        while True:
            try:
                batch_dict = sess.run(next_batch)
            except tf.errors.OutOfRangeError:
                # Input pipeline exhausted: training is done.
                break
            _, train_loss, val_loss, batch_summary = sess.run(
                [model.train_op, model.train_loss, model.val_loss, summary_op],
                feed_dict=model.feed_dict(batch_dict, is_training=True))
            iteration_idx += 1
            summary_writer.add_summary(batch_summary, iteration_idx)
            if (iteration_idx % args['save_checkpoint_per_iteration'] == 0
                    and iteration_idx >= args['save_checkpoint_after_iteration']):
                model_saver.save(
                    sess, os.path.join(args['out_dir'], 'models', 'iteration'), iteration_idx)
            progress_bar.update(
                iteration_idx, values=[('train_loss', train_loss), ('val_loss', val_loss)])
        # Always save a final checkpoint after the loop completes.
        model_saver.save(
            sess, os.path.join(args['out_dir'], 'models', 'iteration'), iteration_idx)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description= 'Training script')
parser.add_argument('configs', help= 'json config file')
parser_args = parser.parse_args()
with open(os.path.realpath(parser_args.configs), 'rb') as f:
model_args = json.load(f)
run_model(model_args)
| en | 0.589574 | # This script can be used to train any deep learning model on the BigEarthNet. # # To run the code, you need to provide a json file for configurations of the training. # # Original Author: <NAME>, http://www.user.tu-berlin.de/gencersumbul/ <EMAIL> # Usage: train.py [CONFIG_FILE_PATH] # TODO - allow randomness during actual training # check if GPU is available # load VGG base model and restore weights #model.load_pretrained(args['model_file'], sess) # add segmentation head, only initialize tensors in its scope # prepare model for unfreezing # model.define_loss(freeze = args['freeze']) # model.define_optimizer( # sess, # args['learning_rate'], # freeze = args['freeze'], # exponential_decay = args['exponential_decay'] # ) # set up model saver #_, metric_means, metric_update_ops = get_metrics(model.multi_hot_label, model.predictions, model.probabilities) #_, _, batch_loss, batch_summary = sess.run([train_op, metric_update_ops, model.train_loss, summary_op], | 2.655203 | 3 |
build/lib/Tests/Validation/test_datadual.py | enjoyneer87/SciDataTool | 24 | 6622897 | <filename>build/lib/Tests/Validation/test_datadual.py
import pytest
from SciDataTool import (
DataTime,
Data1D,
DataLinspace,
DataFreq,
DataDual,
)
import numpy as np
from numpy.testing import assert_array_almost_equal
@pytest.mark.validation
def test_datadual():
    """Check that DataDual exposes consistent time- and frequency-domain views."""
    freq = 50
    n_pts = 10
    time = np.linspace(0, 1 / freq, n_pts, endpoint=False)
    Time = DataLinspace(
        name="time",
        unit="s",
        initial=0,
        final=1 / freq,
        number=n_pts,
        include_endpoint=False,
    )
    # Time-domain signal: 3*cos(2*pi*f*t + 3*pi/4)
    field = 3 * np.cos(2 * np.pi * freq * time + 3 * np.pi / 4)
    Field = DataTime(name="field", symbol="X", axes=[Time], values=field, unit="m")

    # Matching single-line frequency-domain representation.
    freqs = np.array([50])
    Freqs = Data1D(name="freqs", unit="Hz", values=freqs)
    field_ft = np.array([3 * np.cos(3 * np.pi / 4) * (1 - 1j)])
    Field_ft = DataFreq(name="field", symbol="X", axes=[Freqs], values=field_ft, unit="m")

    Field_dual = DataDual(
        name="field",
        symbol="X",
        axes_dt=[Time],
        values_dt=field,
        axes_df=[Freqs],
        values_df=field_ft,
        unit="m",
    )

    # Time-domain extraction returns the stored time view.
    result = Field_dual.get_along("time")
    assert_array_almost_equal(result["X"], field)
    assert_array_almost_equal(result["time"], time)
    # Frequency-domain extraction returns the stored spectral view.
    result = Field_dual.get_along("freqs")
    assert_array_almost_equal(result["X"], field_ft)
    assert_array_almost_equal(result["freqs"], freqs)
    result = Field_dual.get_magnitude_along("freqs")
    assert_array_almost_equal(result["X"], np.array([3]))
    Field_dual.plot_2D_Data("time")
    Field_dual.plot_2D_Data("freqs")

    # test to_datadual
    Field_dual_1 = Field.to_datadual(datafreq=Field_ft)
    Field_dual_2 = Field_ft.to_datadual(datatime=Field)
    assert_array_almost_equal(Field_dual_1.values_dt, Field.values)
    assert_array_almost_equal(Field_dual_1.values_df, Field_ft.values)
    assert_array_almost_equal(Field_dual_2.values_df, Field_ft.values)
    assert_array_almost_equal(Field_dual_2.values_dt, Field.values)
    for dual in (Field_dual_1, Field_dual_2):
        dual.plot_2D_Data("time")
        dual.plot_2D_Data("freqs")


if __name__ == "__main__":
    test_datadual()
| <filename>build/lib/Tests/Validation/test_datadual.py
import pytest
from SciDataTool import (
DataTime,
Data1D,
DataLinspace,
DataFreq,
DataDual,
)
import numpy as np
from numpy.testing import assert_array_almost_equal
@pytest.mark.validation
def test_datadual():
    """Check that DataDual exposes consistent time- and frequency-domain views."""
    freq = 50
    n_pts = 10
    time = np.linspace(0, 1 / freq, n_pts, endpoint=False)
    Time = DataLinspace(
        name="time",
        unit="s",
        initial=0,
        final=1 / freq,
        number=n_pts,
        include_endpoint=False,
    )
    # Time-domain signal: 3*cos(2*pi*f*t + 3*pi/4)
    field = 3 * np.cos(2 * np.pi * freq * time + 3 * np.pi / 4)
    Field = DataTime(name="field", symbol="X", axes=[Time], values=field, unit="m")

    # Matching single-line frequency-domain representation.
    freqs = np.array([50])
    Freqs = Data1D(name="freqs", unit="Hz", values=freqs)
    field_ft = np.array([3 * np.cos(3 * np.pi / 4) * (1 - 1j)])
    Field_ft = DataFreq(name="field", symbol="X", axes=[Freqs], values=field_ft, unit="m")

    Field_dual = DataDual(
        name="field",
        symbol="X",
        axes_dt=[Time],
        values_dt=field,
        axes_df=[Freqs],
        values_df=field_ft,
        unit="m",
    )

    # Time-domain extraction returns the stored time view.
    result = Field_dual.get_along("time")
    assert_array_almost_equal(result["X"], field)
    assert_array_almost_equal(result["time"], time)
    # Frequency-domain extraction returns the stored spectral view.
    result = Field_dual.get_along("freqs")
    assert_array_almost_equal(result["X"], field_ft)
    assert_array_almost_equal(result["freqs"], freqs)
    result = Field_dual.get_magnitude_along("freqs")
    assert_array_almost_equal(result["X"], np.array([3]))
    Field_dual.plot_2D_Data("time")
    Field_dual.plot_2D_Data("freqs")

    # test to_datadual
    Field_dual_1 = Field.to_datadual(datafreq=Field_ft)
    Field_dual_2 = Field_ft.to_datadual(datatime=Field)
    assert_array_almost_equal(Field_dual_1.values_dt, Field.values)
    assert_array_almost_equal(Field_dual_1.values_df, Field_ft.values)
    assert_array_almost_equal(Field_dual_2.values_df, Field_ft.values)
    assert_array_almost_equal(Field_dual_2.values_dt, Field.values)
    for dual in (Field_dual_1, Field_dual_2):
        dual.plot_2D_Data("time")
        dual.plot_2D_Data("freqs")


if __name__ == "__main__":
    test_datadual()
| pt | 0.58501 | # test to_datadual | 2.226044 | 2 |
converter/setup.py | thpts/tls-cbor-benchmark | 0 | 6622898 | <filename>converter/setup.py
from setuptools import setup
setup(
    name='cbor_converter',
    author='<NAME>',
    author_email='<EMAIL>',
    license='Apache 2',
    packages=['cbor_converter'],
    # ASN.1 parsing libraries required at runtime.
    install_requires=['pyasn1', 'pyasn1-modules'],
    test_suite='test',
    entry_points={
        'console_scripts': ['cbor_converter = cbor_converter.cli:main'],
    },
)
| <filename>converter/setup.py
from setuptools import setup
setup(
    name='cbor_converter',
    author='<NAME>',
    author_email='<EMAIL>',
    license='Apache 2',
    packages=['cbor_converter'],
    # ASN.1 parsing libraries required at runtime.
    install_requires=['pyasn1', 'pyasn1-modules'],
    test_suite='test',
    entry_points={
        'console_scripts': ['cbor_converter = cbor_converter.cli:main'],
    },
)
| none | 1 | 1.37359 | 1 | |
python/ml4ir/applications/ranking/model/scoring/pointwise_scoring.py | ducouloa/ml4ir | 70 | 6622899 | from ml4ir.base.model.scoring.scoring_base import ScoringBase
class PointwiseScoring(ScoringBase):
    """Scoring strategy that scores each record independently (pointwise)."""

    def get_scoring_fn(self, **kwargs):
        """
        Define a pointwise ranking scoring function with specified architecture
        """
        # The scoring function simply forwards the features through the
        # configured architecture op; no listwise interaction is applied.
        return lambda features: self.architecture_op(features)
| from ml4ir.base.model.scoring.scoring_base import ScoringBase
class PointwiseScoring(ScoringBase):
    """Scoring strategy that scores each record independently (pointwise)."""

    def get_scoring_fn(self, **kwargs):
        """
        Define a pointwise ranking scoring function with specified architecture
        """
        # The scoring function simply forwards the features through the
        # configured architecture op; no listwise interaction is applied.
        return lambda features: self.architecture_op(features)
| en | 0.624081 | Define a pointwise ranking scoring function with specified architecture | 2.396487 | 2 |
apps/plea/tests/test_forms.py | uk-gov-mirror/ministryofjustice.manchester_traffic_offences_pleas | 3 | 6622900 | <filename>apps/plea/tests/test_forms.py
import redgreenunittest as unittest
from ..forms import CompanyFinancesForm
class TestCompanyForm(unittest.TestCase):
    """Validation rules for the company finances plea form."""

    def test_trading_period_not_specified_other_fields_not_required(self):
        # With no data at all, only trading_period itself should error.
        form = CompanyFinancesForm({})
        form.is_valid()
        self.assertIn('trading_period', form.errors)
        self.assertEqual(len(form.errors.items()), 1)

    def test_trading_period_specified_other_fields_required(self):
        # Once trading_period is answered, the three finance fields
        # become mandatory.
        form = CompanyFinancesForm({'trading_period': False})
        form.is_valid()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(form.errors.items()), 3)
        for field in ('number_of_employees', 'gross_turnover', 'net_turnover'):
            self.assertIn(field, form.errors)
| <filename>apps/plea/tests/test_forms.py
import redgreenunittest as unittest
from ..forms import CompanyFinancesForm
class TestCompanyForm(unittest.TestCase):
    """Validation rules for the company finances plea form."""

    def test_trading_period_not_specified_other_fields_not_required(self):
        # With no data at all, only trading_period itself should error.
        form = CompanyFinancesForm({})
        form.is_valid()
        self.assertIn('trading_period', form.errors)
        self.assertEqual(len(form.errors.items()), 1)

    def test_trading_period_specified_other_fields_required(self):
        # Once trading_period is answered, the three finance fields
        # become mandatory.
        form = CompanyFinancesForm({'trading_period': False})
        form.is_valid()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(form.errors.items()), 3)
        for field in ('number_of_employees', 'gross_turnover', 'net_turnover'):
            self.assertIn(field, form.errors)
| none | 1 | 2.810515 | 3 | |
allure-pytest/test/steps/parameters/step_default_parameters_test.py | vdsbenoit/allure-python | 1 | 6622901 | import allure
# Minimal reusable allure step; the doctest assertions in the tests below
# inspect how its positional/keyword arguments are recorded in the report.
@allure.step("First step")
def step_with_parameters(arg_param, kwarg_param=None):
    pass
def test_defined_default_parameter():
    """
    >>> from allure_commons.utils import represent
    >>> allure_report = getfixture('allure_report')
    >>> assert_that(allure_report,
    ...             has_test_case('test_defined_default_parameter',
    ...                           has_step('First step',
    ...                                    has_parameter('arg_param', represent(1)),
    ...                                    has_parameter('kwarg_param', represent(2)),
    ...                                    )
    ...                           )
    ...             )
    """
    # The docstring above is an executable doctest run against the generated
    # allure report: an explicitly passed keyword argument must be recorded
    # with its passed value.
    step_with_parameters(1, kwarg_param=2)
def test_not_defined_default_parameter():
    """
    >>> from allure_commons.utils import represent
    >>> allure_report = getfixture('allure_report')
    >>> assert_that(allure_report,
    ...             has_test_case('test_not_defined_default_parameter',
    ...                           has_step('First step',
    ...                                    has_parameter('arg_param', represent(1)),
    ...                                    has_parameter('kwarg_param', represent(None)),
    ...                                    )
    ...                           )
    ...             )
    """
    # Doctest above: when the keyword argument is omitted, its declared
    # default (None) must still appear as a recorded step parameter.
    step_with_parameters(1)
def test_default_parameter_in_args():
    """
    >>> from allure_commons.utils import represent
    >>> allure_report = getfixture('allure_report')
    >>> assert_that(allure_report,
    ...             has_test_case('test_default_parameter_in_args',
    ...                           has_step('First step',
    ...                                    has_parameter('arg_param', represent(1)),
    ...                                    has_parameter('kwarg_param', represent(2)),
    ...                                    )
    ...                           )
    ...             )
    """
    # Doctest above: a default-valued parameter supplied positionally must
    # be attributed to its keyword name in the report.
    step_with_parameters(1, 2)
| import allure
@allure.step("First step")
def step_with_parameters(arg_param, kwarg_param=None):
pass
def test_defined_default_parameter():
"""
>>> from allure_commons.utils import represent
>>> allure_report = getfixture('allure_report')
>>> assert_that(allure_report,
... has_test_case('test_defined_default_parameter',
... has_step('First step',
... has_parameter('arg_param', represent(1)),
... has_parameter('kwarg_param', represent(2)),
... )
... )
... )
"""
step_with_parameters(1, kwarg_param=2)
def test_not_defined_default_parameter():
"""
>>> from allure_commons.utils import represent
>>> allure_report = getfixture('allure_report')
>>> assert_that(allure_report,
... has_test_case('test_not_defined_default_parameter',
... has_step('First step',
... has_parameter('arg_param', represent(1)),
... has_parameter('kwarg_param', represent(None)),
... )
... )
... )
"""
step_with_parameters(1)
def test_default_parameter_in_args():
"""
>>> from allure_commons.utils import represent
>>> allure_report = getfixture('allure_report')
>>> assert_that(allure_report,
... has_test_case('test_default_parameter_in_args',
... has_step('First step',
... has_parameter('arg_param', represent(1)),
... has_parameter('kwarg_param', represent(2)),
... )
... )
... )
"""
step_with_parameters(1, 2)
| en | 0.214895 | >>> from allure_commons.utils import represent >>> allure_report = getfixture('allure_report') >>> assert_that(allure_report, ... has_test_case('test_defined_default_parameter', ... has_step('First step', ... has_parameter('arg_param', represent(1)), ... has_parameter('kwarg_param', represent(2)), ... ) ... ) ... ) >>> from allure_commons.utils import represent >>> allure_report = getfixture('allure_report') >>> assert_that(allure_report, ... has_test_case('test_not_defined_default_parameter', ... has_step('First step', ... has_parameter('arg_param', represent(1)), ... has_parameter('kwarg_param', represent(None)), ... ) ... ) ... ) >>> from allure_commons.utils import represent >>> allure_report = getfixture('allure_report') >>> assert_that(allure_report, ... has_test_case('test_default_parameter_in_args', ... has_step('First step', ... has_parameter('arg_param', represent(1)), ... has_parameter('kwarg_param', represent(2)), ... ) ... ) ... ) | 2.484377 | 2 |
api/qmrandom.py | unhaltable/txti | 0 | 6622902 | <reponame>unhaltable/txti
import random
import requests
class qmrandom(random.Random):
    """``random.Random`` subclass backed by the ANU quantum RNG web service.

    ``random()`` draws from a local cache of 32-bit values that is refilled
    100 at a time over HTTPS; ``getrandbits`` fetches exactly the bytes it
    needs per call. Requires network access; there is no reproducible seed.
    """

    def __init__(self):
        # Cache of 8-hex-digit (32-bit) strings fetched from the service.
        self.nums = []
        super(qmrandom, self).__init__()

    def seed(self, x=None):
        # A hardware/web RNG has no seedable state. Random.__init__ calls
        # seed(), so this must exist, but it is intentionally a no-op.
        pass

    def random(self):
        """Return the next random float in [0.0, 1.0)."""
        if not self.nums:
            self._getNums()
        # Each cached entry is a 32-bit value encoded as 8 hex digits.
        return int(self.nums.pop(), 16) / (16 ** 8)

    def getstate(self):
        # No reproducible internal state to expose.
        pass

    def setstate(self, state=None):
        # BUG FIX: this was declared as ``setstate(self)`` with no ``state``
        # parameter, so any caller following the random.Random API
        # (``r.setstate(r.getstate())``) raised TypeError. Accepting and
        # ignoring the argument is the correct no-op for a stateless source.
        pass

    def getrandbits(self, k):
        """Return a Python int with ``k`` random bits from the quantum source."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        num = (k + 7) // 8  # bytes needed, rounded up
        # NOTE(security): verify=False disables TLS certificate validation;
        # kept for backward compatibility but should be removed if possible.
        r = requests.get(
            "https://qrng.anu.edu.au/API/jsonI.php?length={}&type=uint8&".format(num),
            verify=False)
        n = r.json()['data']
        # Assemble the fetched bytes little-endian into one integer.
        i = 0
        for l in range(len(n)):
            i += n[l] << (8 * l)
        # Drop the excess low-order bits so exactly k bits remain.
        return i >> (num * 8 - k)

    def _getNums(self):
        # Refill the local cache with 100 32-bit values (hex-encoded).
        r = requests.get("https://qrng.anu.edu.au/API/jsonI.php?length=100&type=hex16&size=4", verify=False)
        j = r.json()
        self.nums = j["data"]
# Module-level convenience API mirroring the stdlib ``random`` module:
# all of these functions share a single qmrandom instance.
_inst = qmrandom()
seed = _inst.seed
random = _inst.random  # NOTE: shadows the ``random`` module imported above
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits

if __name__ == "__main__":
    # Smoke test: draw four quantum-random ints (requires network access).
    x = qmrandom()
    for i in range(4):
        print(x.randint(1, 16))
| import random
import requests
class qmrandom(random.Random):
def __init__(self):
self.nums = []
super(qmrandom, self).__init__()
def seed(self, x=None):
pass
def random(self):
if (len(self.nums) == 0):
self._getNums()
return int(self.nums.pop(), 16) / (16 ** 8)
def getstate(self):
pass
def setstate(self):
pass
def getrandbits(self, k):
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
num = (k + 7) // 8 # bits / 8 and rounded up
r = requests.get("https://qrng.anu.edu.au/API/jsonI.php?length={}&type=uint8&".format(num),
verify=False)
n = r.json()['data']
i = 0
for l in range(len(n)):
i += n[l] << (8 * l)
return i >> (num * 8 - k)
def _getNums(self):
r = requests.get("https://qrng.anu.edu.au/API/jsonI.php?length=100&type=hex16&size=4", verify=False)
j = r.json()
self.nums = j["data"]
_inst = qmrandom()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
if __name__ == "__main__":
x = qmrandom()
for i in range(4):
print(x.randint(1, 16)) | en | 0.797366 | # bits / 8 and rounded up | 3.098287 | 3 |
com/Leetcode/1344.AngleBetweenHandsofaClock.py | samkitsheth95/InterviewPrep | 0 | 6622903 | class Solution:
def angleClock(self, hour: int, minutes: int) -> float:
am = minutes * 6
ah = ((hour * 30) + (minutes/60) * 30) % 360
if am > ah:
return min(am - ah, (360 - am) + ah)
else:
return min(ah - am, (360 - ah) + am)
def angleClockLeetcodeSol(self, hour: int, minutes: int) -> float:
one_min_angle = 6
one_hour_angle = 30
minutes_angle = one_min_angle * minutes
hour_angle = (hour % 12 + minutes / 60) * one_hour_angle
diff = abs(hour_angle - minutes_angle)
return min(diff, 360 - diff)
if __name__ == '__main__':
    # Guarded so importing this module no longer runs the demo as a side
    # effect; behavior when executed as a script is unchanged.
    sol = Solution()
    print(sol.angleClock(hour=12, minutes=30))
| class Solution:
def angleClock(self, hour: int, minutes: int) -> float:
am = minutes * 6
ah = ((hour * 30) + (minutes/60) * 30) % 360
if am > ah:
return min(am - ah, (360 - am) + ah)
else:
return min(ah - am, (360 - ah) + am)
def angleClockLeetcodeSol(self, hour: int, minutes: int) -> float:
one_min_angle = 6
one_hour_angle = 30
minutes_angle = one_min_angle * minutes
hour_angle = (hour % 12 + minutes / 60) * one_hour_angle
diff = abs(hour_angle - minutes_angle)
return min(diff, 360 - diff)
sol = Solution()
print(sol.angleClock(hour=12, minutes=30))
| none | 1 | 3.668363 | 4 | |
examples/adh_Savannah/adh.py | jonghyunharrylee/PCGA | 16 | 6622904 | # Savannah
import os
import numpy as np
from shutil import copy2, rmtree
from time import time
from multiprocessing import Pool
import setup_savannah
'''
three operations
1. write inputs
2. run simul
3. read input
'''
class Model:
    """Driver for AdH (Adaptive Hydraulics) Savannah River simulations.

    For each bathymetry realization the driver creates a scratch directory,
    writes the mesh / boundary-condition / hot-start files via
    ``setup_savannah``, runs the forward problem and returns the simulated
    observations ``H @ x`` at the measurement locations.
    """

    def __init__(self, params=None):
        """Configure paths, executables and boundary forcing.

        Args:
            params (dict, optional): overrides for directories, AdH
                executables and input files, observation-location files and
                the boundary forcing (``Q_b``, ``z_f``).
        """
        self.idx = 0
        self.homedir = os.path.abspath('./')
        self.deletedir = True
        from psutil import cpu_count  # physical cpu counts
        self.ncores = cpu_count(logical=False)
        self.ntsim = 1
        # Boundary forcing defaults: inflow discharge and free surface
        # elevation at the boundary.
        self.Q_b = 6873.5
        self.z_f = 97.14
        if params is not None:
            if 'deletedir' in params:
                self.deletedir = params['deletedir']
            if 'homedir' in params:
                self.homedir = params['homedir']
            if 'ncores' in params:
                self.ncores = params['ncores']
            self.adh_version = params['adh_version']
            self.adh_exec = params['adh_exec']
            self.pre_adh_exec = params['pre_adh_exec']
            self.adh_grid = params['adh_grid']
            self.adh_rect = params['adh_rect']
            self.adh_mesh = params['adh_mesh']
            self.adh_bc = params['adh_bc']
            if 'adh_ntsim' in params:
                self.ntsim = params['adh_ntsim']
            if 'z_f' in params:
                self.z_f = params['z_f']
            if 'Q_b' in params:
                self.Q_b = params['Q_b']
            self.velocity_obs_file = params['velocity_obs_file']
            self.elevation_obs_file = params['elevation_obs_file']
            self.true_soln_file_h5 = params.get('true_soln_file_h5')
            self.true_soln_meshbase = params.get('true_soln_meshbase')
            self.sim_dir = params.get('sim_dir', './simul')

    def create_dir(self, idx=None):
        """Create the scratch directory for realization ``idx``.

        Copies the template mesh (``.3dm``) and boundary-condition
        (``.bc``) files into it.

        Returns:
            tuple[str, str]: absolute scratch directory and the simulation
            file prefix inside it.
        """
        if idx is None:
            idx = self.idx
        mydir = os.path.join(self.sim_dir, "simul{0:04d}".format(idx))
        mydir = os.path.abspath(os.path.join(self.homedir, mydir))
        if not os.path.exists(mydir):
            os.makedirs(mydir)
        sim_prefix = os.path.abspath(mydir + "/savannah_gridgen_new_nx501_ny41")
        copy2(self.adh_mesh, sim_prefix + '.3dm')
        copy2(self.adh_bc, sim_prefix + '.bc')
        return mydir, sim_prefix

    def run_model(self, bathy, idx=0):
        """Run one forward AdH simulation for a single bathymetry field.

        Args:
            bathy (np.ndarray): bathymetry values; flattened to (nn,).
            idx (int): realization index, used to name the scratch dir.

        Returns:
            np.ndarray: simulated observations ``H @ x`` at the velocity
            and elevation measurement locations.
        """
        sim_dir, sim_prefix = self.create_dir(idx)
        # Forward-problem geometry, boundary and initial conditions.
        forward_prob = setup_savannah.SavannahRiver(
            grid_file=self.adh_grid,
            rect_file=self.adh_rect,
            initial_free_surface_elevation=self.z_f)
        # Write out the base mesh, input file and hot-start (initial) file.
        forward_prob.writeMesh(sim_prefix)
        forward_prob.writeBCFile(sim_prefix)
        forward_prob.writeHotFile(sim_prefix)
        # Measurement locations.
        velocity_obs_loc = np.loadtxt(self.velocity_obs_file)
        elev_obs_loc = np.loadtxt(self.elevation_obs_file)
        # Inverse-problem wrapper that controls the forward simulation.
        prm = setup_savannah.SavannahRiverProblem(
            forward_prob.mesh,
            forward_prob,
            velocity_obs_loc,
            elev_obs_loc,
            ntsim=self.ntsim,
            sim_prefix=sim_prefix,
            debug_rigid_lid=False,
            pre_adh_path=self.pre_adh_exec,
            adh_path=self.adh_exec,
            true_soln_file_h5=self.true_soln_file_h5,
            true_soln_meshbase=self.true_soln_meshbase,
            Q_b=self.Q_b,
            z_f=self.z_f)
        t0 = 0.
        x_true = prm.get_true_solution(t0)
        H_meas = prm.get_measurement_matrix(t0)  # measurement operator
        x_dummy = x_true.copy()
        bathy = bathy.reshape(-1)
        # State vector layout: [bathymetry (nn), velocity computed from it].
        x_dummy[:prm.nn] = bathy
        x_dummy[prm.nn:] = prm.compute_velocity(bathy, t0)
        if self.deletedir:
            rmtree(sim_dir, ignore_errors=True)
        return H_meas.dot(x_dummy)

    def run(self, bathy, par, ncores=None):
        """Evaluate every bathymetry realization (one per column of ``bathy``).

        Args:
            bathy (np.ndarray): (nn, nreal) array of realizations.
            par (bool): if True, evaluate realizations in parallel.
            ncores (int, optional): worker count; defaults to the physical
                core count detected at construction.

        Returns:
            np.ndarray: simulated observations, one column per realization.
        """
        if ncores is None:
            ncores = self.ncores
        args_map = [(bathy[:, i:i + 1], i) for i in range(bathy.shape[1])]
        if par:
            # BUG FIX: the Pool was previously never closed/joined (those
            # calls were dead code after the return), leaking worker
            # processes; the context manager shuts it down deterministically.
            with Pool(processes=ncores) as pool:
                simul_obs = pool.map(self, args_map)
        else:
            simul_obs = [self(item) for item in args_map]
        return np.array(simul_obs).T

    def __call__(self, args):
        """Adapter so the instance itself can be mapped over (bathy, idx) pairs."""
        return self.run_model(args[0], args[1])
if __name__ == '__main__':
    import adh
    import numpy as np
    from time import time

    # Paths to the AdH executables, mesh/BC templates, observation-location
    # files and the reference ("true") solution used to build measurements.
    params = {'sim_dir': './simul',
              'adh_exec': './bin/v4/adh',
              'pre_adh_exec': './bin/v4/pre_adh',
              'adh_version': 4.5,
              'adh_grid': './mesh_files/grid_savannah_river_nx501_ny41',
              'adh_rect': './mesh_files/rect_savannah_river_nx501_ny41',
              'adh_mesh': './sim_files/savannah_gridgen_new_nx501_ny41.3dm',
              'adh_bc': './sim_files/savannah_gridgen_new_nx501_ny41.bc',
              'velocity_obs_file': './observation_files/observation_loc_drogue12345_50ft.dat',
              'elevation_obs_file': './observation_files/observation_loc_none.dat',
              'true_soln_file_h5': './true_files/savannah_gridgen_true_nx501_ny41_p0.h5',
              'true_soln_meshbase': './true_files/savannah_gridgen_true_nx501_ny41'
              }

    # One bathymetry realization per column.
    bathy = np.loadtxt("true.txt")
    bathy = np.array(bathy).reshape(-1, 1)

    par = False  # parallelization false
    mymodel = adh.Model(params)

    print('1) single run')
    #simul_obs = mymodel.run(bathy,False)
    #simul_obs = mymodel.run_model(bathy)

    ncores = 2
    nrelzs = 2

    print('2) parallel run with ncores = %d' % ncores)
    par = True  # parallelization false
    # Perturb the true bathymetry to create nrelzs realizations.
    bathyrelz = np.zeros((np.size(bathy, 0), nrelzs), 'd')
    for i in range(nrelzs):
        bathyrelz[:, i:i+1] = bathy + 0.1*np.random.randn(np.size(bathy, 0), 1)

    simul_obs_all = mymodel.run(bathyrelz, True, ncores)
    #
    #simul_obs_all = pool.map(run_in_parallel, args_map)
    #pool.close()
    #pool.join()
    #simul_obs_all = mymodel.run(bathyrelz,par,ncores = ncores)
    #simul_obs = run_in_parallel(args_map[0])
    #print(simul_obs_all)
    # use all the physcal cores if not specify ncores
    #print('3) parallel run with all the physical cores')
    #simul_obs_all = mymodel.run(bathyrelz,par)
    #print(simul_obs_all)
| # Savannah
import os
import numpy as np
from shutil import copy2, rmtree
from time import time
from multiprocessing import Pool
import setup_savannah
'''
three operations
1. write inputs
2. run simul
3. read input
'''
class Model:
def __init__(self,params = None):
self.idx = 0
self.homedir = os.path.abspath('./')
self.deletedir = True
from psutil import cpu_count # physcial cpu counts
self.ncores = cpu_count(logical=False)
self.ntsim = 1
##instantiate the class that describes the forward problem geometry, boundary conditions, initial conditions
# inflow discharge and free surface elevation at the boundary
self.Q_b = 6873.5
self.z_f = 97.14
if params is not None:
if 'deletedir' in params:
self.deletedir = params['deletedir']
if 'homedir' in params:
self.homedir = params['homedir']
if 'ncores' in params:
self.ncores = params['ncores']
self.adh_version = params['adh_version']
self.adh_exec = params['adh_exec']
self.pre_adh_exec = params['pre_adh_exec']
self.adh_grid = params['adh_grid']
self.adh_rect = params['adh_rect']
self.adh_mesh = params['adh_mesh']
self.adh_bc = params['adh_bc']
if 'adh_ntsim' in params: self.ntsim = params['adh_ntsim']
# inflow discharge and free surface elevation at the boundary
# needed for writing initial condtions potentailly
if 'z_f' in params: self.z_f = params['z_f']
if 'Q_b' in params: self.Q_b = params['Q_b']
self.velocity_obs_file = params['velocity_obs_file']
self.elevation_obs_file = params['elevation_obs_file']
self.true_soln_file_h5 = None if 'true_soln_file_h5' not in params else params['true_soln_file_h5']
self.true_soln_meshbase = None if 'true_soln_meshbase' not in params else params['true_soln_meshbase']
self.sim_dir = './simul' if 'sim_dir' not in params else params['sim_dir']
def create_dir(self,idx=None):
if idx is None:
idx = self.idx
mydir = os.path.join(self.sim_dir,"simul{0:04d}".format(idx))
mydir = os.path.abspath(os.path.join(self.homedir, mydir))
if not os.path.exists(mydir):
os.makedirs(mydir)
#sim_prefix= "./sim_files/savannah_gridgen_new_nx501_ny41" #basename of adh mesh and files for simulation
#sim_prefix = './sim_files/savannah_gridgen_new_nx501_ny41.bc'
#if self.adh_version < 5.:
sim_prefix = os.path.abspath(mydir + "/savannah_gridgen_new_nx501_ny41")
#else:
# sim_prefix = os.path.abspath(mydir + "/savannah_gridgen_new_nx501_ny41")
copy2(self.adh_mesh, sim_prefix + '.3dm')
copy2(self.adh_bc, sim_prefix + '.bc')
return mydir, sim_prefix
def run_model(self,bathy,idx=0):
'''run adh
'''
sim_dir, sim_prefix = self.create_dir(idx)
#print(sim_dir)
##instantiate the inverse problem which controls the forward model simulation
forward_prob = setup_savannah.SavannahRiver(grid_file=self.adh_grid,
rect_file=self.adh_rect,
initial_free_surface_elevation=self.z_f)
# mesh_file=self.adh_mesh,
##write out the base mesh, input file, and initial condition file
forward_prob.writeMesh(sim_prefix)
forward_prob.writeBCFile(sim_prefix)
forward_prob.writeHotFile(sim_prefix)
##get the measurement locations
velocity_obs_loc = np.loadtxt(self.velocity_obs_file)
elev_obs_loc = np.loadtxt(self.elevation_obs_file)
##instantiate the inverse problem which controls the forward model simulation
prm = setup_savannah.SavannahRiverProblem(forward_prob.mesh,
forward_prob,
velocity_obs_loc,
elev_obs_loc,
ntsim=self.ntsim,
sim_prefix=sim_prefix,
debug_rigid_lid=False,
pre_adh_path=self.pre_adh_exec,
adh_path=self.adh_exec,
true_soln_file_h5=self.true_soln_file_h5,
true_soln_meshbase=self.true_soln_meshbase,
Q_b=self.Q_b,
z_f=self.z_f)
# AdH_version=self.adh_version,
t0 = 0.
x_true = prm.get_true_solution(t0)
# measurment matrix
H_meas = prm.get_measurement_matrix(t0)
x_dummy = x_true.copy()
#z_in = x_true[:prm.nn]
bathy = bathy.reshape(-1)
x_dummy[:prm.nn] = bathy
x_dummy[prm.nn:] = prm.compute_velocity(bathy, t0)
if self.deletedir:
rmtree(sim_dir, ignore_errors=True)
return H_meas.dot(x_dummy)
def run(self,bathy,par,ncores=None):
if ncores is None:
ncores = self.ncores
method_args = range(bathy.shape[1])
args_map = [(bathy[:, arg:arg + 1], arg) for arg in method_args]
if par:
pool = Pool(processes=ncores)
simul_obs = pool.map(self, args_map)
else:
simul_obs =[]
for item in args_map:
simul_obs.append(self(item))
return np.array(simul_obs).T
#pool.close()
#pool.join()
def __call__(self,args):
return self.run_model(args[0],args[1])
#return args[0](args[1], args[2])
#return self.run_model(self,bathy,idx)
#def run_in_parallel(self,args):
# return args[0].run_model(args[1], args[2])
if __name__ == '__main__':
import adh
import numpy as np
from time import time
params = {'sim_dir':'./simul',
'adh_exec':'./bin/v4/adh',
'pre_adh_exec':'./bin/v4/pre_adh',
'adh_version':4.5,
'adh_grid':'./mesh_files/grid_savannah_river_nx501_ny41',
'adh_rect':'./mesh_files/rect_savannah_river_nx501_ny41',
'adh_mesh':'./sim_files/savannah_gridgen_new_nx501_ny41.3dm',
'adh_bc':'./sim_files/savannah_gridgen_new_nx501_ny41.bc',
'velocity_obs_file':'./observation_files/observation_loc_drogue12345_50ft.dat',
'elevation_obs_file':'./observation_files/observation_loc_none.dat',
'true_soln_file_h5':'./true_files/savannah_gridgen_true_nx501_ny41_p0.h5',
'true_soln_meshbase':'./true_files/savannah_gridgen_true_nx501_ny41'
}
bathy = np.loadtxt("true.txt")
bathy = np.array(bathy).reshape(-1, 1)
par = False # parallelization false
mymodel = adh.Model(params)
print('1) single run')
#simul_obs = mymodel.run(bathy,False)
#simul_obs = mymodel.run_model(bathy)
ncores = 2
nrelzs = 2
print('2) parallel run with ncores = %d' % ncores)
par = True # parallelization false
bathyrelz = np.zeros((np.size(bathy,0),nrelzs),'d')
for i in range(nrelzs):
bathyrelz[:,i:i+1] = bathy + 0.1*np.random.randn(np.size(bathy,0),1)
simul_obs_all = mymodel.run(bathyrelz,True,ncores)
#
#simul_obs_all = pool.map(run_in_parallel, args_map)
#pool.close()
#pool.join()
#simul_obs_all = mymodel.run(bathyrelz,par,ncores = ncores)
#simul_obs = run_in_parallel(args_map[0])
#print(simul_obs_all)
# use all the physcal cores if not specify ncores
#print('3) parallel run with all the physical cores')
#simul_obs_all = mymodel.run(bathyrelz,par)
#print(simul_obs_all)
| en | 0.508431 | # Savannah three operations
1. write inputs
2. run simul
3. read input # physcial cpu counts ##instantiate the class that describes the forward problem geometry, boundary conditions, initial conditions # inflow discharge and free surface elevation at the boundary # inflow discharge and free surface elevation at the boundary # needed for writing initial condtions potentailly #sim_prefix= "./sim_files/savannah_gridgen_new_nx501_ny41" #basename of adh mesh and files for simulation #sim_prefix = './sim_files/savannah_gridgen_new_nx501_ny41.bc' #if self.adh_version < 5.: #else: # sim_prefix = os.path.abspath(mydir + "/savannah_gridgen_new_nx501_ny41") run adh #print(sim_dir) ##instantiate the inverse problem which controls the forward model simulation # mesh_file=self.adh_mesh, ##write out the base mesh, input file, and initial condition file ##get the measurement locations ##instantiate the inverse problem which controls the forward model simulation # AdH_version=self.adh_version, # measurment matrix #z_in = x_true[:prm.nn] #pool.close() #pool.join() #return args[0](args[1], args[2]) #return self.run_model(self,bathy,idx) #def run_in_parallel(self,args): # return args[0].run_model(args[1], args[2]) # parallelization false #simul_obs = mymodel.run(bathy,False) #simul_obs = mymodel.run_model(bathy) # parallelization false # #simul_obs_all = pool.map(run_in_parallel, args_map) #pool.close() #pool.join() #simul_obs_all = mymodel.run(bathyrelz,par,ncores = ncores) #simul_obs = run_in_parallel(args_map[0]) #print(simul_obs_all) # use all the physcal cores if not specify ncores #print('3) parallel run with all the physical cores') #simul_obs_all = mymodel.run(bathyrelz,par) #print(simul_obs_all) | 2.382494 | 2 |
fast_tmp/utils/pydantic/__init__.py | Chise1/fastapi-cli | 5 | 6622905 | from fast_tmp.utils.pydantic.creator import pydantic_model_creator # noqa
from fast_tmp.utils.pydantic.creator import pydantic_queryset_creator # type: ignore
| from fast_tmp.utils.pydantic.creator import pydantic_model_creator # noqa
from fast_tmp.utils.pydantic.creator import pydantic_queryset_creator # type: ignore
| it | 0.280569 | # noqa # type: ignore | 1.121656 | 1 |
webdriver_manager/archive.py | mlouielu/webdriver_manager | 4 | 6622906 | import tarfile
import zipfile
class Archive(object):
    """Thin wrapper that unpacks a ``.zip`` or ``.tar.gz`` archive."""

    def __init__(self, path: str):
        # Path to the archive on disk.
        self.file_path = path

    def unpack(self, directory):
        """Extract the archive into ``directory``.

        Returns:
            list[str] | None: member names extracted, or None when the
            file extension is not recognized (historic behavior).
        """
        if self.file_path.endswith(".zip"):
            return self.__extract_zip(directory)
        elif self.file_path.endswith(".tar.gz"):
            return self.__extract_tar_file(directory)

    def __extract_zip(self, to_directory):
        # BUG FIX: the ZipFile handle was never closed (resource leak);
        # the context manager guarantees it.
        with zipfile.ZipFile(self.file_path) as archive:
            try:
                archive.extractall(to_directory)
            except OSError as e:
                # Historic best-effort behavior: ignore ETXTBSY (26) and
                # EACCES (13), e.g. overwriting a running browser driver.
                # BUG FIX: the old broad ``except Exception`` indexed
                # ``e.args[1]`` and itself raised IndexError for any
                # single-argument exception.
                if e.errno not in (26, 13):
                    raise
            return archive.namelist()

    def __extract_tar_file(self, to_directory):
        # NOTE(security): extractall trusts member paths; do not unpack
        # untrusted archives (path-traversal risk).
        try:
            tar = tarfile.open(self.file_path, mode="r:gz")
        except tarfile.ReadError:
            # Fall back to bzip2 for mislabelled archives.
            tar = tarfile.open(self.file_path, mode="r:bz2")
        # BUG FIX: the tar handle leaked if extraction raised; ``with``
        # closes it on every path.
        with tar:
            members = tar.getmembers()
            tar.extractall(to_directory)
        return [m.name for m in members]
| import tarfile
import zipfile
class Archive(object):
def __init__(self, path: str):
self.file_path = path
def unpack(self, directory):
if self.file_path.endswith(".zip"):
return self.__extract_zip(directory)
elif self.file_path.endswith(".tar.gz"):
return self.__extract_tar_file(directory)
def __extract_zip(self, to_directory):
archive = zipfile.ZipFile(self.file_path)
try:
archive.extractall(to_directory)
except Exception as e:
if e.args[0] not in [26, 13] and e.args[1] not in ['Text file busy', 'Permission denied']:
raise e
return archive.namelist()
def __extract_tar_file(self, to_directory):
try:
tar = tarfile.open(self.file_path, mode="r:gz")
except tarfile.ReadError:
tar = tarfile.open(self.file_path, mode="r:bz2")
members = tar.getmembers()
tar.extractall(to_directory)
tar.close()
return [x.name for x in members]
| none | 1 | 3.307179 | 3 | |
retinanet/predict_script.py | pk00095/retinanet | 0 | 6622907 | <gh_stars>0
"""Summary
"""
from tensorflow import keras
from tensorflow.keras.applications.resnet import preprocess_input as resnet_normalize_image
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
# Default PIL bitmap font used for the box captions in annotate_image().
font = ImageFont.load_default()

# Name -> OpenCV interpolation flag lookup. Not referenced in this module;
# presumably consumed by callers importing it -- verify before removing.
interpolation_options = {
    'nearest': cv2.INTER_NEAREST,
    'linear': cv2.INTER_LINEAR,
    'cubic': cv2.INTER_CUBIC,
    'area': cv2.INTER_AREA,
    'lanczos4': cv2.INTER_LANCZOS4
}

# Per-class box/caption colors; annotate_image() indexes this list with the
# integer class label.
STANDARD_COLORS = [
    'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
    'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
    'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
    'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
    'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
    'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
    'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
    'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
    'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
    'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
    'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
    'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
    'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
    'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
    'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
    'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
    'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
    'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
    'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
    'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
    'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
    'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
    'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def load_model(checkpoint_path):
    """Load a saved prediction model from ``checkpoint_path``.

    Args:
        checkpoint_path (str): path to the prediction-model checkpoint.

    Returns:
        tf.keras.Model: the deserialized prediction model.
    """
    # compile=False: the model is used for inference only, so skip
    # restoring the training configuration.
    model = keras.models.load_model(filepath=checkpoint_path, compile=False)
    print('Weights are Loaded ...')
    return model
def pad_resize(image, height, width, scale):
    """Zero-pad ``image`` to (height, width) and then rescale it isotropically.

    Args:
        image (np.ndarray): H x W x 3 source image (3 channels assumed --
            the padded buffer is allocated with 3 channels).
        height (int): target padded height; assumed >= image height.
        width (int): target padded width; assumed >= image width.
        scale (float): resize factor applied to both axes after padding.

    Returns:
        np.ndarray: resized image as Keras' default float dtype.
    """
    # pad image: place the source in the top-left corner of a zero canvas.
    padded_image = np.zeros(shape=(height, width, 3), dtype=image.dtype)
    h, w, _ = image.shape
    padded_image[:h, :w, :] = image
    # resize image by the common scale so every image in a batch ends up
    # with identical dimensions.
    resized_image = cv2.resize(padded_image, None, fx=scale, fy=scale).astype(keras.backend.floatx())
    return resized_image
def predict(model, image_path, min_side=800, max_side=1333):
    """Run the model on one image and return detections at original scale.

    The image is rescaled so its shortest side is ``min_side``, capped so
    the longest side never exceeds ``max_side``, ResNet-normalized and
    passed through ``model``; boxes are mapped back to original pixels.

    Args:
        model (tf.keras.model.Model): the prediction model object.
        image_path (str): path to image to run prediction on.
        min_side (int, optional): minimum dimension, defaults to 800.
        max_side (int, optional): maximum dimension, defaults to 1333.

    Returns:
        tuple: bboxes (x1, y1, x2, y2) in original-image coordinates,
        confidences, labels, and the loaded image array.
    """
    images = list()
    h_max, w_max = 0, 0
    # Written as a batch loop, but currently only a single image is passed.
    image_list = [image_path]
    for img_path in image_list:
        im = np.array(keras.preprocessing.image.load_img(path=img_path))
        h, w, _ = im.shape
        h_max = max(h_max, h)
        w_max = max(w_max, w)
        images.append(im)
    smallest_side = min(h_max, w_max)
    scale = min_side / smallest_side
    largest_side = max(h_max, w_max)
    # scale = tf.cond(largest_side * tf.cast(scale, tf.int32) > 1333, lambda: 1333 / largest_side, , lambda: scale)
    if largest_side * scale > max_side:
        scale = max_side / largest_side
    # Pad every image to the common size, then rescale and normalize.
    images_batch = list(map(lambda x: pad_resize(x, h_max, w_max, scale), images))
    images_batch = resnet_normalize_image(np.array(images_batch))
    bbox, confidence, label = model.predict(images_batch)
    # Divide by the scale to return boxes in original-image pixels.
    return bbox[0].astype(int)/scale, confidence[0], label[0], im
def annotate_image(image_path, bboxes, scores, labels, threshold=0.5, label_dict=None):
    """Draw detection boxes and captions on the image at ``image_path``.

    Args:
        image_path (str): path to image to annotate (reopened with PIL).
        bboxes: iterable of (left, top, right, bottom) boxes.
        scores: per-box confidence scores.
        labels: per-box integer class ids; also index into STANDARD_COLORS.
        threshold (float, optional): boxes scoring below this are skipped.
        label_dict (dict, optional): id -> display-name mapping; raw ids
            are shown when omitted.

    Returns:
        PIL.Image.Image: the annotated image.
    """
    image = Image.open(image_path)
    Imagedraw = ImageDraw.Draw(image)
    for box, label, score in zip(bboxes, labels, scores):
        if score < threshold:
            continue
        (left, top, right, bottom) = box
        label_to_display = label
        if isinstance(label_dict, dict):
            label_to_display = label_dict[label]
        caption = "{}|{:.3f}".format(label_to_display, score)
        #draw_caption(draw, b, caption)
        colortofill = STANDARD_COLORS[label]
        Imagedraw.rectangle([left, top, right, bottom], fill=None, outline=colortofill)
        display_str_heights = font.getsize(caption)[1]
        # Each display_str has a top and bottom margin of 0.05x.
        total_display_str_height = (1 + 2 * 0.05) * display_str_heights
        # Place the caption above the box when there is room, else below it.
        if top > total_display_str_height:
            text_bottom = top
        else:
            text_bottom = bottom + total_display_str_height
        text_width, text_height = font.getsize(caption)
        margin = np.ceil(0.05 * text_height)
        # Filled background rectangle behind the caption text.
        Imagedraw.rectangle([(left, text_bottom-text_height-2*margin), (left+text_width, text_bottom)], fill=colortofill)
        Imagedraw.text((left+margin, text_bottom-text_height-margin), caption, fill='black', font=font)
    return image
if __name__ == '__main__':
    pred_model = load_model('./checkpoints/prediction')
    image_path = './aerial-vehicles-dataset/images/DJI_0005-0041.jpg'
    bbox, confidence, label, im = predict(pred_model, image_path)
    # BUG FIX: annotate_image takes ``image_path`` (it reopens the file with
    # PIL); it was previously called with a non-existent ``image_array``
    # keyword, which raised TypeError unconditionally.
    annotated_image = annotate_image(
        image_path=image_path,
        bboxes=bbox,
        scores=confidence,
        labels=label,
        threshold=0.5,
        label_dict=None)
    annotated_image.save('annotated.jpg')
| """Summary
"""
from tensorflow import keras
from tensorflow.keras.applications.resnet import preprocess_input as resnet_normalize_image
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
font = ImageFont.load_default()
interpolation_options = {
'nearest':cv2.INTER_NEAREST,
'linear':cv2.INTER_LINEAR,
'cubic':cv2.INTER_CUBIC,
'area':cv2.INTER_AREA,
'lanczos4':cv2.INTER_LANCZOS4
}
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def load_model(checkpoint_path):
    """Restore the prediction model from a saved checkpoint.

    Args:
        checkpoint_path (str): path to the saved prediction model.

    Returns:
        tf.keras.Model: the restored model, loaded without compiling.
    """
    # NOTE(review): relies on a module-level `keras` name imported outside
    # this chunk -- presumably `from tensorflow import keras`; confirm.
    model = keras.models.load_model(filepath=checkpoint_path, compile=False)
    print('Weights are Loaded ...')
    return model
def pad_resize(image, height, width, scale):
    """Zero-pad an image to (height, width, 3), then rescale it.

    The image is pasted into the top-left corner of a zero canvas of the
    requested size, and the padded canvas is resized isotropically.

    Args:
        image (np.ndarray): HxWx3 source image (H <= height, W <= width).
        height (int): canvas height in pixels.
        width (int): canvas width in pixels.
        scale (float): resize factor applied to both axes.

    Returns:
        np.ndarray: resized canvas cast to the Keras float dtype.
    """
    canvas = np.zeros(shape=(height, width, 3), dtype=image.dtype)
    rows, cols = image.shape[:2]
    canvas[:rows, :cols, :] = image
    # fx/fy scale both dimensions by the same factor (dsize=None).
    rescaled = cv2.resize(canvas, None, fx=scale, fy=scale)
    return rescaled.astype(keras.backend.floatx())
def predict(model, image_path, min_side=800, max_side=1333):
    """Run the detector on one image and return its detections.

    The image is scaled so its short side reaches ``min_side``, capped so
    the long side never exceeds ``max_side``, normalized for ResNet and
    fed through the model.

    Args:
        model (tf.keras.Model): the prediction model.
        image_path (str): path to the image to run prediction on.
        min_side (int, optional): target for the short side, default 800.
        max_side (int, optional): cap for the long side, default 1333.

    Returns:
        tuple: (boxes as (x1, y1, x2, y2) in original-image coordinates,
        confidences, labels, the loaded image array).
    """
    images = []
    max_h = max_w = 0
    # Kept as a loop over a one-element list to preserve the original
    # batching structure; `im` ends up as the (single) loaded image.
    for path in [image_path]:
        im = np.array(keras.preprocessing.image.load_img(path=path))
        h, w, _ = im.shape
        max_h = max(max_h, h)
        max_w = max(max_w, w)
        images.append(im)
    scale = min_side / min(max_h, max_w)
    if max(max_h, max_w) * scale > max_side:
        scale = max_side / max(max_h, max_w)
    batch = np.array([pad_resize(img, max_h, max_w, scale) for img in images])
    batch = resnet_normalize_image(batch)
    bbox, confidence, label = model.predict(batch)
    # Dividing by `scale` maps boxes back to the original image size.
    return bbox[0].astype(int) / scale, confidence[0], label[0], im
def annotate_image(image_path, bboxes, scores, labels, threshold=0.5, label_dict=None):
    """Draw detection boxes and score captions onto an image.

    Args:
        image_path (str): path to the image to annotate.
        bboxes: iterable of (left, top, right, bottom) boxes.
        scores: per-box confidence values.
        labels: per-box integer class labels (index into STANDARD_COLORS).
        threshold (float, optional): boxes scoring below this are skipped.
        label_dict (dict, optional): optional label -> display-name mapping
            used for the caption text.

    Returns:
        PIL.Image.Image: the annotated image.
    """
    image = Image.open(image_path)
    Imagedraw = ImageDraw.Draw(image)
    for box, label, score in zip(bboxes, labels, scores):
        # Skip low-confidence detections.
        if score < threshold:
            continue
        (left,top,right,bottom) = box
        label_to_display = label
        if isinstance(label_dict, dict):
            label_to_display = label_dict[label]
        caption = "{}|{:.3f}".format(label_to_display, score)
        #draw_caption(draw, b, caption)
        # One deterministic color per class label.
        colortofill = STANDARD_COLORS[label]
        Imagedraw.rectangle([left,top,right,bottom], fill=None, outline=colortofill)
        # NOTE(review): ImageFont.getsize() was removed in Pillow >= 10;
        # this assumes an older Pillow -- confirm the pinned version.
        display_str_heights = font.getsize(caption)[1]
        # Each display_str has a top and bottom margin of 0.05x.
        total_display_str_height = (1 + 2 * 0.05) * display_str_heights
        # Put the caption above the box when it fits, otherwise below it.
        if top > total_display_str_height:
            text_bottom = top
        else:
            text_bottom = bottom + total_display_str_height
        text_width, text_height = font.getsize(caption)
        margin = np.ceil(0.05 * text_height)
        # Filled background rectangle behind the caption, then the text.
        Imagedraw.rectangle([(left, text_bottom-text_height-2*margin), (left+text_width,text_bottom)], fill=colortofill)
        Imagedraw.text((left+margin, text_bottom-text_height-margin),caption,fill='black',font=font)
    return image
if __name__ == '__main__':
    # Demo entry point: load the model, detect on one image, save overlay.
    pred_model = load_model('./checkpoints/prediction')
    image_path = './aerial-vehicles-dataset/images/DJI_0005-0041.jpg'
    bbox, confidence, label, im = predict(pred_model, image_path)
    # BUG FIX: annotate_image() takes `image_path`, not `image_array`; the
    # previous keyword argument raised a TypeError at runtime.
    annotated_image = annotate_image(
        image_path=image_path,
        bboxes=bbox,
        scores=confidence,
        labels=label,
        threshold=0.5,
        label_dict=None)
    annotated_image.save('annotated.jpg')
yt_ddl.py | Wecros/youtube_dash_dl | 0 | 6622908 | from requests import get, Session # requests
import sys #
from bs4 import BeautifulSoup # beautifulsoup4 / lxml
import urllib.parse #
import os #
from mpegdash.parser import MPEGDASHParser # mpegdash
from datetime import datetime, timezone
from tqdm import tqdm # tqdm
import platform
import shutil
import re
import argparse
import asyncio
import aiohttp
import random
import subprocess
import multiprocessing
async def fetch(session, url, i, folder, pbar, sem):
    """Download one media segment and store it as ``<folder>/<i>.ts``.

    ``sem`` bounds how many downloads run concurrently; ``pbar`` is
    advanced once per finished segment.
    """
    async with sem, session.get(url) as response:
        payload = await response.read()
        target = os.path.join(folder, f"{i}.ts")
        with open(target, "wb") as out_file:
            out_file.write(payload)
        pbar.update()
async def get_segments(total_segments, video_base, audio_base, tempdir):
    """Concurrently download all video and audio segments into ``tempdir``."""
    pbar = tqdm(total=2 * len(total_segments), desc="Downloading segments")
    video_dir = os.path.join(tempdir, "temp-video")
    audio_dir = os.path.join(tempdir, "temp-audio")
    async with aiohttp.ClientSession() as session:
        # At most 12 requests in flight at any moment.
        sem = asyncio.Semaphore(12)
        tasks = [
            asyncio.create_task(fetch(session, f"{base}/{i}", i, folder, pbar, sem))
            for i in total_segments
            for base, folder in ((video_base, video_dir), (audio_base, audio_dir))
        ]
        await asyncio.wait(tasks)
    pbar.close()
def process_segments(params):
    """Mux one downloaded video/audio segment pair into a single .ts file.

    Runs as a multiprocessing worker, so it receives a single picklable
    tuple instead of separate arguments.

    Args:
        params (tuple): ``(ffmpeg_executable, tempdir, i)`` where ``i`` is
            the segment index.
    """
    # Tuple unpacking instead of index access into `params`.
    ffmpeg_executable, tempdir, i = params
    cmd_avseg = [
        ffmpeg_executable,
        "-y",
        "-i",
        os.path.join(tempdir, "temp-video", f"{i}.ts"),
        "-i",
        os.path.join(tempdir, "temp-audio", f"{i}.ts"),
        "-c",
        "copy",
        os.path.join(tempdir, "avseg", f"{i}.ts"),
    ]
    # subprocess.run wraps Popen + communicate; the captured output is
    # discarded -- we only wait for ffmpeg to finish.
    subprocess.run(cmd_avseg, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                   universal_newlines=True)
def get_mpd_data(video_url):
    """Fetch a stream page and return the text of its DASH (MPD) manifest.

    Scrapes the player page for the ``dashManifestUrl`` entry embedded in
    the second player script, un-escapes the URL and downloads it.

    Args:
        video_url (str): URL of the YouTube stream page.

    Returns:
        str: the raw MPD manifest XML.
    """
    with Session() as s:
        raw_page = s.get(video_url)
        soup = BeautifulSoup(raw_page.text, 'lxml')
        # The manifest URL lives two tokens after the "dashManifestUrl" key
        # once the script text is split on escaped quotes.
        script_obj = str(soup(id="player-wrap")[0].find_all("script")[1]).split("\\\"")
        for i in range(len(script_obj)):
            if script_obj[i] == "dashManifestUrl":
                mpd_url = script_obj[i+2]
                break
        # FIX: raw string -- "\/" is an invalid escape sequence and emits a
        # SyntaxWarning on modern Python; the matched text is unchanged.
        # NOTE(review): if the key is never found, `mpd_url` is unbound and
        # this raises NameError -- confirm whether that case can occur.
        mpd_url = urllib.parse.unquote(mpd_url).replace(r"\/", "/")
        mpd_file_content = s.get(mpd_url)
        return mpd_file_content.text
def get_best_representation(mpd_data):
    """Pick the best video and audio representations from an MPD manifest.

    Video quality is ranked by frame height, audio by sampling rate; audio
    representations are recognised by having no ``height``.

    Args:
        mpd_data (str): MPD manifest XML.

    Returns:
        tuple: (best video representation, best audio representation);
        either may be None if the manifest lacks that stream kind.
    """
    best_video = None
    best_video_res = 0
    best_audio = None
    best_audio_sample = 0
    mpd = MPEGDASHParser.parse(mpd_data)
    for period in mpd.periods:
        for adaptationset in period.adaptation_sets:
            for rep in adaptationset.representations:
                # FIX: PEP 8 identity comparison with None, not `== None`.
                if rep.height is None:
                    if int(rep.audio_sampling_rate) >= best_audio_sample:
                        best_audio = rep
                        best_audio_sample = int(rep.audio_sampling_rate)
                elif int(rep.height) >= best_video_res:
                    best_video = rep
                    best_video_res = int(rep.height)
    return best_video, best_audio
def parse_datetime(s):
    """Parse a user-supplied date/time string.

    Accepted formats, tried in order: "12:34" and "12:34:56" (today's
    date), "7.8 12:34" and "7.8 12:34:56" (current year),
    "7.8.2009 12:34", "7.8.2009 12:34:56", and ISO "2009-08-07T12:34:56".

    Args:
        s (str): the string to parse.

    Returns:
        datetime or None: naive datetime, or None if no format matched.
    """
    # (strptime format, fields of today's date to substitute into the result)
    formats = (
        ('%H:%M', ('year', 'month', 'day')),
        ('%H:%M:%S', ('year', 'month', 'day')),
        ('%d.%m %H:%M', ('year',)),
        ('%d.%m.%Y %H:%M', ()),
        ('%d.%m %H:%M:%S', ('year',)),
        ('%d.%m.%Y %H:%M:%S', ()),
        ('%Y-%m-%dT%H:%M:%S', ()),
    )
    for fmt, missing in formats:
        try:
            dt = datetime.strptime(s, fmt)
        except ValueError:
            continue
        if missing:
            # Fill the fields the format could not supply from today's date.
            today = datetime.today()
            dt = dt.replace(**{field: getattr(today, field) for field in missing})
        return dt
    return None
def parse_duration(s):
    """Parse a duration like "12h34m56s" into seconds.

    Each of the hour/minute/second components is optional, but at least
    one must be present.

    Args:
        s (str): the duration string.

    Returns:
        int or None: total seconds, or None for an invalid (or empty)
        duration string.
    """
    match = re.match(r'^((?P<h>\d+)h)?((?P<m>\d+)m)?((?P<s>\d+)s)?$', s)
    if not match:
        return None
    parts = match.groupdict()
    # FIX: access groups by name instead of relying on the ordering of
    # .values(), and stop shadowing the match object / input string.
    hours, minutes, seconds = parts['h'], parts['m'], parts['s']
    if hours is None and minutes is None and seconds is None:
        return None
    total = 0
    if hours:
        total += int(hours) * 3600
    if minutes:
        total += int(minutes) * 60
    if seconds:
        total += int(seconds)
    return total
def main(ffmpeg_executable):
    """Download a time window of a YouTube DVR stream and mux it to a file.

    Parses CLI arguments, resolves the DASH manifest, downloads the
    required ~2-second video/audio segments concurrently, muxes each pair
    with ffmpeg, then concatenates everything into the output file.

    Args:
        ffmpeg_executable (str): path or name of the ffmpeg binary to run.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output', metavar='OUTPUT_FILE', action='store', help='The output filename')
    parser.add_argument('-s', '--start', metavar='START_TIME', action='store', help='The start time (possible formats = "12:34", "12:34:56", "7.8.2009 12:34:56", "2009-08-07T12:34:56")')
    parser.add_argument('-e', '--end', metavar='END_TIME', action='store', help='The end time (same format as start time)')
    parser.add_argument('-d', '--duration', action='store', help='The duration (possible formats = "12h34m56s", "12m34s", "123s", "123m", "123h", ...)')
    parser.add_argument('-u', '--utc', action='store_true', help='Use UTC instead of local time for start and end time', default=False)
    parser.add_argument('-y', '--overwrite', action='store_true', help='Overwrite file without asking', default=False)
    parser.add_argument('url', metavar='URL', action='store', help='The URL of the YouTube stream')
    args = parser.parse_args()
    url = args.url
    output_path = args.output
    start_time = None
    duration_secs = None
    def arg_fail(message):
        # Report an argument error plus usage and abort the program.
        print(message, file=sys.stderr)
        parser.print_help()
        sys.exit(1)
    if args.start:
        start_time = parse_datetime(args.start)
        if not start_time:
            arg_fail('Invalid start time format')
        # Interpret the naive parse as UTC or local time, then normalize
        # to an aware local-time datetime.
        start_time = start_time.replace(tzinfo=timezone.utc if args.utc else None).astimezone()
    # End time and duration are mutually exclusive ways to bound the window.
    if args.duration and args.end:
        arg_fail('Specify end time or duration, not both')
    if args.duration:
        duration_secs = parse_duration(args.duration)
        if not duration_secs:
            arg_fail('Invalid duration format')
        if duration_secs == 0:
            arg_fail('Duration cannot be 0')
    if args.end:
        end_time = parse_datetime(args.end)
        if not end_time:
            arg_fail('Invalid end time format!')
        end_time = end_time.replace(tzinfo=timezone.utc if args.utc else None).astimezone()
        duration_secs = (end_time - start_time).total_seconds()
        if duration_secs == 0:
            arg_fail('Duration cannot be 0')
    data = get_mpd_data(url)
    video, audio = get_best_representation(data)
    # Segment URLs end in .../sq/<n>/...; dropping the last three path
    # components yields the per-stream base URL to append indices to.
    video_base = video.base_urls[0].base_url_value + "/".join(video.segment_lists[0].segment_urls[0].media.split('/')[:-3])
    audio_base = audio.base_urls[0].base_url_value + "/".join(audio.segment_lists[0].segment_urls[0].media.split('/')[:-3])
    max_seg = int(video.segment_lists[0].segment_urls[-1].media.split('/')[-3])
    # Without an output file, just report the reachable DVR window
    # (each segment covers ~2 seconds).
    if not output_path:
        print(f"You can go back {int(max_seg*2/60/60)} hours and {int(max_seg*2/60%60)} minutes back...")
        exit(0)
    if os.path.exists(output_path):
        if args.overwrite:
            os.remove(output_path)
        else:
            # Interactive overwrite prompt; default answer is "no".
            while True:
                print(f'File "{output_path}" already exists! Overwrite? [y/N] ', end='')
                yn = input().lower()
                if yn == '' or yn == 'n':
                    sys.exit(0)
                else:
                    os.remove(output_path)
                    break
    if start_time:
        # yt:mpdRequestTime anchors "now" to segment index max_seg; convert
        # the wall-clock window into segment indices (1 segment ~ 2 s).
        req_time = datetime.strptime(data.split("yt:mpdRequestTime=\"")[-1].split("\"")[0], "%Y-%m-%dT%H:%M:%S.%f").replace(tzinfo=timezone.utc).astimezone()
        segments_back = round((req_time - start_time).total_seconds() / 2)
        segments_back = segments_back if segments_back < max_seg else max_seg
        dur_segments = round(duration_secs / 2)
        start_segment = max_seg - segments_back
        end_segment = start_segment + dur_segments
        total_segments = range(start_segment, end_segment)
    else:
        total_segments = range(max_seg)
    # make a temporary directory in the output file's directory
    tempdir_parent = os.path.dirname(os.path.abspath(os.path.realpath(output_path)))
    tempdir = ".temp-" + str(random.randint(1000,9999))
    while os.path.exists(tempdir):
        tempdir = ".temp-" + str(random.randint(1000,9999))
    tempdir = os.path.join(tempdir_parent, tempdir)
    os.mkdir(tempdir)
    os.mkdir(os.path.join(tempdir, "temp-video"))
    os.mkdir(os.path.join(tempdir, "temp-audio"))
    # get video and audio segments asynchronously
    asyncio.get_event_loop().run_until_complete(get_segments(total_segments, video_base, audio_base, tempdir))
    # merge video and audio segments each into its file
    os.mkdir(os.path.join(tempdir, "avseg"))
    with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
        params = ((ffmpeg_executable, tempdir, i) for i in total_segments)
        # list() drains the iterator so tqdm can show merge progress.
        list(tqdm(pool.imap_unordered(process_segments, params), total=len(total_segments), desc="Merging segments"))
        pool.close()
        pool.join()
    # Write the concat-demuxer playlist of muxed segments, in order.
    with open(os.path.join(tempdir, "avseg.txt"), "w+") as avseg:
        for i in total_segments:
            avseg.write(f"file '{os.path.join(tempdir, 'avseg', f'{i}.ts')}'\n")
    shutil.rmtree(os.path.join(tempdir, "temp-video"), ignore_errors=True)
    shutil.rmtree(os.path.join(tempdir, "temp-audio"), ignore_errors=True)
    # Total size in KiB, used as the progress-bar target below (ffmpeg
    # reports its running "size=" counter in KiB).
    size_avseg_total = sum(os.path.getsize(os.path.join(tempdir, "avseg", f)) for f in os.listdir(os.path.join(tempdir, "avseg")) if os.path.isfile(os.path.join(tempdir, "avseg", f)))//1024
    cmd_final = [
        ffmpeg_executable,
        "-y",
        "-f",
        "concat",
        "-safe",
        "0",
        "-i",
        f"{os.path.join(tempdir, 'avseg.txt')}",
        "-c",
        "copy",
        output_path
    ]
    process_final = subprocess.Popen(cmd_final, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    pbar_final = tqdm(total=size_avseg_total, desc="Merging final")
    # Scrape ffmpeg's periodic "size=" status lines to drive the bar.
    while True:
        line_avseg = process_final.stdout.readline()
        if not line_avseg: break
        match_avseg = re.match(r".*size= *(\d+)", line_avseg)
        if match_avseg is not None:
            size_avseg = int(match_avseg[1])
            pbar_final.update(size_avseg - pbar_final.n)
    pbar_final.update(size_avseg_total - pbar_final.n)
    pbar_final.close()
    shutil.rmtree(tempdir, ignore_errors=True)
# Entry point: locate an ffmpeg binary for the current platform before
# handing control to main().
if __name__ == "__main__":
    plt = platform.system()
    if plt == "Windows":
        # Prefer the bundled binary in ./bin, fall back to one on PATH.
        if not (os.path.exists("./bin/ffmpeg.exe") or shutil.which("ffmpeg")):
            print("Run 'python download.py' first!")
            exit(1)
        elif os.path.exists("./bin/ffmpeg.exe"):
            main(".\\bin\\ffmpeg.exe")
        else:
            main("ffmpeg")
    elif plt == "Linux" or plt == "Darwin":
        # On Linux/macOS ffmpeg must already be installed on PATH.
        if not shutil.which("ffmpeg"):
            print("Install ffmpeg to path!")
            exit(1)
        else:
            main("ffmpeg")
| from requests import get, Session # requests
import sys #
from bs4 import BeautifulSoup # beautifulsoup4 / lxml
import urllib.parse #
import os #
from mpegdash.parser import MPEGDASHParser # mpegdash
from datetime import datetime, timezone
from tqdm import tqdm # tqdm
import platform
import shutil
import re
import argparse
import asyncio
import aiohttp
import random
import subprocess
import multiprocessing
async def fetch(session, url, i, folder, pbar, sem):
    """Download one media segment and store it as ``<folder>/<i>.ts``.

    ``sem`` bounds how many downloads run concurrently; ``pbar`` is
    advanced once per finished segment.
    """
    async with sem, session.get(url) as response:
        payload = await response.read()
        target = os.path.join(folder, f"{i}.ts")
        with open(target, "wb") as out_file:
            out_file.write(payload)
        pbar.update()
async def get_segments(total_segments, video_base, audio_base, tempdir):
    """Concurrently download all video and audio segments into ``tempdir``."""
    pbar = tqdm(total=2 * len(total_segments), desc="Downloading segments")
    video_dir = os.path.join(tempdir, "temp-video")
    audio_dir = os.path.join(tempdir, "temp-audio")
    async with aiohttp.ClientSession() as session:
        # At most 12 requests in flight at any moment.
        sem = asyncio.Semaphore(12)
        tasks = [
            asyncio.create_task(fetch(session, f"{base}/{i}", i, folder, pbar, sem))
            for i in total_segments
            for base, folder in ((video_base, video_dir), (audio_base, audio_dir))
        ]
        await asyncio.wait(tasks)
    pbar.close()
def process_segments(params):
    """Mux one downloaded video/audio segment pair into a single .ts file.

    Runs as a multiprocessing worker, so it receives a single picklable
    tuple instead of separate arguments.

    Args:
        params (tuple): ``(ffmpeg_executable, tempdir, i)`` where ``i`` is
            the segment index.
    """
    # Tuple unpacking instead of index access into `params`.
    ffmpeg_executable, tempdir, i = params
    cmd_avseg = [
        ffmpeg_executable,
        "-y",
        "-i",
        os.path.join(tempdir, "temp-video", f"{i}.ts"),
        "-i",
        os.path.join(tempdir, "temp-audio", f"{i}.ts"),
        "-c",
        "copy",
        os.path.join(tempdir, "avseg", f"{i}.ts"),
    ]
    # subprocess.run wraps Popen + communicate; the captured output is
    # discarded -- we only wait for ffmpeg to finish.
    subprocess.run(cmd_avseg, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                   universal_newlines=True)
def get_mpd_data(video_url):
    """Fetch a stream page and return the text of its DASH (MPD) manifest.

    Scrapes the player page for the ``dashManifestUrl`` entry embedded in
    the second player script, un-escapes the URL and downloads it.

    Args:
        video_url (str): URL of the YouTube stream page.

    Returns:
        str: the raw MPD manifest XML.
    """
    with Session() as s:
        raw_page = s.get(video_url)
        soup = BeautifulSoup(raw_page.text, 'lxml')
        # The manifest URL lives two tokens after the "dashManifestUrl" key
        # once the script text is split on escaped quotes.
        script_obj = str(soup(id="player-wrap")[0].find_all("script")[1]).split("\\\"")
        for i in range(len(script_obj)):
            if script_obj[i] == "dashManifestUrl":
                mpd_url = script_obj[i+2]
                break
        # FIX: raw string -- "\/" is an invalid escape sequence and emits a
        # SyntaxWarning on modern Python; the matched text is unchanged.
        # NOTE(review): if the key is never found, `mpd_url` is unbound and
        # this raises NameError -- confirm whether that case can occur.
        mpd_url = urllib.parse.unquote(mpd_url).replace(r"\/", "/")
        mpd_file_content = s.get(mpd_url)
        return mpd_file_content.text
def get_best_representation(mpd_data):
    """Pick the best video and audio representations from an MPD manifest.

    Video quality is ranked by frame height, audio by sampling rate; audio
    representations are recognised by having no ``height``.

    Args:
        mpd_data (str): MPD manifest XML.

    Returns:
        tuple: (best video representation, best audio representation);
        either may be None if the manifest lacks that stream kind.
    """
    best_video = None
    best_video_res = 0
    best_audio = None
    best_audio_sample = 0
    mpd = MPEGDASHParser.parse(mpd_data)
    for period in mpd.periods:
        for adaptationset in period.adaptation_sets:
            for rep in adaptationset.representations:
                # FIX: PEP 8 identity comparison with None, not `== None`.
                if rep.height is None:
                    if int(rep.audio_sampling_rate) >= best_audio_sample:
                        best_audio = rep
                        best_audio_sample = int(rep.audio_sampling_rate)
                elif int(rep.height) >= best_video_res:
                    best_video = rep
                    best_video_res = int(rep.height)
    return best_video, best_audio
def parse_datetime(s):
    """Parse a user-supplied date/time string.

    Accepted formats, tried in order: "12:34" and "12:34:56" (today's
    date), "7.8 12:34" and "7.8 12:34:56" (current year),
    "7.8.2009 12:34", "7.8.2009 12:34:56", and ISO "2009-08-07T12:34:56".

    Args:
        s (str): the string to parse.

    Returns:
        datetime or None: naive datetime, or None if no format matched.
    """
    # (strptime format, fields of today's date to substitute into the result)
    formats = (
        ('%H:%M', ('year', 'month', 'day')),
        ('%H:%M:%S', ('year', 'month', 'day')),
        ('%d.%m %H:%M', ('year',)),
        ('%d.%m.%Y %H:%M', ()),
        ('%d.%m %H:%M:%S', ('year',)),
        ('%d.%m.%Y %H:%M:%S', ()),
        ('%Y-%m-%dT%H:%M:%S', ()),
    )
    for fmt, missing in formats:
        try:
            dt = datetime.strptime(s, fmt)
        except ValueError:
            continue
        if missing:
            # Fill the fields the format could not supply from today's date.
            today = datetime.today()
            dt = dt.replace(**{field: getattr(today, field) for field in missing})
        return dt
    return None
def parse_duration(s):
    """Parse a duration like "12h34m56s" into seconds.

    Each of the hour/minute/second components is optional, but at least
    one must be present.

    Args:
        s (str): the duration string.

    Returns:
        int or None: total seconds, or None for an invalid (or empty)
        duration string.
    """
    match = re.match(r'^((?P<h>\d+)h)?((?P<m>\d+)m)?((?P<s>\d+)s)?$', s)
    if not match:
        return None
    parts = match.groupdict()
    # FIX: access groups by name instead of relying on the ordering of
    # .values(), and stop shadowing the match object / input string.
    hours, minutes, seconds = parts['h'], parts['m'], parts['s']
    if hours is None and minutes is None and seconds is None:
        return None
    total = 0
    if hours:
        total += int(hours) * 3600
    if minutes:
        total += int(minutes) * 60
    if seconds:
        total += int(seconds)
    return total
def main(ffmpeg_executable):
    """Download a time window of a YouTube DVR stream and mux it to a file.

    Parses CLI arguments, resolves the DASH manifest, downloads the
    required ~2-second video/audio segments concurrently, muxes each pair
    with ffmpeg, then concatenates everything into the output file.

    Args:
        ffmpeg_executable (str): path or name of the ffmpeg binary to run.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output', metavar='OUTPUT_FILE', action='store', help='The output filename')
    parser.add_argument('-s', '--start', metavar='START_TIME', action='store', help='The start time (possible formats = "12:34", "12:34:56", "7.8.2009 12:34:56", "2009-08-07T12:34:56")')
    parser.add_argument('-e', '--end', metavar='END_TIME', action='store', help='The end time (same format as start time)')
    parser.add_argument('-d', '--duration', action='store', help='The duration (possible formats = "12h34m56s", "12m34s", "123s", "123m", "123h", ...)')
    parser.add_argument('-u', '--utc', action='store_true', help='Use UTC instead of local time for start and end time', default=False)
    parser.add_argument('-y', '--overwrite', action='store_true', help='Overwrite file without asking', default=False)
    parser.add_argument('url', metavar='URL', action='store', help='The URL of the YouTube stream')
    args = parser.parse_args()
    url = args.url
    output_path = args.output
    start_time = None
    duration_secs = None
    def arg_fail(message):
        # Report an argument error plus usage and abort the program.
        print(message, file=sys.stderr)
        parser.print_help()
        sys.exit(1)
    if args.start:
        start_time = parse_datetime(args.start)
        if not start_time:
            arg_fail('Invalid start time format')
        # Interpret the naive parse as UTC or local time, then normalize
        # to an aware local-time datetime.
        start_time = start_time.replace(tzinfo=timezone.utc if args.utc else None).astimezone()
    # End time and duration are mutually exclusive ways to bound the window.
    if args.duration and args.end:
        arg_fail('Specify end time or duration, not both')
    if args.duration:
        duration_secs = parse_duration(args.duration)
        if not duration_secs:
            arg_fail('Invalid duration format')
        if duration_secs == 0:
            arg_fail('Duration cannot be 0')
    if args.end:
        end_time = parse_datetime(args.end)
        if not end_time:
            arg_fail('Invalid end time format!')
        end_time = end_time.replace(tzinfo=timezone.utc if args.utc else None).astimezone()
        duration_secs = (end_time - start_time).total_seconds()
        if duration_secs == 0:
            arg_fail('Duration cannot be 0')
    data = get_mpd_data(url)
    video, audio = get_best_representation(data)
    # Segment URLs end in .../sq/<n>/...; dropping the last three path
    # components yields the per-stream base URL to append indices to.
    video_base = video.base_urls[0].base_url_value + "/".join(video.segment_lists[0].segment_urls[0].media.split('/')[:-3])
    audio_base = audio.base_urls[0].base_url_value + "/".join(audio.segment_lists[0].segment_urls[0].media.split('/')[:-3])
    max_seg = int(video.segment_lists[0].segment_urls[-1].media.split('/')[-3])
    # Without an output file, just report the reachable DVR window
    # (each segment covers ~2 seconds).
    if not output_path:
        print(f"You can go back {int(max_seg*2/60/60)} hours and {int(max_seg*2/60%60)} minutes back...")
        exit(0)
    if os.path.exists(output_path):
        if args.overwrite:
            os.remove(output_path)
        else:
            # Interactive overwrite prompt; default answer is "no".
            while True:
                print(f'File "{output_path}" already exists! Overwrite? [y/N] ', end='')
                yn = input().lower()
                if yn == '' or yn == 'n':
                    sys.exit(0)
                else:
                    os.remove(output_path)
                    break
    if start_time:
        # yt:mpdRequestTime anchors "now" to segment index max_seg; convert
        # the wall-clock window into segment indices (1 segment ~ 2 s).
        req_time = datetime.strptime(data.split("yt:mpdRequestTime=\"")[-1].split("\"")[0], "%Y-%m-%dT%H:%M:%S.%f").replace(tzinfo=timezone.utc).astimezone()
        segments_back = round((req_time - start_time).total_seconds() / 2)
        segments_back = segments_back if segments_back < max_seg else max_seg
        dur_segments = round(duration_secs / 2)
        start_segment = max_seg - segments_back
        end_segment = start_segment + dur_segments
        total_segments = range(start_segment, end_segment)
    else:
        total_segments = range(max_seg)
    # make a temporary directory in the output file's directory
    tempdir_parent = os.path.dirname(os.path.abspath(os.path.realpath(output_path)))
    tempdir = ".temp-" + str(random.randint(1000,9999))
    while os.path.exists(tempdir):
        tempdir = ".temp-" + str(random.randint(1000,9999))
    tempdir = os.path.join(tempdir_parent, tempdir)
    os.mkdir(tempdir)
    os.mkdir(os.path.join(tempdir, "temp-video"))
    os.mkdir(os.path.join(tempdir, "temp-audio"))
    # get video and audio segments asynchronously
    asyncio.get_event_loop().run_until_complete(get_segments(total_segments, video_base, audio_base, tempdir))
    # merge video and audio segments each into its file
    os.mkdir(os.path.join(tempdir, "avseg"))
    with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
        params = ((ffmpeg_executable, tempdir, i) for i in total_segments)
        # list() drains the iterator so tqdm can show merge progress.
        list(tqdm(pool.imap_unordered(process_segments, params), total=len(total_segments), desc="Merging segments"))
        pool.close()
        pool.join()
    # Write the concat-demuxer playlist of muxed segments, in order.
    with open(os.path.join(tempdir, "avseg.txt"), "w+") as avseg:
        for i in total_segments:
            avseg.write(f"file '{os.path.join(tempdir, 'avseg', f'{i}.ts')}'\n")
    shutil.rmtree(os.path.join(tempdir, "temp-video"), ignore_errors=True)
    shutil.rmtree(os.path.join(tempdir, "temp-audio"), ignore_errors=True)
    # Total size in KiB, used as the progress-bar target below (ffmpeg
    # reports its running "size=" counter in KiB).
    size_avseg_total = sum(os.path.getsize(os.path.join(tempdir, "avseg", f)) for f in os.listdir(os.path.join(tempdir, "avseg")) if os.path.isfile(os.path.join(tempdir, "avseg", f)))//1024
    cmd_final = [
        ffmpeg_executable,
        "-y",
        "-f",
        "concat",
        "-safe",
        "0",
        "-i",
        f"{os.path.join(tempdir, 'avseg.txt')}",
        "-c",
        "copy",
        output_path
    ]
    process_final = subprocess.Popen(cmd_final, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    pbar_final = tqdm(total=size_avseg_total, desc="Merging final")
    # Scrape ffmpeg's periodic "size=" status lines to drive the bar.
    while True:
        line_avseg = process_final.stdout.readline()
        if not line_avseg: break
        match_avseg = re.match(r".*size= *(\d+)", line_avseg)
        if match_avseg is not None:
            size_avseg = int(match_avseg[1])
            pbar_final.update(size_avseg - pbar_final.n)
    pbar_final.update(size_avseg_total - pbar_final.n)
    pbar_final.close()
    shutil.rmtree(tempdir, ignore_errors=True)
# Entry point: locate an ffmpeg binary for the current platform before
# handing control to main().
if __name__ == "__main__":
    plt = platform.system()
    if plt == "Windows":
        # Prefer the bundled binary in ./bin, fall back to one on PATH.
        if not (os.path.exists("./bin/ffmpeg.exe") or shutil.which("ffmpeg")):
            print("Run 'python download.py' first!")
            exit(1)
        elif os.path.exists("./bin/ffmpeg.exe"):
            main(".\\bin\\ffmpeg.exe")
        else:
            main("ffmpeg")
    elif plt == "Linux" or plt == "Darwin":
        # On Linux/macOS ffmpeg must already be installed on PATH.
        if not shutil.which("ffmpeg"):
            print("Install ffmpeg to path!")
            exit(1)
        else:
            main("ffmpeg")
| en | 0.850004 | # requests # # beautifulsoup4 / lxml # # # mpegdash # tqdm # make a temporary directory in the output file's directory # get video and audio segments asynchronously # merge video and audio segments each into its file | 2.504028 | 3 |
tree/tests/test_tree_ds.py | kylepclarkson/Tries | 0 | 6622909 | import unittest
from tree.Tree import Tree
class TestTree(unittest.TestCase):
    """Unit tests for the general (n-ary) Tree data structure.

    Covers construction, the private _add / public add and add_between
    mutators, height and len bookkeeping, breadth-first traversal (bfs)
    and level-order traversal.
    """
    def test_create_tree_0(self):
        # A root may carry None as its element.
        t = Tree(None)
        self.assertEqual(t.root().element(), None)
    def test_create_tree_1(self):
        # A tree with only a root.
        t = Tree()
        self.assertEqual(len(t), 1)
    def test_create_tree_2(self):
        # A tree with root, three children.
        t = Tree('r')
        t._add(t.root(), 'a')
        t._add(t.root(), 'b')
        t._add(t.root(), 'c')
        self.assertEqual(t.height(), 1)
        self.assertEqual(len(t), 4)
    def test_create_tree_3(self):
        # Adding a grandchild raises the height to 2.
        t = Tree()
        a = t._add(t.root(), 'a')
        t._add(t.root(), 'b')
        t._add(t.root(), 'c')
        t._add(a, 'aa')
        self.assertEqual(t.height(), 2)
        # Single child node of node a contains value 'aa' as element.
        self.assertEqual(t.children(a)[0].element(), 'aa')
    def test_bfs_1(self):
        # BFS yields elements level by level, left to right.
        t = Tree('a')
        b = t._add(t.root(), 'b')
        t._add(b, 'd')
        c = t._add(t.root(), 'c')
        t._add(c, 'e')
        f = t._add(c, 'f')
        t._add(f, 'g')
        self.assertEqual([x.element() for x in t.bfs()],
                         ['a','b','c','d','e','f','g'])
    def test_add_between(self):
        # add_between splices 'e' between the root and subtree 'a'.
        t = Tree('r')
        r = t.root()
        a = t.add(r, 'a')
        b = t.add(a, 'b')
        c = t.add(a, 'c')
        d = t.add(a, 'd')
        e = t.add_between(r, a, 'e')
        f = t.add(e, 'f')
        self.assertEqual([x.element() for x in t.bfs()],
                         ['r', 'e', 'a', 'f', 'b', 'c', 'd'])
        self.assertEqual(len(t), 7)
        self.assertEqual(t.height(), 3)
    def test_level_order(self):
        # level_traversal groups positions by depth.
        t = Tree('r')
        r = t.root()
        a = t.add(r, 'a')
        b = t.add(a, 'b')
        c = t.add(a, 'c')
        d = t.add(a, 'd')
        e = t.add_between(r, a, 'e')
        f = t.add(e, 'f')
        self.assertEqual([[x.element() for x in level] for level in t.level_traversal()],
                         [['r'], ['e'], ['a','f'], ['b', 'c', 'd']])
# Allow running this test module directly: python test_tree_ds.py
if __name__ == '__main__':
    unittest.main()
| import unittest
from tree.Tree import Tree
class TestTree(unittest.TestCase):
    """Unit tests for the general (n-ary) Tree data structure.

    Covers construction, the private _add / public add and add_between
    mutators, height and len bookkeeping, breadth-first traversal (bfs)
    and level-order traversal.
    """
    def test_create_tree_0(self):
        # A root may carry None as its element.
        t = Tree(None)
        self.assertEqual(t.root().element(), None)
    def test_create_tree_1(self):
        # A tree with only a root.
        t = Tree()
        self.assertEqual(len(t), 1)
    def test_create_tree_2(self):
        # A tree with root, three children.
        t = Tree('r')
        t._add(t.root(), 'a')
        t._add(t.root(), 'b')
        t._add(t.root(), 'c')
        self.assertEqual(t.height(), 1)
        self.assertEqual(len(t), 4)
    def test_create_tree_3(self):
        # Adding a grandchild raises the height to 2.
        t = Tree()
        a = t._add(t.root(), 'a')
        t._add(t.root(), 'b')
        t._add(t.root(), 'c')
        t._add(a, 'aa')
        self.assertEqual(t.height(), 2)
        # Single child node of node a contains value 'aa' as element.
        self.assertEqual(t.children(a)[0].element(), 'aa')
    def test_bfs_1(self):
        # BFS yields elements level by level, left to right.
        t = Tree('a')
        b = t._add(t.root(), 'b')
        t._add(b, 'd')
        c = t._add(t.root(), 'c')
        t._add(c, 'e')
        f = t._add(c, 'f')
        t._add(f, 'g')
        self.assertEqual([x.element() for x in t.bfs()],
                         ['a','b','c','d','e','f','g'])
    def test_add_between(self):
        # add_between splices 'e' between the root and subtree 'a'.
        t = Tree('r')
        r = t.root()
        a = t.add(r, 'a')
        b = t.add(a, 'b')
        c = t.add(a, 'c')
        d = t.add(a, 'd')
        e = t.add_between(r, a, 'e')
        f = t.add(e, 'f')
        self.assertEqual([x.element() for x in t.bfs()],
                         ['r', 'e', 'a', 'f', 'b', 'c', 'd'])
        self.assertEqual(len(t), 7)
        self.assertEqual(t.height(), 3)
    def test_level_order(self):
        # level_traversal groups positions by depth.
        t = Tree('r')
        r = t.root()
        a = t.add(r, 'a')
        b = t.add(a, 'b')
        c = t.add(a, 'c')
        d = t.add(a, 'd')
        e = t.add_between(r, a, 'e')
        f = t.add(e, 'f')
        self.assertEqual([[x.element() for x in level] for level in t.level_traversal()],
                         [['r'], ['e'], ['a','f'], ['b', 'c', 'd']])
# Allow running this test module directly: python test_tree_ds.py
if __name__ == '__main__':
    unittest.main()
| en | 0.913327 | # A tree with only a root. # A tree with root, three children. # Single child node of node a contains value 'aa' as element. | 3.663496 | 4 |
tf-benchmark/tf-keras-config/mlp.py | csruiliu/NSD-Project | 0 | 6622910 | <reponame>csruiliu/NSD-Project<gh_stars>0
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
def model_fn(input_shape, num_classes=10):
    """Build a two-hidden-layer MLP classifier with the Keras functional API.

    Args:
        input_shape: shape of one input sample (batch dimension excluded).
        num_classes (int, optional): number of output classes. Defaults to 10.

    Returns:
        tf.keras.Model: uncompiled model producing softmax class scores.
    """
    x = inputs = Input(shape=input_shape)
    x = Dense(512, activation='relu')(x)
    x = Dense(128, activation='relu')(x)
    outputs = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    return model
| import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
def model_fn(input_shape, num_classes=10):
    """Build a two-hidden-layer MLP classifier with the Keras functional API.

    Args:
        input_shape: shape of one input sample (batch dimension excluded).
        num_classes (int, optional): number of output classes. Defaults to 10.

    Returns:
        tf.keras.Model: uncompiled model producing softmax class scores.
    """
    x = inputs = Input(shape=input_shape)
    x = Dense(512, activation='relu')(x)
    x = Dense(128, activation='relu')(x)
    outputs = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    # FIX: stray dataset metadata was fused onto this return line, making
    # the function a syntax error; restore the plain return.
    return model
databases/working_ap/annotate_and_resquiggle_reads.py | mrubio-chavarria/project_2 | 0 | 6622911 | <filename>databases/working_ap/annotate_and_resquiggle_reads.py
#!/home/mario/anaconda3/envs/project2_venv/bin python
"""
DESCRIPTION:
This script tries to gather all the steps needed to
perform once the basecalls have been obtained.
"""
# Libraries
import os
import sys
from tombo import tombo_helper, tombo_stats, resquiggle
import h5py
import mappy
from tqdm import tqdm
import multiprocessing as mp
from multiprocessing import Manager
from shutil import move, rmtree
# Functions
def filter_reads(read_folders, filtered_reads, q_score_threshold):
    """
    DESCRIPTION:
    A function to filter the reads based on their q score.
    :param read_folders: [str] the route to folder with the reads.
    :param filtered_reads: [manager.list()] list in which the names
    of the filtered reads should be appended.
    :param q_score_threshold: [float] the value to filter.
    """
    # PERF FIX: the aligner (a minimap2 index over the reference) is
    # loop-invariant; build it once instead of once per read file.
    # NOTE(review): `reference_file` is a module-level global defined
    # outside this chunk -- confirm it is set before this runs.
    aligner = mappy.Aligner(reference_file, preset=str('map-ont'), best_n=1)
    for reads_folder in read_folders:
        read_files = os.listdir(reads_folder)
        for file in tqdm(read_files):
            read_file = reads_folder + '/' + file
            fast5_data = h5py.File(read_file, 'r')
            try:
                # Per-read resquiggle parameters (depend on this FAST5).
                seq_samp_type = tombo_helper.get_seq_sample_type(fast5_data)
                std_ref = tombo_stats.TomboModel(seq_samp_type=seq_samp_type)
                # Extract data from FAST5
                try:
                    map_results = resquiggle.map_read(fast5_data, aligner, std_ref)
                except tombo_helper.TomboError:
                    # Avoid reads lacking alignment (very poor quality)
                    continue
            finally:
                # RESOURCE FIX: the original never closed the HDF5 handle,
                # leaking one open file descriptor per read.
                fast5_data.close()
            # Filter reads based on q score for quality
            if map_results.mean_q_score >= q_score_threshold:
                filtered_reads.append(read_file)
def annotate_basecalls(*args):
    """
    DESCRIPTION:
    Annotate every (fast5 basedir, fastq file) pair with its basecalls
    by shelling out to `tombo preprocess annotate_raw_with_fastqs`.
    :param args: [tuple] pairs of (fast5 basedir, fastq filename).
    """
    for pair in args:
        # NOTE(review): the command is built by string interpolation and
        # executed through the shell; paths containing spaces or shell
        # metacharacters would break or be unsafe -- consider
        # subprocess.run with an argument list.
        command = f'tombo preprocess annotate_raw_with_fastqs --fast5-basedir {pair[0]} --fastq-filenames {pair[1]} --overwrite'
        os.system(command)
        #subprocess.check_call(command, shell=True)
def read_code(filename):
    """Build a read identifier from an underscore-delimited filename.

    Joins the first eight characters of the third '_'-separated field
    with the full fourth field.
    """
    fields = filename.split('_')
    prefix = fields[2][:8]
    return f"{prefix}_{fields[3]}"
if __name__ == "__main__":
    # CLI usage: <script> <database_dir> <n_processes> <flowcell>
    database_dir = sys.argv[1]
    n_processes = int(sys.argv[2])
    flowcell = sys.argv[3]
    # workdir = f'/home/mario/Projects/project_2/databases/working_3xr6'
    # selected_folder = '0'
    # n_processes = 4
    # flowcell = 'flowcell3'
    # Format to multiple to single read files
    # print('***************************************************************************************')
    # print('Format reads from multi to single files')
    # print('***************************************************************************************')
    # Expected layout: <database_dir>/reads/<flowcell> and
    # <database_dir>/basecalls/<flowcell>/multi.fastq
    reads_folder = database_dir + '/' + 'reads' + '/' + flowcell
    basecalls_folder = database_dir + '/' + 'basecalls' + '/' + flowcell
    fastq_file = basecalls_folder + '/' + 'multi.fastq'
    #command = f'multi_to_single_fast5 --input_path {reads_folder}/multi --save_path {reads_folder}/single'
    #os.system(command)
    # # Flatten directory structure
    # single_reads_folders = [reads_folder + '/' + 'single' + '/' + folder
    #                         for folder in os.listdir(reads_folder + '/' + 'single') if not folder.endswith('txt')]
    # single_reads_folder = reads_folder + '/' + 'single'
    single_reads_folder = reads_folder + '/' + 'single'
    # Annotate the reads with the basecalls
    print('***************************************************************************************')
    print('Annotate the reads')
    print('***************************************************************************************')
    # Read all the possible fastqs
    command = f'tombo preprocess annotate_raw_with_fastqs --fast5-basedir {single_reads_folder} --fastq-filenames {fastq_file} --overwrite'
    # NOTE(review): `code` captures the shell exit status but is never checked.
    code = os.system(command)
    print('Annotation completed')
    # single_folders = [folder for folder in os.listdir(single_reads_folder) if not folder.endswith('txt')]
    # single_folders = [single_reads_folder + '/' + file for file in sorted(single_folders, key=lambda x: int(x.split('/')[-1]))]
    # fastq_files = [basecalls_folder + '/' + file for file in sorted(os.listdir(basecalls_folder), key=lambda x: int(x[:-6].split('_')[-2]))]
    # file_pairs = list(zip(single_folders, fastq_files))
    # group_size = len(file_pairs) // n_processes
    # group_indeces = list(range(0, len(file_pairs), group_size))
    # file_groups = [file_pairs[group_size * i:group_size * (i+1)] if i != n_processes - 1 else file_pairs[group_size * i::]
    #                for i in range(n_processes)]
    # if len(file_pairs) % n_processes != 0:
    #     extra_pairs = file_pairs[group_size * n_processes::]
    #     [file_groups[i].append(extra_pairs[i]) for i in range(len(extra_pairs))]
    # processes = []
    # for rank in range(n_processes):
    #     print(f'Process {rank} launched')
    #     process = mp.Process(target=annotate_basecalls, args=(file_groups[rank]))
    #     process.start()
    # for process in processes:
    #     process.join()
    # print('Annotation completed')
    # Resquiggle
    print('***************************************************************************************')
    print('Resquiggle the reads...')
    print('***************************************************************************************')
    # reference_file is also read as a module-level global by filter_reads
    # (defined above) — presumably on purpose; TODO confirm.
    reference_file = database_dir + '/' + 'reference.fasta'
    command = f'tombo resquiggle {single_reads_folder} {reference_file} --processes {n_processes} --num-most-common-errors 5 --overwrite'
    os.system(command)
    print('Resquiggling completed')
| <filename>databases/working_ap/annotate_and_resquiggle_reads.py
#!/home/mario/anaconda3/envs/project2_venv/bin python
"""
DESCRIPTION:
This script tries to gather all the steps needed to
perform once the basecalls have been obtained.
"""
# Libraries
import os
import sys
from tombo import tombo_helper, tombo_stats, resquiggle
import h5py
import mappy
from tqdm import tqdm
import multiprocessing as mp
from multiprocessing import Manager
from shutil import move, rmtree
# Functions
def filter_reads(read_folders, filtered_reads, q_score_threshold):
    """
    DESCRIPTION:
    A function to filter the reads based on their mean q score.
    :param read_folders: [list] routes to the folders with the reads.
    :param filtered_reads: [manager.list()] list in which the names
    of the filtered reads should be appended.
    :param q_score_threshold: [float] the value to filter.
    :return: [int] number of reads that passed the quality filter
    (previously computed but discarded; callers may ignore it).
    """
    # NOTE(review): `reference_file` is read from module/global scope (set in
    # the __main__ block) — confirm it is defined before this is called.
    # Build the aligner once: it does not depend on the individual read, and
    # constructing the mappy index per read is very expensive.
    aligner = mappy.Aligner(reference_file, preset=str('map-ont'), best_n=1)
    count = 0
    for reads_folder in read_folders:
        read_files = os.listdir(reads_folder)
        for file in tqdm(read_files):
            read_file = reads_folder + '/' + file
            # Context manager closes the FAST5 handle even when the read is
            # skipped (the original leaked a handle per skipped read).
            with h5py.File(read_file, 'r') as fast5_data:
                # Per-read model parameters for resquiggling
                seq_samp_type = tombo_helper.get_seq_sample_type(fast5_data)
                std_ref = tombo_stats.TomboModel(seq_samp_type=seq_samp_type)
                # Extract data from FAST5
                try:
                    map_results = resquiggle.map_read(fast5_data, aligner, std_ref)
                except tombo_helper.TomboError:
                    # Avoid reads lacking alignment (very poor quality)
                    continue
            # Filter reads based on q score for quality
            if map_results.mean_q_score >= q_score_threshold:
                count += 1
                filtered_reads.append(read_file)
    return count
def annotate_basecalls(*args):
    """
    DESCRIPTION:
    Annotate folders of single-read FAST5 files with their basecalls.
    :param args: [tuple] (fast5_folder, fastq_file) pairs; one tombo
    preprocessing command is launched through the shell per pair.
    """
    for pair in args:
        # pair[0]: folder with single FAST5 reads, pair[1]: matching FASTQ
        command = f'tombo preprocess annotate_raw_with_fastqs --fast5-basedir {pair[0]} --fastq-filenames {pair[1]} --overwrite'
        os.system(command)
        #subprocess.check_call(command, shell=True)
def read_code(filename):
    """Return the read identifier encoded in *filename*.

    The name is split on underscores; the identifier is the first eight
    characters of the third field joined by '_' to the full fourth field.
    """
    code1, code2 = filename.split('_')[2:4]
    return '_'.join((code1[:8], code2))
if __name__ == "__main__":
    # CLI usage: <script> <database_dir> <n_processes> <flowcell>
    database_dir = sys.argv[1]
    n_processes = int(sys.argv[2])
    flowcell = sys.argv[3]
    # workdir = f'/home/mario/Projects/project_2/databases/working_3xr6'
    # selected_folder = '0'
    # n_processes = 4
    # flowcell = 'flowcell3'
    # Format to multiple to single read files
    # print('***************************************************************************************')
    # print('Format reads from multi to single files')
    # print('***************************************************************************************')
    # Expected layout: <database_dir>/reads/<flowcell> and
    # <database_dir>/basecalls/<flowcell>/multi.fastq
    reads_folder = database_dir + '/' + 'reads' + '/' + flowcell
    basecalls_folder = database_dir + '/' + 'basecalls' + '/' + flowcell
    fastq_file = basecalls_folder + '/' + 'multi.fastq'
    #command = f'multi_to_single_fast5 --input_path {reads_folder}/multi --save_path {reads_folder}/single'
    #os.system(command)
    # # Flatten directory structure
    # single_reads_folders = [reads_folder + '/' + 'single' + '/' + folder
    #                         for folder in os.listdir(reads_folder + '/' + 'single') if not folder.endswith('txt')]
    # single_reads_folder = reads_folder + '/' + 'single'
    single_reads_folder = reads_folder + '/' + 'single'
    # Annotate the reads with the basecalls
    print('***************************************************************************************')
    print('Annotate the reads')
    print('***************************************************************************************')
    # Read all the possible fastqs
    command = f'tombo preprocess annotate_raw_with_fastqs --fast5-basedir {single_reads_folder} --fastq-filenames {fastq_file} --overwrite'
    # NOTE(review): `code` captures the shell exit status but is never checked.
    code = os.system(command)
    print('Annotation completed')
    # single_folders = [folder for folder in os.listdir(single_reads_folder) if not folder.endswith('txt')]
    # single_folders = [single_reads_folder + '/' + file for file in sorted(single_folders, key=lambda x: int(x.split('/')[-1]))]
    # fastq_files = [basecalls_folder + '/' + file for file in sorted(os.listdir(basecalls_folder), key=lambda x: int(x[:-6].split('_')[-2]))]
    # file_pairs = list(zip(single_folders, fastq_files))
    # group_size = len(file_pairs) // n_processes
    # group_indeces = list(range(0, len(file_pairs), group_size))
    # file_groups = [file_pairs[group_size * i:group_size * (i+1)] if i != n_processes - 1 else file_pairs[group_size * i::]
    #                for i in range(n_processes)]
    # if len(file_pairs) % n_processes != 0:
    #     extra_pairs = file_pairs[group_size * n_processes::]
    #     [file_groups[i].append(extra_pairs[i]) for i in range(len(extra_pairs))]
    # processes = []
    # for rank in range(n_processes):
    #     print(f'Process {rank} launched')
    #     process = mp.Process(target=annotate_basecalls, args=(file_groups[rank]))
    #     process.start()
    # for process in processes:
    #     process.join()
    # print('Annotation completed')
    # Resquiggle
    print('***************************************************************************************')
    print('Resquiggle the reads...')
    print('***************************************************************************************')
    # reference_file is also read as a module-level global by filter_reads
    # (defined above) — presumably on purpose; TODO confirm.
    reference_file = database_dir + '/' + 'reference.fasta'
    command = f'tombo resquiggle {single_reads_folder} {reference_file} --processes {n_processes} --num-most-common-errors 5 --overwrite'
    os.system(command)
    print('Resquiggling completed')
| en | 0.63148 | #!/home/mario/anaconda3/envs/project2_venv/bin python DESCRIPTION: This script tries to gather all the steps needed to perform once the basecalls have been obtained. # Libraries # Functions DESCRIPTION: A function to filter the reads based on their q score. :param read_folders: [str] the route to folder with th reads. :param filtered_reads: [manager.list()] list in which the names of the filtered reads should be appended. :param q_score_threshold: [float] the value to filter. # Set parameters for resquiggling # Extract data from FAST5 # Avoid reads lacking alignment (very poor quality) # Filter reads based on q score for quality #subprocess.check_call(command, shell=True) # workdir = f'/home/mario/Projects/project_2/databases/working_3xr6' # selected_folder = '0' # n_processes = 4 # flowcell = 'flowcell3' # Format to multiple to single read files # print('***************************************************************************************') # print('Format reads from multi to single files') # print('***************************************************************************************') #command = f'multi_to_single_fast5 --input_path {reads_folder}/multi --save_path {reads_folder}/single' #os.system(command) # # Flatten directory structure # single_reads_folders = [reads_folder + '/' + 'single' + '/' + folder # for folder in os.listdir(reads_folder + '/' + 'single') if not folder.endswith('txt')] # single_reads_folder = reads_folder + '/' + 'single' # Annotate the reads with the basecalls # Read all the possible fastqs # single_folders = [folder for folder in os.listdir(single_reads_folder) if not folder.endswith('txt')] # single_folders = [single_reads_folder + '/' + file for file in sorted(single_folders, key=lambda x: int(x.split('/')[-1]))] # fastq_files = [basecalls_folder + '/' + file for file in sorted(os.listdir(basecalls_folder), key=lambda x: int(x[:-6].split('_')[-2]))] # file_pairs = list(zip(single_folders, fastq_files)) # 
group_size = len(file_pairs) // n_processes # group_indeces = list(range(0, len(file_pairs), group_size)) # file_groups = [file_pairs[group_size * i:group_size * (i+1)] if i != n_processes - 1 else file_pairs[group_size * i::] # for i in range(n_processes)] # if len(file_pairs) % n_processes != 0: # extra_pairs = file_pairs[group_size * n_processes::] # [file_groups[i].append(extra_pairs[i]) for i in range(len(extra_pairs))] # processes = [] # for rank in range(n_processes): # print(f'Process {rank} launched') # process = mp.Process(target=annotate_basecalls, args=(file_groups[rank])) # process.start() # for process in processes: # process.join() # print('Annotation completed') # Resquiggle | 2.203601 | 2 |
data_collection.py | gopiraj15/tutorial-repo | 0 | 6622912 | <gh_stars>0
# Scrape the Talking Hands sentence index and download the sign videos.
# Step 1: for every sentence page, collect the embedded video URL.
# Step 2: download each video into the local 'data/' directory.
from bs4 import BeautifulSoup
import requests as req

base_url = "http://www.talkinghands.co.in"
open_pg = req.get(base_url + "/sentences").text
soup = BeautifulSoup(open_pg, "lxml")

# Map sentence title -> embedded video URL (leading '//' removed)
vid_title_url = {}
for entry in soup.find("div", {"class": "span8"}).find_all("div", {"class": "views-field views-field-title"}):
    vid_pg = BeautifulSoup(req.get(base_url + entry.find("a").get("href")).text, "lxml")
    try:
        vid_title_url[entry.text] = vid_pg.find("iframe").get("src").replace("//", "")
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
        # A page without an embedded iframe ends up here and is skipped.
        print('Some Error!')

from pytube import YouTube
for title in vid_title_url:
    if vid_title_url[title]:
        yt = YouTube(vid_title_url[title])
        try:
            #yt.streams.filter('mp4').download(filename = 'data/' + i)
            yt.streams.first().download('data/', filename=title)
        except Exception:
            # Download failures (removed/restricted video, network) are skipped
            print("Some Error!")
# Scrape the Talking Hands sentence index and download the sign videos.
from bs4 import BeautifulSoup
import requests as req

base_url = "http://www.talkinghands.co.in"
open_pg = req.get(base_url+"/sentences").text
soup = BeautifulSoup(open_pg,"lxml")
# Map sentence title -> embedded video URL (leading '//' removed)
vid_title_url={}
for i in soup.find("div",{"class":"span8"}).find_all("div",{"class":"views-field views-field-title"}):
    vid_pg = BeautifulSoup(req.get(base_url+i.find("a").get("href")).text,"lxml")
    try:
        vid_title_url[i.text] = vid_pg.find("iframe").get("src").replace("//","")
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
    except:
        print('Some Error!')
from pytube import YouTube
for i in vid_title_url.keys():
    if vid_title_url[i]:
        yt = YouTube(vid_title_url[i])
        try:
            #yt.streams.filter('mp4').download(filename = 'data/' + i)
            yt.streams.first().download('data/', filename = i)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
        except:
            print("Some Error!")
print("Some Error!") | ja | 0.067828 | #yt.streams.filter('mp4').download(filename = 'data/' + i) | 2.996307 | 3 |
bookwyrm/tests/lists_stream/test_signals.py | mouse-reeve/fedireads | 270 | 6622913 | <reponame>mouse-reeve/fedireads
""" testing lists_stream """
from unittest.mock import patch
from django.test import TestCase
from bookwyrm import lists_stream, models
@patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async")
class ListsStreamSignals(TestCase):
    """using redis to build activity streams"""

    def setUp(self):
        """create two local users and one remote user, with the suggestion,
        stream and list population side-effects patched out"""
        with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
            "bookwyrm.activitystreams.populate_stream_task.delay"
        ), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
            self.local_user = models.User.objects.create_user(
                "mouse", "<EMAIL>", "password", local=True, localname="mouse"
            )
            self.another_user = models.User.objects.create_user(
                "fish", "<EMAIL>", "password", local=True, localname="fish"
            )
        # Remote users skip the server-resolution task as well
        with patch("bookwyrm.models.user.set_remote_server.delay"):
            self.remote_user = models.User.objects.create_user(
                "rat",
                "<EMAIL>",
                "<PASSWORD>",
                local=False,
                remote_id="https://example.com/users/rat",
                inbox="https://example.com/users/rat/inbox",
                outbox="https://example.com/users/rat/outbox",
            )

    def test_add_list_on_create_command(self, _):
        """a new lists has entered"""
        book_list = models.List.objects.create(
            user=self.remote_user, name="hi", privacy="public"
        )
        with patch("bookwyrm.lists_stream.add_list_task.delay") as mock:
            lists_stream.add_list_on_create_command(book_list.id)
        # The add task is queued exactly once, with the list id
        self.assertEqual(mock.call_count, 1)
        args = mock.call_args[0]
        self.assertEqual(args[0], book_list.id)

    def test_remove_list_on_delete(self, _):
        """delete a list"""
        book_list = models.List.objects.create(
            user=self.remote_user, name="hi", privacy="public"
        )
        with patch("bookwyrm.lists_stream.remove_list_task.delay") as mock:
            lists_stream.remove_list_on_delete(models.List, book_list)
        args = mock.call_args[0]
        self.assertEqual(args[0], book_list.id)

    def test_populate_lists_on_account_create_command(self, _):
        """create streams for a user"""
        with patch("bookwyrm.lists_stream.populate_lists_task.delay") as mock:
            lists_stream.add_list_on_account_create_command(self.local_user.id)
        self.assertEqual(mock.call_count, 1)
        args = mock.call_args[0]
        self.assertEqual(args[0], self.local_user.id)

    @patch("bookwyrm.activitystreams.remove_user_statuses_task.delay")
    def test_remove_lists_on_block(self, *_):
        """don't show lists from blocked users"""
        with patch("bookwyrm.lists_stream.remove_user_lists_task.delay") as mock:
            # Creating the block triggers the removal signal
            models.UserBlocks.objects.create(
                user_subject=self.local_user,
                user_object=self.remote_user,
            )
        args = mock.call_args[0]
        self.assertEqual(args[0], self.local_user.id)
        self.assertEqual(args[1], self.remote_user.id)

    @patch("bookwyrm.activitystreams.remove_user_statuses_task.delay")
    @patch("bookwyrm.activitystreams.add_user_statuses_task.delay")
    def test_add_lists_on_unblock(self, *_):
        """re-add lists on unblock"""
        with patch("bookwyrm.lists_stream.remove_user_lists_task.delay"):
            block = models.UserBlocks.objects.create(
                user_subject=self.local_user,
                user_object=self.remote_user,
            )
        # Deleting the block triggers the re-add signal
        with patch("bookwyrm.lists_stream.add_user_lists_task.delay") as mock:
            block.delete()
        args = mock.call_args[0]
        self.assertEqual(args[0], self.local_user.id)
        self.assertEqual(args[1], self.remote_user.id)

    @patch("bookwyrm.activitystreams.remove_user_statuses_task.delay")
    @patch("bookwyrm.activitystreams.add_user_statuses_task.delay")
    def test_add_lists_on_unblock_reciprocal_block(self, *_):
        """dont' re-add lists on unblock if there's a block the other way"""
        with patch("bookwyrm.lists_stream.remove_user_lists_task.delay"):
            block = models.UserBlocks.objects.create(
                user_subject=self.local_user,
                user_object=self.remote_user,
            )
            # Reciprocal block in the other direction
            block = models.UserBlocks.objects.create(
                user_subject=self.remote_user,
                user_object=self.local_user,
            )
        with patch("bookwyrm.lists_stream.add_user_lists_task.delay") as mock:
            block.delete()
        # The surviving block suppresses the re-add
        self.assertFalse(mock.called)
| """ testing lists_stream """
from unittest.mock import patch
from django.test import TestCase
from bookwyrm import lists_stream, models
@patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async")
class ListsStreamSignals(TestCase):
    """using redis to build activity streams"""

    def setUp(self):
        """create two local users and one remote user, with the suggestion,
        stream and list population side-effects patched out"""
        with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
            "bookwyrm.activitystreams.populate_stream_task.delay"
        ), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
            self.local_user = models.User.objects.create_user(
                "mouse", "<EMAIL>", "password", local=True, localname="mouse"
            )
            self.another_user = models.User.objects.create_user(
                "fish", "<EMAIL>", "password", local=True, localname="fish"
            )
        # Remote users skip the server-resolution task as well
        with patch("bookwyrm.models.user.set_remote_server.delay"):
            self.remote_user = models.User.objects.create_user(
                "rat",
                "<EMAIL>",
                "<PASSWORD>",
                local=False,
                remote_id="https://example.com/users/rat",
                inbox="https://example.com/users/rat/inbox",
                outbox="https://example.com/users/rat/outbox",
            )

    def test_add_list_on_create_command(self, _):
        """a new lists has entered"""
        book_list = models.List.objects.create(
            user=self.remote_user, name="hi", privacy="public"
        )
        with patch("bookwyrm.lists_stream.add_list_task.delay") as mock:
            lists_stream.add_list_on_create_command(book_list.id)
        # The add task is queued exactly once, with the list id
        self.assertEqual(mock.call_count, 1)
        args = mock.call_args[0]
        self.assertEqual(args[0], book_list.id)

    def test_remove_list_on_delete(self, _):
        """delete a list"""
        book_list = models.List.objects.create(
            user=self.remote_user, name="hi", privacy="public"
        )
        with patch("bookwyrm.lists_stream.remove_list_task.delay") as mock:
            lists_stream.remove_list_on_delete(models.List, book_list)
        args = mock.call_args[0]
        self.assertEqual(args[0], book_list.id)

    def test_populate_lists_on_account_create_command(self, _):
        """create streams for a user"""
        with patch("bookwyrm.lists_stream.populate_lists_task.delay") as mock:
            lists_stream.add_list_on_account_create_command(self.local_user.id)
        self.assertEqual(mock.call_count, 1)
        args = mock.call_args[0]
        self.assertEqual(args[0], self.local_user.id)

    @patch("bookwyrm.activitystreams.remove_user_statuses_task.delay")
    def test_remove_lists_on_block(self, *_):
        """don't show lists from blocked users"""
        with patch("bookwyrm.lists_stream.remove_user_lists_task.delay") as mock:
            # Creating the block triggers the removal signal
            models.UserBlocks.objects.create(
                user_subject=self.local_user,
                user_object=self.remote_user,
            )
        args = mock.call_args[0]
        self.assertEqual(args[0], self.local_user.id)
        self.assertEqual(args[1], self.remote_user.id)

    @patch("bookwyrm.activitystreams.remove_user_statuses_task.delay")
    @patch("bookwyrm.activitystreams.add_user_statuses_task.delay")
    def test_add_lists_on_unblock(self, *_):
        """re-add lists on unblock"""
        with patch("bookwyrm.lists_stream.remove_user_lists_task.delay"):
            block = models.UserBlocks.objects.create(
                user_subject=self.local_user,
                user_object=self.remote_user,
            )
        # Deleting the block triggers the re-add signal
        with patch("bookwyrm.lists_stream.add_user_lists_task.delay") as mock:
            block.delete()
        args = mock.call_args[0]
        self.assertEqual(args[0], self.local_user.id)
        self.assertEqual(args[1], self.remote_user.id)

    @patch("bookwyrm.activitystreams.remove_user_statuses_task.delay")
    @patch("bookwyrm.activitystreams.add_user_statuses_task.delay")
    def test_add_lists_on_unblock_reciprocal_block(self, *_):
        """dont' re-add lists on unblock if there's a block the other way"""
        with patch("bookwyrm.lists_stream.remove_user_lists_task.delay"):
            block = models.UserBlocks.objects.create(
                user_subject=self.local_user,
                user_object=self.remote_user,
            )
            # Reciprocal block in the other direction
            block = models.UserBlocks.objects.create(
                user_subject=self.remote_user,
                user_object=self.local_user,
            )
        with patch("bookwyrm.lists_stream.add_user_lists_task.delay") as mock:
            block.delete()
        # The surviving block suppresses the re-add
        self.assertFalse(mock.called)
specs/matchers/built_in/have_property_spec.py | danibaena/expects | 189 | 6622914 | # -*- coding: utf-8 -*-
from expects import *
from expects.aliases import *
from expects.testing import failure
class Foo(object):
    """Minimal fixture exposing two plain class attributes for the spec."""
    bar = 0  # attribute probed by most examples
    baz = 1
# Mamba-style specification of the `have_property` matcher.  Inside the
# describe/context/it blocks, `self` is presumably an execution context
# injected by the test runner, not a normal class instance — TODO confirm.
with describe('have_property'):
    with before.each:
        # Fresh fixture object for every example
        self.obj = Foo()

    with it('should pass if object has property'):
        expect(self.obj).to(have_property('bar'))

    with it('should pass if object has property with value'):
        expect(self.obj).to(have_property('bar', 0))

    with it('should fail if object does not have property'):
        with failure("but: property 'foo' not found"):
            expect(self.obj).to(have_property('foo'))

    with it('should fail if object hasnt property with value'):
        with failure("but: property 'foo' not found"):
            expect(self.obj).to(have_property('foo', 0))

    with it('should fail if object has property with different value'):
        with failure("but: property 'bar' equal 1 not found"):
            expect(self.obj).to(have_property('bar', 1))

    with it('should fail if object has property without none value'):
        with failure("but: property 'bar' equal None not found"):
            expect(self.obj).to(have_property('bar', None))

    # Behaviour of the negated matcher (not_to)
    with context('when negated'):
        with it('should pass if object does not have property'):
            expect(self.obj).not_to(have_property('foo'))

        with it('should pass if object does not have property with value'):
            expect(self.obj).not_to(have_property('foo', 0))

        with it('should pass if object has property without value'):
            expect(self.obj).not_to(have_property('bar', 1))

        with it('should fail if object has property'):
            with failure("but: property 'bar' found"):
                expect(self.obj).not_to(have_property('bar'))

        with it('should fail if object has property with value'):
            with failure("but: property 'bar' equal 0 found"):
                expect(self.obj).not_to(have_property('bar', 0))

    # Behaviour when composed with other matchers as the expected value
    with context('when composed'):
        with it('should pass if object has property below 1'):
            expect(self.obj).to(have_property('bar', be_below(1)))

        with it('should pass if object does not have property above 1'):
            expect(self.obj).to(have_property('bar', not_(above(1))))

        with it('should fail if object has property above 1'):
            with failure("but: property 'bar' above 1 not found"):
                expect(self.obj).to(have_property('bar', above(1)))

        with it('should fail if object has property not below 1'):
            with failure("but: property 'bar' not be below 1 not found"):
                expect(self.obj).to(have_property('bar', not_(be_below(1))))
| # -*- coding: utf-8 -*-
from expects import *
from expects.aliases import *
from expects.testing import failure
class Foo(object):
    """Minimal fixture exposing two plain class attributes for the spec."""
    bar = 0  # attribute probed by most examples
    baz = 1
# Mamba-style specification of the `have_property` matcher.  Inside the
# describe/context/it blocks, `self` is presumably an execution context
# injected by the test runner, not a normal class instance — TODO confirm.
with describe('have_property'):
    with before.each:
        # Fresh fixture object for every example
        self.obj = Foo()

    with it('should pass if object has property'):
        expect(self.obj).to(have_property('bar'))

    with it('should pass if object has property with value'):
        expect(self.obj).to(have_property('bar', 0))

    with it('should fail if object does not have property'):
        with failure("but: property 'foo' not found"):
            expect(self.obj).to(have_property('foo'))

    with it('should fail if object hasnt property with value'):
        with failure("but: property 'foo' not found"):
            expect(self.obj).to(have_property('foo', 0))

    with it('should fail if object has property with different value'):
        with failure("but: property 'bar' equal 1 not found"):
            expect(self.obj).to(have_property('bar', 1))

    with it('should fail if object has property without none value'):
        with failure("but: property 'bar' equal None not found"):
            expect(self.obj).to(have_property('bar', None))

    # Behaviour of the negated matcher (not_to)
    with context('when negated'):
        with it('should pass if object does not have property'):
            expect(self.obj).not_to(have_property('foo'))

        with it('should pass if object does not have property with value'):
            expect(self.obj).not_to(have_property('foo', 0))

        with it('should pass if object has property without value'):
            expect(self.obj).not_to(have_property('bar', 1))

        with it('should fail if object has property'):
            with failure("but: property 'bar' found"):
                expect(self.obj).not_to(have_property('bar'))

        with it('should fail if object has property with value'):
            with failure("but: property 'bar' equal 0 found"):
                expect(self.obj).not_to(have_property('bar', 0))

    # Behaviour when composed with other matchers as the expected value
    with context('when composed'):
        with it('should pass if object has property below 1'):
            expect(self.obj).to(have_property('bar', be_below(1)))

        with it('should pass if object does not have property above 1'):
            expect(self.obj).to(have_property('bar', not_(above(1))))

        with it('should fail if object has property above 1'):
            with failure("but: property 'bar' above 1 not found"):
                expect(self.obj).to(have_property('bar', above(1)))

        with it('should fail if object has property not below 1'):
            with failure("but: property 'bar' not be below 1 not found"):
                expect(self.obj).to(have_property('bar', not_(be_below(1))))
| en | 0.769321 | # -*- coding: utf-8 -*- | 3.070924 | 3 |
typedjson/__init__.py | cosmin/python-typed-json | 0 | 6622915 | <filename>typedjson/__init__.py<gh_stars>0
from .basemodel import BaseModel
from .jsonencoder import ModelJsonEncoder

# __all__ must contain the *names* of the public symbols as strings; listing
# the objects themselves makes ``from typedjson import *`` raise TypeError.
__all__ = ["BaseModel", "ModelJsonEncoder"]
| <filename>typedjson/__init__.py<gh_stars>0
from .basemodel import BaseModel
from .jsonencoder import ModelJsonEncoder

# __all__ must contain the *names* of the public symbols as strings; listing
# the objects themselves makes ``from typedjson import *`` raise TypeError.
__all__ = ["BaseModel", "ModelJsonEncoder"]
| none | 1 | 1.356199 | 1 | |
home_platform/core.py | ml-lab/home-platform | 3 | 6622916 | <filename>home_platform/core.py<gh_stars>1-10
# Copyright (c) 2017, IGLU consortium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from panda3d.core import NodePath
class World(object):
    """Abstract interface for a simulated world; subclasses implement step."""

    def step(self, dt):
        """Advance the world simulation by *dt* (must be overridden)."""
        raise NotImplementedError()
class Scene(object):
    """Panda3D scene graph holding houses, rooms, objects and agents.

    A single root NodePath ('scene') is created with one default agent
    attached under an 'agents' group node.
    """

    def __init__(self):
        self.scene = NodePath('scene')
        agents = self.scene.attachNewNode('agents')
        # Single default agent; more can be attached by callers
        self.agents = [agents.attachNewNode('agent-0'), ]
        # Mapping of world implementations attached to this scene
        self.worlds = dict()

    def _countMatches(self, pattern):
        """Return the number of nodes under the root matching *pattern*.

        Factored out of the four getTotalNb* methods, which previously
        duplicated this logic verbatim.
        """
        nodepaths = self.scene.findAllMatches(pattern)
        if nodepaths is not None:
            return len(nodepaths)
        return 0

    def getTotalNbHouses(self):
        """Number of house nodes in the scene graph."""
        return self._countMatches('**/house*')

    def getTotalNbRooms(self):
        """Number of room nodes in the scene graph."""
        return self._countMatches('**/room*')

    def getTotalNbObjects(self):
        """Number of object nodes in the scene graph."""
        return self._countMatches('**/object*')

    def getTotalNbAgents(self):
        """Number of agent nodes under the 'agents' group."""
        return self._countMatches('**/agents/*')

    def __str__(self):
        return "Scene: %d houses, %d rooms, %d objects, %d agents" % ( self.getTotalNbHouses(),
                                                                       self.getTotalNbRooms(),
                                                                       self.getTotalNbObjects(),
                                                                       self.getTotalNbAgents())

    __repr__ = __str__
| <filename>home_platform/core.py<gh_stars>1-10
# Copyright (c) 2017, IGLU consortium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from panda3d.core import NodePath
class World(object):
    """Abstract interface for a simulated world; subclasses implement step."""

    def step(self, dt):
        """Advance the world simulation by *dt* (must be overridden)."""
        raise NotImplementedError()
class Scene(object):
    """Panda3D scene graph holding houses, rooms, objects and agents."""

    def __init__(self):
        # Root of the scene graph, with one default agent under 'agents'
        self.scene = NodePath('scene')
        agents = self.scene.attachNewNode('agents')
        self.agents = [agents.attachNewNode('agent-0'),]
        # Mapping of world implementations attached to this scene
        self.worlds = dict()

    def getTotalNbHouses(self):
        """Number of house nodes in the scene graph."""
        nodepaths = self.scene.findAllMatches('**/house*')
        if nodepaths is not None:
            count = len(nodepaths)
        else:
            count = 0
        return count

    def getTotalNbRooms(self):
        """Number of room nodes in the scene graph."""
        nodepaths = self.scene.findAllMatches('**/room*')
        if nodepaths is not None:
            count = len(nodepaths)
        else:
            count = 0
        return count

    def getTotalNbObjects(self):
        """Number of object nodes in the scene graph."""
        nodepaths = self.scene.findAllMatches('**/object*')
        if nodepaths is not None:
            count = len(nodepaths)
        else:
            count = 0
        return count

    def getTotalNbAgents(self):
        """Number of agent nodes under the 'agents' group."""
        nodepaths = self.scene.findAllMatches('**/agents/*')
        if nodepaths is not None:
            count = len(nodepaths)
        else:
            count = 0
        return count

    def __str__(self):
        return "Scene: %d houses, %d rooms, %d objects, %d agents" % ( self.getTotalNbHouses(),
                                                                       self.getTotalNbRooms(),
                                                                       self.getTotalNbObjects(),
                                                                       self.getTotalNbAgents())

    __repr__ = __str__
| en | 0.725259 | # Copyright (c) 2017, IGLU consortium # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # - Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. | 1.615053 | 2 |
aql/util_types/aql_path_types.py | menify/aqualid | 1 | 6622917 | <gh_stars>1-10
#
# Copyright (c) 2011-2015 The developers of Aqualid project
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os.path
from .aql_simple_types import String, IgnoreCaseString
__all__ = (
'FilePath',
'AbsFilePath',
)
# ==============================================================================
# Probe the platform's path case sensitivity once at import time: where
# path comparison ignores case (e.g. Windows), store paths as
# case-insensitive strings so equality and hashing match the OS behavior.
if os.path.normcase('ABC') == os.path.normcase('abc'):
    FilePathBase = IgnoreCaseString
else:
    FilePathBase = String

# os.path.splitunc() exists only on Windows builds (and was removed in
# Python 3); elsewhere fall back to a no-op that reports "no UNC share".
try:
    _splitunc = os.path.splitunc
except AttributeError:
    def _splitunc(path):
        # (unc_share, rest) -- no UNC notion on this platform.
        return str(), path
# ==============================================================================
class FilePath (FilePathBase):
    """A file-system path string with convenience accessors.

    Inherits from FilePathBase, so comparisons and hashing are
    case-insensitive on platforms whose file system ignores case.
    """

    # -----------------------------------------------------------

    def __getnewargs__(self):
        # Support copying/pickling of this immutable string subclass.
        return str(self),

    def __getstate__(self):
        # No extra instance state beyond the string value itself.
        return {}

    def __setstate__(self, state):
        pass

    # -----------------------------------------------------------

    def __add__(self, other):
        # Concatenation keeps the FilePath type instead of decaying to str.
        return FilePath(super(FilePath, self).__add__(other))

    def __iadd__(self, other):
        # Strings are immutable, so in-place add also returns a new FilePath.
        return FilePath(super(FilePath, self).__add__(other))

    # -----------------------------------------------------------

    def __hash__(self):
        return super(FilePath, self).__hash__()

    # -----------------------------------------------------------

    def abspath(self):
        """Return the absolute form of this path."""
        return FilePath(os.path.abspath(self))

    def normpath(self):
        """Return this path with redundant separators and up-level refs collapsed."""
        return FilePath(os.path.normpath(self))

    def filename(self):
        """Return the final path component (base name)."""
        return FilePath(os.path.basename(self))

    def dirname(self):
        """Return the directory part of this path."""
        return FilePath(os.path.dirname(self))

    def ext(self):
        """Return the file extension, including the leading dot."""
        return FilePathBase(os.path.splitext(self)[1])

    def name(self):
        """Return the base name without its extension."""
        return FilePathBase(os.path.splitext(self.filename())[0])

    def drive(self):
        """Return the drive letter or UNC share prefix, if any."""
        drive, path = os.path.splitdrive(self)
        if not drive:
            # Not a drive-letter path; it may still be a UNC path (\\host\share).
            drive, path = _splitunc(path)
        return FilePathBase(drive)

    # -----------------------------------------------------------

    def change(self, dirname=None, name=None, ext=None, prefix=None):
        """Return a copy of this path with selected parts replaced.

        Any of *dirname*, *name* or *ext* left as None keeps the current
        value; *prefix*, when given, is prepended to the (possibly new) name.
        """
        self_dirname, self_filename = os.path.split(self)
        self_name, self_ext = os.path.splitext(self_filename)

        if dirname is None:
            dirname = self_dirname
        if name is None:
            name = self_name
        if ext is None:
            ext = self_ext
        if prefix:
            name = prefix + name

        return FilePath(os.path.join(dirname, name + ext))

    # -----------------------------------------------------------

    def join_path(self, *paths):
        """Return this path joined with the given path components."""
        return FilePath(os.path.join(self, *paths))
# ==============================================================================
class AbsFilePath (FilePath):
    """A FilePath that is always stored absolute and case-normalized."""

    def __new__(cls, value=None):
        if type(value) is cls:
            # Already normalized; reuse the immutable instance as-is.
            return value

        if value is None:
            value = ''

        # Normalize once at construction so equality/hash are canonical.
        value = os.path.normcase(os.path.abspath(value))
        return super(AbsFilePath, cls).__new__(cls, value)
| #
# Copyright (c) 2011-2015 The developers of Aqualid project
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os.path
from .aql_simple_types import String, IgnoreCaseString
__all__ = (
'FilePath',
'AbsFilePath',
)
# ==============================================================================
if os.path.normcase('ABC') == os.path.normcase('abc'):
FilePathBase = IgnoreCaseString
else:
FilePathBase = String
try:
_splitunc = os.path.splitunc
except AttributeError:
def _splitunc(path):
return str(), path
# ==============================================================================
class FilePath (FilePathBase):
# -----------------------------------------------------------
def __getnewargs__(self):
return str(self),
def __getstate__(self):
return {}
def __setstate__(self, state):
pass
# -----------------------------------------------------------
def __add__(self, other):
return FilePath(super(FilePath, self).__add__(other))
def __iadd__(self, other):
return FilePath(super(FilePath, self).__add__(other))
# -----------------------------------------------------------
def __hash__(self):
return super(FilePath, self).__hash__()
# -----------------------------------------------------------
def abspath(self):
return FilePath(os.path.abspath(self))
def normpath(self):
return FilePath(os.path.normpath(self))
def filename(self):
return FilePath(os.path.basename(self))
def dirname(self):
return FilePath(os.path.dirname(self))
def ext(self):
return FilePathBase(os.path.splitext(self)[1])
def name(self):
return FilePathBase(os.path.splitext(self.filename())[0])
def drive(self):
drive, path = os.path.splitdrive(self)
if not drive:
drive, path = _splitunc(path)
return FilePathBase(drive)
# -----------------------------------------------------------
def change(self, dirname=None, name=None, ext=None, prefix=None):
self_dirname, self_filename = os.path.split(self)
self_name, self_ext = os.path.splitext(self_filename)
if dirname is None:
dirname = self_dirname
if name is None:
name = self_name
if ext is None:
ext = self_ext
if prefix:
name = prefix + name
return FilePath(os.path.join(dirname, name + ext))
# -----------------------------------------------------------
def join_path(self, *paths):
return FilePath(os.path.join(self, *paths))
# ==============================================================================
class AbsFilePath (FilePath):
def __new__(cls, value=None):
if type(value) is cls:
return value
if value is None:
value = ''
value = os.path.normcase(os.path.abspath(value))
return super(AbsFilePath, cls).__new__(cls, value) | en | 0.567349 | # # Copyright (c) 2011-2015 The developers of Aqualid project # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom # the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE # OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # ============================================================================== # ============================================================================== # ----------------------------------------------------------- # ----------------------------------------------------------- # ----------------------------------------------------------- # ----------------------------------------------------------- # ----------------------------------------------------------- # ----------------------------------------------------------- # ============================================================================== | 2.197557 | 2 |
carson/endpoints.py | cyu/carson | 7 | 6622918 | <gh_stars>1-10
def _endpoint(method, uri, auth=True, content=None):
    """Build a single endpoint descriptor.

    method  -- HTTP verb ('GET' or 'POST').
    uri     -- URI template, possibly containing {placeholders}.
    auth    -- whether the call requires an authenticated session.
    content -- optional response content hint (e.g. 'HTML').
    """
    spec = {'TYPE': method, 'URI': uri, 'AUTH': auth}
    if content is not None:
        spec['CONTENT'] = content
    return spec


def _get(uri, **kwargs):
    """Shorthand for a GET endpoint descriptor."""
    return _endpoint('GET', uri, **kwargs)


def _post(uri, **kwargs):
    """Shorthand for a POST endpoint descriptor."""
    return _endpoint('POST', uri, **kwargs)


# Mapping of symbolic API names to their endpoint descriptors.
MAP_ATTR_TO_ENDPOINT = {
    # Service status and OAuth session management.
    'STATUS': _get('status', auth=False),
    'AUTHENTICATE': _post('oauth/token', auth=False),
    'REVOKE_AUTH_TOKEN': _post('oauth/revoke'),

    # Account-level product and vehicle listings / data feeds.
    'PRODUCT_LIST': _get('api/1/products'),
    'VEHICLE_LIST': _get('api/1/vehicles'),
    'VEHICLE_SUMMARY': _get('api/1/vehicles/{vehicle_id}'),
    'VEHICLE_DATA_LEGACY': _get('api/1/vehicles/{vehicle_id}/data'),
    'VEHICLE_DATA': _get('api/1/vehicles/{vehicle_id}/vehicle_data'),
    'VEHICLE_SERVICE_DATA': _get('api/1/vehicles/{vehicle_id}/service_data'),
    'NEARBY_CHARGING_SITES': _get('api/1/vehicles/{vehicle_id}/nearby_charging_sites'),

    # Vehicle commands.
    'WAKE_UP': _post('api/1/vehicles/{vehicle_id}/wake_up'),
    'UNLOCK': _post('api/1/vehicles/{vehicle_id}/command/door_unlock'),
    'LOCK': _post('api/1/vehicles/{vehicle_id}/command/door_lock'),
    'HONK_HORN': _post('api/1/vehicles/{vehicle_id}/command/honk_horn'),
    'FLASH_LIGHTS': _post('api/1/vehicles/{vehicle_id}/command/flash_lights'),
    'CLIMATE_ON': _post('api/1/vehicles/{vehicle_id}/command/auto_conditioning_start'),
    'CLIMATE_OFF': _post('api/1/vehicles/{vehicle_id}/command/auto_conditioning_stop'),
    'MAX_DEFROST': _post('api/1/vehicles/{vehicle_id}/command/set_preconditioning_max'),
    'CHANGE_CLIMATE_TEMPERATURE_SETTING': _post('api/1/vehicles/{vehicle_id}/command/set_temps'),
    'CHANGE_CHARGE_LIMIT': _post('api/1/vehicles/{vehicle_id}/command/set_charge_limit'),
    'CHANGE_SUNROOF_STATE': _post('api/1/vehicles/{vehicle_id}/command/sun_roof_control'),
    'WINDOW_CONTROL': _post('api/1/vehicles/{vehicle_id}/command/window_control'),
    'ACTUATE_TRUNK': _post('api/1/vehicles/{vehicle_id}/command/actuate_trunk'),
    'REMOTE_START': _post('api/1/vehicles/{vehicle_id}/command/remote_start_drive'),
    'TRIGGER_HOMELINK': _post('api/1/vehicles/{vehicle_id}/command/trigger_homelink'),
    'CHARGE_PORT_DOOR_OPEN': _post('api/1/vehicles/{vehicle_id}/command/charge_port_door_open'),
    'CHARGE_PORT_DOOR_CLOSE': _post('api/1/vehicles/{vehicle_id}/command/charge_port_door_close'),
    'START_CHARGE': _post('api/1/vehicles/{vehicle_id}/command/charge_start'),
    'STOP_CHARGE': _post('api/1/vehicles/{vehicle_id}/command/charge_stop'),

    # Media controls.
    'MEDIA_TOGGLE_PLAYBACK': _post('api/1/vehicles/{vehicle_id}/command/media_toggle_playback'),
    'MEDIA_NEXT_TRACK': _post('api/1/vehicles/{vehicle_id}/command/media_next_track'),
    'MEDIA_PREVIOUS_TRACK': _post('api/1/vehicles/{vehicle_id}/command/media_prev_track'),
    'MEDIA_NEXT_FAVORITE': _post('api/1/vehicles/{vehicle_id}/command/media_next_fav'),
    'MEDIA_PREVIOUS_FAVORITE': _post('api/1/vehicles/{vehicle_id}/command/media_prev_fav'),
    'MEDIA_VOLUME_UP': _post('api/1/vehicles/{vehicle_id}/command/media_volume_up'),
    'MEDIA_VOLUME_DOWN': _post('api/1/vehicles/{vehicle_id}/command/media_volume_down'),

    # Logging, reporting and notification preferences.
    'SEND_LOG': _post('api/1/logs'),
    'SEND_REPORT': _post('api/1/reports'),
    'RETRIEVE_NOTIFICATION_PREFERENCES': _get('api/1/notification_preferences'),
    'SEND_NOTIFICATION_PREFERENCES': _post('api/1/notification_preferences'),
    'RETRIEVE_NOTIFICATION_SUBSCRIPTION_PREFERENCES': _get('api/1/vehicle_subscriptions'),
    'SEND_NOTIFICATION_SUBSCRIPTION_PREFERENCES': _post('api/1/vehicle_subscriptions'),
    'DEACTIVATE_DEVICE_TOKEN': _post('api/1/device/{device_token}/deactivate'),
    'CALENDAR_SYNC': _post('api/1/vehicles/{vehicle_id}/command/upcoming_calendar_entries'),

    # Valet mode and speed limiting.
    'SET_VALET_MODE': _post('api/1/vehicles/{vehicle_id}/command/set_valet_mode'),
    'RESET_VALET_PIN': _post('api/1/vehicles/{vehicle_id}/command/reset_valet_pin'),
    'SPEED_LIMIT_ACTIVATE': _post('api/1/vehicles/{vehicle_id}/command/speed_limit_activate'),
    'SPEED_LIMIT_DEACTIVATE': _post('api/1/vehicles/{vehicle_id}/command/speed_limit_deactivate'),
    'SPEED_LIMIT_SET_LIMIT': _post('api/1/vehicles/{vehicle_id}/command/speed_limit_set_limit'),
    'SPEED_LIMIT_CLEAR_PIN': _post('api/1/vehicles/{vehicle_id}/command/speed_limit_clear_pin'),

    # Software updates and sentry mode.
    'SCHEDULE_SOFTWARE_UPDATE': _post('api/1/vehicles/{vehicle_id}/command/schedule_software_update'),
    'CANCEL_SOFTWARE_UPDATE': _post('api/1/vehicles/{vehicle_id}/command/cancel_software_update'),
    'SET_SENTRY_MODE': _post('api/1/vehicles/{vehicle_id}/command/set_sentry_mode'),

    # Account data feeds and their HTML page counterparts.
    'POWERWALL_ORDER_SESSION_DATA': _get('api/1/users/powerwall_order_entry_data'),
    'POWERWALL_ORDER_PAGE': _get('powerwall_order_page', content='HTML'),
    'ONBOARDING_EXPERIENCE': _get('api/1/users/onboarding_data'),
    'ONBOARDING_EXPERIENCE_PAGE': _get('onboarding_page', content='HTML'),
    'SERVICE_SELF_SCHEDULING_ELIGIBILITY': _get('api/1/users/service_scheduling_data'),
    'SERVICE_SELF_SCHEDULING_PAGE': _get('service_scheduling_page', content='HTML'),
    'REFERRAL_DATA': _get('api/1/users/referral_data'),
    'REFERRAL_PAGE': _get('referral_page', content='HTML'),
    'ROADSIDE_ASSISTANCE_DATA': _get('api/1/users/roadside_assistance_data'),
    'ROADSIDE_ASSISTANCE_PAGE': _get('roadside_assistance_page', content='HTML'),

    # Vehicle upgrades.
    'UPGRADE_ELIGIBILITY': _get('api/1/vehicles/{vehicle_id}/eligible_upgrades'),
    'UPGRADE_URL': _get('api/1/vehicles/{vehicle_id}/purchase_url'),

    # Message center.
    'MESSAGE_CENTER_MESSAGE_LIST': _get('api/1/messages'),
    'MESSAGE_CENTER_MESSAGE': _get('api/1/messages/{message_id}'),
    'MESSAGE_CENTER_COUNTS': _get('api/1/messages/count'),
    'MESSAGE_CENTER_MESSAGE_ACTION_UPDATE': _post('api/1/messages/{message_id}/actions'),
    'MESSAGE_CENTER_CTA_PAGE': _get('messages_cta_page', content='HTML'),

    # Tokens, keys and diagnostics.
    'AUTH_COMMAND_TOKEN': _post('api/1/users/command_token'),
    'SEND_DEVICE_KEY': _post('api/1/users/keys'),
    'DIAGNOSTICS_ENTITLEMENTS': _get('api/1/diagnostics'),
    'SEND_DIAGNOSTICS': _post('api/1/diagnostics'),

    # Powerwall battery endpoints.
    'BATTERY_SUMMARY': _get('api/1/powerwalls/{battery_id}/status'),
    'BATTERY_DATA': _get('api/1/powerwalls/{battery_id}'),
    'BATTERY_POWER_TIMESERIES_DATA': _get('api/1/powerwalls/{battery_id}/powerhistory'),
    'BATTERY_ENERGY_TIMESERIES_DATA': _get('api/1/powerwalls/{battery_id}/energyhistory'),
    'BATTERY_BACKUP_RESERVE': _post('api/1/powerwalls/{battery_id}/backup'),
    'BATTERY_SITE_NAME': _post('api/1/powerwalls/{battery_id}/site_name'),
    'BATTERY_OPERATION_MODE': _post('api/1/powerwalls/{battery_id}/operation'),

    # Energy site endpoints.
    'SITE_SUMMARY': _get('api/1/energy_sites/{site_id}/status'),
    'SITE_DATA': _get('api/1/energy_sites/{site_id}/live_status'),
    'SITE_CONFIG': _get('api/1/energy_sites/{site_id}/site_info'),
    'HISTORY_DATA': _get('api/1/energy_sites/{site_id}/history'),
    'CALENDAR_HISTORY_DATA': _get('api/1/energy_sites/{site_id}/calendar_history'),
    'BACKUP_RESERVE': _post('api/1/energy_sites/{site_id}/backup'),
    'OFF_GRID_VEHICLE_CHARGING_RESERVE': _post('api/1/energy_sites/{site_id}/off_grid_vehicle_charging_reserve'),
    'SITE_NAME': _post('api/1/energy_sites/{site_id}/site_name'),
    'OPERATION_MODE': _post('api/1/energy_sites/{site_id}/operation'),
    'TIME_OF_USE_SETTINGS': _post('api/1/energy_sites/{site_id}/time_of_use_settings'),
    'STORM_MODE_SETTINGS': _post('api/1/energy_sites/{site_id}/storm_mode'),

    # Miscellaneous.
    'SEND_NOTIFICATION_CONFIRMATION': _post('api/1/notification_confirmations'),
    'SHARE_TO_VEHICLE': _post('api/1/vehicles/{vehicle_id}/command/share'),
    'REMOTE_SEAT_HEATER_REQUEST': _post('api/1/vehicles/{vehicle_id}/command/remote_seat_heater_request'),
    'REMOTE_STEERING_WHEEL_HEATER_REQUEST': _post('api/1/vehicles/{vehicle_id}/command/remote_steering_wheel_heater_request'),
}
| MAP_ATTR_TO_ENDPOINT = {
'STATUS': {
'TYPE': 'GET',
'URI': 'status',
'AUTH': False
},
'AUTHENTICATE': {
'TYPE': 'POST',
'URI': 'oauth/token',
'AUTH': False
},
'REVOKE_AUTH_TOKEN': {
'TYPE': 'POST',
'URI': 'oauth/revoke',
'AUTH': True
},
'PRODUCT_LIST': {
'TYPE': 'GET',
'URI': 'api/1/products',
'AUTH': True
},
'VEHICLE_LIST': {
'TYPE': 'GET',
'URI': 'api/1/vehicles',
'AUTH': True
},
'VEHICLE_SUMMARY': {
'TYPE': 'GET',
'URI': 'api/1/vehicles/{vehicle_id}',
'AUTH': True
},
'VEHICLE_DATA_LEGACY': {
'TYPE': 'GET',
'URI': 'api/1/vehicles/{vehicle_id}/data',
'AUTH': True
},
'VEHICLE_DATA': {
'TYPE': 'GET',
'URI': 'api/1/vehicles/{vehicle_id}/vehicle_data',
'AUTH': True
},
'VEHICLE_SERVICE_DATA': {
'TYPE': 'GET',
'URI': 'api/1/vehicles/{vehicle_id}/service_data',
'AUTH': True
},
'NEARBY_CHARGING_SITES': {
'TYPE': 'GET',
'URI': 'api/1/vehicles/{vehicle_id}/nearby_charging_sites',
'AUTH': True
},
'WAKE_UP': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/wake_up',
'AUTH': True
},
'UNLOCK': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/door_unlock',
'AUTH': True
},
'LOCK': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/door_lock',
'AUTH': True
},
'HONK_HORN': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/honk_horn',
'AUTH': True
},
'FLASH_LIGHTS': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/flash_lights',
'AUTH': True
},
'CLIMATE_ON': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/auto_conditioning_start',
'AUTH': True
},
'CLIMATE_OFF': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/auto_conditioning_stop',
'AUTH': True
},
'MAX_DEFROST': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/set_preconditioning_max',
'AUTH': True
},
'CHANGE_CLIMATE_TEMPERATURE_SETTING': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/set_temps',
'AUTH': True
},
'CHANGE_CHARGE_LIMIT': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/set_charge_limit',
'AUTH': True
},
'CHANGE_SUNROOF_STATE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/sun_roof_control',
'AUTH': True
},
'WINDOW_CONTROL': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/window_control',
'AUTH': True
},
'ACTUATE_TRUNK': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/actuate_trunk',
'AUTH': True
},
'REMOTE_START': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/remote_start_drive',
'AUTH': True
},
'TRIGGER_HOMELINK': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/trigger_homelink',
'AUTH': True
},
'CHARGE_PORT_DOOR_OPEN': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/charge_port_door_open',
'AUTH': True
},
'CHARGE_PORT_DOOR_CLOSE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/charge_port_door_close',
'AUTH': True
},
'START_CHARGE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/charge_start',
'AUTH': True
},
'STOP_CHARGE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/charge_stop',
'AUTH': True
},
'MEDIA_TOGGLE_PLAYBACK': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/media_toggle_playback',
'AUTH': True
},
'MEDIA_NEXT_TRACK': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/media_next_track',
'AUTH': True
},
'MEDIA_PREVIOUS_TRACK': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/media_prev_track',
'AUTH': True
},
'MEDIA_NEXT_FAVORITE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/media_next_fav',
'AUTH': True
},
'MEDIA_PREVIOUS_FAVORITE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/media_prev_fav',
'AUTH': True
},
'MEDIA_VOLUME_UP': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/media_volume_up',
'AUTH': True
},
'MEDIA_VOLUME_DOWN': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/media_volume_down',
'AUTH': True
},
'SEND_LOG': {
'TYPE': 'POST',
'URI': 'api/1/logs',
'AUTH': True
},
'SEND_REPORT': {
'TYPE': 'POST',
'URI': 'api/1/reports',
'AUTH': True
},
'RETRIEVE_NOTIFICATION_PREFERENCES': {
'TYPE': 'GET',
'URI': 'api/1/notification_preferences',
'AUTH': True
},
'SEND_NOTIFICATION_PREFERENCES': {
'TYPE': 'POST',
'URI': 'api/1/notification_preferences',
'AUTH': True
},
'RETRIEVE_NOTIFICATION_SUBSCRIPTION_PREFERENCES': {
'TYPE': 'GET',
'URI': 'api/1/vehicle_subscriptions',
'AUTH': True
},
'SEND_NOTIFICATION_SUBSCRIPTION_PREFERENCES': {
'TYPE': 'POST',
'URI': 'api/1/vehicle_subscriptions',
'AUTH': True
},
'DEACTIVATE_DEVICE_TOKEN': {
'TYPE': 'POST',
'URI': 'api/1/device/{device_token}/deactivate',
'AUTH': True
},
'CALENDAR_SYNC': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/upcoming_calendar_entries',
'AUTH': True
},
'SET_VALET_MODE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/set_valet_mode',
'AUTH': True
},
'RESET_VALET_PIN': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/reset_valet_pin',
'AUTH': True
},
'SPEED_LIMIT_ACTIVATE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/speed_limit_activate',
'AUTH': True
},
'SPEED_LIMIT_DEACTIVATE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/speed_limit_deactivate',
'AUTH': True
},
'SPEED_LIMIT_SET_LIMIT': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/speed_limit_set_limit',
'AUTH': True
},
'SPEED_LIMIT_CLEAR_PIN': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/speed_limit_clear_pin',
'AUTH': True
},
'SCHEDULE_SOFTWARE_UPDATE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/schedule_software_update',
'AUTH': True
},
'CANCEL_SOFTWARE_UPDATE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/cancel_software_update',
'AUTH': True
},
'SET_SENTRY_MODE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/set_sentry_mode',
'AUTH': True
},
'POWERWALL_ORDER_SESSION_DATA': {
'TYPE': 'GET',
'URI': 'api/1/users/powerwall_order_entry_data',
'AUTH': True
},
'POWERWALL_ORDER_PAGE': {
'TYPE': 'GET',
'URI': 'powerwall_order_page',
'AUTH': True,
'CONTENT': 'HTML'
},
'ONBOARDING_EXPERIENCE': {
'TYPE': 'GET',
'URI': 'api/1/users/onboarding_data',
'AUTH': True
},
'ONBOARDING_EXPERIENCE_PAGE': {
'TYPE': 'GET',
'URI': 'onboarding_page',
'AUTH': True,
'CONTENT': 'HTML'
},
'SERVICE_SELF_SCHEDULING_ELIGIBILITY': {
'TYPE': 'GET',
'URI': 'api/1/users/service_scheduling_data',
'AUTH': True
},
'SERVICE_SELF_SCHEDULING_PAGE': {
'TYPE': 'GET',
'URI': 'service_scheduling_page',
'AUTH': True,
'CONTENT': 'HTML'
},
'REFERRAL_DATA': {
'TYPE': 'GET',
'URI': 'api/1/users/referral_data',
'AUTH': True
},
'REFERRAL_PAGE': {
'TYPE': 'GET',
'URI': 'referral_page',
'AUTH': True,
'CONTENT': 'HTML'
},
'ROADSIDE_ASSISTANCE_DATA': {
'TYPE': 'GET',
'URI': 'api/1/users/roadside_assistance_data',
'AUTH': True
},
'ROADSIDE_ASSISTANCE_PAGE': {
'TYPE': 'GET',
'URI': 'roadside_assistance_page',
'AUTH': True,
'CONTENT': 'HTML'
},
'UPGRADE_ELIGIBILITY': {
'TYPE': 'GET',
'URI': 'api/1/vehicles/{vehicle_id}/eligible_upgrades',
'AUTH': True
},
'UPGRADE_URL': {
'TYPE': 'GET',
'URI': 'api/1/vehicles/{vehicle_id}/purchase_url',
'AUTH': True
},
'MESSAGE_CENTER_MESSAGE_LIST': {
'TYPE': 'GET',
'URI': 'api/1/messages',
'AUTH': True
},
'MESSAGE_CENTER_MESSAGE': {
'TYPE': 'GET',
'URI': 'api/1/messages/{message_id}',
'AUTH': True
},
'MESSAGE_CENTER_COUNTS': {
'TYPE': 'GET',
'URI': 'api/1/messages/count',
'AUTH': True
},
'MESSAGE_CENTER_MESSAGE_ACTION_UPDATE': {
'TYPE': 'POST',
'URI': 'api/1/messages/{message_id}/actions',
'AUTH': True
},
'MESSAGE_CENTER_CTA_PAGE': {
'TYPE': 'GET',
'URI': 'messages_cta_page',
'AUTH': True,
'CONTENT': 'HTML'
},
'AUTH_COMMAND_TOKEN': {
'TYPE': 'POST',
'URI': 'api/1/users/command_token',
'AUTH': True
},
'SEND_DEVICE_KEY': {
'TYPE': 'POST',
'URI': 'api/1/users/keys',
'AUTH': True
},
'DIAGNOSTICS_ENTITLEMENTS': {
'TYPE': 'GET',
'URI': 'api/1/diagnostics',
'AUTH': True
},
'SEND_DIAGNOSTICS': {
'TYPE': 'POST',
'URI': 'api/1/diagnostics',
'AUTH': True
},
'BATTERY_SUMMARY': {
'TYPE': 'GET',
'URI': 'api/1/powerwalls/{battery_id}/status',
'AUTH': True
},
'BATTERY_DATA': {
'TYPE': 'GET',
'URI': 'api/1/powerwalls/{battery_id}',
'AUTH': True
},
'BATTERY_POWER_TIMESERIES_DATA': {
'TYPE': 'GET',
'URI': 'api/1/powerwalls/{battery_id}/powerhistory',
'AUTH': True
},
'BATTERY_ENERGY_TIMESERIES_DATA': {
'TYPE': 'GET',
'URI': 'api/1/powerwalls/{battery_id}/energyhistory',
'AUTH': True
},
'BATTERY_BACKUP_RESERVE': {
'TYPE': 'POST',
'URI': 'api/1/powerwalls/{battery_id}/backup',
'AUTH': True
},
'BATTERY_SITE_NAME': {
'TYPE': 'POST',
'URI': 'api/1/powerwalls/{battery_id}/site_name',
'AUTH': True
},
'BATTERY_OPERATION_MODE': {
'TYPE': 'POST',
'URI': 'api/1/powerwalls/{battery_id}/operation',
'AUTH': True
},
'SITE_SUMMARY': {
'TYPE': 'GET',
'URI': 'api/1/energy_sites/{site_id}/status',
'AUTH': True
},
'SITE_DATA': {
'TYPE': 'GET',
'URI': 'api/1/energy_sites/{site_id}/live_status',
'AUTH': True
},
'SITE_CONFIG': {
'TYPE': 'GET',
'URI': 'api/1/energy_sites/{site_id}/site_info',
'AUTH': True
},
'HISTORY_DATA': {
'TYPE': 'GET',
'URI': 'api/1/energy_sites/{site_id}/history',
'AUTH': True
},
'CALENDAR_HISTORY_DATA': {
'TYPE': 'GET',
'URI': 'api/1/energy_sites/{site_id}/calendar_history',
'AUTH': True
},
'BACKUP_RESERVE': {
'TYPE': 'POST',
'URI': 'api/1/energy_sites/{site_id}/backup',
'AUTH': True
},
'OFF_GRID_VEHICLE_CHARGING_RESERVE': {
'TYPE': 'POST',
'URI': 'api/1/energy_sites/{site_id}/off_grid_vehicle_charging_reserve',
'AUTH': True
},
'SITE_NAME': {
'TYPE': 'POST',
'URI': 'api/1/energy_sites/{site_id}/site_name',
'AUTH': True
},
'OPERATION_MODE': {
'TYPE': 'POST',
'URI': 'api/1/energy_sites/{site_id}/operation',
'AUTH': True
},
'TIME_OF_USE_SETTINGS': {
'TYPE': 'POST',
'URI': 'api/1/energy_sites/{site_id}/time_of_use_settings',
'AUTH': True
},
'STORM_MODE_SETTINGS': {
'TYPE': 'POST',
'URI': 'api/1/energy_sites/{site_id}/storm_mode',
'AUTH': True
},
'SEND_NOTIFICATION_CONFIRMATION': {
'TYPE': 'POST',
'URI': 'api/1/notification_confirmations',
'AUTH': True
},
'SHARE_TO_VEHICLE': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/share',
'AUTH': True
},
'REMOTE_SEAT_HEATER_REQUEST': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/remote_seat_heater_request',
'AUTH': True
},
'REMOTE_STEERING_WHEEL_HEATER_REQUEST': {
'TYPE': 'POST',
'URI': 'api/1/vehicles/{vehicle_id}/command/remote_steering_wheel_heater_request',
'AUTH': True
}
} | none | 1 | 1.520306 | 2 | |
tests/cc/test_array.py | lcp/bcc | 0 | 6622919 | #!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from bcc import BPF
from ctypes import c_int, c_ulonglong
import random
import time
from unittest import main, TestCase
class TestArray(TestCase):
    """Sanity checks for BPF array tables."""

    def test_simple(self):
        # Declare a 128-slot u64 array table and grab a handle to it.
        bpf = BPF(text="""BPF_TABLE("array", int, u64, table1, 128);""")
        table = bpf["table1"]

        # Write the first and last slots, then verify them via iteration.
        table[c_int(0)] = c_ulonglong(100)
        table[c_int(127)] = c_ulonglong(1000)
        for key, value in table.items():
            if key.value == 0:
                self.assertEqual(value.value, 100)
            elif key.value == 127:
                self.assertEqual(value.value, 1000)

        # Array tables always expose their full declared capacity.
        self.assertEqual(len(table), 128)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from bcc import BPF
from ctypes import c_int, c_ulonglong
import random
import time
from unittest import main, TestCase
class TestArray(TestCase):
def test_simple(self):
b = BPF(text="""BPF_TABLE("array", int, u64, table1, 128);""")
t1 = b["table1"]
t1[c_int(0)] = c_ulonglong(100)
t1[c_int(127)] = c_ulonglong(1000)
for i, v in t1.items():
if i.value == 0:
self.assertEqual(v.value, 100)
if i.value == 127:
self.assertEqual(v.value, 1000)
self.assertEqual(len(t1), 128)
if __name__ == "__main__":
main()
| en | 0.562793 | #!/usr/bin/env python # Copyright (c) PLUMgrid, Inc. # Licensed under the Apache License, Version 2.0 (the "License") BPF_TABLE("array", int, u64, table1, 128); | 2.575787 | 3 |
catalog/bindings/gmd/multi_solid.py | NIVANorge/s-enda-playground | 0 | 6622920 | from dataclasses import dataclass
from bindings.gmd.multi_solid_type import MultiSolidType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class MultiSolid(MultiSolidType):
"""A gml:MultiSolid is defined by one or more gml:AbstractSolids.
The members of the geometric aggregate may be specified either using
the "standard" property (gml:solidMember) or the array property
(gml:solidMembers). It is also valid to use both the "standard" and
the array properties in the same collection.
"""
class Meta:
namespace = "http://www.opengis.net/gml"
| from dataclasses import dataclass
from bindings.gmd.multi_solid_type import MultiSolidType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class MultiSolid(MultiSolidType):
"""A gml:MultiSolid is defined by one or more gml:AbstractSolids.
The members of the geometric aggregate may be specified either using
the "standard" property (gml:solidMember) or the array property
(gml:solidMembers). It is also valid to use both the "standard" and
the array properties in the same collection.
"""
class Meta:
namespace = "http://www.opengis.net/gml"
| en | 0.775486 | A gml:MultiSolid is defined by one or more gml:AbstractSolids. The members of the geometric aggregate may be specified either using the "standard" property (gml:solidMember) or the array property (gml:solidMembers). It is also valid to use both the "standard" and the array properties in the same collection. | 2.64051 | 3 |
landmarkerio/template.py | patricksnape/landmarkerio-server | 0 | 6622921 | <reponame>patricksnape/landmarkerio-server<filename>landmarkerio/template.py
import itertools
from collections import namedtuple
import os.path as p
from pathlib import Path
from flask import safe_join
import abc
import yaml
import os
from landmarkerio import TEMPLATE_DINAME, FileExt
Group = namedtuple('Group', ['label', 'n', 'index'])
def parse_connectivity(index_lst, n):
index = []
for i in index_lst:
if ':' in i:
# User is providing a slice
start, end = (int(x) for x in i.split(':'))
index.extend([x, x+1] for x in xrange(start, end))
else:
# Just a standard pair of numbers
index.append([int(j) for j in i.split(' ')])
indexes = set(itertools.chain.from_iterable(index))
if len(index) > 0 and (min(indexes) < 0 or max(indexes) > n):
raise ValueError("invalid connectivity")
return index
def load_yaml_template(filepath, n_dims):
with open(filepath) as f:
data = yaml.load(f.read())
if 'groups' in data:
raw_groups = data['groups']
else:
raise KeyError(
"Missing 'groups' or 'template' key in yaml file %s"
% filepath)
groups = []
for index, group in enumerate(raw_groups):
label = group.get('label', index) # Allow simple ordered groups
n = group['points'] # Should raise KeyError by design if missing
connectivity = group.get('connectivity', [])
if isinstance(connectivity, list):
index = parse_connectivity(connectivity, n)
elif connectivity == 'cycle':
index = parse_connectivity(
['0:%d' % (n - 1), '%d 0' % (n - 1)], n)
else:
index = [] # Couldn't parse connectivity, safe default
groups.append(Group(label, n, index))
return build_json(groups, n_dims)
def parse_group(group):
# split on \n and strip left and right whitespace.
x = [l.strip() for l in group.split('\n')]
label, n_str = x[0].split(' ')
n = int(n_str)
index_str = x[1:]
if len(index_str) == 0:
return Group(label, n, [])
index = parse_connectivity(index_str, n)
return Group(label, n, index)
def group_to_json(group, n_dims):
group_json = {}
lms = [{'point': [None] * n_dims}] * group.n
group_json['landmarks'] = lms
group_json['connectivity'] = group.index
group_json['label'] = group.label
return group_json
def build_json(groups, n_dims):
n_points = sum(g.n for g in groups)
offset = 0
connectivity = []
labels = []
for g in groups:
connectivity += [[j + offset for j in i] for i in g.index]
labels.append({
'label': g.label,
'mask': list(range(offset, offset + g.n))
})
offset += g.n
lm_json = {
'labels': labels,
'landmarks': {
'connectivity': connectivity,
'points': [[None] * n_dims] * n_points
},
'version': 2,
}
return lm_json
def load_legacy_template(path, n_dims):
with open(path) as f:
ta = f.read().strip().split('\n\n')
groups = [parse_group(g) for g in ta]
return build_json(groups, n_dims)
def group_to_dict(g):
data = {'label': g.label, 'points': g.n}
if g.index:
data['connectivity'] = ['{} {}'.format(c[0], c[1]) for c in g.index]
return data
def convert_legacy_template(path):
with open(path) as f:
ta = f.read().strip().split('\n\n')
groups = [parse_group(g) for g in ta]
data = {'groups': [group_to_dict(g) for g in groups]}
new_path = path[:-3] + 'yml'
warning = ''
if p.isfile(new_path):
new_path = path[:-4] + '-converted.yml'
warning = '(appended -converted to avoid collision)'
with open(new_path, 'w') as nf:
yaml.dump(data, nf, indent=4, default_flow_style=False)
os.remove(path)
print " - {} > {} {}".format(path, new_path, warning)
def load_template(path, n_dims):
return load_yaml_template(path, n_dims)
class TemplateAdapter(object):
r"""
Abstract definition of an adapter that can be passed to app_for_adapter in
order to generate a legal Flask implementation of landmarker.io's REST API
for Template retrieval.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def template_ids(self):
pass
@abc.abstractmethod
def load_template(self, lm_id):
pass
class FileTemplateAdapter(TemplateAdapter):
def __init__(self, n_dims, template_dir=None):
self.n_dims = n_dims
if template_dir is None:
# try the user folder
user_templates = p.expanduser(p.join('~', TEMPLATE_DINAME))
if p.isdir(user_templates):
template_dir = user_templates
else:
raise ValueError("No template dir provided and "
"{} doesn't exist".format(user_templates))
self.template_dir = Path(p.abspath(p.expanduser(template_dir)))
print ('templates: {}'.format(self.template_dir))
def handle_old_templates(self, upgrade_templates=False):
old_ids = [t.stem for t
in self.template_dir.glob('*' + FileExt.old_template)]
if len(old_ids) > 0 and upgrade_templates:
print "Converting {} old style templates".format(len(old_ids))
for lm_id in old_ids:
fp = safe_join(str(self.template_dir),
lm_id + FileExt.old_template)
convert_legacy_template(fp)
elif len(old_ids) > 0:
print((
"\nWARNING: ignored {} old style '.txt' templates in '{}' " +
"({}).\n" +
"See https://github.com/menpo/landmarkerio-server#templates " +
"more information. You can restart with the " +
"'--upgrade-templates' flag to convert them automatically " +
"(one time operation)\n"
).format(
len(old_ids),
self.template_dir,
", ".join(['{}.txt'.format(t) for t in old_ids]))
)
def template_ids(self):
return [t.stem for t in self.template_paths()]
def template_paths(self):
return self.template_dir.glob('*' + FileExt.template)
def load_template(self, lm_id):
fp = safe_join(str(self.template_dir), lm_id + FileExt.template)
return load_template(fp, self.n_dims)
class CachedFileTemplateAdapter(FileTemplateAdapter):
def __init__(self, n_dims, template_dir=None, upgrade_templates=False):
super(CachedFileTemplateAdapter, self).__init__(
n_dims,
template_dir=template_dir
)
# Handle those before generating cache as we want to load them if
# upgrade_templates is True
FileTemplateAdapter.handle_old_templates(
self, upgrade_templates=upgrade_templates)
self._cache = {lm_id: FileTemplateAdapter.load_template(self, lm_id)
for lm_id in FileTemplateAdapter.template_ids(self)}
print('cached {} templates ({})'.format(
len(self._cache), ', '.join(self._cache.keys())))
def load_template(self, lm_id):
return self._cache[lm_id]
| import itertools
from collections import namedtuple
import os.path as p
from pathlib import Path
from flask import safe_join
import abc
import yaml
import os
from landmarkerio import TEMPLATE_DINAME, FileExt
Group = namedtuple('Group', ['label', 'n', 'index'])
def parse_connectivity(index_lst, n):
index = []
for i in index_lst:
if ':' in i:
# User is providing a slice
start, end = (int(x) for x in i.split(':'))
index.extend([x, x+1] for x in xrange(start, end))
else:
# Just a standard pair of numbers
index.append([int(j) for j in i.split(' ')])
indexes = set(itertools.chain.from_iterable(index))
if len(index) > 0 and (min(indexes) < 0 or max(indexes) > n):
raise ValueError("invalid connectivity")
return index
def load_yaml_template(filepath, n_dims):
with open(filepath) as f:
data = yaml.load(f.read())
if 'groups' in data:
raw_groups = data['groups']
else:
raise KeyError(
"Missing 'groups' or 'template' key in yaml file %s"
% filepath)
groups = []
for index, group in enumerate(raw_groups):
label = group.get('label', index) # Allow simple ordered groups
n = group['points'] # Should raise KeyError by design if missing
connectivity = group.get('connectivity', [])
if isinstance(connectivity, list):
index = parse_connectivity(connectivity, n)
elif connectivity == 'cycle':
index = parse_connectivity(
['0:%d' % (n - 1), '%d 0' % (n - 1)], n)
else:
index = [] # Couldn't parse connectivity, safe default
groups.append(Group(label, n, index))
return build_json(groups, n_dims)
def parse_group(group):
# split on \n and strip left and right whitespace.
x = [l.strip() for l in group.split('\n')]
label, n_str = x[0].split(' ')
n = int(n_str)
index_str = x[1:]
if len(index_str) == 0:
return Group(label, n, [])
index = parse_connectivity(index_str, n)
return Group(label, n, index)
def group_to_json(group, n_dims):
group_json = {}
lms = [{'point': [None] * n_dims}] * group.n
group_json['landmarks'] = lms
group_json['connectivity'] = group.index
group_json['label'] = group.label
return group_json
def build_json(groups, n_dims):
n_points = sum(g.n for g in groups)
offset = 0
connectivity = []
labels = []
for g in groups:
connectivity += [[j + offset for j in i] for i in g.index]
labels.append({
'label': g.label,
'mask': list(range(offset, offset + g.n))
})
offset += g.n
lm_json = {
'labels': labels,
'landmarks': {
'connectivity': connectivity,
'points': [[None] * n_dims] * n_points
},
'version': 2,
}
return lm_json
def load_legacy_template(path, n_dims):
with open(path) as f:
ta = f.read().strip().split('\n\n')
groups = [parse_group(g) for g in ta]
return build_json(groups, n_dims)
def group_to_dict(g):
data = {'label': g.label, 'points': g.n}
if g.index:
data['connectivity'] = ['{} {}'.format(c[0], c[1]) for c in g.index]
return data
def convert_legacy_template(path):
with open(path) as f:
ta = f.read().strip().split('\n\n')
groups = [parse_group(g) for g in ta]
data = {'groups': [group_to_dict(g) for g in groups]}
new_path = path[:-3] + 'yml'
warning = ''
if p.isfile(new_path):
new_path = path[:-4] + '-converted.yml'
warning = '(appended -converted to avoid collision)'
with open(new_path, 'w') as nf:
yaml.dump(data, nf, indent=4, default_flow_style=False)
os.remove(path)
print " - {} > {} {}".format(path, new_path, warning)
def load_template(path, n_dims):
return load_yaml_template(path, n_dims)
class TemplateAdapter(object):
r"""
Abstract definition of an adapter that can be passed to app_for_adapter in
order to generate a legal Flask implementation of landmarker.io's REST API
for Template retrieval.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def template_ids(self):
pass
@abc.abstractmethod
def load_template(self, lm_id):
pass
class FileTemplateAdapter(TemplateAdapter):
def __init__(self, n_dims, template_dir=None):
self.n_dims = n_dims
if template_dir is None:
# try the user folder
user_templates = p.expanduser(p.join('~', TEMPLATE_DINAME))
if p.isdir(user_templates):
template_dir = user_templates
else:
raise ValueError("No template dir provided and "
"{} doesn't exist".format(user_templates))
self.template_dir = Path(p.abspath(p.expanduser(template_dir)))
print ('templates: {}'.format(self.template_dir))
def handle_old_templates(self, upgrade_templates=False):
old_ids = [t.stem for t
in self.template_dir.glob('*' + FileExt.old_template)]
if len(old_ids) > 0 and upgrade_templates:
print "Converting {} old style templates".format(len(old_ids))
for lm_id in old_ids:
fp = safe_join(str(self.template_dir),
lm_id + FileExt.old_template)
convert_legacy_template(fp)
elif len(old_ids) > 0:
print((
"\nWARNING: ignored {} old style '.txt' templates in '{}' " +
"({}).\n" +
"See https://github.com/menpo/landmarkerio-server#templates " +
"more information. You can restart with the " +
"'--upgrade-templates' flag to convert them automatically " +
"(one time operation)\n"
).format(
len(old_ids),
self.template_dir,
", ".join(['{}.txt'.format(t) for t in old_ids]))
)
def template_ids(self):
return [t.stem for t in self.template_paths()]
def template_paths(self):
return self.template_dir.glob('*' + FileExt.template)
def load_template(self, lm_id):
fp = safe_join(str(self.template_dir), lm_id + FileExt.template)
return load_template(fp, self.n_dims)
class CachedFileTemplateAdapter(FileTemplateAdapter):
def __init__(self, n_dims, template_dir=None, upgrade_templates=False):
super(CachedFileTemplateAdapter, self).__init__(
n_dims,
template_dir=template_dir
)
# Handle those before generating cache as we want to load them if
# upgrade_templates is True
FileTemplateAdapter.handle_old_templates(
self, upgrade_templates=upgrade_templates)
self._cache = {lm_id: FileTemplateAdapter.load_template(self, lm_id)
for lm_id in FileTemplateAdapter.template_ids(self)}
print('cached {} templates ({})'.format(
len(self._cache), ', '.join(self._cache.keys())))
def load_template(self, lm_id):
return self._cache[lm_id] | en | 0.825273 | # User is providing a slice # Just a standard pair of numbers # Allow simple ordered groups # Should raise KeyError by design if missing # Couldn't parse connectivity, safe default # split on \n and strip left and right whitespace. Abstract definition of an adapter that can be passed to app_for_adapter in order to generate a legal Flask implementation of landmarker.io's REST API for Template retrieval. # try the user folder #templates " + # Handle those before generating cache as we want to load them if # upgrade_templates is True | 2.617095 | 3 |
tests/_test_requests.py | Zadigo/Zah | 0 | 6622922 | from zah.router import Router
from zah.server import BaseServer
from zah.urls import render, render_page
from werkzeug.test import create_environ
from zah.decorators._http import only_GET
from werkzeug.wrappers import Request
from zah.decorators._cache import cache_control, never_cache
environ = create_environ('/home', 'http://127.0.0.1:5000')
request = Request(environ)
server = BaseServer()
server.use_component(Router)
# 1. Using render page
# server.add_route('/home', render_page('home.html'))
# 2. Using decorator
# @server.as_route('/contact')
# def contact(request, **kwargs):
# return render(request, 'home.html')
# 3. Using view function
# @only_SAFE
# def home(request, *args, **kwargs):
# return render(request, 'home.html')
# server.add_route('/home', home)
# print(request.headers)
# response = server._dispatch_request(request)
# try:
# print(response)
# except:
# print(response, '/ has no headers')
# @cache_control(max_age=200, private=True)
# def home(request, **kwargs):
# return render(request, 'home.html')
def home(request, **kwargs):
return render(request, 'home.html')
response = home(request)
print(response.headers)
| from zah.router import Router
from zah.server import BaseServer
from zah.urls import render, render_page
from werkzeug.test import create_environ
from zah.decorators._http import only_GET
from werkzeug.wrappers import Request
from zah.decorators._cache import cache_control, never_cache
environ = create_environ('/home', 'http://127.0.0.1:5000')
request = Request(environ)
server = BaseServer()
server.use_component(Router)
# 1. Using render page
# server.add_route('/home', render_page('home.html'))
# 2. Using decorator
# @server.as_route('/contact')
# def contact(request, **kwargs):
# return render(request, 'home.html')
# 3. Using view function
# @only_SAFE
# def home(request, *args, **kwargs):
# return render(request, 'home.html')
# server.add_route('/home', home)
# print(request.headers)
# response = server._dispatch_request(request)
# try:
# print(response)
# except:
# print(response, '/ has no headers')
# @cache_control(max_age=200, private=True)
# def home(request, **kwargs):
# return render(request, 'home.html')
def home(request, **kwargs):
return render(request, 'home.html')
response = home(request)
print(response.headers)
| en | 0.408306 | # 1. Using render page # server.add_route('/home', render_page('home.html')) # 2. Using decorator # @server.as_route('/contact') # def contact(request, **kwargs): # return render(request, 'home.html') # 3. Using view function # @only_SAFE # def home(request, *args, **kwargs): # return render(request, 'home.html') # server.add_route('/home', home) # print(request.headers) # response = server._dispatch_request(request) # try: # print(response) # except: # print(response, '/ has no headers') # @cache_control(max_age=200, private=True) # def home(request, **kwargs): # return render(request, 'home.html') | 2.113616 | 2 |
exercicios/038.py | daldantas/Curso-Python | 0 | 6622923 | <reponame>daldantas/Curso-Python
n1 = int(input("Digite um nº: "))
n2 = int(input("Digite outro nº: "))
if n1 > n2:
print(n1, "é maior que", n2)
elif n2 > n1:
print(n2, "é maior que", n1)
else:
print(n1, "e", n2, "São iguais") | n1 = int(input("Digite um nº: "))
n2 = int(input("Digite outro nº: "))
if n1 > n2:
print(n1, "é maior que", n2)
elif n2 > n1:
print(n2, "é maior que", n1)
else:
print(n1, "e", n2, "São iguais") | none | 1 | 3.90765 | 4 | |
Ex_Files_Python_Standard_Library_EssT/Exercise Files/Chapter 1/01_05/mathMyExample.py | steveayers124/PythonStandardLibraryEssentialTraining | 1 | 6622924 | <reponame>steveayers124/PythonStandardLibraryEssentialTraining
import math
# https://docs.python.org/3/library/math.html
print("ceiling of")
f1 = 4.0
print(f" math.ceil({f1}) = |{math.ceil(f1)}|")
f1 = 4.1
print(f" math.ceil({f1}) = |{math.ceil(f1)}|")
f1 = 4.5
print(f" math.ceil({f1}) = |{math.ceil(f1)}|")
f1 = 4.9
print(f" math.ceil({f1}) = |{math.ceil(f1)}|")
i1 = 3
print(f" math.ceil({i1}) = |{math.ceil(i1)}|")
print("copysign of")
a=1.0
b=-1.0
print(f" copysign({a}, {b}) = |{math.copysign(a, b)}|")
a=-1.0
b=1.0
print(f" copysign({a}, {b}) = |{math.copysign(a, b)}|")
a=-1.0
b=-1.0
print(f" copysign({a}, {b}) = |{math.copysign(a, b)}|")
a=1.0
b=0.0
print(f" copysign({a}, {b}) = |{math.copysign(a, b)}|")
a=1.0
b=-0.0
print(f" copysign({a}, {b}) = |{math.copysign(a, b)}|")
# According to the documentation, platforms supporting signed zeros yield |-1.0|.
print("math.factorial(x)")
i=1
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=2
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=3
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=4
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=5
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=6
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=4
j=3
print(f" (math.factorial({i}) / math.factorial({j})) = |{(math.factorial(i) / math.factorial(j))}|")
i=5
j=2
print(f" (math.factorial({i}) / math.factorial({j})) = |{(math.factorial(i) / math.factorial(j))}|")
i=6
j=5
k=3
print(f" ((math.factorial({i}) - math.factorial({j}) ) / math.factorial({k})) = |{((math.factorial(i) - math.factorial(j) ) / math.factorial(k))}|")
print("\n")
print("floor of")
f1 = 4.0
print(f" math.floor({f1}) = |{math.floor(f1)}|")
f1 = 4.1
print(f" math.floor({f1}) = |{math.floor(f1)}|")
f1 = 4.5
print(f" math.floor({f1}) = |{math.floor(f1)}|")
f1 = 4.9
print(f" math.floor({f1}) = |{math.floor(f1)}|")
i1 = 3
print(f" math.floor({i1}) = |{math.floor(i1)}|")
print("\n")
print("math.fmod(x, y)")
i=10.0
j=3.0
print(f" math.fmod({i}, {j}) = |{math.fmod(i, j)}|")
i=10
j=3
print(f" math.fmod({i}, {j}) = |{math.fmod(i, j)}|")
i=10.0
j=5.0
print(f" math.fmod({i}, {j}) = |{math.fmod(i, j)}|")
i=13.0
j=7.0
print(f" math.fmod({i}, {j}) = |{math.fmod(i, j)}|")
print("\n")
print("x % y")
i=10
j=3
print(f" {i} % {j} = |{i % j}|")
i=10.0
j=3.0
print(f" {i} % {j} = |{i % j}|")
i=10
j=5
print(f" {i} % {j} = |{i % j}|")
i=13
j=7
print(f" {i} % {j} = |{i % j}|")
# even though % may have identical result in many cases,
# math.fmod(a, b) is preferred for floats, and
# a % b is preferred for integers.
# [see https://docs.python.org/3/library/math.html :: math.fmod(x, y)]
print("\n")
print("math.frexp(x)")
# Okay, this is lower level stuff... given a float, express its internal representation:
# First its multiplier, the mantissa, and second its exponent which the base, 2, is raised to.
# This is used to “pick apart” the internal representation of a float in a portable way.
f = 10.24
print(f" math.frexp({f}) = |{math.frexp(f)}|") # math.frexp(10.24) = |(0.64, 4)|
f = 1024.0
print(f" math.frexp({f}) = |{math.frexp(f)}|") # math.frexp(1024.0) = |(0.5, 11)|
print(f" 0.5 * 2**11 = |0.5 * {2**11}|") # 0.5 * 2**11 = |0.5 * 2048|
print(f" 0.5 * 2**11 = |{0.5 * 2**11}|") # 0.5 * 2**11 = |1024.0|
f = 102.4
print(f" math.frexp({f}) = |{math.frexp(f)}|") # math.frexp(102.4) = |(0.8, 7)|
print(f" 0.8 * 2**7 = |0.8 * {2**7}|") # 0.8 * 2**7 = |0.8 * 128|
print(f" 0.8 * 2**7 = |{0.8 * 2**7}|") # 0.8 * 2**7 = |102.4|
f = 3.1415
print(f" math.frexp({f}) = |{math.frexp(f)}|") # math.frexp(3.1415) = |(0.785375, 2)|
print(f" 0.785375 * 2**2 = |0.785375 * {2**2}|") # 0.785375 * 2**2 = |0.785375 * 4|
print(f" 0.785375 * 2**2 = |{0.785375 * 2**2}|") # 0.785375 * 2**2 = |3.1415|
# print("\n")
| import math
# https://docs.python.org/3/library/math.html
print("ceiling of")
f1 = 4.0
print(f" math.ceil({f1}) = |{math.ceil(f1)}|")
f1 = 4.1
print(f" math.ceil({f1}) = |{math.ceil(f1)}|")
f1 = 4.5
print(f" math.ceil({f1}) = |{math.ceil(f1)}|")
f1 = 4.9
print(f" math.ceil({f1}) = |{math.ceil(f1)}|")
i1 = 3
print(f" math.ceil({i1}) = |{math.ceil(i1)}|")
print("copysign of")
a=1.0
b=-1.0
print(f" copysign({a}, {b}) = |{math.copysign(a, b)}|")
a=-1.0
b=1.0
print(f" copysign({a}, {b}) = |{math.copysign(a, b)}|")
a=-1.0
b=-1.0
print(f" copysign({a}, {b}) = |{math.copysign(a, b)}|")
a=1.0
b=0.0
print(f" copysign({a}, {b}) = |{math.copysign(a, b)}|")
a=1.0
b=-0.0
print(f" copysign({a}, {b}) = |{math.copysign(a, b)}|")
# According to the documentation, platforms supporting signed zeros yield |-1.0|.
print("math.factorial(x)")
i=1
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=2
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=3
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=4
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=5
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=6
print(f" math.factorial({i}) = |{math.factorial(i)}|")
i=4
j=3
print(f" (math.factorial({i}) / math.factorial({j})) = |{(math.factorial(i) / math.factorial(j))}|")
i=5
j=2
print(f" (math.factorial({i}) / math.factorial({j})) = |{(math.factorial(i) / math.factorial(j))}|")
i=6
j=5
k=3
print(f" ((math.factorial({i}) - math.factorial({j}) ) / math.factorial({k})) = |{((math.factorial(i) - math.factorial(j) ) / math.factorial(k))}|")
print("\n")
print("floor of")
f1 = 4.0
print(f" math.floor({f1}) = |{math.floor(f1)}|")
f1 = 4.1
print(f" math.floor({f1}) = |{math.floor(f1)}|")
f1 = 4.5
print(f" math.floor({f1}) = |{math.floor(f1)}|")
f1 = 4.9
print(f" math.floor({f1}) = |{math.floor(f1)}|")
i1 = 3
print(f" math.floor({i1}) = |{math.floor(i1)}|")
print("\n")
print("math.fmod(x, y)")
i=10.0
j=3.0
print(f" math.fmod({i}, {j}) = |{math.fmod(i, j)}|")
i=10
j=3
print(f" math.fmod({i}, {j}) = |{math.fmod(i, j)}|")
i=10.0
j=5.0
print(f" math.fmod({i}, {j}) = |{math.fmod(i, j)}|")
i=13.0
j=7.0
print(f" math.fmod({i}, {j}) = |{math.fmod(i, j)}|")
print("\n")
print("x % y")
i=10
j=3
print(f" {i} % {j} = |{i % j}|")
i=10.0
j=3.0
print(f" {i} % {j} = |{i % j}|")
i=10
j=5
print(f" {i} % {j} = |{i % j}|")
i=13
j=7
print(f" {i} % {j} = |{i % j}|")
# even though % may have identical result in many cases,
# math.fmod(a, b) is preferred for floats, and
# a % b is preferred for integers.
# [see https://docs.python.org/3/library/math.html :: math.fmod(x, y)]
print("\n")
print("math.frexp(x)")
# Okay, this is lower level stuff... given a float, express its internal representation:
# First its multiplier, the mantissa, and second its exponent which the base, 2, is raised to.
# This is used to “pick apart” the internal representation of a float in a portable way.
f = 10.24
print(f" math.frexp({f}) = |{math.frexp(f)}|") # math.frexp(10.24) = |(0.64, 4)|
f = 1024.0
print(f" math.frexp({f}) = |{math.frexp(f)}|") # math.frexp(1024.0) = |(0.5, 11)|
print(f" 0.5 * 2**11 = |0.5 * {2**11}|") # 0.5 * 2**11 = |0.5 * 2048|
print(f" 0.5 * 2**11 = |{0.5 * 2**11}|") # 0.5 * 2**11 = |1024.0|
f = 102.4
print(f" math.frexp({f}) = |{math.frexp(f)}|") # math.frexp(102.4) = |(0.8, 7)|
print(f" 0.8 * 2**7 = |0.8 * {2**7}|") # 0.8 * 2**7 = |0.8 * 128|
print(f" 0.8 * 2**7 = |{0.8 * 2**7}|") # 0.8 * 2**7 = |102.4|
f = 3.1415
print(f" math.frexp({f}) = |{math.frexp(f)}|") # math.frexp(3.1415) = |(0.785375, 2)|
print(f" 0.785375 * 2**2 = |0.785375 * {2**2}|") # 0.785375 * 2**2 = |0.785375 * 4|
print(f" 0.785375 * 2**2 = |{0.785375 * 2**2}|") # 0.785375 * 2**2 = |3.1415|
# print("\n") | en | 0.734199 | # https://docs.python.org/3/library/math.html # According to the documentation, platforms supporting signed zeros yield |-1.0|. # even though % may have identical result in many cases, # math.fmod(a, b) is preferred for floats, and # a % b is preferred for integers. # [see https://docs.python.org/3/library/math.html :: math.fmod(x, y)] # Okay, this is lower level stuff... given a float, express its internal representation: # First its multiplier, the mantissa, and second its exponent which the base, 2, is raised to. # This is used to “pick apart” the internal representation of a float in a portable way. # math.frexp(10.24) = |(0.64, 4)| # math.frexp(1024.0) = |(0.5, 11)| # 0.5 * 2**11 = |0.5 * 2048| # 0.5 * 2**11 = |1024.0| # math.frexp(102.4) = |(0.8, 7)| # 0.8 * 2**7 = |0.8 * 128| # 0.8 * 2**7 = |102.4| # math.frexp(3.1415) = |(0.785375, 2)| # 0.785375 * 2**2 = |0.785375 * 4| # 0.785375 * 2**2 = |3.1415| # print("\n") | 3.519577 | 4 |
gait_analysis/gait_analysis/__init__.py | gait-analyzer/.github | 1 | 6622925 | from .subject import Subject
from .algorithms import *
| from .subject import Subject
from .algorithms import *
| none | 1 | 0.973302 | 1 | |
dock_r/main.py | aaronsteers/quickdock | 0 | 6622926 | """dock_r module, helps with dockerization."""
import datetime
import hashlib
import json
import time
import os
import docker
from logless import get_logger, logged, logged_block
import runnow
import uio
MAX_ECS_WAIT = 12 * 60 * 60 # max 12 hours wait
docker_client = docker.from_env()
logging = get_logger("dock-r")
def build(dockerfile_path, tag_as, addl_args=None):
    """Build a docker image from *dockerfile_path* via the docker CLI.

    'tag_as' can be a string or list of strings; each becomes a `-t` flag.
    The build context is the directory containing the Dockerfile.
    """
    context_dir = os.path.dirname(dockerfile_path)
    extra = addl_args or ""
    tag_flags = " ".join(f"-t {t}" for t in _to_list(tag_as))
    if tag_flags:
        cmd = f"docker build {extra} {tag_flags} {context_dir} -f {dockerfile_path}"
    else:
        cmd = f"docker build {extra} {context_dir} -f {dockerfile_path}"
    runnow.run(cmd)
def _to_list(str_or_list):
if str_or_list is None:
return []
if isinstance(str_or_list, str):
return str_or_list.split(",")
return str_or_list
def tag(image_name: str, tag_as):
    """Tag an image. 'tag_as' can be a string or list of strings."""
    # Loop variable renamed so it no longer shadows this function's own name.
    for new_tag in _to_list(tag_as):
        runnow.run(f"docker tag {image_name} {new_tag}")
@logged("pushing image '{image_name}'")
def push(image_name):
# docker_client.images.push(image_name)
cmd = f"docker push {image_name}"
runnow.run(cmd)
def smart_split(dockerfile_path: str, tag_as, addl_args=None):
    """Write the 'core' / 'quick' halves of a Dockerfile to disk.

    Produces `<dockerfile>.core` (statements up to the first COPY/ADD) and,
    only when host files are referenced, `<dockerfile>.quick` (the rest).
    Returns (core_image, core_path, derived_image, derived_path_or_None).
    """
    tags = _to_list(tag_as)
    # Derive an interim repo name from the first tag, if any was given.
    interim_image_name = tags[0].split(":")[0] if tags else "untitled_image"
    (image_core, dockerfile_core), (image_derived, dockerfile_derived) = _smart_split(
        dockerfile_path, interim_image_name, addl_args=addl_args
    )
    dockerfile_path_core = os.path.realpath(f"{dockerfile_path}.core")
    dockerfile_path_derived = os.path.realpath(f"{dockerfile_path}.quick")
    uio.create_text_file(filepath=dockerfile_path_core, contents=dockerfile_core)
    if dockerfile_derived:
        uio.create_text_file(filepath=dockerfile_path_derived, contents=dockerfile_derived)
    else:
        # Nothing context-dependent: drop any stale '.quick' file.
        uio.delete_file(dockerfile_path_derived, ignore_missing=True)
        dockerfile_path_derived = None
    return image_core, dockerfile_path_core, image_derived, dockerfile_path_derived
@logged("smartly building '{dockerfile_path}' as {tag_as or '(none)'}")
def smart_build(
dockerfile_path: str,
tag_as=None,
push_core=True,
push_final=False,
with_login=False,
addl_args=None,
ignore_caches=False,
):
"""
Builds the dockerfile if needed but pulls it from the remote if possible.
"""
if bool(with_login):
login()
tag_as = _to_list(tag_as)
result = smart_split(dockerfile_path, tag_as, addl_args=addl_args)
image_core, dockerfile_path_core, image_derived, dockerfile_path_derived = result
if not ignore_caches:
if dockerfile_path_derived is None and exists_remotely(image_core):
logging.info(
"Image with matching hash already exists "
"and no host files are referenced in Dockerfile."
f"Attempting to retag existing image '{image_core}' as '{tag_as}'..."
)
remote_retag(
image_name=image_core.split(":")[0],
existing_tag=image_core.split(":")[1],
tag_as=tag_as,
)
return
pull(image_core, skip_if_exists=True, silent=True)
if ignore_caches or not exists_locally(image_core):
with logged_block(f"building interim (core) image as '{image_core}'"):
build(dockerfile_path_core, image_core, addl_args=addl_args)
if push_core:
if ignore_caches or not exists_remotely(image_core):
with logged_block(f"pushing interim (core) image '{image_derived}'"):
push(image_core)
else:
logging.info(f"Already exists. Skipping push of image '{image_derived}'")
with logged_block(f"building '{dockerfile_path_derived}' as '{image_derived}'"):
if dockerfile_path_derived:
build(dockerfile_path_derived, image_derived, addl_args=addl_args)
else:
tag(image_core, image_derived)
if tag_as:
tag(image_derived, tag_as)
if push_final:
for image_name in tag_as:
push(image_name)
@logged("pulling image {image_name}")
def pull(image_name, skip_if_exists=False, silent=False):
if skip_if_exists and exists_locally(image_name):
logging.info(f"Skipping image pull. Already exists locally: {image_name}")
return image_name
try:
runnow.run(f"docker pull {image_name}", raise_error=True)
except Exception as ex:
logging.info(f"Failed to pull image: {image_name}\n{ex}")
if silent:
return False
raise ex
if not exists_locally(image_name):
logging.warning("Pull was successful in API but could not be confirmed")
return image_name
def exists_locally(image_name):
    """Return True if the local docker daemon already has *image_name*."""
    try:
        docker_client.images.get(image_name)
    except docker.errors.ImageNotFound:
        return False
    return True
def exists_remotely(image_name):
    """Check the remote registry for *image_name*.

    Returns True/False when the check succeeds, or None when the registry
    lookup itself failed (e.g. auth or network error).
    """
    try:
        return bool(docker_client.images.get_registry_data(image_name))
    except docker.errors.ImageNotFound:
        return False
    except Exception as ex:
        # Unexpected registry failures are logged but treated as "unknown".
        logging.exception(
            f"Failure when checking if image exists remotely '{image_name}'. {ex}"
        )
        return None
def _smart_split(dockerfile_path, image_name, addl_args=None):
    """
    Returns list of tuples: [
        (partial_image_name, partial_dockerfile_text)
        (derived_image_name, derived_dockerfile_text)
    ]
    Create two dockerfiles from a single file.
    1. The first 'core' image will contain all statements until the first COPY or ADD.
    2. The second 'derived' image will pull from 'core' and complete the build using
       local files or artifacts required by ADD or COPY commands.
    """
    orig_text = uio.get_text_file_contents(dockerfile_path)
    addl_args = addl_args or ""
    core_dockerfile = ""
    derived_dockerfile = ""
    requires_context = False  # Whether we need file context to determine output
    for line in orig_text.split("\n"):
        if any([line.startswith("COPY"), line.startswith("ADD")]):
            requires_context = True
        # Everything before the first COPY/ADD is cacheable without the build
        # context; everything from that point on goes to the derived file.
        if not requires_context:
            core_dockerfile += line + "\n"
        else:
            derived_dockerfile += line + "\n"
    # Content-addressable tags: hashing the generated text (plus build args)
    # lets smart_build skip rebuilds when nothing changed.
    core_md5 = hashlib.md5((addl_args + core_dockerfile).encode("utf-8")).hexdigest()
    full_md5 = hashlib.md5((addl_args + orig_text).encode("utf-8")).hexdigest()
    core_image_name = f"{image_name}:core-md5-{core_md5}"
    derived_image_name = f"{image_name}:md5-{full_md5}"
    # BUG FIX: header typo "NO NOT EDIT" -> "DO NOT EDIT" in the generated files.
    core_dockerfile = (
        f"# DO NOT EDIT - file is generated automatically from `Dockerfile`\n\n"
        f"# Dockerfile.core - will be created and pushed as:\n"
        f"# \t{core_image_name}\n\n{core_dockerfile}"
    )
    if derived_dockerfile:
        derived_dockerfile = (
            f"# DO NOT EDIT - file is generated automatically from `Dockerfile`\n\n"
            f"FROM {core_image_name}\n\n{derived_dockerfile}"
        )
    else:
        derived_dockerfile = None  # No additional work to do.
    return [(core_image_name, core_dockerfile), (derived_image_name, derived_dockerfile)]
def ecs_login(region):
    """Log the local docker daemon into ECR for *region*.

    Raises RuntimeError when either the credential fetch or the login fails.
    """
    logging.info("Logging into ECS...")
    try:
        _, login_cmd = runnow.run(
            f"aws ecr get-login --region {region} --no-include-email", echo=False
        )
        # The first call only prints the real `docker login` command; run it.
        runnow.run(login_cmd, hide=True)
    except Exception as ex:
        raise RuntimeError(f"ECS login failed. {ex}")
def login(raise_error=False):
    """Log into the docker registry using DOCKER_USERNAME/DOCKER_PASSWORD.

    Registry defaults to index.docker.io (override with DOCKER_REGISTRY).
    Returns True on an attempted login, False when credentials are missing
    (unless *raise_error*, in which case RuntimeError is raised instead).
    """
    usr = os.environ.get("DOCKER_USERNAME", "")
    # BUG FIX: this line was corrupted ('<PASSWORD>(...)') and did not parse;
    # restore the environment lookup.
    pwd = os.environ.get("DOCKER_PASSWORD", "")
    registry = os.environ.get("DOCKER_REGISTRY", "") or "index.docker.io"
    if not (usr and pwd):
        # BUG FIX: added the missing space between the two joined sentences.
        error_msg = (
            "Could not login to docker registry. "
            "Missing env variable DOCKER_USERNAME or DOCKER_PASSWORD"
        )
        if raise_error:
            raise RuntimeError(error_msg)
        logging.warning(error_msg)
        return False
    logging.info(f"Logging into docker registry '{registry}' as user '{usr}'...")
    try:
        runnow.run(
            f"docker login {registry} --username {usr} --password {pwd}", hide=True
        )
        if registry == "index.docker.io":
            # Also log into the implicit default registry name.
            runnow.run(f"docker login --username {usr} --password {pwd}", hide=True)
    except Exception as ex:
        if raise_error:
            raise RuntimeError(f"Docker login failed. {ex}")
        logging.warning(f"Docker login failed. {ex}")
    return True
@logged("applying tag '{tag_as}' to remote ECS image '{image_name}:{existing_tag}'")
def ecs_retag(image_name, existing_tag, tag_as):
tag_as = _to_list(tag_as)
if "amazonaws.com/" in image_name:
image_name = image_name.split("amazonaws.com/")[1]
get_manifest_cmd = (
f"aws ecr batch-get-image"
f" --repository-name {image_name} --image-ids imageTag={existing_tag}"
f" --query 'images[].imageManifest' --output text"
)
_, manifest = runnow.run(get_manifest_cmd, echo=False)
for new_tag in tag_as:
if "amazonaws.com/" in new_tag:
new_tag = new_tag.split("amazonaws.com/")[1]
if ":" in new_tag:
if image_name != new_tag.split(":")[0]:
raise RuntimeError(
f"Image names do not match: '{image_name}', '{new_tag.split(':')[0]}'"
)
new_tag = new_tag.split(":")[1]
put_image_cmd = [
"aws",
"ecr",
"put-image",
"--repository-name",
image_name,
"--image-tag",
new_tag,
"--image-manifest",
manifest,
]
return_code, output_text = runnow.run(
put_image_cmd, shell=False, echo=False, hide=True, raise_error=False
)
if return_code != 0 and "ImageAlreadyExistsException" in output_text:
logging.info("Image already exists. No tagging changes were made.")
elif return_code != 0:
raise RuntimeError(f"Could not retag the specified image.\n{output_text}")
@logged("applying tag '{tag_as}' to remote image '{image_name}:{existing_tag}'")
def remote_retag(image_name, existing_tag, tag_as, with_login=False):
tag_as = _to_list(tag_as)
if bool(with_login):
login()
if "amazonaws.com/" in image_name:
ecs_retag(image_name, existing_tag, tag_as)
return
existing_fullname = f"{image_name}:{existing_tag}"
pull(existing_fullname)
for new_tag in tag_as:
if ":" in new_tag:
new_fullname = new_tag
else:
new_fullname = f"{image_name}:{new_tag}"
tag(existing_fullname, new_fullname)
push(new_fullname)
def ecs_submit(
    task_name: str,
    cluster: str,
    region: str,
    container_name: str = None,
    cmd_override: str = None,
    env_overrides: dict = None,
    use_fargate: bool = False,
    wait_for_start=True,
    wait_for_stop=False,
    max_wait=None,
    yyyymmdd=None,
):
    """Submit an ECS task via `aws ecs run-task` and return its task ARN.

    :param task_name: ECS task definition name
    :param cluster: target ECS cluster
    :param region: AWS region
    :param container_name: required when cmd_override/env_overrides are given
    :param cmd_override: command override for the container
    :param env_overrides: dict of env vars, or a "K1=V1,K2=V2" string
    :param use_fargate: launch on FARGATE instead of EC2
    :param wait_for_start: block until the task is running
    :param wait_for_stop: block until the task has stopped
    :param max_wait: unused here; waits use their own timeout defaults
    :param yyyymmdd: optional date stamp; "today" resolves to the current date
    :raises RuntimeError: when the task cannot be started
    """
    cmd = (
        f"aws ecs run-task"
        f" --task-definition {task_name}"
        f" --cluster {cluster}"
        f" --region {region}"
    )
    if use_fargate:
        cmd += " --launch-type FARGATE"
    else:
        cmd += " --launch-type EC2"
    # Accept env overrides in string form ("K1=V1,K2=V2") as well as a dict.
    if env_overrides and isinstance(env_overrides, str):
        env_overrides = {
            x.split("=")[0]: x.split("=")[1] for x in env_overrides.split(",")
        }
    if yyyymmdd and yyyymmdd != "0":
        if str(yyyymmdd).lower() == "today":
            # BUG FIX: the module imports the `datetime` module (not the class),
            # so the class must be fully qualified.
            yyyymmdd = datetime.datetime.today().strftime("%Y%m%d")
        env_overrides = env_overrides or {}
        env_overrides["YYYYMMDD"] = yyyymmdd
    if env_overrides or cmd_override:
        if not container_name:
            raise ValueError(
                "container_name is required if "
                "cmd_override or env_overrides are specified"
            )
        env_override_str = ""
        cmd_override_str = ""
        if env_overrides:
            # Hand-built JSON fragment for --overrides.
            env_override_str = (
                ',"environment":['
                + ",".join(
                    [
                        "{" + f'"name":"{k}","value":"{v}"' + "}"
                        for k, v in env_overrides.items()
                    ]
                )
                + "]"
            )
        if cmd_override:
            # NOTE(review): single quotes here look like invalid JSON for the
            # AWS CLI — confirm against a real cmd_override invocation.
            cmd_override_str = f", 'command': ['{cmd_override}']"
        overrides = (
            ' --overrides \'{"containerOverrides":'
            f'[{{"name":"{container_name}"'
            f"{cmd_override_str}{env_override_str}"
            "}]}'"
        )
        cmd += overrides
    return_code, output_text = runnow.run(cmd, raise_error=False, echo=False)
    if return_code != 0:
        raise RuntimeError(f"Could not start task: {output_text}")
    jsonobj = json.loads(output_text)
    if len(jsonobj.get("tasks", [])) == 0 or len(jsonobj.get("failures", [])) > 0:
        raise RuntimeError(
            f"Could not start task ({jsonobj.get('failures', '')})\n{output_text}"
        )
    task_arn = jsonobj["tasks"][0]["taskArn"]
    logging.info(f"ECS task status: {get_ecs_task_detail_url(region, task_arn, cluster)}")
    logging.info(f"ECS task logs: {get_ecs_log_url(region, task_arn)}")
    if wait_for_start:
        ecs_wait_for_start(task_arn=task_arn, cluster=cluster, region=region)
    if wait_for_stop:
        ecs_wait_for_stop(task_arn=task_arn, cluster=cluster, region=region)
    if not wait_for_start and not wait_for_stop:
        logging.debug(f"ECS submit result: {output_text}")
    return task_arn
def get_ecs_log_url(
    region,
    task_arn,
    container_name="PTB-Container",
    log_group="PTB-AWSLogs20190822233355860300000001",
):
    """Build the CloudWatch console URL for an ECS task's container log stream."""
    task_id = task_arn.split("/")[-1]
    base = f"https://{region}.console.aws.amazon.com/cloudwatch/home?"
    query = (
        f"region={region}#logEventViewer:group={log_group};"
        f"stream=container-log/{container_name}/{task_id}"
    )
    return base + query
def get_ecs_task_detail_url(region, task_arn, cluster_name):
    """Build the ECS console URL for a task's detail page."""
    task_id = task_arn.split("/")[-1]
    return (
        f"https://{region}.console.aws.amazon.com/ecs/home?"
        f"region={region}"
        f"#/clusters/{cluster_name}/tasks/{task_id}/details"
    )
def ecs_wait_for_start(task_arn, cluster, region, timeout=1200, raise_error=True):
    """Block until the ECS task is running (frequent 15s heartbeat logging)."""
    return _ecs_wait_for(
        "running", task_arn, cluster, region,
        timeout=timeout, heartbeat_interval=15, raise_error=raise_error,
    )
def ecs_wait_for_stop(task_arn, cluster, region, timeout=1200, raise_error=True):
    """Block until the ECS task has stopped (2-minute heartbeat logging)."""
    return _ecs_wait_for(
        "stopped", task_arn, cluster, region,
        timeout=timeout, heartbeat_interval=120, raise_error=raise_error,
    )
@logged(
    "waiting for ECS status '{wait_for}'",
    success_detail=lambda: get_ecs_log_url("{region}", "{task_arn}"),
)
def _ecs_wait_for(
    wait_for,
    task_arn,
    cluster,
    region,
    timeout=1200,
    heartbeat_interval=None,
    raise_error=True,
):
    """Wait until the ECS task reaches *wait_for* ('running' or 'stopped').

    Returns the task ARN as reported by `describe-tasks`.
    Raises RuntimeError when the wait times out, the describe call fails,
    or the task failed to start.
    """
    wait_cmd = f"aws ecs wait tasks-{wait_for} --cluster {cluster} --tasks {task_arn}"
    desc_cmd = f"aws ecs describe-tasks --cluster {cluster} --tasks {task_arn}"
    with logged_block(
        f"waiting for ECS job to reach '{wait_for}' status",
        heartbeat_interval=heartbeat_interval,
    ):
        timeout_time = time.time() + (timeout or MAX_ECS_WAIT)
        return_code, output_text = runnow.run(wait_cmd, raise_error=False)
        # `aws ecs wait` exits 255 when its own polling window expires; keep
        # retrying until our deadline passes.
        while return_code == 255 and time.time() < timeout_time:
            logging.info("aws cli timeout expired. Retrying...")
            # BUG FIX: retries used raise_error=True, which raised on the very
            # exit code (255) the loop is meant to absorb.
            return_code, output_text = runnow.run(wait_cmd, raise_error=False)
        if return_code != 0:
            raise RuntimeError(
                f"ECS wait command failed or timed out (return={return_code}).\n"
                f"{output_text}"
            )
    return_code, output_text = runnow.run(desc_cmd, raise_error=False)
    if return_code != 0:
        raise RuntimeError(f"ECS task describe failed.\n{output_text}")
    jsonobj = json.loads(output_text)
    if len(jsonobj.get("tasks", [])) == 0 or len(jsonobj.get("failures", [])) > 0:
        # BUG FIX: the RuntimeError was constructed but never raised here.
        raise RuntimeError(f"Could not start task ({jsonobj.get('failures', '')})")
    task_arn = jsonobj["tasks"][0]["taskArn"]
    logging.info(f"ECS task status: {get_ecs_task_detail_url(region, task_arn, cluster)}")
    logging.info(f"ECS task logs: {get_ecs_log_url(region, task_arn)}")
    return task_arn
| """dock_r module, helps with dockerization."""
import datetime
import hashlib
import json
import time
import os
import docker
from logless import get_logger, logged, logged_block
import runnow
import uio
MAX_ECS_WAIT = 12 * 60 * 60 # max 12 hours wait
docker_client = docker.from_env()
logging = get_logger("dock-r")
def build(dockerfile_path, tag_as, addl_args=None):
    """Build a docker image from *dockerfile_path*.

    'tag_as' can be a string or list of strings; the dockerfile's parent
    directory is used as the build context.
    """
    context_dir = os.path.dirname(dockerfile_path)
    addl_args = addl_args or ""
    tag_as = _to_list(tag_as)
    if tag_as:
        tag_flags = " ".join(f"-t {t}" for t in tag_as)
        cmd = f"docker build {addl_args} {tag_flags} {context_dir} -f {dockerfile_path}"
    else:
        cmd = f"docker build {addl_args} {context_dir} -f {dockerfile_path}"
    runnow.run(cmd)
def _to_list(str_or_list):
if str_or_list is None:
return []
if isinstance(str_or_list, str):
return str_or_list.split(",")
return str_or_list
def tag(image_name: str, tag_as):
    """Tag an image. 'tag_as' can be a string or list of strings."""
    tag_as = _to_list(tag_as)
    # Renamed loop variable: the original `for tag in tag_as` shadowed this
    # function's own name within its body.
    for new_tag in tag_as:
        runnow.run(f"docker tag {image_name} {new_tag}")
@logged("pushing image '{image_name}'")
def push(image_name):
# docker_client.images.push(image_name)
cmd = f"docker push {image_name}"
runnow.run(cmd)
def smart_split(dockerfile_path: str, tag_as, addl_args=None):
    """Materialize the core/derived dockerfile pair produced by _smart_split.

    Writes `<dockerfile>.core` always and `<dockerfile>.quick` only when the
    original references build-context files. Returns
    (image_core, core_path, image_derived, derived_path_or_None).
    """
    tag_as = _to_list(tag_as)
    interim_image_name = tag_as[0].split(":")[0] if tag_as else "untitled_image"
    (image_core, dockerfile_core), (image_derived, dockerfile_derived) = _smart_split(
        dockerfile_path, interim_image_name, addl_args=addl_args
    )
    dockerfile_path_core = os.path.realpath(f"{dockerfile_path}.core")
    dockerfile_path_derived = os.path.realpath(f"{dockerfile_path}.quick")
    uio.create_text_file(filepath=dockerfile_path_core, contents=dockerfile_core)
    if dockerfile_derived:
        uio.create_text_file(filepath=dockerfile_path_derived, contents=dockerfile_derived)
    else:
        # No COPY/ADD statements -> no derived stage; remove any stale file.
        uio.delete_file(dockerfile_path_derived, ignore_missing=True)
        dockerfile_path_derived = None
    return image_core, dockerfile_path_core, image_derived, dockerfile_path_derived
@logged("smartly building '{dockerfile_path}' as {tag_as or '(none)'}")
def smart_build(
dockerfile_path: str,
tag_as=None,
push_core=True,
push_final=False,
with_login=False,
addl_args=None,
ignore_caches=False,
):
"""
Builds the dockerfile if needed but pulls it from the remote if possible.
"""
if bool(with_login):
login()
tag_as = _to_list(tag_as)
result = smart_split(dockerfile_path, tag_as, addl_args=addl_args)
image_core, dockerfile_path_core, image_derived, dockerfile_path_derived = result
if not ignore_caches:
if dockerfile_path_derived is None and exists_remotely(image_core):
logging.info(
"Image with matching hash already exists "
"and no host files are referenced in Dockerfile."
f"Attempting to retag existing image '{image_core}' as '{tag_as}'..."
)
remote_retag(
image_name=image_core.split(":")[0],
existing_tag=image_core.split(":")[1],
tag_as=tag_as,
)
return
pull(image_core, skip_if_exists=True, silent=True)
if ignore_caches or not exists_locally(image_core):
with logged_block(f"building interim (core) image as '{image_core}'"):
build(dockerfile_path_core, image_core, addl_args=addl_args)
if push_core:
if ignore_caches or not exists_remotely(image_core):
with logged_block(f"pushing interim (core) image '{image_derived}'"):
push(image_core)
else:
logging.info(f"Already exists. Skipping push of image '{image_derived}'")
with logged_block(f"building '{dockerfile_path_derived}' as '{image_derived}'"):
if dockerfile_path_derived:
build(dockerfile_path_derived, image_derived, addl_args=addl_args)
else:
tag(image_core, image_derived)
if tag_as:
tag(image_derived, tag_as)
if push_final:
for image_name in tag_as:
push(image_name)
@logged("pulling image {image_name}")
def pull(image_name, skip_if_exists=False, silent=False):
if skip_if_exists and exists_locally(image_name):
logging.info(f"Skipping image pull. Already exists locally: {image_name}")
return image_name
try:
runnow.run(f"docker pull {image_name}", raise_error=True)
except Exception as ex:
logging.info(f"Failed to pull image: {image_name}\n{ex}")
if silent:
return False
raise ex
if not exists_locally(image_name):
logging.warning("Pull was successful in API but could not be confirmed")
return image_name
def exists_locally(image_name):
    """Return True if *image_name* is present in the local docker image cache."""
    try:
        _ = docker_client.images.get(image_name)
        return True
    except docker.errors.ImageNotFound:
        return False
def exists_remotely(image_name):
    """Return True/False if the image exists in the remote registry,
    or None when the registry lookup itself failed.
    """
    try:
        image = docker_client.images.get_registry_data(image_name)
        if image:
            return True
        return False
    except docker.errors.ImageNotFound:
        return False
    except Exception as ex:
        # Any other failure (auth, network, ...) is logged but non-fatal.
        logging.exception(
            f"Failure when checking if image exists remotely '{image_name}'. {ex}"
        )
        return None
def _smart_split(dockerfile_path, image_name, addl_args=None):
"""
Returns list of tuples: [
(partial_image_name, partial_dockerfile_text)
(derived_image_name, derived_dockerfile_text)
]
Create two dockerfiles from a single file.
1. The first 'core' image will contain all statements until the first COPY or ADD.
2. The second 'derived' image will pull from 'core' and complete the build using
local files or artifacts required by ADD or COPY commands.
"""
orig_text = uio.get_text_file_contents(dockerfile_path)
addl_args = addl_args or ""
core_dockerfile = ""
derived_dockerfile = ""
requires_context = False # Whether we need file context to determine output
for line in orig_text.split("\n"):
if any([line.startswith("COPY"), line.startswith("ADD")]):
requires_context = True
if not requires_context:
core_dockerfile += line + "\n"
else:
derived_dockerfile += line + "\n"
core_md5 = hashlib.md5((addl_args + core_dockerfile).encode("utf-8")).hexdigest()
full_md5 = hashlib.md5((addl_args + orig_text).encode("utf-8")).hexdigest()
core_image_name = f"{image_name}:core-md5-{core_md5}"
derived_image_name = f"{image_name}:md5-{full_md5}"
core_dockerfile = (
f"# NO NOT EDIT - file is generated automatically from `Dockerfile`\n\n"
f"# Dockerfile.core - will be created and pushed as:\n"
f"# \t{core_image_name}\n\n{core_dockerfile}"
)
if derived_dockerfile:
derived_dockerfile = (
f"# NO NOT EDIT - file is generated automatically from `Dockerfile`\n\n"
f"FROM {core_image_name}\n\n{derived_dockerfile}"
)
else:
derived_dockerfile = None # No additional work to do.
return [(core_image_name, core_dockerfile), (derived_image_name, derived_dockerfile)]
def ecs_login(region):
logging.info("Logging into ECS...")
try:
_, ecs_login_cmd = runnow.run(
f"aws ecr get-login --region {region} --no-include-email", echo=False
)
_, _ = runnow.run(ecs_login_cmd, hide=True)
except Exception as ex:
raise RuntimeError(f"ECS login failed. {ex}")
def login(raise_error=False):
    """Log into the docker registry using DOCKER_USERNAME/DOCKER_PASSWORD.

    Registry defaults to index.docker.io (override with DOCKER_REGISTRY).
    Returns True on an attempted login, False when credentials are missing
    (unless *raise_error*, in which case RuntimeError is raised instead).
    """
    usr = os.environ.get("DOCKER_USERNAME", "")
    # BUG FIX: this line was corrupted ('<PASSWORD>(...)') and did not parse;
    # restore the environment lookup.
    pwd = os.environ.get("DOCKER_PASSWORD", "")
    registry = os.environ.get("DOCKER_REGISTRY", "") or "index.docker.io"
    if not (usr and pwd):
        # BUG FIX: added the missing space between the two joined sentences.
        error_msg = (
            "Could not login to docker registry. "
            "Missing env variable DOCKER_USERNAME or DOCKER_PASSWORD"
        )
        if raise_error:
            raise RuntimeError(error_msg)
        logging.warning(error_msg)
        return False
    logging.info(f"Logging into docker registry '{registry}' as user '{usr}'...")
    try:
        runnow.run(
            f"docker login {registry} --username {usr} --password {pwd}", hide=True
        )
        if registry == "index.docker.io":
            # Also log into the implicit default registry name.
            runnow.run(f"docker login --username {usr} --password {pwd}", hide=True)
    except Exception as ex:
        if raise_error:
            raise RuntimeError(f"Docker login failed. {ex}")
        logging.warning(f"Docker login failed. {ex}")
    return True
@logged("applying tag '{tag_as}' to remote ECS image '{image_name}:{existing_tag}'")
def ecs_retag(image_name, existing_tag, tag_as):
tag_as = _to_list(tag_as)
if "amazonaws.com/" in image_name:
image_name = image_name.split("amazonaws.com/")[1]
get_manifest_cmd = (
f"aws ecr batch-get-image"
f" --repository-name {image_name} --image-ids imageTag={existing_tag}"
f" --query 'images[].imageManifest' --output text"
)
_, manifest = runnow.run(get_manifest_cmd, echo=False)
for new_tag in tag_as:
if "amazonaws.com/" in new_tag:
new_tag = new_tag.split("amazonaws.com/")[1]
if ":" in new_tag:
if image_name != new_tag.split(":")[0]:
raise RuntimeError(
f"Image names do not match: '{image_name}', '{new_tag.split(':')[0]}'"
)
new_tag = new_tag.split(":")[1]
put_image_cmd = [
"aws",
"ecr",
"put-image",
"--repository-name",
image_name,
"--image-tag",
new_tag,
"--image-manifest",
manifest,
]
return_code, output_text = runnow.run(
put_image_cmd, shell=False, echo=False, hide=True, raise_error=False
)
if return_code != 0 and "ImageAlreadyExistsException" in output_text:
logging.info("Image already exists. No tagging changes were made.")
elif return_code != 0:
raise RuntimeError(f"Could not retag the specified image.\n{output_text}")
@logged("applying tag '{tag_as}' to remote image '{image_name}:{existing_tag}'")
def remote_retag(image_name, existing_tag, tag_as, with_login=False):
tag_as = _to_list(tag_as)
if bool(with_login):
login()
if "amazonaws.com/" in image_name:
ecs_retag(image_name, existing_tag, tag_as)
return
existing_fullname = f"{image_name}:{existing_tag}"
pull(existing_fullname)
for new_tag in tag_as:
if ":" in new_tag:
new_fullname = new_tag
else:
new_fullname = f"{image_name}:{new_tag}"
tag(existing_fullname, new_fullname)
push(new_fullname)
def ecs_submit(
    task_name: str,
    cluster: str,
    region: str,
    container_name: str = None,
    cmd_override: str = None,
    env_overrides: dict = None,
    use_fargate: bool = False,
    wait_for_start=True,
    wait_for_stop=False,
    max_wait=None,
    yyyymmdd=None,
):
    """Submit an ECS task via `aws ecs run-task` and return its task ARN.

    :param env_overrides: dict of env vars, or a "K1=V1,K2=V2" string
    :param yyyymmdd: optional date stamp; "today" resolves to the current date
    :param max_wait: unused here; waits use their own timeout defaults
    :raises RuntimeError: when the task cannot be started
    """
    cmd = (
        f"aws ecs run-task"
        f" --task-definition {task_name}"
        f" --cluster {cluster}"
        f" --region {region}"
    )
    if use_fargate:
        cmd += " --launch-type FARGATE"
    else:
        cmd += " --launch-type EC2"
    # Accept env overrides in string form ("K1=V1,K2=V2") as well as a dict.
    if env_overrides and isinstance(env_overrides, str):
        env_overrides = {
            x.split("=")[0]: x.split("=")[1] for x in env_overrides.split(",")
        }
    if yyyymmdd and yyyymmdd != "0":
        if str(yyyymmdd).lower() == "today":
            # BUG FIX: the module does `import datetime`, so the class must be
            # fully qualified; bare `datetime.today()` raised AttributeError.
            yyyymmdd = datetime.datetime.today().strftime("%Y%m%d")
        env_overrides = env_overrides or {}
        env_overrides["YYYYMMDD"] = yyyymmdd
    if env_overrides or cmd_override:
        if not container_name:
            raise ValueError(
                "container_name is required if "
                "cmd_override or env_overrides are specified"
            )
        env_override_str = ""
        cmd_override_str = ""
        if env_overrides:
            # Hand-built JSON fragment for --overrides.
            env_override_str = (
                ',"environment":['
                + ",".join(
                    [
                        "{" + f'"name":"{k}","value":"{v}"' + "}"
                        for k, v in env_overrides.items()
                    ]
                )
                + "]"
            )
        if cmd_override:
            # NOTE(review): single quotes here look like invalid JSON for the
            # AWS CLI — confirm against a real cmd_override invocation.
            cmd_override_str = f", 'command': ['{cmd_override}']"
        overrides = (
            ' --overrides \'{"containerOverrides":'
            f'[{{"name":"{container_name}"'
            f"{cmd_override_str}{env_override_str}"
            "}]}'"
        )
        cmd += overrides
    return_code, output_text = runnow.run(cmd, raise_error=False, echo=False)
    if return_code != 0:
        raise RuntimeError(f"Could not start task: {output_text}")
    jsonobj = json.loads(output_text)
    if len(jsonobj.get("tasks", [])) == 0 or len(jsonobj.get("failures", [])) > 0:
        raise RuntimeError(
            f"Could not start task ({jsonobj.get('failures', '')})\n{output_text}"
        )
    task_arn = jsonobj["tasks"][0]["taskArn"]
    logging.info(f"ECS task status: {get_ecs_task_detail_url(region, task_arn, cluster)}")
    logging.info(f"ECS task logs: {get_ecs_log_url(region, task_arn)}")
    if wait_for_start:
        ecs_wait_for_start(task_arn=task_arn, cluster=cluster, region=region)
    if wait_for_stop:
        ecs_wait_for_stop(task_arn=task_arn, cluster=cluster, region=region)
    if not wait_for_start and not wait_for_stop:
        logging.debug(f"ECS submit result: {output_text}")
    return task_arn
def get_ecs_log_url(
region,
task_arn,
container_name="PTB-Container",
log_group="PTB-AWSLogs20190822233355860300000001",
):
task_id = task_arn.split("/")[-1]
return (
f"https://{region}.console.aws.amazon.com/cloudwatch/home?"
f"region={region}#logEventViewer:group={log_group};"
f"stream=container-log/{container_name}/{task_id}"
)
def get_ecs_task_detail_url(region, task_arn, cluster_name):
task_id = task_arn.split("/")[-1]
return (
f"https://{region}.console.aws.amazon.com/ecs/home?"
f"region={region}#/clusters/{cluster_name}/tasks/{task_id}/details"
)
def ecs_wait_for_start(task_arn, cluster, region, timeout=1200, raise_error=True):
return _ecs_wait_for(
"running",
task_arn,
cluster,
region,
timeout=timeout,
heartbeat_interval=15,
raise_error=raise_error,
)
def ecs_wait_for_stop(task_arn, cluster, region, timeout=1200, raise_error=True):
return _ecs_wait_for(
"stopped",
task_arn,
cluster,
region,
timeout=timeout,
heartbeat_interval=2 * 60,
raise_error=raise_error,
)
@logged(
    "waiting for ECS status '{wait_for}'",
    success_detail=lambda: get_ecs_log_url("{region}", "{task_arn}"),
)
def _ecs_wait_for(
    wait_for,
    task_arn,
    cluster,
    region,
    timeout=1200,
    heartbeat_interval=None,
    raise_error=True,
):
    """Wait until the ECS task reaches *wait_for* ('running' or 'stopped').

    Returns the task ARN as reported by `describe-tasks`; raises RuntimeError
    on wait timeout, describe failure, or task start failure.
    """
    wait_cmd = f"aws ecs wait tasks-{wait_for} --cluster {cluster} --tasks {task_arn}"
    desc_cmd = f"aws ecs describe-tasks --cluster {cluster} --tasks {task_arn}"
    with logged_block(
        f"waiting for ECS job to reach '{wait_for}' status",
        heartbeat_interval=heartbeat_interval,
    ):
        timeout_time = time.time() + (timeout or MAX_ECS_WAIT)
        return_code, output_text = runnow.run(wait_cmd, raise_error=False)
        # `aws ecs wait` exits 255 when its internal polling window expires;
        # keep retrying until our own deadline passes.
        while return_code == 255 and time.time() < timeout_time:
            logging.info("aws cli timeout expired. Retrying...")
            # BUG FIX: retries used raise_error=True, raising on the very exit
            # code (255) the loop is meant to absorb.
            return_code, output_text = runnow.run(wait_cmd, raise_error=False)
        if return_code != 0:
            raise RuntimeError(
                f"ECS wait command failed or timed out (return={return_code}).\n"
                f"{output_text}"
            )
    return_code, output_text = runnow.run(desc_cmd, raise_error=False)
    if return_code != 0:
        raise RuntimeError(f"ECS task describe failed.\n{output_text}")
    jsonobj = json.loads(output_text)
    if len(jsonobj.get("tasks", [])) == 0 or len(jsonobj.get("failures", [])) > 0:
        # BUG FIX: the RuntimeError was constructed but never raised here.
        raise RuntimeError(f"Could not start task ({jsonobj.get('failures', '')})")
    task_arn = jsonobj["tasks"][0]["taskArn"]
    logging.info(f"ECS task status: {get_ecs_task_detail_url(region, task_arn, cluster)}")
    logging.info(f"ECS task logs: {get_ecs_log_url(region, task_arn)}")
    return task_arn
| en | 0.697735 | dock_r module, helps with dockerization. # max 12 hours wait Build an image. 'tag_as' can be a string or list of strings Tag an image. 'tag_as' can be a string or list of strings. # docker_client.images.push(image_name) Builds the dockerfile if needed but pulls it from the remote if possible. Returns list of tuples: [
(partial_image_name, partial_dockerfile_text)
(derived_image_name, derived_dockerfile_text)
]
Create two dockerfiles from a single file.
1. The first 'core' image will contain all statements until the first COPY or ADD.
2. The second 'derived' image will pull from 'core' and complete the build using
local files or artifacts required by ADD or COPY commands. # Whether we need file context to determine output # No additional work to do. #logEventViewer:group={log_group};" #/clusters/{cluster_name}/tasks/{task_id}/details" | 2.575673 | 3 |
tools/thumbnails.py | WebSVG/next-svg | 0 | 6622927 | <gh_stars>0
import os
import json
from PIL import Image
def load_json(fileName):
    """Parse the JSON document stored at *fileName* and return it.

    BUG FIX: the original `json.load(open(fileName))` never closed the file
    handle; a `with` block guarantees closure.
    """
    with open(fileName) as handle:
        return json.load(handle)
root_dir = "../public"
thumb_width = 300
def single_command(root_dir):
    """Render PNG thumbnails for every SVG in *root_dir* with one inkscape call.

    Collects the .svg filenames and passes them all to a single
    `inkscape --export-type=png` invocation, run from inside *root_dir*.
    """
    file_list = ""
    for file in os.listdir(root_dir):
        if file.endswith(".svg"):
            file_list += " " + file
    # BUG FIX: the command was a plain (non-f) string, so the literal text
    # "{thumb_width}" was handed to inkscape instead of the numeric width.
    command = f"inkscape --export-type=png -w {thumb_width}" + file_list
    os.chdir(root_dir)
    os.system(command)
def command_per_file_in_dir(root_dir):
    """Render a PNG thumbnail for each SVG in *root_dir*, one inkscape call per file."""
    os.chdir(root_dir)
    for entry in os.listdir(root_dir):
        if not entry.endswith(".svg"):
            continue
        out_name = entry.replace(".svg", ".thumb.png")
        os.system(f'inkscape --export-filename={out_name} -w {thumb_width} "{entry}"')
def command_per_file():
    """Render a thumbnail next to every file listed in slides_files.json.

    SVGs are rasterized with inkscape and renamed to `<file>.thumb`;
    PNGs are downscaled in-process with Pillow to `<file>.png.thumb.png`.
    """
    # Relative paths assume the script runs from the tools/ directory — TODO confirm.
    file_list = load_json("../pages/slides_files.json")
    for file in file_list:
        rel_file = "../public/"+file
        export_filename=rel_file+".thumb.png"
        if file.endswith(".svg"):
            command = f'inkscape --export-filename={export_filename} -w {thumb_width} "{rel_file}"'
            os.system(command)
            # Rename the rendered PNG to a bare `.thumb` suffix, replacing any
            # previous thumbnail.
            dest_file = export_filename.replace(".thumb.png",".thumb")
            if(os.path.exists(dest_file)):
                os.remove(dest_file)
            os.rename(export_filename,dest_file)
        if file.endswith(".png"):
            im = Image.open(rel_file)
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 — confirm
            # the pinned Pillow version (newer versions use Image.LANCZOS).
            im.thumbnail((thumb_width,thumb_width), Image.ANTIALIAS)
            im.save(export_filename, "PNG")
command_per_file()
| import os
import json
from PIL import Image
def load_json(fileName):
    """Parse the JSON document stored at *fileName* and return it.

    BUG FIX: the original `json.load(open(fileName))` never closed the file
    handle; a `with` block guarantees closure.
    """
    with open(fileName) as handle:
        return json.load(handle)
root_dir = "../public"
thumb_width = 300
def single_command(root_dir):
    """Render PNG thumbnails for every SVG in *root_dir* with one inkscape call."""
    file_list = ""
    for file in os.listdir(root_dir):
        if file.endswith(".svg"):
            file_list += " " + file
    # BUG FIX: the command was a plain (non-f) string, so the literal text
    # "{thumb_width}" was handed to inkscape instead of the numeric width.
    command = f"inkscape --export-type=png -w {thumb_width}" + file_list
    os.chdir(root_dir)
    os.system(command)
def command_per_file_in_dir(root_dir):
os.chdir(root_dir)
for file in os.listdir(root_dir):
if file.endswith(".svg"):
export_filename=file.replace(".svg",".thumb.png")
command = f'inkscape --export-filename={export_filename} -w {thumb_width} "{file}"'
os.system(command)
def command_per_file():
file_list = load_json("../pages/slides_files.json")
for file in file_list:
rel_file = "../public/"+file
export_filename=rel_file+".thumb.png"
if file.endswith(".svg"):
command = f'inkscape --export-filename={export_filename} -w {thumb_width} "{rel_file}"'
os.system(command)
dest_file = export_filename.replace(".thumb.png",".thumb")
if(os.path.exists(dest_file)):
os.remove(dest_file)
os.rename(export_filename,dest_file)
if file.endswith(".png"):
im = Image.open(rel_file)
im.thumbnail((thumb_width,thumb_width), Image.ANTIALIAS)
im.save(export_filename, "PNG")
command_per_file() | none | 1 | 2.862388 | 3 | |
src/w1therm2influx/core.py | rkschamer/w1therm2influx | 0 | 6622928 | from influxdb import InfluxDBClient
from w1thermsensor import W1ThermSensor
from w1thermsensor.errors import W1ThermSensorError
import logging
# in case this is used as library, prevent logging if the application
# does not define it
logging.getLogger('foo').addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
class ValueCollector:
    """Collects temperatures from all attached 1-Wire sensors and writes them
    to an InfluxDB measurement (one field per sensor id).

    Usable as a context manager so the InfluxDB connection is closed on exit.
    """

    def __init__(self,
                 database: str,
                 user: str,
                 password: str,
                 host: str = 'localhost',
                 port: int = 8086,
                 measurement: str = "w1-values"):
        """Discover sensors and open the InfluxDB connection.

        :param database: InfluxDB database name
        :param user: InfluxDB user name
        :param password: InfluxDB password
        :param host: InfluxDB host (default 'localhost')
        :param port: InfluxDB port (default 8086)
        :param measurement: measurement the values are written to
        """
        logger.info("Initializing ValueCollector...")
        # All 1-Wire temperature sensors attached at construction time.
        self.sensors: list = W1ThermSensor.get_available_sensors()
        logger.info("Found {} sensors: {}".format(len(self.sensors), self.sensors))
        self.client = InfluxDBClient(host, port, user, password, database)
        logger.info("Connect to InfluxDb (host={}, port={}, user={}, password=<PASSWORD>, db={})"
                    .format(host, port, user, database))
        self.measurement = measurement
        logger.debug("Using measurement '{}' for values".format(measurement))
        logger.info("Initializing ValueCollector SUCCESSFUL!")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # BUG FIX: __exit__ must accept (exc_type, exc_value, traceback);
        # the previous one-argument signature raised TypeError whenever a
        # `with ValueCollector(...)` block was exited.
        if self.client is not None:
            self.client.close()
        return False  # never suppress exceptions

    def collect_and_write(self):
        """Read every sensor once and write the values as a single point."""
        fields: dict = {}
        # The original enumerate() index was never used.
        for sensor in self.sensors:
            sensor_id = sensor.id
            try:
                sensor_temperature = sensor.get_temperature()
                logger.debug("Writing value for sensor {}: {}".format(sensor_id, sensor_temperature))
                fields[sensor_id] = sensor_temperature
            except W1ThermSensorError as e:
                # A failing sensor is logged and skipped; the rest still write.
                logger.error("Error reading {}: {}".format(sensor_id, e.args))
        json_body = [
            {
                "measurement": self.measurement,
                "fields": fields
            }
        ]
        logger.debug("Influx JSON body: " + str(json_body))
        self.client.write_points(json_body)
| from influxdb import InfluxDBClient
from w1thermsensor import W1ThermSensor
from w1thermsensor.errors import W1ThermSensorError
import logging
# in case this is used as library, prevent logging if the application
# does not define it
logging.getLogger('foo').addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
class ValueCollector:
    """Collects temperatures from all attached 1-Wire sensors and writes them
    to an InfluxDB measurement (one field per sensor id).

    Usable as a context manager so the InfluxDB connection is closed on exit.
    """

    def __init__(self,
                 database: str,
                 user: str,
                 password: str,
                 host: str = 'localhost',
                 port: int = 8086,
                 measurement: str = "w1-values"):
        """Discover sensors and open the InfluxDB connection."""
        logger.info("Initializing ValueCollector...")
        # All 1-Wire temperature sensors attached at construction time.
        self.sensors: list = W1ThermSensor.get_available_sensors()
        logger.info("Found {} sensors: {}".format(len(self.sensors), self.sensors))
        self.client = InfluxDBClient(host, port, user, password, database)
        logger.info("Connect to InfluxDb (host={}, port={}, user={}, password=<PASSWORD>, db={})"
                    .format(host, port, user, database))
        self.measurement = measurement
        logger.debug("Using measurement '{}' for values".format(measurement))
        logger.info("Initializing ValueCollector SUCCESSFUL!")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # BUG FIX: __exit__ must accept (exc_type, exc_value, traceback);
        # the previous one-argument signature raised TypeError whenever a
        # `with ValueCollector(...)` block was exited.
        if self.client is not None:
            self.client.close()
        return False  # never suppress exceptions

    def collect_and_write(self):
        """Read every sensor once and write the values as a single point."""
        fields: dict = {}
        # The original enumerate() index was never used.
        for sensor in self.sensors:
            sensor_id = sensor.id
            try:
                sensor_temperature = sensor.get_temperature()
                logger.debug("Writing value for sensor {}: {}".format(sensor_id, sensor_temperature))
                fields[sensor_id] = sensor_temperature
            except W1ThermSensorError as e:
                # A failing sensor is logged and skipped; the rest still write.
                logger.error("Error reading {}: {}".format(sensor_id, e.args))
        json_body = [
            {
                "measurement": self.measurement,
                "fields": fields
            }
        ]
        logger.debug("Influx JSON body: " + str(json_body))
        self.client.write_points(json_body)
| en | 0.950401 | # in case this is used as library, prevent logging if the application # does not define it | 2.784947 | 3 |
sesarwslib/sesarwsclient.py | Adam-Brown/SESAR-Web-Services-Lib | 1 | 6622929 | <filename>sesarwslib/sesarwsclient.py
import urllib
import urllib2
import xml.etree.ElementTree as eTree
import StringIO
SAMPLE_REGISTRATION_SERVICE_URL = 'http://app.geosamples.org/webservices/uploadservice.php'
CREDENTIAL_SERVICE_URL = 'http://app.geosamples.org/webservices/credentials_service.php'
IGSN_LIST_SERVICE_URL = 'http://app.geosamples.org/samples/user_code/'
class IgsnClient:
    def __init__(self, username, password, version):
        """Store SESAR credentials and the XML schema version to use.

        version 2 emits the samplev2.xsd header; any other value emits the
        bare v1 <samples> element (see register_samples).
        """
        self.username = username
        self.password = password
        self.version = version
    def register_sample(self, sample):
        """Register a single sample and return its assigned IGSN."""
        return self.register_samples([sample])[0]
    # 1. Sample registration web service
    def register_samples(self, samples):
        """Register *samples* with the SESAR upload service.

        Serializes the samples to the v1 or v2 XML envelope (per self.version),
        POSTs it to SAMPLE_REGISTRATION_SERVICE_URL, and returns the list of
        IGSNs parsed from the response. On an HTTP error (e.g. duplicate
        sample id) the error body is printed and None is returned implicitly.
        """
        output = StringIO.StringIO()
        if self.version == 2:
            output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
            output.write('<samples xmlns="http://app.geosamples.org"\n')
            output.write('xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n')
            output.write('xsi:schemaLocation="http://app.geosamples.org/samplev2.xsd">\n')
        else:
            output.write('<samples>\n')
        for sample in samples:
            sample.export(output, 0)
        output.write('</samples>')
        # According to the v2 XSD the tag should be sampleType... I can't test it yet because I think SESAR need to update something.
        # I'm temporarily forcing this to use version 1 for the moment by rewriting these tags.
        output_xml = output.getvalue().replace('<sampleType>', '<sample>').replace('</sampleType>', '</sample>')
        http_body = urllib.urlencode({
            'username': self.username,
            'password': self.password,
            'content': output_xml
        })
        http_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        req = urllib2.Request(SAMPLE_REGISTRATION_SERVICE_URL, http_body, http_headers)
        try:
            handler = urllib2.urlopen(req)
            # handler.getcode()
            # handler.headers.getheader('content-type')
            # Response shape:
            # <results> <status>message</status><igsn>XXXX</igsn><status>message</status><igsn>XXXX</igsn> </results>
            result = handler.read()
            results_elem = eTree.fromstring(result)
            igsns = []
            for child in results_elem:
                if child.tag == 'igsn':
                    igsns.append(child.text)
            return igsns
        except urllib2.HTTPError as httpError:
            # This might happen, for example, if the sample ID already exists.
            print httpError.read()
# 2. Credential web service
def get_user_codes(self):
http_body = urllib.urlencode({
'username': self.username,
'password': <PASSWORD>
})
http_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
req = urllib2.Request(CREDENTIAL_SERVICE_URL, http_body, http_headers)
handler = urllib2.urlopen(req)
# handler.getcode()
# <results><valid>yes</valid><user_codes><user_code>IEXXX</user_code></user_codes></results>
result = handler.read()
results_elem = eTree.fromstring(result)
valid = False
user_codes = []
for child in results_elem:
if child.tag == 'valid':
valid = child.text == 'yes'
elif child.tag == 'user_codes':
for user_code_elem in child:
user_codes.append(user_code_elem.text)
return {'valid': valid, 'user_codes': user_codes}
# 3. SESAR IGSN list web service for specific user code
@staticmethod
def list_igsns(user_code, limit=None, page_no=None):
# Build the constraint if limit is set.
# Only add the page number if limit is set too.
constraint = ''
if limit is not None:
constraint = "?limit=" + str(limit)
if page_no is not None:
constraint += "&page_no=" + str(page_no)
query = IGSN_LIST_SERVICE_URL + user_code + constraint
http_headers = {'Accept': 'application/xml'}
req = urllib2.Request(query, None, http_headers)
handler = urllib2.urlopen(req)
"""
Example response:
<samples>
<sample>
<igsn>IEXXX0001</igsn>
<url>http://app.geosamples.org/webservices/display.php?igsn=IEXXX0001</url>
</sample>
...
</samples>
"""
# TODO: Make robust
# handler.getcode()
result = handler.read()
samples_elem = eTree.fromstring(result)
igsns = []
for sample_elem in samples_elem:
for elem in sample_elem:
if elem.tag == 'igsn':
igsn = elem.text
elif elem.tag == 'url':
url = elem.text
else:
# TODO: Make a better error
raise Exception("Error")
if igsn and url:
igsns.append({'igsn': igsn, 'url': url})
return igsns | <filename>sesarwslib/sesarwsclient.py
import urllib
import urllib2
import xml.etree.ElementTree as eTree
import StringIO
SAMPLE_REGISTRATION_SERVICE_URL = 'http://app.geosamples.org/webservices/uploadservice.php'
CREDENTIAL_SERVICE_URL = 'http://app.geosamples.org/webservices/credentials_service.php'
IGSN_LIST_SERVICE_URL = 'http://app.geosamples.org/samples/user_code/'
class IgsnClient:
def __init__(self, username, password, version):
self.username = username
self.password = password
self.version = version
def register_sample(self, sample):
return self.register_samples([sample])[0]
# 1. Sample registration web service
def register_samples(self, samples):
output = StringIO.StringIO()
if self.version == 2:
output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
output.write('<samples xmlns="http://app.geosamples.org"\n')
output.write('xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n')
output.write('xsi:schemaLocation="http://app.geosamples.org/samplev2.xsd">\n')
else:
output.write('<samples>\n')
for sample in samples:
sample.export(output, 0)
output.write('</samples>')
# According to the v2 XSD the tag should be sampleType... I can't test it yet because I think SESAR need to update something.
# I'm temporarily forcing this to use version 1 for the moment by rewriting these tags.
output_xml = output.getvalue().replace('<sampleType>', '<sample>').replace('</sampleType>', '</sample>')
http_body = urllib.urlencode({
'username': self.username,
'password': self.password,
'content': output_xml
})
http_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
req = urllib2.Request(SAMPLE_REGISTRATION_SERVICE_URL, http_body, http_headers)
try:
handler = urllib2.urlopen(req)
# handler.getcode()
# handler.headers.getheader('content-type')
# <results> <status>message</status><igsn>XXXX</igsn><status>message</status><igsn>XXXX</igsn> </results>
result = handler.read()
results_elem = eTree.fromstring(result)
igsns = []
for child in results_elem:
if child.tag == 'igsn':
igsns.append(child.text)
return igsns
except urllib2.HTTPError as httpError:
# This might happen, for example, if the sample ID already exists.
print httpError.read()
# 2. Credential web service
def get_user_codes(self):
http_body = urllib.urlencode({
'username': self.username,
'password': <PASSWORD>
})
http_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
req = urllib2.Request(CREDENTIAL_SERVICE_URL, http_body, http_headers)
handler = urllib2.urlopen(req)
# handler.getcode()
# <results><valid>yes</valid><user_codes><user_code>IEXXX</user_code></user_codes></results>
result = handler.read()
results_elem = eTree.fromstring(result)
valid = False
user_codes = []
for child in results_elem:
if child.tag == 'valid':
valid = child.text == 'yes'
elif child.tag == 'user_codes':
for user_code_elem in child:
user_codes.append(user_code_elem.text)
return {'valid': valid, 'user_codes': user_codes}
# 3. SESAR IGSN list web service for specific user code
@staticmethod
def list_igsns(user_code, limit=None, page_no=None):
# Build the constraint if limit is set.
# Only add the page number if limit is set too.
constraint = ''
if limit is not None:
constraint = "?limit=" + str(limit)
if page_no is not None:
constraint += "&page_no=" + str(page_no)
query = IGSN_LIST_SERVICE_URL + user_code + constraint
http_headers = {'Accept': 'application/xml'}
req = urllib2.Request(query, None, http_headers)
handler = urllib2.urlopen(req)
"""
Example response:
<samples>
<sample>
<igsn>IEXXX0001</igsn>
<url>http://app.geosamples.org/webservices/display.php?igsn=IEXXX0001</url>
</sample>
...
</samples>
"""
# TODO: Make robust
# handler.getcode()
result = handler.read()
samples_elem = eTree.fromstring(result)
igsns = []
for sample_elem in samples_elem:
for elem in sample_elem:
if elem.tag == 'igsn':
igsn = elem.text
elif elem.tag == 'url':
url = elem.text
else:
# TODO: Make a better error
raise Exception("Error")
if igsn and url:
igsns.append({'igsn': igsn, 'url': url})
return igsns | en | 0.534949 | # 1. Sample registration web service # According to the v2 XSD the tag should be sampleType... I can't test it yet because I think SESAR need to update something. # I'm temporarily forcing this to use version 1 for the moment by rewriting these tags. # handler.getcode() # handler.headers.getheader('content-type') # <results> <status>message</status><igsn>XXXX</igsn><status>message</status><igsn>XXXX</igsn> </results> # This might happen, for example, if the sample ID already exists. # 2. Credential web service # handler.getcode() # <results><valid>yes</valid><user_codes><user_code>IEXXX</user_code></user_codes></results> # 3. SESAR IGSN list web service for specific user code # Build the constraint if limit is set. # Only add the page number if limit is set too. Example response: <samples> <sample> <igsn>IEXXX0001</igsn> <url>http://app.geosamples.org/webservices/display.php?igsn=IEXXX0001</url> </sample> ... </samples> # TODO: Make robust # handler.getcode() # TODO: Make a better error | 2.388019 | 2 |
threes/test_threes_util.py | davitf/puzzle-ai | 0 | 6622930 | import numpy as np
import pytest
from threes import threes_util
# A series of sample lines, with the results of moving them left or right.
SAMPLE_LINES_BOTH_DIRECTIONS = [
# Move spaces.
([0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]),
([1, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0]),
([0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]),
([4, 3, 0, 4], [4, 3, 4, 0], [0, 4, 3, 4]),
([2, 0, 0, 3], [2, 0, 3, 0], [0, 2, 0, 3]),
([2, 0, 1, 0], [2, 1, 0, 0], [0, 2, 0, 1]),
# Merge 1s and 2s.
([1, 2, 4, 3], [3, 4, 3, 0], [0, 3, 4, 3]),
([4, 2, 1, 3], [4, 3, 3, 0], [0, 4, 3, 3]),
([4, 3, 2, 1], [4, 3, 3, 0], [0, 4, 3, 3]),
([1, 2, 1, 2], [3, 1, 2, 0], [0, 1, 2, 3]),
([1, 2, 1, 5], [3, 1, 5, 0], [0, 1, 3, 5]),
([0, 1, 2, 3], [1, 2, 3, 0], [0, 0, 3, 3]),
# Merge two equal 3+ tiles.
([2, 2, 3, 3], [2, 2, 4, 0], [0, 2, 2, 4]),
([4, 4, 0, 1], [5, 0, 1, 0], [0, 4, 4, 1]),
([2, 8, 8, 8], [2, 9, 8, 0], [0, 2, 8, 9]),
# Two 1s or 2s cannot be merged.
([2, 2, 4, 3], [2, 2, 4, 3], [2, 2, 4, 3]),
([4, 1, 1, 3], [4, 1, 1, 3], [4, 1, 1, 3]),
# Non-contiguous tiles cannot be merged.
([2, 4, 2, 4], [2, 4, 2, 4], [2, 4, 2, 4]),
([2, 3, 1, 4], [2, 3, 1, 4], [2, 3, 1, 4]),
]
# move_line only computes the result of moving a line to the left. For a
# move to the right, we must invert the input and output. Here we convert
# the test data set above into pairs of expected (input, output) pairs.
move_line_testdata = []
for input, moved_left, moved_right in SAMPLE_LINES_BOTH_DIRECTIONS:
move_line_testdata.append((input, moved_left))
move_line_testdata.append((input[::-1], moved_right[::-1]))
@pytest.mark.parametrize("original, moved_left", move_line_testdata)
def test_is_line_movable_left(original, moved_left):
is_movable = original != moved_left
line = np.array(original)
assert threes_util.is_line_movable_left(line) == is_movable
# Check that the argument was not modified in-place.
assert line.tolist() == original
@pytest.mark.parametrize("input, output", move_line_testdata)
def test_move_line_left(input, output):
line = np.array(input)
moved, score_delta = threes_util.move_line_left(line)
assert np.all(line == output)
assert moved == (input != output)
assert score_delta == (
threes_util.total_score(output) - threes_util.total_score(input)
)
# A sample Threes board, with the result of moving it in each direction.
# In the result boards, the possible spots where a new tile can be placed are
# marked with -1 values.
# This board cannot be moved up.
SAMPLE_BOARD = [[3, 4, 7, 4], [1, 2, 3, 0], [4, 5, 0, 0], [1, 1, 0, 0]]
SAMPLE_BOARD_LEFT = [[3, 4, 7, 4], [3, 3, 0, -1], [4, 5, 0, 0], [1, 1, 0, 0]]
SAMPLE_BOARD_RIGHT = [[3, 4, 7, 4], [-1, 1, 2, 3], [-1, 4, 5, 0], [-1, 1, 1, 0]]
SAMPLE_BOARD_DOWN = [[3, 4, -1, -1], [1, 2, 7, 4], [4, 5, 3, 0], [1, 1, 0, 0]]
class FakeRandom(object):
"""A fake np.random implementation which expects a .choice() call."""
def __init__(self, max, return_value):
self.max = max
self.return_value = return_value
def choice(self, max):
assert max == self.max
return self.return_value
def generate_next_tile_boards(board, new_tile):
possible_new_tile_places = np.argwhere(board == -1)
new_boards = []
for new_tile_location in possible_new_tile_places:
new_board = board.copy()
new_board[board == -1] = 0
new_board[tuple(new_tile_location)] = new_tile
new_boards.append(new_board.tolist())
return new_boards
@pytest.mark.parametrize(
"orig_board, expected_board, direction",
[
(SAMPLE_BOARD, SAMPLE_BOARD_LEFT, threes_util.DIRECTION_LEFT),
(SAMPLE_BOARD, SAMPLE_BOARD_RIGHT, threes_util.DIRECTION_RIGHT),
(SAMPLE_BOARD, SAMPLE_BOARD_DOWN, threes_util.DIRECTION_DOWN),
],
)
def test_move_board(orig_board, expected_board, direction):
orig_board = np.array(orig_board)
expected_board = np.array(expected_board)
# Generate the possible new boards. In each one, one of the -1s is replaced
# by the new tile (which we're hardcoding to 10), and the others by 0s
# (empty spaces).
num_new_boards = np.sum(expected_board == -1)
expected_new_boards = generate_next_tile_boards(expected_board, 10)
assert num_new_boards == len(expected_new_boards)
# If there are no new boards, this is an illegal move, which is tested
# separately.
assert num_new_boards
# Run move_board with all the possible random values, so that it should
# generate all the possibilities (in an unknown order).
generated_new_boards = []
for i in range(num_new_boards):
moved_board = orig_board.copy()
score_delta = threes_util.move_board(
moved_board, direction, 10, np_rand=FakeRandom(num_new_boards, i)
)
assert score_delta == (
threes_util.total_score(expected_board)
- threes_util.total_score(orig_board)
)
generated_new_boards.append(moved_board.tolist())
# Check that the generated boards match the expected ones. The lists are
# sorted so that differences in ordering are ignored.
# (as in unittest.TestCase.assertCountEqual , which doesn't have a
# py.test equivalent).
assert sorted(expected_new_boards) == sorted(generated_new_boards)
def test_move_board_illegal_action():
board = np.array(SAMPLE_BOARD)
with pytest.raises(threes_util.IllegalMoveError):
delta = threes_util.move_board(
board, threes_util.DIRECTION_UP, 10, np_rand=FakeRandom(1, 0)
)
# Ensure that an illegal move does not change the input array.
assert np.all(board == SAMPLE_BOARD)
@pytest.mark.parametrize(
"max_tile, possible_tiles",
[
(threes_util.TILE_1, [threes_util.TILE_1]),
(threes_util.TILE_2, [threes_util.TILE_2]),
(threes_util.TILE_3, [threes_util.TILE_3]),
(threes_util.TILE_6, [threes_util.TILE_6]),
(threes_util.TILE_12, [threes_util.TILE_6, threes_util.TILE_12]),
(
threes_util.TILE_24,
[threes_util.TILE_6, threes_util.TILE_12, threes_util.TILE_24],
),
(
threes_util.TILE_48,
[threes_util.TILE_12, threes_util.TILE_24, threes_util.TILE_48],
),
(
threes_util.TILE_384,
[threes_util.TILE_96, threes_util.TILE_192, threes_util.TILE_384],
),
],
)
def test_future_tile_possibilities(max_tile, possible_tiles):
assert list(threes_util.future_tile_possibilities(max_tile)) == possible_tiles
| import numpy as np
import pytest
from threes import threes_util
# A series of sample lines, with the results of moving them left or right.
SAMPLE_LINES_BOTH_DIRECTIONS = [
# Move spaces.
([0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]),
([1, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0]),
([0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]),
([4, 3, 0, 4], [4, 3, 4, 0], [0, 4, 3, 4]),
([2, 0, 0, 3], [2, 0, 3, 0], [0, 2, 0, 3]),
([2, 0, 1, 0], [2, 1, 0, 0], [0, 2, 0, 1]),
# Merge 1s and 2s.
([1, 2, 4, 3], [3, 4, 3, 0], [0, 3, 4, 3]),
([4, 2, 1, 3], [4, 3, 3, 0], [0, 4, 3, 3]),
([4, 3, 2, 1], [4, 3, 3, 0], [0, 4, 3, 3]),
([1, 2, 1, 2], [3, 1, 2, 0], [0, 1, 2, 3]),
([1, 2, 1, 5], [3, 1, 5, 0], [0, 1, 3, 5]),
([0, 1, 2, 3], [1, 2, 3, 0], [0, 0, 3, 3]),
# Merge two equal 3+ tiles.
([2, 2, 3, 3], [2, 2, 4, 0], [0, 2, 2, 4]),
([4, 4, 0, 1], [5, 0, 1, 0], [0, 4, 4, 1]),
([2, 8, 8, 8], [2, 9, 8, 0], [0, 2, 8, 9]),
# Two 1s or 2s cannot be merged.
([2, 2, 4, 3], [2, 2, 4, 3], [2, 2, 4, 3]),
([4, 1, 1, 3], [4, 1, 1, 3], [4, 1, 1, 3]),
# Non-contiguous tiles cannot be merged.
([2, 4, 2, 4], [2, 4, 2, 4], [2, 4, 2, 4]),
([2, 3, 1, 4], [2, 3, 1, 4], [2, 3, 1, 4]),
]
# move_line only computes the result of moving a line to the left. For a
# move to the right, we must invert the input and output. Here we convert
# the test data set above into pairs of expected (input, output) pairs.
move_line_testdata = []
for input, moved_left, moved_right in SAMPLE_LINES_BOTH_DIRECTIONS:
move_line_testdata.append((input, moved_left))
move_line_testdata.append((input[::-1], moved_right[::-1]))
@pytest.mark.parametrize("original, moved_left", move_line_testdata)
def test_is_line_movable_left(original, moved_left):
is_movable = original != moved_left
line = np.array(original)
assert threes_util.is_line_movable_left(line) == is_movable
# Check that the argument was not modified in-place.
assert line.tolist() == original
@pytest.mark.parametrize("input, output", move_line_testdata)
def test_move_line_left(input, output):
line = np.array(input)
moved, score_delta = threes_util.move_line_left(line)
assert np.all(line == output)
assert moved == (input != output)
assert score_delta == (
threes_util.total_score(output) - threes_util.total_score(input)
)
# A sample Threes board, with the result of moving it in each direction.
# In the result boards, the possible spots where a new tile can be placed are
# marked with -1 values.
# This board cannot be moved up.
SAMPLE_BOARD = [[3, 4, 7, 4], [1, 2, 3, 0], [4, 5, 0, 0], [1, 1, 0, 0]]
SAMPLE_BOARD_LEFT = [[3, 4, 7, 4], [3, 3, 0, -1], [4, 5, 0, 0], [1, 1, 0, 0]]
SAMPLE_BOARD_RIGHT = [[3, 4, 7, 4], [-1, 1, 2, 3], [-1, 4, 5, 0], [-1, 1, 1, 0]]
SAMPLE_BOARD_DOWN = [[3, 4, -1, -1], [1, 2, 7, 4], [4, 5, 3, 0], [1, 1, 0, 0]]
class FakeRandom(object):
"""A fake np.random implementation which expects a .choice() call."""
def __init__(self, max, return_value):
self.max = max
self.return_value = return_value
def choice(self, max):
assert max == self.max
return self.return_value
def generate_next_tile_boards(board, new_tile):
possible_new_tile_places = np.argwhere(board == -1)
new_boards = []
for new_tile_location in possible_new_tile_places:
new_board = board.copy()
new_board[board == -1] = 0
new_board[tuple(new_tile_location)] = new_tile
new_boards.append(new_board.tolist())
return new_boards
@pytest.mark.parametrize(
"orig_board, expected_board, direction",
[
(SAMPLE_BOARD, SAMPLE_BOARD_LEFT, threes_util.DIRECTION_LEFT),
(SAMPLE_BOARD, SAMPLE_BOARD_RIGHT, threes_util.DIRECTION_RIGHT),
(SAMPLE_BOARD, SAMPLE_BOARD_DOWN, threes_util.DIRECTION_DOWN),
],
)
def test_move_board(orig_board, expected_board, direction):
orig_board = np.array(orig_board)
expected_board = np.array(expected_board)
# Generate the possible new boards. In each one, one of the -1s is replaced
# by the new tile (which we're hardcoding to 10), and the others by 0s
# (empty spaces).
num_new_boards = np.sum(expected_board == -1)
expected_new_boards = generate_next_tile_boards(expected_board, 10)
assert num_new_boards == len(expected_new_boards)
# If there are no new boards, this is an illegal move, which is tested
# separately.
assert num_new_boards
# Run move_board with all the possible random values, so that it should
# generate all the possibilities (in an unknown order).
generated_new_boards = []
for i in range(num_new_boards):
moved_board = orig_board.copy()
score_delta = threes_util.move_board(
moved_board, direction, 10, np_rand=FakeRandom(num_new_boards, i)
)
assert score_delta == (
threes_util.total_score(expected_board)
- threes_util.total_score(orig_board)
)
generated_new_boards.append(moved_board.tolist())
# Check that the generated boards match the expected ones. The lists are
# sorted so that differences in ordering are ignored.
# (as in unittest.TestCase.assertCountEqual , which doesn't have a
# py.test equivalent).
assert sorted(expected_new_boards) == sorted(generated_new_boards)
def test_move_board_illegal_action():
board = np.array(SAMPLE_BOARD)
with pytest.raises(threes_util.IllegalMoveError):
delta = threes_util.move_board(
board, threes_util.DIRECTION_UP, 10, np_rand=FakeRandom(1, 0)
)
# Ensure that an illegal move does not change the input array.
assert np.all(board == SAMPLE_BOARD)
@pytest.mark.parametrize(
"max_tile, possible_tiles",
[
(threes_util.TILE_1, [threes_util.TILE_1]),
(threes_util.TILE_2, [threes_util.TILE_2]),
(threes_util.TILE_3, [threes_util.TILE_3]),
(threes_util.TILE_6, [threes_util.TILE_6]),
(threes_util.TILE_12, [threes_util.TILE_6, threes_util.TILE_12]),
(
threes_util.TILE_24,
[threes_util.TILE_6, threes_util.TILE_12, threes_util.TILE_24],
),
(
threes_util.TILE_48,
[threes_util.TILE_12, threes_util.TILE_24, threes_util.TILE_48],
),
(
threes_util.TILE_384,
[threes_util.TILE_96, threes_util.TILE_192, threes_util.TILE_384],
),
],
)
def test_future_tile_possibilities(max_tile, possible_tiles):
assert list(threes_util.future_tile_possibilities(max_tile)) == possible_tiles
| en | 0.920206 | # A series of sample lines, with the results of moving them left or right. # Move spaces. # Merge 1s and 2s. # Merge two equal 3+ tiles. # Two 1s or 2s cannot be merged. # Non-contiguous tiles cannot be merged. # move_line only computes the result of moving a line to the left. For a # move to the right, we must invert the input and output. Here we convert # the test data set above into pairs of expected (input, output) pairs. # Check that the argument was not modified in-place. # A sample Threes board, with the result of moving it in each direction. # In the result boards, the possible spots where a new tile can be placed are # marked with -1 values. # This board cannot be moved up. A fake np.random implementation which expects a .choice() call. # Generate the possible new boards. In each one, one of the -1s is replaced # by the new tile (which we're hardcoding to 10), and the others by 0s # (empty spaces). # If there are no new boards, this is an illegal move, which is tested # separately. # Run move_board with all the possible random values, so that it should # generate all the possibilities (in an unknown order). # Check that the generated boards match the expected ones. The lists are # sorted so that differences in ordering are ignored. # (as in unittest.TestCase.assertCountEqual , which doesn't have a # py.test equivalent). # Ensure that an illegal move does not change the input array. | 2.529571 | 3 |
BlueSubdivision/perlinVoxel.py | DweebsUnited/CodeMonkey | 0 | 6622931 | <reponame>DweebsUnited/CodeMonkey
# On a grid, while a geometric true
from random import uniform
from noise import snoise2
from math import exp, pow
class Sigmoid:
s = 0.001
a = 0
b = 0
def __init__( self, s = 0.001 ):
self.config( s )
def config( self, s ):
self.s = s;
self.a = self.raw( 1 );
self.b = self.raw( 0 );
if( self.a < self.b ):
t = self.a;
self.a = self.b;
self.b = t;
def raw( self, x ):
return 1.0 / ( 1.0 + exp( - self.s * ( x - 0.5 ) ) )
def run( self, x ):
return ( self.raw( x ) - self.b ) * 1.0 / ( self.a - self.b )
GRIDSIZE = 32
MAX_SIZE = 16
voxel = [ ]
level = -1
sig = Sigmoid( 2 )
while len( voxel ) < MAX_SIZE:
newVoxels = 0
selfLevel = level + 1
voxel.append( [ [ False for _ in range( GRIDSIZE ) ] for _ in range( GRIDSIZE ) ] )
for xdx in range( GRIDSIZE ):
for ydx in range( 0, GRIDSIZE ):
n = snoise2( xdx * 0.1, ydx * 0.1 )
if ( voxel[ level ][ xdx ][ ydx ] or level < 0 ) and ( uniform( 0, 1 ) < pow( n if n > 0 else 0, 1.0 / 2.5 ) ):
voxel[ selfLevel ][ xdx ][ ydx ] = True
newVoxels += 1
level = selfLevel
if newVoxels == 0:
break
coords = []
faces = []
def writeFaceX( ldx, xdx, ydx, dc ):
global coords, faces
newcoords = [
( xdx + 0.5 * dc, ydx + 0.5, ldx + 0.5 ),
( xdx + 0.5 * dc, ydx - 0.5, ldx + 0.5 ),
( xdx + 0.5 * dc, ydx - 0.5, ldx - 0.5 ),
( xdx + 0.5 * dc, ydx + 0.5, ldx - 0.5 ) ]
lc = len( coords )
coords += newcoords
faces.append( ( lc, lc + 1, lc + 2, lc + 3 ) )
def writeFaceY( ldx, xdx, ydx, dc ):
global coords, faces
newcoords = [
( xdx + 0.5, ydx + 0.5 * dc, ldx + 0.5 ),
( xdx + 0.5, ydx + 0.5 * dc, ldx - 0.5 ),
( xdx - 0.5, ydx + 0.5 * dc, ldx - 0.5 ),
( xdx - 0.5, ydx + 0.5 * dc, ldx + 0.5 ) ]
lc = len( coords )
coords += newcoords
faces.append( ( lc, lc + 1, lc + 2, lc + 3 ) )
def writeFaceL( ldx, xdx, ydx, dc ):
global coords, faces
newcoords = [
( xdx + 0.5, ydx + 0.5, ldx + 0.5 * dc ),
( xdx + 0.5, ydx - 0.5, ldx + 0.5 * dc ),
( xdx - 0.5, ydx - 0.5, ldx + 0.5 * dc ),
( xdx - 0.5, ydx + 0.5, ldx + 0.5 * dc ) ]
lc = len( coords )
coords += newcoords
faces.append( ( lc, lc + 1, lc + 2, lc + 3 ) )
for ldx in range( len( voxel ) ):
for xdx in range( GRIDSIZE ):
for ydx in range( GRIDSIZE ):
if voxel[ ldx ][ xdx ][ ydx ] is False:
continue
# If a neighbor is not filled, write out the face between them
if ldx - 1 < 0 or not voxel[ ldx - 1 ][ xdx ][ ydx ]:
writeFaceL( ldx, xdx, ydx, -1 )
if ldx + 1 >= len( voxel ) or not voxel[ ldx + 1 ][ xdx ][ ydx ]:
writeFaceL( ldx, xdx, ydx, 1 )
if xdx - 1 < 0 or not voxel[ ldx ][ xdx - 1 ][ ydx ]:
writeFaceX( ldx, xdx, ydx, -1 )
if xdx + 1 >= GRIDSIZE or not voxel[ ldx ][ xdx + 1 ][ ydx ]:
writeFaceX( ldx, xdx, ydx, 1 )
if ydx - 1 < 0 or not voxel[ ldx ][ xdx ][ ydx - 1 ]:
writeFaceY( ldx, xdx, ydx, -1 )
if ydx + 1 >= GRIDSIZE or not voxel[ ldx ][ xdx ][ ydx + 1 ]:
writeFaceY( ldx, xdx, ydx, 1 )
with open( "voxel.ply", "w" ) as out:
out.write( "ply\n" )
out.write( "format ascii 1.0\n" )
out.write( "element vertex " + str( len( coords ) ) + "\n" )
out.write( "property float x\n" )
out.write( "property float y\n" )
out.write( "property float z\n" )
out.write( "element face " + str( len( faces ) ) + "\n" )
out.write( "property list uchar int vertex_index\n" )
out.write( "end_header\n" )
for c in coords:
out.write( str( c[ 0 ] ) + " " + str( c[ 1 ] ) + " " + str( c[ 2 ] ) + "\n" )
for f in faces:
out.write( "4 " + str( f[ 0 ] ) + " " + str( f[ 1 ] ) + " " + str( f[ 2 ] ) + " " + str( f[ 3 ] ) + "\n" )
| # On a grid, while a geometric true
from random import uniform
from noise import snoise2
from math import exp, pow
class Sigmoid:
s = 0.001
a = 0
b = 0
def __init__( self, s = 0.001 ):
self.config( s )
def config( self, s ):
self.s = s;
self.a = self.raw( 1 );
self.b = self.raw( 0 );
if( self.a < self.b ):
t = self.a;
self.a = self.b;
self.b = t;
def raw( self, x ):
return 1.0 / ( 1.0 + exp( - self.s * ( x - 0.5 ) ) )
def run( self, x ):
return ( self.raw( x ) - self.b ) * 1.0 / ( self.a - self.b )
GRIDSIZE = 32
MAX_SIZE = 16
voxel = [ ]
level = -1
sig = Sigmoid( 2 )
while len( voxel ) < MAX_SIZE:
newVoxels = 0
selfLevel = level + 1
voxel.append( [ [ False for _ in range( GRIDSIZE ) ] for _ in range( GRIDSIZE ) ] )
for xdx in range( GRIDSIZE ):
for ydx in range( 0, GRIDSIZE ):
n = snoise2( xdx * 0.1, ydx * 0.1 )
if ( voxel[ level ][ xdx ][ ydx ] or level < 0 ) and ( uniform( 0, 1 ) < pow( n if n > 0 else 0, 1.0 / 2.5 ) ):
voxel[ selfLevel ][ xdx ][ ydx ] = True
newVoxels += 1
level = selfLevel
if newVoxels == 0:
break
coords = []
faces = []
def writeFaceX( ldx, xdx, ydx, dc ):
global coords, faces
newcoords = [
( xdx + 0.5 * dc, ydx + 0.5, ldx + 0.5 ),
( xdx + 0.5 * dc, ydx - 0.5, ldx + 0.5 ),
( xdx + 0.5 * dc, ydx - 0.5, ldx - 0.5 ),
( xdx + 0.5 * dc, ydx + 0.5, ldx - 0.5 ) ]
lc = len( coords )
coords += newcoords
faces.append( ( lc, lc + 1, lc + 2, lc + 3 ) )
def writeFaceY( ldx, xdx, ydx, dc ):
global coords, faces
newcoords = [
( xdx + 0.5, ydx + 0.5 * dc, ldx + 0.5 ),
( xdx + 0.5, ydx + 0.5 * dc, ldx - 0.5 ),
( xdx - 0.5, ydx + 0.5 * dc, ldx - 0.5 ),
( xdx - 0.5, ydx + 0.5 * dc, ldx + 0.5 ) ]
lc = len( coords )
coords += newcoords
faces.append( ( lc, lc + 1, lc + 2, lc + 3 ) )
def writeFaceL( ldx, xdx, ydx, dc ):
global coords, faces
newcoords = [
( xdx + 0.5, ydx + 0.5, ldx + 0.5 * dc ),
( xdx + 0.5, ydx - 0.5, ldx + 0.5 * dc ),
( xdx - 0.5, ydx - 0.5, ldx + 0.5 * dc ),
( xdx - 0.5, ydx + 0.5, ldx + 0.5 * dc ) ]
lc = len( coords )
coords += newcoords
faces.append( ( lc, lc + 1, lc + 2, lc + 3 ) )
for ldx in range( len( voxel ) ):
for xdx in range( GRIDSIZE ):
for ydx in range( GRIDSIZE ):
if voxel[ ldx ][ xdx ][ ydx ] is False:
continue
# If a neighbor is not filled, write out the face between them
if ldx - 1 < 0 or not voxel[ ldx - 1 ][ xdx ][ ydx ]:
writeFaceL( ldx, xdx, ydx, -1 )
if ldx + 1 >= len( voxel ) or not voxel[ ldx + 1 ][ xdx ][ ydx ]:
writeFaceL( ldx, xdx, ydx, 1 )
if xdx - 1 < 0 or not voxel[ ldx ][ xdx - 1 ][ ydx ]:
writeFaceX( ldx, xdx, ydx, -1 )
if xdx + 1 >= GRIDSIZE or not voxel[ ldx ][ xdx + 1 ][ ydx ]:
writeFaceX( ldx, xdx, ydx, 1 )
if ydx - 1 < 0 or not voxel[ ldx ][ xdx ][ ydx - 1 ]:
writeFaceY( ldx, xdx, ydx, -1 )
if ydx + 1 >= GRIDSIZE or not voxel[ ldx ][ xdx ][ ydx + 1 ]:
writeFaceY( ldx, xdx, ydx, 1 )
with open( "voxel.ply", "w" ) as out:
out.write( "ply\n" )
out.write( "format ascii 1.0\n" )
out.write( "element vertex " + str( len( coords ) ) + "\n" )
out.write( "property float x\n" )
out.write( "property float y\n" )
out.write( "property float z\n" )
out.write( "element face " + str( len( faces ) ) + "\n" )
out.write( "property list uchar int vertex_index\n" )
out.write( "end_header\n" )
for c in coords:
out.write( str( c[ 0 ] ) + " " + str( c[ 1 ] ) + " " + str( c[ 2 ] ) + "\n" )
for f in faces:
out.write( "4 " + str( f[ 0 ] ) + " " + str( f[ 1 ] ) + " " + str( f[ 2 ] ) + " " + str( f[ 3 ] ) + "\n" ) | en | 0.895781 | # On a grid, while a geometric true # If a neighbor is not filled, write out the face between them | 2.923528 | 3 |
src/ibl_tools/util.py | ekellbuch/ibl_tools | 0 | 6622932 | import numpy as np
def quantile_scaling(x, min_per=5, max_per=95):
"""
Scale data using max and min quantiles
"""
# see quantile_transform sklearn
x_min = np.nanpercentile(x, min_per)
x_max = np.nanpercentile(x, max_per)
xrmp = (x - x_min) / (x_max - x_min)
return xrmp, x_min, x_max
| import numpy as np
def quantile_scaling(x, min_per=5, max_per=95):
"""
Scale data using max and min quantiles
"""
# see quantile_transform sklearn
x_min = np.nanpercentile(x, min_per)
x_max = np.nanpercentile(x, max_per)
xrmp = (x - x_min) / (x_max - x_min)
return xrmp, x_min, x_max
| en | 0.434419 | Scale data using max and min quantiles # see quantile_transform sklearn | 3.382828 | 3 |
Alipay/AlipayNotify.py | Shellbye/django_Alipay | 10 | 6622933 | <reponame>Shellbye/django_Alipay
# -*- coding:utf-8 -*-
__author__ = '<EMAIL>'
import urllib2
from AlipayConfig import Config
from AlipayCore import Core
class Notify():
# HTTPS支付宝通知路径
Https_verify_url = "https://www.alipay.com/cooperate/gateway.do?service=notify_verify&"
# HTTP支付宝通知路径
Http_verify_url = "http://notify.alipay.com/trade/notify_query.do?"
# 从配置文件中初始化变量
# <param name="inputPara">通知返回参数数组</param>
# <param name="notify_id">通知验证ID</param>
def __init__(self):
config = Config()
# 合作身份者ID
self.partner = config.partner
self.seller_id = config.seller_id
# 交易安全校验码
self.key = config.key
self.input_charset = config.input_charset
# 签名方式
self.sign_type = config.sign_type
# 访问模式
self.transport = config.transport
# <summary>
# 验证消息是否是支付宝发出的合法消息
# </summary>
# <param name="inputPara">通知返回参数数组</param>
# <returns>验证结果</returns>
def Verify(self, response_data, method_type):
# 验证基本数据
if method_type == "GET":
if not self.verify_return_base_data(response_data):
return False
elif method_type == "POST":
if not self.verify_notify_base_data(response_data):
return False
# 获取返回回来的待签名数组签名后结果
mysign = self.get_response_mysign(response_data)
# 获取是否是支付宝服务器发来的请求的验证结果
responseTxt = self.verify_source(response_data['notify_id'])
# 验证
# verify_source的结果不是true,与服务器设置问题、合作身份者ID、notify_id一分钟失效有关
# mysign与sign不等,与安全校验码、请求时的参数格式(如:带自定义参数等)、编码格式有关
if responseTxt and response_data['sign'] == mysign: # 验证成功
return True
else: # 验证失败
return False
# <summary>
# 获取返回回来的待签名数组签名后结果
# </summary>
# <param name="inputPara">通知返回参数数组</param>
# <returns>签名结果字符串</returns>
def get_response_mysign(self, inputPara):
# 过滤空值、sign与sign_type参数
sPara = Core.FilterPara(inputPara)
# 获得签名结果
mysign = Core.BuildMysign(sPara, self.key, self.sign_type, self.input_charset)
return mysign
def verify_source(self, notify_id, timeout=120000):
verify_url = self.Https_verify_url
verify_url += "partner=" + self.partner + "¬ify_id=" + notify_id
# 获取远程服务器ATN结果,验证是否是支付宝服务器发来的请求
open_url = urllib2.urlopen(verify_url, timeout=timeout)
return open_url.read() == 'true'
def verify_return_base_data(self, data):
required_keys = ['is_success', 'sign', 'sign_type', 'trade_status', 'notify_id', 'seller_id']
for key in required_keys:
if key not in data:
return False
if data['is_success'] != 'T' or data['trade_status'] != 'TRADE_SUCCESS' \
or data['seller_id'] != self.seller_id or data['sign_type'] != self.sign_type:
return False
return True
def verify_notify_base_data(self, data):
required_keys = ['sign', 'sign_type', 'trade_status', 'notify_id', 'seller_id']
for key in required_keys:
if key not in data:
return False
if data['trade_status'] != 'TRADE_SUCCESS' \
or data['seller_id'] != self.seller_id or data['sign_type'] != self.sign_type:
return False
return True
| # -*- coding:utf-8 -*-
__author__ = '<EMAIL>'
import urllib2
from AlipayConfig import Config
from AlipayCore import Core
class Notify():
# HTTPS支付宝通知路径
Https_verify_url = "https://www.alipay.com/cooperate/gateway.do?service=notify_verify&"
# HTTP支付宝通知路径
Http_verify_url = "http://notify.alipay.com/trade/notify_query.do?"
# 从配置文件中初始化变量
# <param name="inputPara">通知返回参数数组</param>
# <param name="notify_id">通知验证ID</param>
def __init__(self):
config = Config()
# 合作身份者ID
self.partner = config.partner
self.seller_id = config.seller_id
# 交易安全校验码
self.key = config.key
self.input_charset = config.input_charset
# 签名方式
self.sign_type = config.sign_type
# 访问模式
self.transport = config.transport
# <summary>
# 验证消息是否是支付宝发出的合法消息
# </summary>
# <param name="inputPara">通知返回参数数组</param>
# <returns>验证结果</returns>
def Verify(self, response_data, method_type):
# 验证基本数据
if method_type == "GET":
if not self.verify_return_base_data(response_data):
return False
elif method_type == "POST":
if not self.verify_notify_base_data(response_data):
return False
# 获取返回回来的待签名数组签名后结果
mysign = self.get_response_mysign(response_data)
# 获取是否是支付宝服务器发来的请求的验证结果
responseTxt = self.verify_source(response_data['notify_id'])
# 验证
# verify_source的结果不是true,与服务器设置问题、合作身份者ID、notify_id一分钟失效有关
# mysign与sign不等,与安全校验码、请求时的参数格式(如:带自定义参数等)、编码格式有关
if responseTxt and response_data['sign'] == mysign: # 验证成功
return True
else: # 验证失败
return False
# <summary>
# 获取返回回来的待签名数组签名后结果
# </summary>
# <param name="inputPara">通知返回参数数组</param>
# <returns>签名结果字符串</returns>
def get_response_mysign(self, inputPara):
# 过滤空值、sign与sign_type参数
sPara = Core.FilterPara(inputPara)
# 获得签名结果
mysign = Core.BuildMysign(sPara, self.key, self.sign_type, self.input_charset)
return mysign
def verify_source(self, notify_id, timeout=120000):
verify_url = self.Https_verify_url
verify_url += "partner=" + self.partner + "¬ify_id=" + notify_id
# 获取远程服务器ATN结果,验证是否是支付宝服务器发来的请求
open_url = urllib2.urlopen(verify_url, timeout=timeout)
return open_url.read() == 'true'
def verify_return_base_data(self, data):
required_keys = ['is_success', 'sign', 'sign_type', 'trade_status', 'notify_id', 'seller_id']
for key in required_keys:
if key not in data:
return False
if data['is_success'] != 'T' or data['trade_status'] != 'TRADE_SUCCESS' \
or data['seller_id'] != self.seller_id or data['sign_type'] != self.sign_type:
return False
return True
def verify_notify_base_data(self, data):
required_keys = ['sign', 'sign_type', 'trade_status', 'notify_id', 'seller_id']
for key in required_keys:
if key not in data:
return False
if data['trade_status'] != 'TRADE_SUCCESS' \
or data['seller_id'] != self.seller_id or data['sign_type'] != self.sign_type:
return False
return True | zh | 0.603563 | # -*- coding:utf-8 -*- # HTTPS支付宝通知路径 # HTTP支付宝通知路径 # 从配置文件中初始化变量 # <param name="inputPara">通知返回参数数组</param> # <param name="notify_id">通知验证ID</param> # 合作身份者ID # 交易安全校验码 # 签名方式 # 访问模式 # <summary> # 验证消息是否是支付宝发出的合法消息 # </summary> # <param name="inputPara">通知返回参数数组</param> # <returns>验证结果</returns> # 验证基本数据 # 获取返回回来的待签名数组签名后结果 # 获取是否是支付宝服务器发来的请求的验证结果 # 验证 # verify_source的结果不是true,与服务器设置问题、合作身份者ID、notify_id一分钟失效有关 # mysign与sign不等,与安全校验码、请求时的参数格式(如:带自定义参数等)、编码格式有关 # 验证成功 # 验证失败 # <summary> # 获取返回回来的待签名数组签名后结果 # </summary> # <param name="inputPara">通知返回参数数组</param> # <returns>签名结果字符串</returns> # 过滤空值、sign与sign_type参数 # 获得签名结果 # 获取远程服务器ATN结果,验证是否是支付宝服务器发来的请求 | 2.688833 | 3 |
src/comp_crawling/find_recent.py | barahana20/comp_announcement_crawling | 0 | 6622934 | from datetime import datetime
def find_recent(file_name, new_timestamp):
timestamp = file_name[file_name.rfind('_')+1:file_name.index('.md')]
timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H-%M")
new_timestamp = datetime.strptime(new_timestamp, "%Y-%m-%dT%H-%M")
if timestamp < new_timestamp:
return True
else:
return False
if find_recent(r'{info.id}_{info.title}_2022-04-07T01-22.md', '2023-04-07T01-22'):
print('새로 가져온 파일이 더 최신입니다.') | from datetime import datetime
def find_recent(file_name, new_timestamp):
timestamp = file_name[file_name.rfind('_')+1:file_name.index('.md')]
timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H-%M")
new_timestamp = datetime.strptime(new_timestamp, "%Y-%m-%dT%H-%M")
if timestamp < new_timestamp:
return True
else:
return False
if find_recent(r'{info.id}_{info.title}_2022-04-07T01-22.md', '2023-04-07T01-22'):
print('새로 가져온 파일이 더 최신입니다.') | none | 1 | 2.851377 | 3 | |
ppo_pytorch/__init__.py | SSS135/ppo-pytorch | 2 | 6622935 | from . import common
from . import ppo
from . import models
| from . import common
from . import ppo
from . import models
| none | 1 | 1.058359 | 1 | |
bot/utils/convert.py | AEnterprise/kanelbulle | 4 | 6622936 | from discord.ext import commands
class ApexPlatformConverter(commands.Converter):
async def convert(self, ctx, argument):
ValidPlatforms = {
"XBOX": {"value": "xbl", "name": "Xbox"},
"PS4": {"value": "psn", "name": "PSN"},
"PC": {"value": "origin", "name": "PC"}
}
try:
return(ValidPlatforms[f"{argument.upper()}"])
except KeyError:
return("invalid")
class FortnitePlatformConverter(commands.Converter):
async def convert(self, ctx, argument):
if argument.lower() == "pc":
request_platform = "pc"
elif argument.lower() == "ps4":
request_platform = "psn"
elif argument.lower() == "xbox":
request_platform = "xbl"
else:
request_platform = "invalid"
return(request_platform)
| from discord.ext import commands
class ApexPlatformConverter(commands.Converter):
async def convert(self, ctx, argument):
ValidPlatforms = {
"XBOX": {"value": "xbl", "name": "Xbox"},
"PS4": {"value": "psn", "name": "PSN"},
"PC": {"value": "origin", "name": "PC"}
}
try:
return(ValidPlatforms[f"{argument.upper()}"])
except KeyError:
return("invalid")
class FortnitePlatformConverter(commands.Converter):
async def convert(self, ctx, argument):
if argument.lower() == "pc":
request_platform = "pc"
elif argument.lower() == "ps4":
request_platform = "psn"
elif argument.lower() == "xbox":
request_platform = "xbl"
else:
request_platform = "invalid"
return(request_platform)
| none | 1 | 2.863571 | 3 | |
lib/utils/utils.py | Bazinga699/NCL | 32 | 6622937 | <reponame>Bazinga699/NCL
import logging
import time
import os
import torch
from utils.lr_scheduler import WarmupMultiStepLR
from net import multi_Network, multi_Network_MOCO
def create_logger(cfg, rank=0):
dataset = cfg.DATASET.DATASET
net_type = cfg.BACKBONE.TYPE
module_type = cfg.MODULE.TYPE
log_dir = os.path.join(cfg.OUTPUT_DIR, cfg.NAME, "logs")
if not os.path.exists(log_dir) and rank == 0:
os.makedirs(log_dir)
time_str = time.strftime("%Y-%m-%d-%H-%M")
log_name = "{}_{}_{}_{}.log".format(dataset, net_type, module_type, time_str)
log_file = os.path.join(log_dir, log_name)
# set up logger
print("=> creating log {}".format(log_file))
head = "%(asctime)-15s %(message)s"
logging.basicConfig(filename=str(log_file), format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if rank > 0:
return logger, log_file
console = logging.StreamHandler()
logging.getLogger("").addHandler(console)
logger.info("---------------------Cfg is set as follow--------------------")
logger.info(cfg)
logger.info("-------------------------------------------------------------")
return logger, log_file
def get_optimizer(cfg, model):
base_lr = cfg.TRAIN.OPTIMIZER.BASE_LR
params = []
for name, p in model.named_parameters():
if p.requires_grad:
params.append({"params": p})
else:
print("not add to optimizer: {}".format(name))
if cfg.TRAIN.OPTIMIZER.TYPE == "SGD":
optimizer = torch.optim.SGD(
params,
lr=base_lr,
momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM,
weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,
nesterov=True,
)
elif cfg.TRAIN.OPTIMIZER.TYPE == "ADAM":
optimizer = torch.optim.Adam(
params,
lr=base_lr,
betas=(0.9, 0.999),
weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,
)
else:
raise NotImplementedError
return optimizer
def get_scheduler(cfg, optimizer):
if cfg.TRAIN.LR_SCHEDULER.TYPE == "multistep":
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
cfg.TRAIN.LR_SCHEDULER.LR_STEP,
gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,
)
elif cfg.TRAIN.LR_SCHEDULER.TYPE == "cosine":
if cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END > 0:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END, eta_min=1e-4
)
else:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=cfg.TRAIN.MAX_EPOCH, eta_min=1e-4
)
elif cfg.TRAIN.LR_SCHEDULER.TYPE == "warmup":
scheduler = WarmupMultiStepLR(
optimizer,
cfg.TRAIN.LR_SCHEDULER.LR_STEP,
gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,
warmup_epochs=cfg.TRAIN.LR_SCHEDULER.WARM_EPOCH,
)
else:
raise NotImplementedError("Unsupported LR Scheduler: {}".format(cfg.TRAIN.LR_SCHEDULER.TYPE))
return scheduler
def get_multi_model_final(cfg, num_classes, num_class_list, device, logger):
if cfg.NETWORK.MOCO:
model = multi_Network_MOCO(cfg, mode="train", num_classes=num_classes, use_dropout=cfg.DROPOUT)
else:
model = multi_Network(cfg, mode="train", num_classes=num_classes, use_dropout=cfg.DROPOUT)
if cfg.BACKBONE.FREEZE == True:
model.freeze_multi_backbone()
logger.info("Backbone has been freezed")
return model
def get_category_list(annotations, num_classes, cfg):
num_list = [0] * num_classes
cat_list = []
print("Weight List has been produced")
for anno in annotations:
category_id = anno["category_id"]
num_list[category_id] += 1
cat_list.append(category_id)
return num_list, cat_list
| import logging
import time
import os
import torch
from utils.lr_scheduler import WarmupMultiStepLR
from net import multi_Network, multi_Network_MOCO
def create_logger(cfg, rank=0):
dataset = cfg.DATASET.DATASET
net_type = cfg.BACKBONE.TYPE
module_type = cfg.MODULE.TYPE
log_dir = os.path.join(cfg.OUTPUT_DIR, cfg.NAME, "logs")
if not os.path.exists(log_dir) and rank == 0:
os.makedirs(log_dir)
time_str = time.strftime("%Y-%m-%d-%H-%M")
log_name = "{}_{}_{}_{}.log".format(dataset, net_type, module_type, time_str)
log_file = os.path.join(log_dir, log_name)
# set up logger
print("=> creating log {}".format(log_file))
head = "%(asctime)-15s %(message)s"
logging.basicConfig(filename=str(log_file), format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if rank > 0:
return logger, log_file
console = logging.StreamHandler()
logging.getLogger("").addHandler(console)
logger.info("---------------------Cfg is set as follow--------------------")
logger.info(cfg)
logger.info("-------------------------------------------------------------")
return logger, log_file
def get_optimizer(cfg, model):
base_lr = cfg.TRAIN.OPTIMIZER.BASE_LR
params = []
for name, p in model.named_parameters():
if p.requires_grad:
params.append({"params": p})
else:
print("not add to optimizer: {}".format(name))
if cfg.TRAIN.OPTIMIZER.TYPE == "SGD":
optimizer = torch.optim.SGD(
params,
lr=base_lr,
momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM,
weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,
nesterov=True,
)
elif cfg.TRAIN.OPTIMIZER.TYPE == "ADAM":
optimizer = torch.optim.Adam(
params,
lr=base_lr,
betas=(0.9, 0.999),
weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,
)
else:
raise NotImplementedError
return optimizer
def get_scheduler(cfg, optimizer):
if cfg.TRAIN.LR_SCHEDULER.TYPE == "multistep":
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
cfg.TRAIN.LR_SCHEDULER.LR_STEP,
gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,
)
elif cfg.TRAIN.LR_SCHEDULER.TYPE == "cosine":
if cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END > 0:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END, eta_min=1e-4
)
else:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=cfg.TRAIN.MAX_EPOCH, eta_min=1e-4
)
elif cfg.TRAIN.LR_SCHEDULER.TYPE == "warmup":
scheduler = WarmupMultiStepLR(
optimizer,
cfg.TRAIN.LR_SCHEDULER.LR_STEP,
gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,
warmup_epochs=cfg.TRAIN.LR_SCHEDULER.WARM_EPOCH,
)
else:
raise NotImplementedError("Unsupported LR Scheduler: {}".format(cfg.TRAIN.LR_SCHEDULER.TYPE))
return scheduler
def get_multi_model_final(cfg, num_classes, num_class_list, device, logger):
if cfg.NETWORK.MOCO:
model = multi_Network_MOCO(cfg, mode="train", num_classes=num_classes, use_dropout=cfg.DROPOUT)
else:
model = multi_Network(cfg, mode="train", num_classes=num_classes, use_dropout=cfg.DROPOUT)
if cfg.BACKBONE.FREEZE == True:
model.freeze_multi_backbone()
logger.info("Backbone has been freezed")
return model
def get_category_list(annotations, num_classes, cfg):
num_list = [0] * num_classes
cat_list = []
print("Weight List has been produced")
for anno in annotations:
category_id = anno["category_id"]
num_list[category_id] += 1
cat_list.append(category_id)
return num_list, cat_list | en | 0.489806 | # set up logger | 2.181174 | 2 |
aussiebb/const.py | yaleman/pyaussiebb | 0 | 6622938 | <reponame>yaleman/pyaussiebb
""" constants and utilities """
from typing import TypedDict
BASEURL = {
"api": "https://myaussie-api.aussiebroadband.com.au",
"login": "https://myaussie-auth.aussiebroadband.com.au/login",
}
DEFAULT_BACKOFF_DELAY = 90
DefaultHeaders = TypedDict(
"DefaultHeaders",
{
"Accept": str,
"Cache-Control": str,
"Content-Type": str,
"Origin": str,
"Referer": str,
},
)
def default_headers() -> DefaultHeaders:
"""returns a default set of headers"""
return {
"Accept": "application/json",
"Content-Type": "application/json",
"Origin": "https://my.aussiebroadband.com.au",
"Referer": "https://my.aussiebroadband.com.au/",
"Cache-Control": "no-cache",
}
API_ENDPOINTS = {
"account_contacts": "/contacts",
"account_paymentplans": "/billing/paymentplans",
"account_transactions": "/billing/transactions?group=true",
"billing_invoices": "/billing/invoices/{invoice_id}",
"fetch_service": "/fetch/{service_id}",
"get_appointment": "/tickets/{ticketid}/appointment",
"get_customer_details": "/customer",
"get_order": "/orders/nbn/{order_id}",
"get_orders": "/orders?v=2",
"get_service_tests": "/tests/{service_id}/available",
"get_services": "/services",
"get_test_history": "/tests/{service_id}",
"get_usage": "/broadband/{service_id}/usage",
"service_boltons": "/nbn/{service_id}/boltons",
"service_datablocks": "/nbn/{service_id}/datablocks",
"service_outages": "/nbn/{service_id}/outages",
"service_plans": "/planchange/{service_id}",
"support_tickets": "/tickets",
"telephony_usage": "/telephony/{service_id}/usage",
"test_line_state": "/tests/{service_id}/linestate",
"voip_devices": "/voip/{service_id}/devices",
"voip_service": "/voip/{service_id}",
}
TEST_MOCKDATA = {
"telephony_usage": {
"national": {"calls": 0, "cost": 0},
"mobile": {"calls": 0, "cost": 0},
"international": {"calls": 0, "cost": 0},
"sms": {"calls": 0, "cost": 0},
"internet": {"kbytes": 0, "cost": 0},
"voicemail": {"calls": 0, "cost": 0},
"other": {"calls": 0, "cost": 0},
"daysTotal": 31,
"daysRemaining": 2,
"historical": [],
},
"service_voip": {
"service_id": 123456,
"type": "VOIP",
"name": "VOIP",
"plan": "Aussie VOIP Casual ($0)",
"description": "VOIP: 123 DRURY LN, SUBURBTON",
"voipDetails": {
"phoneNumber": "0912345678",
"barInternational": True,
"divertNumber": None,
"supportsNumberDiversion": True,
},
"nextBillDate": "2054-01-01T13:00:00Z",
"openDate": "1970-01-01T13:00:00Z",
"usageAnniversary": 16,
"address": None,
"contract": None,
"discounts": [],
},
"service_nbn_fttc": {
"service_id": 12345,
"type": "NBN",
"name": "NBN",
"plan": "NBN 100/40Mbps - Plan Name",
"description": "NBN: 123 DRURY LN, SUBURBTON QLD - AVC000000000001",
"nbnDetails": {
"product": "FTTC",
"poiName": "<NAME>",
"cvcGraph": "https://cvcs.aussiebroadband.com.au/camphilllink2.png",
},
"nextBillDate": "2054-01-01T13:00:00Z",
"openDate": "1970-01-05T13:00:00Z",
"usageAnniversary": 16,
"ipAddresses": [
"2403:1001:b33f:1::/64",
"2403:7007:face::/48",
"172.16.17.32",
],
"address": {
"subaddresstype": None,
"subaddressnumber": None,
"streetnumber": "123",
"streetname": "DRURY",
"streettype": "LN",
"locality": "SUBURBTON",
"postcode": "4001",
"state": "QLD",
},
"contract": None,
"discounts": [],
},
}
FETCH_TYPES = [
"FETCHTV",
]
NBN_TYPES = [
"NBN",
"Opticomm",
]
PHONE_TYPES = [
"VOIP",
"PhoneMobile",
]
| """ constants and utilities """
from typing import TypedDict
BASEURL = {
"api": "https://myaussie-api.aussiebroadband.com.au",
"login": "https://myaussie-auth.aussiebroadband.com.au/login",
}
DEFAULT_BACKOFF_DELAY = 90
DefaultHeaders = TypedDict(
"DefaultHeaders",
{
"Accept": str,
"Cache-Control": str,
"Content-Type": str,
"Origin": str,
"Referer": str,
},
)
def default_headers() -> DefaultHeaders:
"""returns a default set of headers"""
return {
"Accept": "application/json",
"Content-Type": "application/json",
"Origin": "https://my.aussiebroadband.com.au",
"Referer": "https://my.aussiebroadband.com.au/",
"Cache-Control": "no-cache",
}
API_ENDPOINTS = {
"account_contacts": "/contacts",
"account_paymentplans": "/billing/paymentplans",
"account_transactions": "/billing/transactions?group=true",
"billing_invoices": "/billing/invoices/{invoice_id}",
"fetch_service": "/fetch/{service_id}",
"get_appointment": "/tickets/{ticketid}/appointment",
"get_customer_details": "/customer",
"get_order": "/orders/nbn/{order_id}",
"get_orders": "/orders?v=2",
"get_service_tests": "/tests/{service_id}/available",
"get_services": "/services",
"get_test_history": "/tests/{service_id}",
"get_usage": "/broadband/{service_id}/usage",
"service_boltons": "/nbn/{service_id}/boltons",
"service_datablocks": "/nbn/{service_id}/datablocks",
"service_outages": "/nbn/{service_id}/outages",
"service_plans": "/planchange/{service_id}",
"support_tickets": "/tickets",
"telephony_usage": "/telephony/{service_id}/usage",
"test_line_state": "/tests/{service_id}/linestate",
"voip_devices": "/voip/{service_id}/devices",
"voip_service": "/voip/{service_id}",
}
TEST_MOCKDATA = {
"telephony_usage": {
"national": {"calls": 0, "cost": 0},
"mobile": {"calls": 0, "cost": 0},
"international": {"calls": 0, "cost": 0},
"sms": {"calls": 0, "cost": 0},
"internet": {"kbytes": 0, "cost": 0},
"voicemail": {"calls": 0, "cost": 0},
"other": {"calls": 0, "cost": 0},
"daysTotal": 31,
"daysRemaining": 2,
"historical": [],
},
"service_voip": {
"service_id": 123456,
"type": "VOIP",
"name": "VOIP",
"plan": "Aussie VOIP Casual ($0)",
"description": "VOIP: 123 DRURY LN, SUBURBTON",
"voipDetails": {
"phoneNumber": "0912345678",
"barInternational": True,
"divertNumber": None,
"supportsNumberDiversion": True,
},
"nextBillDate": "2054-01-01T13:00:00Z",
"openDate": "1970-01-01T13:00:00Z",
"usageAnniversary": 16,
"address": None,
"contract": None,
"discounts": [],
},
"service_nbn_fttc": {
"service_id": 12345,
"type": "NBN",
"name": "NBN",
"plan": "NBN 100/40Mbps - Plan Name",
"description": "NBN: 123 DRURY LN, SUBURBTON QLD - AVC000000000001",
"nbnDetails": {
"product": "FTTC",
"poiName": "<NAME>",
"cvcGraph": "https://cvcs.aussiebroadband.com.au/camphilllink2.png",
},
"nextBillDate": "2054-01-01T13:00:00Z",
"openDate": "1970-01-05T13:00:00Z",
"usageAnniversary": 16,
"ipAddresses": [
"2403:1001:b33f:1::/64",
"2403:7007:face::/48",
"172.16.17.32",
],
"address": {
"subaddresstype": None,
"subaddressnumber": None,
"streetnumber": "123",
"streetname": "DRURY",
"streettype": "LN",
"locality": "SUBURBTON",
"postcode": "4001",
"state": "QLD",
},
"contract": None,
"discounts": [],
},
}
FETCH_TYPES = [
"FETCHTV",
]
NBN_TYPES = [
"NBN",
"Opticomm",
]
PHONE_TYPES = [
"VOIP",
"PhoneMobile",
] | en | 0.47466 | constants and utilities returns a default set of headers | 2.370382 | 2 |
transparentemail/services/Emails/emailException.py | fossabot/TransparentEmail | 5 | 6622939 | <reponame>fossabot/TransparentEmail<filename>transparentemail/services/Emails/emailException.py
class InvalidEmailException(Exception):
pass
| class InvalidEmailException(Exception):
pass | none | 1 | 1.147237 | 1 | |
setup.py | wesleyks/fine_sm | 2 | 6622940 | <gh_stars>1-10
from setuptools import setup
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='finesm',
version='1.0.0',
description='A fine state machine',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/wesleyks/finesm',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
],
keywords='finite-state machine state',
packages=['finesm'],
)
| from setuptools import setup
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='finesm',
version='1.0.0',
description='A fine state machine',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/wesleyks/finesm',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
],
keywords='finite-state machine state',
packages=['finesm'],
) | none | 1 | 1.319419 | 1 | |
video_detection.py | kihtov23/yolo_image_and_video_detection | 0 | 6622941 | from imageai.Detection import VideoObjectDetection
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath('.\\yolo.h5')
detector.loadModel()
video_path = detector.detectObjectsFromVideo(
input_file_path='.\\input_data\cars.mp4',
output_file_path='.\\output_data\cars_detected',
log_progress=True
) | from imageai.Detection import VideoObjectDetection
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath('.\\yolo.h5')
detector.loadModel()
video_path = detector.detectObjectsFromVideo(
input_file_path='.\\input_data\cars.mp4',
output_file_path='.\\output_data\cars_detected',
log_progress=True
) | none | 1 | 2.094755 | 2 | |
tests/model/test_calendar.py | gpresazzi/parrocchie-valmalenco-FM | 1 | 6622942 | import pytest
from parrocchie_valmalenco_fm.model.calendar import Calendar
from parrocchie_valmalenco_fm.model.mass import Mass
class TestCalendar:
def test_validate_ok(self):
calendar = Calendar()
mass1 = Mass("caspoggio", "27/10/2020", "18:00", "19:00")
calendar.mass_list.append(mass1)
mass2 = Mass("torre", "27/10/2020", "20:00", "21:00")
calendar.mass_list.append(mass2)
mass3 = Mass("spriana", "27/10/2020", "20:00", "21:00")
calendar.mass_list.append(mass3)
calendar.check_no_overlap()
def test_validate_fail(self):
calendar = Calendar()
mass1 = Mass("caspoggio", "27/10/2020", "18:00", "20:00")
calendar.mass_list.append(mass1)
mass2 = Mass("lanzada", "27/10/2020", "19:00", "21:00")
calendar.mass_list.append(mass2)
mass3 = Mass("spriana", "27/10/2020", "20:00", "21:00")
calendar.mass_list.append(mass3)
with(pytest.raises(ValueError)):
calendar.check_no_overlap()
def test_active_slot(self):
calendar = Calendar()
mass1 = Mass("caspoggio", "27/10/2020", "18:00", "20:00")
calendar.mass_list.append(mass1)
mass2 = Mass("torre", "28/10/2020", "19:00", "21:00")
calendar.mass_list.append(mass2)
assert calendar.active_slot("27/10/2020", "18:30") == "caspoggio"
assert calendar.active_slot("28/10/2020", "19:30") == "torre"
assert calendar.active_slot("29/10/2020", "19:30") == None
| import pytest
from parrocchie_valmalenco_fm.model.calendar import Calendar
from parrocchie_valmalenco_fm.model.mass import Mass
class TestCalendar:
def test_validate_ok(self):
calendar = Calendar()
mass1 = Mass("caspoggio", "27/10/2020", "18:00", "19:00")
calendar.mass_list.append(mass1)
mass2 = Mass("torre", "27/10/2020", "20:00", "21:00")
calendar.mass_list.append(mass2)
mass3 = Mass("spriana", "27/10/2020", "20:00", "21:00")
calendar.mass_list.append(mass3)
calendar.check_no_overlap()
def test_validate_fail(self):
calendar = Calendar()
mass1 = Mass("caspoggio", "27/10/2020", "18:00", "20:00")
calendar.mass_list.append(mass1)
mass2 = Mass("lanzada", "27/10/2020", "19:00", "21:00")
calendar.mass_list.append(mass2)
mass3 = Mass("spriana", "27/10/2020", "20:00", "21:00")
calendar.mass_list.append(mass3)
with(pytest.raises(ValueError)):
calendar.check_no_overlap()
def test_active_slot(self):
calendar = Calendar()
mass1 = Mass("caspoggio", "27/10/2020", "18:00", "20:00")
calendar.mass_list.append(mass1)
mass2 = Mass("torre", "28/10/2020", "19:00", "21:00")
calendar.mass_list.append(mass2)
assert calendar.active_slot("27/10/2020", "18:30") == "caspoggio"
assert calendar.active_slot("28/10/2020", "19:30") == "torre"
assert calendar.active_slot("29/10/2020", "19:30") == None
| none | 1 | 2.308159 | 2 | |
jasmin/protocols/cli/smppccm.py | pyghassen/jasmin | 0 | 6622943 | import pickle
from twisted.internet import defer
from jasmin.protocols.smpp.configs import SMPPClientConfig
from jasmin.protocols.cli.managers import Manager, Session
from jasmin.vendor.smpp.pdu.constants import (addr_npi_name_map, addr_ton_name_map,
replace_if_present_flap_name_map, priority_flag_name_map)
from jasmin.protocols.cli.protocol import str2num
# A config map between console-configuration keys and SMPPClientConfig keys.
SMPPClientConfigKeyMap = {'cid': 'id', 'host': 'host', 'port': 'port', 'username': 'username',
'password': 'password', 'systype': 'systemType', 'logfile': 'log_file', 'loglevel': 'log_level',
'bind_to': 'sessionInitTimerSecs', 'elink_interval': 'enquireLinkTimerSecs', 'trx_to': 'inactivityTimerSecs',
'res_to': 'responseTimerSecs', 'con_loss_retry': 'reconnectOnConnectionLoss', 'con_fail_retry': 'reconnectOnConnectionFailure',
'con_loss_delay': 'reconnectOnConnectionLossDelay', 'con_fail_delay': 'reconnectOnConnectionFailureDelay',
'pdu_red_to': 'pduReadTimerSecs', 'bind': 'bindOperation', 'bind_ton': 'bind_addr_ton', 'bind_npi': 'bind_addr_npi',
'src_ton': 'source_addr_ton', 'src_npi': 'source_addr_npi', 'dst_ton': 'dest_addr_ton', 'dst_npi': 'dest_addr_npi',
'addr_range': 'address_range', 'src_addr': 'source_addr', 'proto_id': 'protocol_id',
'priority': 'priority_flag', 'validity': 'validity_period', 'ripf': 'replace_if_present_flag',
'def_msg_id': 'sm_default_msg_id', 'coding': 'data_coding', 'requeue_delay': 'requeue_delay', 'submit_throughput': 'submit_sm_throughput',
'dlr_expiry': 'dlr_expiry'
}
# When updating a key from RequireRestartKeys, the connector need restart for update to take effect
RequireRestartKeys = ['host', 'port', 'username', 'password', 'systemType', 'logfile', 'loglevel']
def castToBuiltInType(key, value):
'Will cast value to the correct type depending on the key'
if isinstance(value, bool):
return 1 if value else 0
if key in ['bind_npi', 'dst_npi', 'src_npi']:
return addr_npi_name_map[str(value)]
if key in ['bind_ton', 'dst_ton', 'src_ton']:
return addr_ton_name_map[str(value)]
if key == 'ripf':
return replace_if_present_flap_name_map[str(value)]
if key == 'priority':
return priority_flag_name_map[str(value)]
return value
class JCliSMPPClientConfig(SMPPClientConfig):
'Overload SMPPClientConfig with getters and setters for JCli'
PendingRestart = False
def set(self, key, value):
setattr(self, key, value)
if key in RequireRestartKeys:
self.PendingRestart = True
def getAll(self):
r = {}
for key, value in SMPPClientConfigKeyMap.iteritems():
r[key] = castToBuiltInType(key, getattr(self, value))
return r
def SMPPClientConfigBuild(fCallback):
'Parse args and try to build a JCliSMPPClientConfig instance to pass it to fCallback'
def parse_args_and_call_with_instance(self, *args, **kwargs):
cmd = args[0]
arg = args[1]
# Empty line
if cmd is None:
return self.protocol.sendData()
# Initiate JCliSMPPClientConfig with sessBuffer content
if cmd == 'ok':
if len(self.sessBuffer) == 0:
return self.protocol.sendData('You must set at least connector id (cid) before saving !')
connector = {}
for key, value in self.sessBuffer.iteritems():
connector[key] = value
try:
SMPPClientConfigInstance = JCliSMPPClientConfig(**connector)
# Hand the instance to fCallback
return fCallback(self, SMPPClientConfigInstance)
except Exception, e:
return self.protocol.sendData('Error: %s' % str(e))
else:
# Unknown key
if not SMPPClientConfigKeyMap.has_key(cmd):
return self.protocol.sendData('Unknown SMPPClientConfig key: %s' % cmd)
# Cast to boolean
if cmd in ['con_loss_retry', 'con_fail_retry']:
if arg.lower() in ['yes', 'y', '1']:
arg = True
elif arg.lower() in ['no', 'n', '0']:
arg = False
# Buffer key for later SMPPClientConfig initiating
SMPPClientConfigKey = SMPPClientConfigKeyMap[cmd]
if isinstance(arg, str):
self.sessBuffer[SMPPClientConfigKey] = str2num(arg)
else:
self.sessBuffer[SMPPClientConfigKey] = arg
return self.protocol.sendData()
return parse_args_and_call_with_instance
def SMPPClientConfigUpdate(fCallback):
'''Get connector configuration and log update requests passing to fCallback
The log will be handed to fCallback when 'ok' is received'''
def log_update_requests_and_call(self, *args, **kwargs):
cmd = args[0]
arg = args[1]
# Empty line
if cmd is None:
return self.protocol.sendData()
# Pass sessBuffer as updateLog to fCallback
if cmd == 'ok':
if len(self.sessBuffer) == 0:
return self.protocol.sendData('Nothing to save')
return fCallback(self, self.sessBuffer)
else:
# Unknown key
if not SMPPClientConfigKeyMap.has_key(cmd):
return self.protocol.sendData('Unknown SMPPClientConfig key: %s' % cmd)
if cmd == 'cid':
return self.protocol.sendData('Connector id can not be modified !')
# Buffer key for later (when receiving 'ok')
SMPPClientConfigKey = SMPPClientConfigKeyMap[cmd]
self.sessBuffer[SMPPClientConfigKey] = str2num(arg)
return self.protocol.sendData()
return log_update_requests_and_call
class ConnectorExist:
'Check if connector cid exist before passing it to fCallback'
def __init__(self, cid_key):
self.cid_key = cid_key
def __call__(self, fCallback):
cid_key = self.cid_key
def exist_connector_and_call(self, *args, **kwargs):
opts = args[1]
cid = getattr(opts, cid_key)
if self.pb['smppcm'].getConnector(cid) is not None:
return fCallback(self, *args, **kwargs)
return self.protocol.sendData('Unknown connector: %s' % cid)
return exist_connector_and_call
class SmppCCManager(Manager):
    """jCli manager exposing the SMPP client connector (`smppcc`) commands.

    All operations are delegated to the SMPP client manager perspective
    broker available as self.pb['smppcm'].  Fixed here: the two Python-2-only
    dict.iteritems() calls are replaced with .items(), which behaves the same
    on Python 2 and is required on Python 3.
    """
    managerName = 'smppcc'

    def persist(self, arg, opts):
        """Persist the connectors configuration to the given profile."""
        if self.pb['smppcm'].perspective_persist(opts.profile):
            self.protocol.sendData('%s configuration persisted (profile:%s)' % (self.managerName, opts.profile), prompt=False)
        else:
            self.protocol.sendData('Failed to persist %s configuration (profile:%s)' % (self.managerName, opts.profile), prompt=False)

    @defer.inlineCallbacks
    def load(self, arg, opts):
        """Load the connectors configuration from the given profile."""
        r = yield self.pb['smppcm'].perspective_load(opts.profile)
        if r:
            self.protocol.sendData('%s configuration loaded (profile:%s)' % (self.managerName, opts.profile), prompt=False)
        else:
            self.protocol.sendData('Failed to load %s configuration (profile:%s)' % (self.managerName, opts.profile), prompt=False)

    def list(self, arg, opts):
        """Print a table of all connectors: id, service state, session, counters."""
        connectors = self.pb['smppcm'].perspective_connector_list()
        counter = 0

        if len(connectors) > 0:
            self.protocol.sendData("#%s %s %s %s %s" % (
                'Connector id'.ljust(35),
                'Service'.ljust(7),
                'Session'.ljust(16),
                'Starts'.ljust(6),
                'Stops'.ljust(5),
            ), prompt=False)

            for connector in connectors:
                counter += 1
                self.protocol.sendData("#%s %s %s %s %s" % (
                    str(connector['id']).ljust(35),
                    str('started' if connector['service_status'] == 1 else 'stopped').ljust(7),
                    str(connector['session_state']).ljust(16),
                    str(connector['start_count']).ljust(6),
                    str(connector['stop_count']).ljust(5),
                ), prompt=False)

            self.protocol.sendData(prompt=False)

        self.protocol.sendData('Total connectors: %s' % counter)

    @Session
    @SMPPClientConfigBuild
    @defer.inlineCallbacks
    def add_session(self, SMPPClientConfigInstance):
        """Session endpoint: register a newly built connector configuration."""
        st = yield self.pb['smppcm'].perspective_connector_add(pickle.dumps(SMPPClientConfigInstance, 2))

        if st:
            self.protocol.sendData('Successfully added connector [%s]' % SMPPClientConfigInstance.id, prompt=False)
            self.stopSession()
        else:
            self.protocol.sendData('Failed adding connector, check log for details')

    def add(self, arg, opts):
        """Start an interactive session to add a new connector."""
        # NOTE: 'annoucement'/'completitions' are the keyword names as
        # (mis)spelled by Manager.startSession — do not "fix" them here.
        return self.startSession(self.add_session,
                                 annoucement='Adding a new connector: (ok: save, ko: exit)',
                                 completitions=SMPPClientConfigKeyMap.keys())

    @Session
    @SMPPClientConfigUpdate
    @defer.inlineCallbacks
    def update_session(self, updateLog):
        """Session endpoint: apply buffered key/value updates to a connector.

        If a changed key requires a restart and the connector is currently
        started, it is stopped and started again so the change takes effect.
        """
        connector = self.pb['smppcm'].getConnector(self.sessionContext['cid'])
        connectorDetails = self.pb['smppcm'].getConnectorDetails(self.sessionContext['cid'])
        # .items() replaces the Python 2-only .iteritems()
        for key, value in updateLog.items():
            connector['config'].set(key, value)

        if connector['config'].PendingRestart and connectorDetails['service_status'] == 1:
            self.protocol.sendData('Restarting connector [%s] for updates to take effect ...' % self.sessionContext['cid'], prompt=False)
            st = yield self.pb['smppcm'].perspective_connector_stop(self.sessionContext['cid'])
            if not st:
                self.protocol.sendData('Failed stopping connector, check log for details', prompt=False)
            else:
                self.pb['smppcm'].perspective_connector_start(self.sessionContext['cid'])

        self.protocol.sendData('Successfully updated connector [%s]' % self.sessionContext['cid'], prompt=False)
        self.stopSession()

    @ConnectorExist(cid_key='update')
    def update(self, arg, opts):
        """Start an interactive session to update an existing connector."""
        return self.startSession(self.update_session,
                                 annoucement='Updating connector id [%s]: (ok: save, ko: exit)' % opts.update,
                                 completitions=SMPPClientConfigKeyMap.keys(),
                                 sessionContext={'cid': opts.update})

    @ConnectorExist(cid_key='remove')
    @defer.inlineCallbacks
    def remove(self, arg, opts):
        """Remove a connector by id."""
        st = yield self.pb['smppcm'].perspective_connector_remove(opts.remove)

        if st:
            self.protocol.sendData('Successfully removed connector id:%s' % opts.remove)
        else:
            self.protocol.sendData('Failed removing connector, check log for details')

    @ConnectorExist(cid_key='show')
    def show(self, arg, opts):
        """Display every configuration key/value of a connector."""
        connector = self.pb['smppcm'].getConnector(opts.show)
        # .items() replaces the Python 2-only .iteritems()
        for k, v in connector['config'].getAll().items():
            self.protocol.sendData('%s %s' % (k, v), prompt=False)
        self.protocol.sendData()

    @ConnectorExist(cid_key='stop')
    @defer.inlineCallbacks
    def stop(self, arg, opts):
        """Stop a running connector by id."""
        st = yield self.pb['smppcm'].perspective_connector_stop(opts.stop)

        if st:
            self.protocol.sendData('Successfully stopped connector id:%s' % opts.stop)
        else:
            self.protocol.sendData('Failed stopping connector, check log for details')

    @ConnectorExist(cid_key='start')
    def start(self, arg, opts):
        """Start a connector by id."""
        st = self.pb['smppcm'].perspective_connector_start(opts.start)

        if st:
            self.protocol.sendData('Successfully started connector id:%s' % opts.start)
        else:
            self.protocol.sendData('Failed starting connector, check log for details')
| import pickle
from twisted.internet import defer
from jasmin.protocols.smpp.configs import SMPPClientConfig
from jasmin.protocols.cli.managers import Manager, Session
from jasmin.vendor.smpp.pdu.constants import (addr_npi_name_map, addr_ton_name_map,
replace_if_present_flap_name_map, priority_flag_name_map)
from jasmin.protocols.cli.protocol import str2num
# A config map between console-configuration keys (left) and SMPPClientConfig
# attribute names (right).
SMPPClientConfigKeyMap = {'cid': 'id', 'host': 'host', 'port': 'port', 'username': 'username',
                          'password': 'password', 'systype': 'systemType', 'logfile': 'log_file', 'loglevel': 'log_level',
                          'bind_to': 'sessionInitTimerSecs', 'elink_interval': 'enquireLinkTimerSecs', 'trx_to': 'inactivityTimerSecs',
                          'res_to': 'responseTimerSecs', 'con_loss_retry': 'reconnectOnConnectionLoss', 'con_fail_retry': 'reconnectOnConnectionFailure',
                          'con_loss_delay': 'reconnectOnConnectionLossDelay', 'con_fail_delay': 'reconnectOnConnectionFailureDelay',
                          'pdu_red_to': 'pduReadTimerSecs', 'bind': 'bindOperation', 'bind_ton': 'bind_addr_ton', 'bind_npi': 'bind_addr_npi',
                          'src_ton': 'source_addr_ton', 'src_npi': 'source_addr_npi', 'dst_ton': 'dest_addr_ton', 'dst_npi': 'dest_addr_npi',
                          'addr_range': 'address_range', 'src_addr': 'source_addr', 'proto_id': 'protocol_id',
                          'priority': 'priority_flag', 'validity': 'validity_period', 'ripf': 'replace_if_present_flag',
                          'def_msg_id': 'sm_default_msg_id', 'coding': 'data_coding', 'requeue_delay': 'requeue_delay', 'submit_throughput': 'submit_sm_throughput',
                          'dlr_expiry': 'dlr_expiry'
                          }
# When updating a key from RequireRestartKeys, the connector needs a restart
# for the update to take effect.
# NOTE(review): JCliSMPPClientConfig.set() is called with SMPPClientConfig
# attribute names ('log_file', 'log_level'), but this list holds the console
# spellings 'logfile'/'loglevel' — those two entries may never match; confirm.
RequireRestartKeys = ['host', 'port', 'username', 'password', 'systemType', 'logfile', 'loglevel']
def castToBuiltInType(key, value):
'Will cast value to the correct type depending on the key'
if isinstance(value, bool):
return 1 if value else 0
if key in ['bind_npi', 'dst_npi', 'src_npi']:
return addr_npi_name_map[str(value)]
if key in ['bind_ton', 'dst_ton', 'src_ton']:
return addr_ton_name_map[str(value)]
if key == 'ripf':
return replace_if_present_flap_name_map[str(value)]
if key == 'priority':
return priority_flag_name_map[str(value)]
return value
class JCliSMPPClientConfig(SMPPClientConfig):
    """Overload SMPPClientConfig with getters and setters for JCli.

    Fixed: dict.iteritems() (Python 2 only) replaced with .items().
    """

    # Set to True whenever a key from RequireRestartKeys is updated; the CLI
    # then knows the connector must be restarted for the change to apply.
    PendingRestart = False

    def set(self, key, value):
        """Set attribute `key` to `value`, flagging a pending restart when needed."""
        setattr(self, key, value)

        if key in RequireRestartKeys:
            self.PendingRestart = True

    def getAll(self):
        """Return a console-key -> display-value dict of the whole configuration."""
        r = {}
        # .items() behaves identically on Python 2 and is required on Python 3.
        for key, value in SMPPClientConfigKeyMap.items():
            r[key] = castToBuiltInType(key, getattr(self, value))
        return r
def SMPPClientConfigBuild(fCallback):
    """Parse args and try to build a JCliSMPPClientConfig instance to pass to fCallback.

    Key/value lines are buffered in self.sessBuffer; on 'ok' the buffered
    values are used to construct a JCliSMPPClientConfig which is handed to
    fCallback.  Fixed here for Python 3 compatibility: `except E, e` (a
    SyntaxError on Py3) -> `except E as e`; `has_key` -> `in`; `iteritems`
    loop -> a plain dict copy.  All replacements also work on Python 2.6+.
    """
    def parse_args_and_call_with_instance(self, *args, **kwargs):
        cmd = args[0]
        arg = args[1]

        # Empty line
        if cmd is None:
            return self.protocol.sendData()

        # Initiate JCliSMPPClientConfig with sessBuffer content
        if cmd == 'ok':
            if not self.sessBuffer:
                return self.protocol.sendData('You must set at least connector id (cid) before saving !')

            connector = dict(self.sessBuffer)
            try:
                SMPPClientConfigInstance = JCliSMPPClientConfig(**connector)
                # Hand the instance to fCallback
                return fCallback(self, SMPPClientConfigInstance)
            except Exception as e:
                return self.protocol.sendData('Error: %s' % str(e))
        else:
            # Unknown key
            if cmd not in SMPPClientConfigKeyMap:
                return self.protocol.sendData('Unknown SMPPClientConfig key: %s' % cmd)

            # Cast yes/no style answers to booleans for these two keys.
            if cmd in ['con_loss_retry', 'con_fail_retry']:
                if arg.lower() in ['yes', 'y', '1']:
                    arg = True
                elif arg.lower() in ['no', 'n', '0']:
                    arg = False

            # Buffer key for later SMPPClientConfig initiating
            SMPPClientConfigKey = SMPPClientConfigKeyMap[cmd]
            if isinstance(arg, str):
                self.sessBuffer[SMPPClientConfigKey] = str2num(arg)
            else:
                self.sessBuffer[SMPPClientConfigKey] = arg
            return self.protocol.sendData()
    return parse_args_and_call_with_instance
def SMPPClientConfigUpdate(fCallback):
    '''Get connector configuration and log update requests passing to fCallback.

    The buffered update log (self.sessBuffer) is handed to fCallback when
    'ok' is received.  Fixed for Python 3 compatibility: `has_key` (removed
    in Py3) replaced with the `in` operator, which also works on Python 2.
    '''
    def log_update_requests_and_call(self, *args, **kwargs):
        cmd = args[0]
        arg = args[1]

        # Empty line
        if cmd is None:
            return self.protocol.sendData()

        # Pass sessBuffer as updateLog to fCallback
        if cmd == 'ok':
            if not self.sessBuffer:
                return self.protocol.sendData('Nothing to save')

            return fCallback(self, self.sessBuffer)
        else:
            # Unknown key
            if cmd not in SMPPClientConfigKeyMap:
                return self.protocol.sendData('Unknown SMPPClientConfig key: %s' % cmd)
            if cmd == 'cid':
                return self.protocol.sendData('Connector id can not be modified !')

            # Buffer key for later (when receiving 'ok')
            SMPPClientConfigKey = SMPPClientConfigKeyMap[cmd]
            self.sessBuffer[SMPPClientConfigKey] = str2num(arg)
            return self.protocol.sendData()
    return log_update_requests_and_call
class ConnectorExist:
    """Decorator factory guarding a command handler behind a connector-id check.

    `cid_key` names the option attribute holding the connector id; if no
    connector with that id is registered, an error line is sent to the
    console instead of invoking the wrapped handler.
    """

    def __init__(self, cid_key):
        self.cid_key = cid_key

    def __call__(self, fCallback):
        key = self.cid_key

        def exist_connector_and_call(self, *args, **kwargs):
            # args[1] is the parsed options object; look up the id there.
            target_cid = getattr(args[1], key)
            if self.pb['smppcm'].getConnector(target_cid) is None:
                return self.protocol.sendData('Unknown connector: %s' % target_cid)
            return fCallback(self, *args, **kwargs)
        return exist_connector_and_call
class SmppCCManager(Manager):
    """jCli manager exposing the SMPP client connector (`smppcc`) commands.

    All operations are delegated to the SMPP client manager perspective
    broker available as self.pb['smppcm'].  Fixed here: the two Python-2-only
    dict.iteritems() calls are replaced with .items(), which behaves the same
    on Python 2 and is required on Python 3.
    """
    managerName = 'smppcc'

    def persist(self, arg, opts):
        """Persist the connectors configuration to the given profile."""
        if self.pb['smppcm'].perspective_persist(opts.profile):
            self.protocol.sendData('%s configuration persisted (profile:%s)' % (self.managerName, opts.profile), prompt=False)
        else:
            self.protocol.sendData('Failed to persist %s configuration (profile:%s)' % (self.managerName, opts.profile), prompt=False)

    @defer.inlineCallbacks
    def load(self, arg, opts):
        """Load the connectors configuration from the given profile."""
        r = yield self.pb['smppcm'].perspective_load(opts.profile)
        if r:
            self.protocol.sendData('%s configuration loaded (profile:%s)' % (self.managerName, opts.profile), prompt=False)
        else:
            self.protocol.sendData('Failed to load %s configuration (profile:%s)' % (self.managerName, opts.profile), prompt=False)

    def list(self, arg, opts):
        """Print a table of all connectors: id, service state, session, counters."""
        connectors = self.pb['smppcm'].perspective_connector_list()
        counter = 0

        if len(connectors) > 0:
            self.protocol.sendData("#%s %s %s %s %s" % (
                'Connector id'.ljust(35),
                'Service'.ljust(7),
                'Session'.ljust(16),
                'Starts'.ljust(6),
                'Stops'.ljust(5),
            ), prompt=False)

            for connector in connectors:
                counter += 1
                self.protocol.sendData("#%s %s %s %s %s" % (
                    str(connector['id']).ljust(35),
                    str('started' if connector['service_status'] == 1 else 'stopped').ljust(7),
                    str(connector['session_state']).ljust(16),
                    str(connector['start_count']).ljust(6),
                    str(connector['stop_count']).ljust(5),
                ), prompt=False)

            self.protocol.sendData(prompt=False)

        self.protocol.sendData('Total connectors: %s' % counter)

    @Session
    @SMPPClientConfigBuild
    @defer.inlineCallbacks
    def add_session(self, SMPPClientConfigInstance):
        """Session endpoint: register a newly built connector configuration."""
        st = yield self.pb['smppcm'].perspective_connector_add(pickle.dumps(SMPPClientConfigInstance, 2))

        if st:
            self.protocol.sendData('Successfully added connector [%s]' % SMPPClientConfigInstance.id, prompt=False)
            self.stopSession()
        else:
            self.protocol.sendData('Failed adding connector, check log for details')

    def add(self, arg, opts):
        """Start an interactive session to add a new connector."""
        # NOTE: 'annoucement'/'completitions' are the keyword names as
        # (mis)spelled by Manager.startSession — do not "fix" them here.
        return self.startSession(self.add_session,
                                 annoucement='Adding a new connector: (ok: save, ko: exit)',
                                 completitions=SMPPClientConfigKeyMap.keys())

    @Session
    @SMPPClientConfigUpdate
    @defer.inlineCallbacks
    def update_session(self, updateLog):
        """Session endpoint: apply buffered key/value updates to a connector.

        If a changed key requires a restart and the connector is currently
        started, it is stopped and started again so the change takes effect.
        """
        connector = self.pb['smppcm'].getConnector(self.sessionContext['cid'])
        connectorDetails = self.pb['smppcm'].getConnectorDetails(self.sessionContext['cid'])
        # .items() replaces the Python 2-only .iteritems()
        for key, value in updateLog.items():
            connector['config'].set(key, value)

        if connector['config'].PendingRestart and connectorDetails['service_status'] == 1:
            self.protocol.sendData('Restarting connector [%s] for updates to take effect ...' % self.sessionContext['cid'], prompt=False)
            st = yield self.pb['smppcm'].perspective_connector_stop(self.sessionContext['cid'])
            if not st:
                self.protocol.sendData('Failed stopping connector, check log for details', prompt=False)
            else:
                self.pb['smppcm'].perspective_connector_start(self.sessionContext['cid'])

        self.protocol.sendData('Successfully updated connector [%s]' % self.sessionContext['cid'], prompt=False)
        self.stopSession()

    @ConnectorExist(cid_key='update')
    def update(self, arg, opts):
        """Start an interactive session to update an existing connector."""
        return self.startSession(self.update_session,
                                 annoucement='Updating connector id [%s]: (ok: save, ko: exit)' % opts.update,
                                 completitions=SMPPClientConfigKeyMap.keys(),
                                 sessionContext={'cid': opts.update})

    @ConnectorExist(cid_key='remove')
    @defer.inlineCallbacks
    def remove(self, arg, opts):
        """Remove a connector by id."""
        st = yield self.pb['smppcm'].perspective_connector_remove(opts.remove)

        if st:
            self.protocol.sendData('Successfully removed connector id:%s' % opts.remove)
        else:
            self.protocol.sendData('Failed removing connector, check log for details')

    @ConnectorExist(cid_key='show')
    def show(self, arg, opts):
        """Display every configuration key/value of a connector."""
        connector = self.pb['smppcm'].getConnector(opts.show)
        # .items() replaces the Python 2-only .iteritems()
        for k, v in connector['config'].getAll().items():
            self.protocol.sendData('%s %s' % (k, v), prompt=False)
        self.protocol.sendData()

    @ConnectorExist(cid_key='stop')
    @defer.inlineCallbacks
    def stop(self, arg, opts):
        """Stop a running connector by id."""
        st = yield self.pb['smppcm'].perspective_connector_stop(opts.stop)

        if st:
            self.protocol.sendData('Successfully stopped connector id:%s' % opts.stop)
        else:
            self.protocol.sendData('Failed stopping connector, check log for details')

    @ConnectorExist(cid_key='start')
    def start(self, arg, opts):
        """Start a connector by id."""
        st = self.pb['smppcm'].perspective_connector_start(opts.start)

        if st:
            self.protocol.sendData('Successfully started connector id:%s' % opts.start)
        else:
            self.protocol.sendData('Failed starting connector, check log for details')
| en | 0.798126 | # A config map between console-configuration keys and SMPPClientConfig keys. # When updating a key from RequireRestartKeys, the connector need restart for update to take effect # Empty line # Initiate JCliSMPPClientConfig with sessBuffer content # Hand the instance to fCallback # Unknown key # Cast to boolean # Buffer key for later SMPPClientConfig initiating Get connector configuration and log update requests passing to fCallback The log will be handed to fCallback when 'ok' is received # Empty line # Pass sessBuffer as updateLog to fCallback # Unknown key # Buffer key for later (when receiving 'ok') | 1.667145 | 2 |
server/src/api/routers/sessions/landmark.py | Mokumoku-Rin/client | 0 | 6622944 | from fastapi import APIRouter, Depends
from schemas.landmark import (
LandmarkVisitRequest, LandmarkVisitResponse,
LandmarkPostResponse, LandmarkPostRequest, LandmarkResponse)
from services.landmark import LandmarkService
from depends.auth import FirebaseToken
router = APIRouter()
@router.post("/visit/", response_model=LandmarkVisitResponse)
async def compare_image(landmark_request: LandmarkVisitRequest, fbToken: FirebaseToken = Depends()):
uid = fbToken.uid
# デバッグモードなら常にOKを返す
if landmark_request.debug == "true":
return {
"result": "OK"
}
message = await LandmarkService.ImageCompare(landmark_request)
response: WorkoutResponse = {
"result": message
}
return response
@router.get("/", response_model=LandmarkResponse)
async def get_landmark():
landmarks = await LandmarkService.fetch_all_landmarks()
response: LandmarkResponse = {
"landmarks": landmarks
}
return response
@router.post("/", response_model=LandmarkPostResponse)
async def post_landmark(request_model: LandmarkPostRequest):
name = request_model.name
description = request_model.description
img_path = request_model.img_path
pos = request_model.pos
message = await LandmarkService.post_landmark(name, description, img_path, pos)
response: response_model = {
"result": message
}
return response
| from fastapi import APIRouter, Depends
from schemas.landmark import (
LandmarkVisitRequest, LandmarkVisitResponse,
LandmarkPostResponse, LandmarkPostRequest, LandmarkResponse)
from services.landmark import LandmarkService
from depends.auth import FirebaseToken
router = APIRouter()
@router.post("/visit/", response_model=LandmarkVisitResponse)
async def compare_image(landmark_request: LandmarkVisitRequest, fbToken: FirebaseToken = Depends()):
uid = fbToken.uid
# デバッグモードなら常にOKを返す
if landmark_request.debug == "true":
return {
"result": "OK"
}
message = await LandmarkService.ImageCompare(landmark_request)
response: WorkoutResponse = {
"result": message
}
return response
@router.get("/", response_model=LandmarkResponse)
async def get_landmark():
landmarks = await LandmarkService.fetch_all_landmarks()
response: LandmarkResponse = {
"landmarks": landmarks
}
return response
@router.post("/", response_model=LandmarkPostResponse)
async def post_landmark(request_model: LandmarkPostRequest):
name = request_model.name
description = request_model.description
img_path = request_model.img_path
pos = request_model.pos
message = await LandmarkService.post_landmark(name, description, img_path, pos)
response: response_model = {
"result": message
}
return response
| ja | 0.998155 | # デバッグモードなら常にOKを返す | 2.359972 | 2 |
source/python/MonkeyWantingToBeAHorse.py | JoHyukJun/algorithm-analysis | 0 | 6622945 | '''
main.py
Created by <NAME> on 2021
Copyright © 2021 <NAME>. All rights reserved.
'''
import sys
from collections import deque
def bfs():
    """Breadth-first search over the h x w grid from (0, 0) to (h-1, w-1).

    State is (row, col, jumps_used), where jumps_used counts knight-style
    ("horse") moves taken so far, capped at the module-level `k`.
    Reads module globals: h, w, k, arr (0 = free cell), visited, mm, hm.
    Returns the minimum number of moves, or -1 if the goal is unreachable.
    """
    queue = deque([(0, 0, 0)])
    while queue:
        x, y, z = queue.popleft()
        if x == h - 1 and y == w - 1:
            # visited stores distance + 1 (the start cell is seeded with 1).
            return visited[x][y][z] - 1
        # monkey move: 4-directional single step, jump budget unchanged
        for i in range(4):
            nx = x + mm[i][0]
            ny = y + mm[i][1]
            if nx >= 0 and ny >= 0 and nx < h and ny < w and not visited[nx][ny][z] and arr[nx][ny] == 0:
                queue.append((nx, ny, z))
                visited[nx][ny][z] = visited[x][y][z] + 1
        # horse move: knight jump, only while jumps remain (z < k)
        if z < k:
            for i in range(8):
                nx = x + hm[i][0]
                ny = y + hm[i][1]
                if nx >= 0 and ny >= 0 and nx < h and ny < w and not visited[nx][ny][z + 1] and arr[nx][ny] == 0:
                    queue.append((nx, ny, z + 1))
                    visited[nx][ny][z + 1] = visited[x][y][z] + 1
    return -1
# Input: k = max knight jumps, then "w h", then the h x w grid (0 = free).
k = int(sys.stdin.readline())
w, h = map(int, sys.stdin.readline().rstrip().split(' '))
arr = []
for _ in range(h):
    arr.append(list(map(int, sys.stdin.readline().rstrip().split(' '))))
# visited[row][col][jumps] == 0 means unvisited; otherwise distance + 1.
visited = [[[0 for _ in range(k + 1)] for _ in range(w)] for _ in range(h)]
visited[0][0][0] = 1
# 4-directional ("monkey") and knight ("horse") move deltas.
mm = [(-1, 0), (1, 0), (0, 1), (0, -1)]
hm = [(-1, -2), (-2, -1), (-2, 1), (-1, 2), (1, -2), (2, -1), (2, 1), (1, 2)]
print(bfs()) | '''
main.py
Created by <NAME> on 2021
Copyright © 2021 <NAME>. All rights reserved.
'''
import sys
from collections import deque
def bfs():
    """Breadth-first search over the h x w grid from (0, 0) to (h-1, w-1).

    State is (row, col, jumps_used), where jumps_used counts knight-style
    ("horse") moves taken so far, capped at the module-level `k`.
    Reads module globals: h, w, k, arr (0 = free cell), visited, mm, hm.
    Returns the minimum number of moves, or -1 if the goal is unreachable.
    """
    queue = deque([(0, 0, 0)])
    while queue:
        x, y, z = queue.popleft()
        if x == h - 1 and y == w - 1:
            # visited stores distance + 1 (the start cell is seeded with 1).
            return visited[x][y][z] - 1
        # monkey move: 4-directional single step, jump budget unchanged
        for i in range(4):
            nx = x + mm[i][0]
            ny = y + mm[i][1]
            if nx >= 0 and ny >= 0 and nx < h and ny < w and not visited[nx][ny][z] and arr[nx][ny] == 0:
                queue.append((nx, ny, z))
                visited[nx][ny][z] = visited[x][y][z] + 1
        # horse move: knight jump, only while jumps remain (z < k)
        if z < k:
            for i in range(8):
                nx = x + hm[i][0]
                ny = y + hm[i][1]
                if nx >= 0 and ny >= 0 and nx < h and ny < w and not visited[nx][ny][z + 1] and arr[nx][ny] == 0:
                    queue.append((nx, ny, z + 1))
                    visited[nx][ny][z + 1] = visited[x][y][z] + 1
    return -1
# Input: k = max knight jumps, then "w h", then the h x w grid (0 = free).
k = int(sys.stdin.readline())
w, h = map(int, sys.stdin.readline().rstrip().split(' '))
arr = []
for _ in range(h):
    arr.append(list(map(int, sys.stdin.readline().rstrip().split(' '))))
# visited[row][col][jumps] == 0 means unvisited; otherwise distance + 1.
visited = [[[0 for _ in range(k + 1)] for _ in range(w)] for _ in range(h)]
visited[0][0][0] = 1
# 4-directional ("monkey") and knight ("horse") move deltas.
mm = [(-1, 0), (1, 0), (0, 1), (0, -1)]
hm = [(-1, -2), (-2, -1), (-2, 1), (-1, 2), (1, -2), (2, -1), (2, 1), (1, 2)]
print(bfs()) | en | 0.839279 | main.py Created by <NAME> on 2021 Copyright © 2021 <NAME>. All rights reserved. # monkey move # horse move | 3.203156 | 3 |
leetcode/1344_angle_between_hands_of_a_clock.py | jacquerie/leetcode | 3 | 6622946 | <gh_stars>1-10
# -*- coding: utf-8 -*-
class Solution:
    """LeetCode 1344: angle between the hands of a clock."""

    def angleClock(self, hour: int, minutes: int) -> float:
        """Return the smaller angle (degrees) between the hour and minute hands."""
        # Minute hand: 6 degrees per minute; hour hand: 30 per hour + 0.5 per minute.
        minute_hand = minutes * 6.0
        hour_hand = (hour % 12) * 30 + minutes * 0.5
        diff = abs(minute_hand - hour_hand)
        return min(diff, 360 - diff)
if __name__ == '__main__':
    # Sanity checks against the LeetCode 1344 examples.
    solution = Solution()
    assert 165 == solution.angleClock(12, 30)
    assert 75 == solution.angleClock(3, 30)
    assert 7.5 == solution.angleClock(3, 15)
    assert 155 == solution.angleClock(4, 50)
    assert 0 == solution.angleClock(12, 0)
    assert 76.5 == solution.angleClock(1, 57)
| # -*- coding: utf-8 -*-
class Solution:
    """LeetCode 1344: angle between the hands of a clock."""

    def angleClock(self, hour: int, minutes: int) -> float:
        """Return the smaller angle (degrees) between the hour and minute hands."""
        # Minute hand: 6 degrees per minute; hour hand: 30 per hour + 0.5 per minute.
        minute_hand = minutes * 6.0
        hour_hand = (hour % 12) * 30 + minutes * 0.5
        diff = abs(minute_hand - hour_hand)
        return min(diff, 360 - diff)
if __name__ == '__main__':
    # Sanity checks against the LeetCode 1344 examples.
    solution = Solution()
    assert 165 == solution.angleClock(12, 30)
    assert 75 == solution.angleClock(3, 30)
    assert 7.5 == solution.angleClock(3, 15)
    assert 155 == solution.angleClock(4, 50)
    assert 0 == solution.angleClock(12, 0)
assert 76.5 == solution.angleClock(1, 57) | en | 0.769321 | # -*- coding: utf-8 -*- | 3.888895 | 4 |
objvar.py | git-ning/byte-of-python | 1 | 6622947 | <filename>objvar.py
#!/usr/bin/python
class Robot:
    """Represents a robot, with a name."""

    # A class variable, counting the number of robots.
    population = 0

    def __init__(self, name):
        """Initialize the data."""
        self.name = name
        print('(Initialize {0})'.format(self.name))
        # When this robot is created, it adds to the population.
        Robot.population += 1

    def __del__(self):
        """I am dying."""
        print('{0} is being destroyed!'.format(self.name))
        Robot.population -= 1
        if Robot.population == 0:
            # Fixed: report the robot's name, not the population counter,
            # which is always 0 here and printed "0 is the last one.".
            print('{0} is the last one.'.format(self.name))

    def sayHi(self):
        """Greeting by the robot.

        Yeah, they can do that."""
        print('Greetings, my master call me {0}'.format(self.name))

    @staticmethod
    def howMany():
        """Prints the current population."""
        print('We have {0:d} robots.'.format(Robot.population))
# Demo: create two robots, greet, report the population, then destroy them.
droid1 = Robot('R2-D2')
droid1.sayHi()
Robot.howMany()
droid2 = Robot('C-3PO')
droid2.sayHi()
Robot.howMany()
print('\nRobots can do some work here.\n')
print('Robots have finished their work. So let\'s destroy them.')
# `del` drops the last reference, so CPython invokes __del__ immediately.
del droid1
del droid2
Robot.howMany() | <filename>objvar.py
#!/usr/bin/python
class Robot:
    """Represents a robot, with a name."""

    # A class variable, counting the number of robots.
    population = 0

    def __init__(self, name):
        """Initialize the data."""
        self.name = name
        print('(Initialize {0})'.format(self.name))
        # When this robot is created, it adds to the population.
        Robot.population += 1

    def __del__(self):
        """I am dying."""
        print('{0} is being destroyed!'.format(self.name))
        Robot.population -= 1
        if Robot.population == 0:
            # Fixed: report the robot's name, not the population counter,
            # which is always 0 here and printed "0 is the last one.".
            print('{0} is the last one.'.format(self.name))

    def sayHi(self):
        """Greeting by the robot.

        Yeah, they can do that."""
        print('Greetings, my master call me {0}'.format(self.name))

    @staticmethod
    def howMany():
        """Prints the current population."""
        print('We have {0:d} robots.'.format(Robot.population))
# Demo: create two robots, greet, report the population, then destroy them.
droid1 = Robot('R2-D2')
droid1.sayHi()
Robot.howMany()
droid2 = Robot('C-3PO')
droid2.sayHi()
Robot.howMany()
print('\nRobots can do some work here.\n')
print('Robots have finished their work. So let\'s destroy them.')
# `del` drops the last reference, so CPython invokes __del__ immediately.
del droid1
del droid2
Robot.howMany() | en | 0.828799 | #!/usr/bin/python Represents a robot, with a name. # A calss variable, counting the number of robots Initialize the data. # When this person is created, the robot adds to population I am daying. Greeting by the robot. Yeah, they can do taht. Prints the current population. | 4.054466 | 4 |
CNN_by_Chong Feng/display_layers.py | shaw-wong/Malmo | 1 | 6622948 | import math
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image
# from tf_cnnvis import *
def weight(shape):
    """Create a trainable weight tensor initialised from a truncated normal (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias(length):
    """Create a trainable bias vector of the given length, initialised to 0.1."""
    initial = tf.constant(0.1, shape=[length])
    return tf.Variable(initial)
def layer(input, num_input_channels, filter_size, num_filters, use_bn=False,
          use_relu=True, use_pool=True, use_dropout=True):
    """Build one conv layer: conv2d + bias, then optional BN, ReLU,
    2x2 max-pool (halves spatial dims) and dropout.

    Relies on module globals: `training` (BN mode) and `keep_prob`
    (dropout placeholder fed at session run time).
    """
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = weight(shape)
    biases = bias(num_filters)
    # Renamed the running result from `layer` to `out`: the original local
    # shadowed this function's own name, which is confusing.
    out = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1],
                       padding="SAME")
    out += biases
    if use_bn:
        # NOTE(review): `training` is a module-level constant, not a
        # placeholder — BN would always run in training mode; confirm intent.
        out = tf.layers.batch_normalization(out, training=training)
    if use_relu:
        out = tf.nn.relu(out)
    if use_pool:
        out = tf.nn.max_pool(value=out, ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1], padding="SAME")
    if use_dropout:
        out = tf.nn.dropout(out, keep_prob)
    return out
def save_layer(layer, image, image_name, use):
    """Run `image` through `layer` and save a grid plot of every filter's
    activation map to data/layers/features/<image_name>_<use>.png.

    Relies on module globals: img_size_flat, x, keep_prob, session.
    """
    image = image.reshape(img_size_flat)
    feed_dict = {x: [image], keep_prob: 0.5}
    values = session.run(layer, feed_dict=feed_dict)
    num_filters = values.shape[3]
    # Smallest square grid that fits all filters.
    num_grids = int(math.ceil(math.sqrt(num_filters)))
    fig, axes = plt.subplots(num_grids, num_grids)
    for i, ax in enumerate(axes.flat):
        if i < num_filters:
            img = values[0, :, :, i]
            ax.imshow(img, interpolation='nearest', cmap='binary')
    fig.savefig("data/layers/features/" + image_name +
                "_" + use + ".png")
    # Fixed: close the figure so repeated calls do not leak matplotlib figures.
    plt.close(fig)
# Dropout keep-probability placeholder, fed at session run time.
keep_prob = tf.placeholder(tf.float32)
# Conv-layer hyper-parameters (only layer 1 is actually built below).
filter_size1 = 3
num_filters1 = 32
filter_size2 = 3
num_filters2 = 64
filter_size3 = 3
num_filters3 = 128
filter_size4 = 3
num_filters4 = 256
# Input images: 128x128 RGB, flattened to one row per image.
num_channels = 3
img_size = 128
img_size_flat = img_size * img_size * num_channels
img_shape = (img_size, img_size)
training = True
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
layer1 = layer(input=x_image, num_input_channels=num_channels,
               filter_size=filter_size1, num_filters=num_filters1)
session = tf.Session()
# NOTE(review): weights are freshly initialised, not restored from a
# checkpoint, so the saved feature maps show an untrained network.
session.run(tf.global_variables_initializer())
img0 = Image.open("record/images/not_preprocessed/test/test_34.png")
image0 = np.array(img0)
img1 = Image.open("record/images/not_preprocessed/test/test_31.png")
image1 = np.array(img1)
save_layer(layer=layer1, image=image0, image_name="maze", use="conv")
save_layer(layer=layer1, image=image1, image_name="pig", use="conv")
# (Commented-out tf_cnnvis deconvolution experiments retained as-is.)
# image0 = image0.reshape(img_size_flat)
# feed_dict = {x: [image0], keep_prob: 0.5}
# layers = ["r", "p", "c"]
# is_success = deconv_visualization(sess_graph_path=session,
#                                   value_feed_dict=feed_dict,
#                                   input_tensor=x_image, layers=layers,
#                                   path_logdir="record/images/layers/maze/",
#                                   path_outdir="record/images/layers/maze/")
# image1 = image1.reshape(img_size_flat)
# feed_dict = {x: [image1], keep_prob: 0.5}
# layers = ["r", "p", "c"]
# is_success = deconv_visualization(sess_graph_path=session,
#                                   value_feed_dict=feed_dict,
#                                   input_tensor=x_image, layers=layers,
#                                   path_logdir="record/images/layers/pig/",
#                                   path_outdir="record/images/layers/pig/")
session.close()
img0.close()
img1.close()
| import math
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image
# from tf_cnnvis import *
def weight(shape):
    """Create a trainable weight tensor initialised from a truncated normal (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias(length):
    """Create a trainable bias vector of the given length, initialised to 0.1."""
    initial = tf.constant(0.1, shape=[length])
    return tf.Variable(initial)
def layer(input, num_input_channels, filter_size, num_filters, use_bn=False,
          use_relu=True, use_pool=True, use_dropout=True):
    """Build one conv layer: conv2d + bias, then optional BN, ReLU,
    2x2 max-pool (halves spatial dims) and dropout.

    Relies on module globals: `training` (BN mode) and `keep_prob`
    (dropout placeholder fed at session run time).
    """
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = weight(shape)
    biases = bias(num_filters)
    # Renamed the running result from `layer` to `out`: the original local
    # shadowed this function's own name, which is confusing.
    out = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1],
                       padding="SAME")
    out += biases
    if use_bn:
        # NOTE(review): `training` is a module-level constant, not a
        # placeholder — BN would always run in training mode; confirm intent.
        out = tf.layers.batch_normalization(out, training=training)
    if use_relu:
        out = tf.nn.relu(out)
    if use_pool:
        out = tf.nn.max_pool(value=out, ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1], padding="SAME")
    if use_dropout:
        out = tf.nn.dropout(out, keep_prob)
    return out
def save_layer(layer, image, image_name, use):
    """Run `image` through `layer` and save a grid plot of every filter's
    activation map to data/layers/features/<image_name>_<use>.png.

    Relies on module globals: img_size_flat, x, keep_prob, session.
    """
    image = image.reshape(img_size_flat)
    feed_dict = {x: [image], keep_prob: 0.5}
    values = session.run(layer, feed_dict=feed_dict)
    num_filters = values.shape[3]
    # Smallest square grid that fits all filters.
    num_grids = int(math.ceil(math.sqrt(num_filters)))
    fig, axes = plt.subplots(num_grids, num_grids)
    for i, ax in enumerate(axes.flat):
        if i < num_filters:
            img = values[0, :, :, i]
            ax.imshow(img, interpolation='nearest', cmap='binary')
    fig.savefig("data/layers/features/" + image_name +
                "_" + use + ".png")
    # Fixed: close the figure so repeated calls do not leak matplotlib figures.
    plt.close(fig)
# Dropout keep-probability placeholder, fed at session run time.
keep_prob = tf.placeholder(tf.float32)
# Conv-layer hyper-parameters (only layer 1 is actually built below).
filter_size1 = 3
num_filters1 = 32
filter_size2 = 3
num_filters2 = 64
filter_size3 = 3
num_filters3 = 128
filter_size4 = 3
num_filters4 = 256
# Input images: 128x128 RGB, flattened to one row per image.
num_channels = 3
img_size = 128
img_size_flat = img_size * img_size * num_channels
img_shape = (img_size, img_size)
training = True
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
layer1 = layer(input=x_image, num_input_channels=num_channels,
               filter_size=filter_size1, num_filters=num_filters1)
session = tf.Session()
# NOTE(review): weights are freshly initialised, not restored from a
# checkpoint, so the saved feature maps show an untrained network.
session.run(tf.global_variables_initializer())
img0 = Image.open("record/images/not_preprocessed/test/test_34.png")
image0 = np.array(img0)
img1 = Image.open("record/images/not_preprocessed/test/test_31.png")
image1 = np.array(img1)
save_layer(layer=layer1, image=image0, image_name="maze", use="conv")
save_layer(layer=layer1, image=image1, image_name="pig", use="conv")
# (Commented-out tf_cnnvis deconvolution experiments retained as-is.)
# image0 = image0.reshape(img_size_flat)
# feed_dict = {x: [image0], keep_prob: 0.5}
# layers = ["r", "p", "c"]
# is_success = deconv_visualization(sess_graph_path=session,
#                                   value_feed_dict=feed_dict,
#                                   input_tensor=x_image, layers=layers,
#                                   path_logdir="record/images/layers/maze/",
#                                   path_outdir="record/images/layers/maze/")
# image1 = image1.reshape(img_size_flat)
# feed_dict = {x: [image1], keep_prob: 0.5}
# layers = ["r", "p", "c"]
# is_success = deconv_visualization(sess_graph_path=session,
#                                   value_feed_dict=feed_dict,
#                                   input_tensor=x_image, layers=layers,
#                                   path_logdir="record/images/layers/pig/",
#                                   path_outdir="record/images/layers/pig/")
session.close()
img0.close()
img1.close()
| en | 0.548977 | # from tf_cnnvis import * # image0 = image0.reshape(img_size_flat) # feed_dict = {x: [image0], keep_prob: 0.5} # layers = ["r", "p", "c"] # is_success = deconv_visualization(sess_graph_path=session, # value_feed_dict=feed_dict, # input_tensor=x_image, layers=layers, # path_logdir="record/images/layers/maze/", # path_outdir="record/images/layers/maze/") # image1 = image1.reshape(img_size_flat) # feed_dict = {x: [image1], keep_prob: 0.5} # layers = ["r", "p", "c"] # is_success = deconv_visualization(sess_graph_path=session, # value_feed_dict=feed_dict, # input_tensor=x_image, layers=layers, # path_logdir="record/images/layers/pig/", # path_outdir="record/images/layers/pig/") | 2.67925 | 3 |
Vocabulary/GBLxAPI_Json_Parser.py | gblxapi/UnityGBLxAPI | 14 | 6622949 | <filename>Vocabulary/GBLxAPI_Json_Parser.py
# -------------------------------------------------------------------------------------------------
# GBLxAPI_Json_Parser.py
# Project: GBLXAPI
# Created: 2018/07/21
# Copyright 2018 Dig-It! Games, LLC. All rights reserved.
# This code is licensed under the MIT License (See LICENSE.txt for details)
# -------------------------------------------------------------------------------------------------
from openpyxl import load_workbook
import json
from jsonmerge import merge
# This function takes all of the GBLxAPI Vocabulary information in the workbook named workbookName
# and parses it to json, writing to a file with the name defined in target.
def GenerateJson(workbookName, target, nameCol, uriCol, descrCol):
    """Parse GBLxAPI vocabulary sheets from `workbookName` into JSON at `target`.

    Each worksheet (except "Notes") becomes a top-level key; each data row
    becomes an entry mapping the lower-cased name to its xAPI metadata
    (name/id/description).  nameCol/uriCol/descrCol are 0-based column
    indexes; a few manually-populated sheets override them to A/B/C.

    Fixes: use the public openpyxl `ws.title` instead of the name-mangled
    private attribute `ws._WorkbookChild__title`; drop the trailing commas
    after print(...) calls, which on Python 3 just built pointless 1-tuples.
    """
    wb = load_workbook(filename=workbookName)
    totalMap = {}  # keys: sheet names, e.g. Activity, Grade, Domain, Focus...
    for ws in wb:
        wsName = ws.title
        if wsName == "Notes":
            continue
        print("Loading " + wsName + "...")
        sectionMap = {}  # keys: entry names, e.g. Counting, Algebra, Energy...
        # Local copies so per-sheet overrides don't leak to later sheets.
        nc = nameCol
        uc = uriCol
        dc = descrCol
        # Manually populated sheets always use columns A, B and C; the
        # auto-populated default file uses F, I and BB instead.
        if wsName in ["Verb", "Activity", "Extension", "Grade"]:
            nc = 0  # A
            uc = 1  # B
            dc = 2  # C
        for row in ws.iter_rows(min_row=2):  # min_row=2 skips the header row
            itemMap = {}  # keys: name, description, id
            # Force all values to lowercase for easy comparison.
            name = str(row[nc].value).lower() if row[nc].value is not None else ""
            uri = str(row[uc].value).lower() if row[uc].value is not None else ""
            descr = str(row[dc].value).lower() if row[dc].value is not None else ""
            itemMap['name'] = {'en-US': name}
            itemMap['id'] = uri
            itemMap['description'] = {'en-US': descr}
            sectionMap[name] = itemMap
        totalMap[wsName.lower()] = sectionMap
        print("Done.")
    print("Generating Json file...")
    with open(target, 'w') as write_file:
        json.dump(totalMap, write_file, sort_keys=True, indent=4, separators=(',', ': '))
    print("Success!")
print("Converting your data...")
# Load default vocabulary
# 5 == row F, 8 == row I, 53 == row BB in Excel
print("Loading default vocabulary...")
GenerateJson('GBLxAPI_Vocab_Default.xlsx', 'GBLxAPI_Vocab_Default.json', 5, 53, 8)
# Load user overrides
# 0 == row A, 1 == row B, 2 == row C in Excel
print("Loading user overrides...")
GenerateJson('GBLxAPI_Vocab_User.xlsx', 'GBLxAPI_Vocab_User.json', 0, 1, 2)
print("All done! Move the two generated Json files to Resources/Data to use the GBLxAPI vocabulary in your Unity project.") | <filename>Vocabulary/GBLxAPI_Json_Parser.py
# -------------------------------------------------------------------------------------------------
# GBLxAPI_Json_Parser.py
# Project: GBLXAPI
# Created: 2018/07/21
# Copyright 2018 Dig-It! Games, LLC. All rights reserved.
# This code is licensed under the MIT License (See LICENSE.txt for details)
# -------------------------------------------------------------------------------------------------
from openpyxl import load_workbook
import json
from jsonmerge import merge
# This function takes all of the GBLxAPI Vocabulary information in the workbook named workbookName
# and parses it to json, writing to a file with the name defined in target.
def GenerateJson(workbookName, target, nameCol, uriCol, descrCol):
    """Parse GBLxAPI vocabulary from an Excel workbook into a JSON file.

    Args:
        workbookName: path of the source .xlsx workbook.
        target: path of the JSON file to write.
        nameCol: zero-based column index of the vocabulary term name.
        uriCol: zero-based column index of the term's xAPI URI (the "id").
        descrCol: zero-based column index of the term's description.

    The resulting JSON maps lowercased sheet names to maps of lowercased
    term names, each holding {'name': {'en-US': ...}, 'id': ...,
    'description': {'en-US': ...}}.
    """
    wb = load_workbook(filename=workbookName)
    totalMap = {}  # totalMap has keys in [Activity, Grade, Domain, Focus, etc]
    for ws in wb:
        # Use the public openpyxl API rather than the name-mangled private
        # attribute ws._WorkbookChild__title, which breaks across versions.
        wsName = ws.title
        if wsName == "Notes":
            continue
        print("Loading " + wsName + "...")
        sectionMap = {}  # sectionMap has keys in [Counting, Algebra, Energy, etc]
        # local variables to allow for column overrides
        nc = nameCol
        uc = uriCol
        dc = descrCol
        # override column values for specific manually populated sheets in the default file
        # for automatically populated sheets, the default file uses columns F, I, and BB.
        # For manual population, it's much easier to use A, B, and C.
        # This should not affect the values for the user vocab, since that file uses A, B, and C already.
        if wsName in ["Verb", "Activity", "Extension", "Grade"]:
            nc = 0  # A
            uc = 1  # B
            dc = 2  # C
        for row in ws.iter_rows(min_row=2):  # min_row=2 to skip header row
            itemMap = {}  # itemMap has keys in [name, description, id]
            # force all values to lowercase for easy comparison
            name = str(row[nc].value).lower() if row[nc].value is not None else ""
            uri = str(row[uc].value).lower() if row[uc].value is not None else ""
            descr = str(row[dc].value).lower() if row[dc].value is not None else ""
            # populate the map with the corresponding values
            itemMap['name'] = {'en-US': name}
            itemMap['id'] = uri
            itemMap['description'] = {'en-US': descr}
            sectionMap[name] = itemMap
        totalMap[wsName.lower()] = sectionMap
        print("Done.")
    print("Generating Json file...")
    with open(target, 'w') as write_file:
        json.dump(totalMap, write_file, sort_keys=True, indent=4, separators=(',', ': '))
    print("Success!")
# Script entry point: regenerate both vocabulary JSON files from their workbooks.
print("Converting your data...")
# Load default vocabulary
# column indices (0-based): 5 == column F (name), 53 == column BB (uri), 8 == column I (description)
print("Loading default vocabulary...")
GenerateJson('GBLxAPI_Vocab_Default.xlsx', 'GBLxAPI_Vocab_Default.json', 5, 53, 8)
# Load user overrides
# column indices (0-based): 0 == column A (name), 1 == column B (uri), 2 == column C (description)
print("Loading user overrides...")
GenerateJson('GBLxAPI_Vocab_User.xlsx', 'GBLxAPI_Vocab_User.json', 0, 1, 2)
print("All done! Move the two generated Json files to Resources/Data to use the GBLxAPI vocabulary in your Unity project.")
tests/test_models.py | nlessmann/rse-panimg | 5 | 6622950 | <reponame>nlessmann/rse-panimg
import logging
import pytest
from panimg.exceptions import ValidationError
from panimg.image_builders.metaio_utils import load_sitk_image
from panimg.models import EXTRA_METADATA, ExtraMetaData, SimpleITKImage
from tests import RESOURCE_PATH
@pytest.mark.parametrize(
    "vr,valid,invalid",
    [
        (
            "AS",
            ("000D", "123W", "456M", "789Y"),
            ("1Y", "12D", "1234D", "123"),
        ),
        (
            "CS",
            ("M", " A_A", "", "A" * 16),
            ("a", "A" * 17, "\\"),
        ),
        (
            "DA",
            ("20210923", "12341231", ""),
            (
                "12345678",
                "a",
                "1",
                "1234567",
                "2021923",
                "2021010a",
                "123456789",
                "20210229",
                "20210931",
                "12341231123456",
            ),
        ),
        (
            "LO",
            ("", "a" * 64, "😄", "😄" * 64),
            ("a" * 65, "\\", "😄" * 65, r"a\a"),
        ),
        (
            "PN",
            ("", "a" * 324, "😄", "😄" * 324),
            ("a" * 325, "\\", "😄" * 325, r"a\a"),
        ),
        (
            "UI",
            ("", "1.0", "0.0.0.0", "1." * 32),
            ("1." * 33, "a", "😄.😄", "1.2.+.a"),
        ),
    ],
)
def test_dicom_vr_validation(vr, valid, invalid):
    """Values matching the DICOM VR pass validation; malformed ones raise."""
    meta = ExtraMetaData("Test", vr, "test", "default")
    for good in valid:
        meta.validate_value(good)
    for bad in invalid:
        with pytest.raises(ValidationError):
            meta.validate_value(bad)
@pytest.mark.parametrize(
    ["key", "value"],
    [
        ("PatientID", "a" * 65),
        ("PatientName", "a" * 325),
        ("PatientBirthDate", "invalid date"),
        ("PatientAge", "invalid age"),
        ("PatientSex", "invalid sex"),
        ("StudyDate", "invalid date"),
        ("StudyInstanceUID", "invalid uid"),
        ("SeriesInstanceUID", "invalid uid"),
        ("StudyDescription", "a" * 65),
        ("SeriesDescription", "a" * 65),
    ],
)
def test_built_image_invalid_headers(tmpdir, caplog, key, value):
    """Saving an image carrying an out-of-spec DICOM header logs one warning."""
    mhd_path = RESOURCE_PATH / "image3x4-extra-stuff.mhd"
    image = load_sitk_image(mhd_path)
    # Inject the malformed header value before wrapping the image.
    image.SetMetaData(key, value)
    built = SimpleITKImage(
        image=image,
        name=mhd_path.name,
        consumed_files={mhd_path},
        spacing_valid=True,
    )
    built.save(output_directory=tmpdir)
    # Exactly one WARNING record mentioning the validation failure.
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelno == logging.WARNING
    assert "ValidationError" in record.msg
def test_built_image_extra_metadata_defaults(tmpdir, caplog):
    """An image without extra metadata gets each field's declared default."""
    mhd_path = RESOURCE_PATH / "image3x4.mhd"
    built = SimpleITKImage(
        image=load_sitk_image(mhd_path),
        name=mhd_path.name,
        consumed_files={mhd_path},
        spacing_valid=True,
    )
    new_image, new_files = built.save(output_directory=tmpdir)
    # No validation warnings were emitted for a clean image.
    assert not caplog.records
    # Every extra-metadata field on the saved image equals its default.
    for md in EXTRA_METADATA:
        assert getattr(new_image, md.field_name) == md.default_value
| import logging
import pytest
from panimg.exceptions import ValidationError
from panimg.image_builders.metaio_utils import load_sitk_image
from panimg.models import EXTRA_METADATA, ExtraMetaData, SimpleITKImage
from tests import RESOURCE_PATH
@pytest.mark.parametrize(
    "vr,valid,invalid",
    [
        (
            "AS",
            ("000D", "123W", "456M", "789Y"),
            ("1Y", "12D", "1234D", "123"),
        ),
        (
            "CS",
            ("M", " A_A", "", "A" * 16),
            ("a", "A" * 17, "\\"),
        ),
        (
            "DA",
            ("20210923", "12341231", ""),
            (
                "12345678",
                "a",
                "1",
                "1234567",
                "2021923",
                "2021010a",
                "123456789",
                "20210229",
                "20210931",
                "12341231123456",
            ),
        ),
        (
            "LO",
            ("", "a" * 64, "😄", "😄" * 64),
            ("a" * 65, "\\", "😄" * 65, r"a\a"),
        ),
        (
            "PN",
            ("", "a" * 324, "😄", "😄" * 324),
            ("a" * 325, "\\", "😄" * 325, r"a\a"),
        ),
        (
            "UI",
            ("", "1.0", "0.0.0.0", "1." * 32),
            ("1." * 33, "a", "😄.😄", "1.2.+.a"),
        ),
    ],
)
def test_dicom_vr_validation(vr, valid, invalid):
    """Values matching the DICOM VR pass validation; malformed ones raise."""
    meta = ExtraMetaData("Test", vr, "test", "default")
    for good in valid:
        meta.validate_value(good)
    for bad in invalid:
        with pytest.raises(ValidationError):
            meta.validate_value(bad)
@pytest.mark.parametrize(
    ["key", "value"],
    [
        ("PatientID", "a" * 65),
        ("PatientName", "a" * 325),
        ("PatientBirthDate", "invalid date"),
        ("PatientAge", "invalid age"),
        ("PatientSex", "invalid sex"),
        ("StudyDate", "invalid date"),
        ("StudyInstanceUID", "invalid uid"),
        ("SeriesInstanceUID", "invalid uid"),
        ("StudyDescription", "a" * 65),
        ("SeriesDescription", "a" * 65),
    ],
)
def test_built_image_invalid_headers(tmpdir, caplog, key, value):
    """Saving an image carrying an out-of-spec DICOM header logs one warning."""
    mhd_path = RESOURCE_PATH / "image3x4-extra-stuff.mhd"
    image = load_sitk_image(mhd_path)
    # Inject the malformed header value before wrapping the image.
    image.SetMetaData(key, value)
    built = SimpleITKImage(
        image=image,
        name=mhd_path.name,
        consumed_files={mhd_path},
        spacing_valid=True,
    )
    built.save(output_directory=tmpdir)
    # Exactly one WARNING record mentioning the validation failure.
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelno == logging.WARNING
    assert "ValidationError" in record.msg
def test_built_image_extra_metadata_defaults(tmpdir, caplog):
    """An image without extra metadata gets each field's declared default."""
    mhd_path = RESOURCE_PATH / "image3x4.mhd"
    built = SimpleITKImage(
        image=load_sitk_image(mhd_path),
        name=mhd_path.name,
        consumed_files={mhd_path},
        spacing_valid=True,
    )
    new_image, new_files = built.save(output_directory=tmpdir)
    # No validation warnings were emitted for a clean image.
    assert not caplog.records
    # Every extra-metadata field on the saved image equals its default.
    for md in EXTRA_METADATA:
        assert getattr(new_image, md.field_name) == md.default_value