signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_next_objective(self):
    """Gets the next Objective in this list.

    return: (osid.learning.Objective) - the next Objective in this
        list. The has_next() method should be used to test that a
        next Objective is available before calling this method.
    raise: IllegalState - no more elements available in this list
    raise: OperationFailed - unable to complete request
    compliance: mandatory - This method must be implemented."""
    try:
        return next(self)
    except StopIteration:
        raise IllegalState('no more elements available in this list')
    except Exception:  # TODO: narrow down the exceptions that map to OperationFailed
        raise OperationFailed()
def get_neuroglancer_link(self, resource, resolution, x_range, y_range, z_range, **kwargs):
    """Get a neuroglancer link of the cutout specified from the host specified
    in the remote configuration step.

    Args:
        resource (intern.resource.Resource): Resource compatible with cutout operations.
        resolution (int): 0 indicates native resolution.
        x_range (list[int]): x range such as [10, 20] meaning 10 <= x < 20.
        y_range (list[int]): y range such as [10, 20] meaning 10 <= y < 20.
        z_range (list[int]): z range such as [10, 20] meaning 10 <= z < 20.

    Returns:
        (string): The neuroglancer link.

    Raises:
        RuntimeError when given invalid resource.
        Other exceptions may be raised depending on the volume service's implementation.
    """
    # Delegate to the configured volume service, passing along our URL prefix.
    volume_service = self.service
    return volume_service.get_neuroglancer_link(
        resource, resolution, x_range, y_range, z_range, self.url_prefix, **kwargs)
def query(**kwargs):
    """Queries for work items based on their criteria.

    Args:
        queue_name: Optional queue name to restrict to.
        build_id: Optional build ID to restrict to.
        release_id: Optional release ID to restrict to.
        run_id: Optional run ID to restrict to.
        count: How many tasks to fetch. Defaults to None, which means all
            tasks that match the query are fetched.

    Returns:
        Dictionaries of the most recent tasks that match the criteria, in
        order of most recently created. When count is 1 the return value
        will be the most recent task or None. When count is not 1 the
        return value will be a list of tasks.
    """
    count = kwargs.get('count', None)
    results = [_task_to_dict(task) for task in _query(**kwargs)]
    if count != 1:
        return results
    # count == 1: collapse to a single task (or None when nothing matched)
    return results[0] if results else None
def GetElevation(self, latitude, longitude, timeout=0):
    '''Returns the altitude (m ASL) of a given lat/long pair, or None if unknown.

    :param latitude: latitude in degrees (None returns None)
    :param longitude: longitude in degrees (None returns None)
    :param timeout: seconds to retry downloading a missing SRTM tile
    :return: altitude in metres, or None when it cannot be determined
    '''
    if latitude is None or longitude is None:
        return None
    # BUG FIX: previously `alt` was unbound when self.database was neither
    # 'srtm' nor 'geoscience', raising UnboundLocalError instead of
    # returning None as documented.
    alt = None
    if self.database == 'srtm':
        tile_id = (numpy.floor(latitude), numpy.floor(longitude))
        if tile_id in self.tileDict:
            alt = self.tileDict[tile_id].getAltitudeFromLatLon(latitude, longitude)
        else:
            tile = self.downloader.getTile(numpy.floor(latitude), numpy.floor(longitude))
            if tile == 0:
                if timeout > 0:
                    # Poll the downloader until the tile arrives or we time out.
                    t0 = time.time()
                    while time.time() < t0 + timeout and tile == 0:
                        tile = self.downloader.getTile(numpy.floor(latitude), numpy.floor(longitude))
                        if tile == 0:
                            time.sleep(0.1)
                if tile == 0:
                    return None
            self.tileDict[tile_id] = tile  # cache the tile for later lookups
            alt = tile.getAltitudeFromLatLon(latitude, longitude)
    elif self.database == 'geoscience':  # was a second plain `if`
        alt = self.mappy.getAltitudeAtPoint(latitude, longitude)
    return alt
def get_multireddits(self, redditor, *args, **kwargs):
    """Return a list of multireddits belonging to a redditor.

    :param redditor: The username or Redditor object to find multireddits from.
    :returns: The json response from the server.

    The additional parameters are passed directly into
    :meth:`~praw.__init__.BaseReddit.request_json`.

    If the requested redditor is the current user, all multireddits
    are visible. Otherwise, only public multireddits are returned.
    """
    # Accept either a username string or a Redditor object.
    username = six.text_type(redditor)
    endpoint = self.config['multireddit_user'].format(user=username)
    return self.request_json(endpoint, *args, **kwargs)
def pipe_strtransform(context=None, _INPUT=None, conf=None, **kwargs):
    """A string module that applies the configured transformation
    (e.g. ``swapcase``) to each input string. Loopable.

    DOC FIX: the previous docstring described the tokenizer module
    ("splits a string into tokens"); this module transforms strings.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : iterable of items or strings
    conf : {'transformation': {'value': <'swapcase'>}}

    Returns
    -------
    _OUTPUT : generator of transformed strings
    """
    splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
    parsed = utils.dispatch(splits, *get_dispatch_funcs())
    _OUTPUT = starmap(parse_result, parsed)
    return _OUTPUT
def reload(self):
    """Create a new partition scheme. A scheme defines which utterances are
    in which partition. The scheme only changes after every call if
    ``self.shuffle == True``.

    Returns:
        list: List of PartitionInfo objects, defining the new partitions
        (same as ``self.partitions``)
    """
    # Determine the order in which utterances will be loaded.
    ordered_ids = sorted(self.utt_ids)
    if self.shuffle:
        self.rand.shuffle(ordered_ids)

    new_partitions = []
    part = PartitionInfo()

    for utt_id in ordered_ids:
        size = self.utt_sizes[utt_id]
        lengths = self.utt_lengths[utt_id]
        # Keep filling the current partition until adding the next
        # utterance would exceed the configured partition size, then
        # start a fresh one.
        if part.size + size > self.partition_size:
            new_partitions.append(part)
            part = PartitionInfo()
        part.utt_ids.append(utt_id)
        part.utt_lengths.append(lengths)
        part.size += size

    # Don't lose the trailing, partially-filled partition.
    if part.size > 0:
        new_partitions.append(part)

    self.partitions = new_partitions
    return self.partitions
def convert_binary_field_to_attachment(env, field_spec):
    """This method converts the 8.0 binary fields to attachments like Odoo 9.0
    makes with the new attachment=True attribute. It has to be called on
    post-migration script, as there's a call to get the res_name of the
    target model, which is not yet loaded on pre-migration.

    You need to rename the involved column in pre-migration script if you
    don't want to lose your data in the process.

    This method also removes after the conversion the source column for
    avoiding data duplication.

    This is done through Odoo ORM, because there's a lot of logic associated
    with guessing MIME type, format and length, file saving in store...
    that isn't worth recreating via SQL as there's not too much
    performance problem.

    :param env: Odoo environment
    :param field_spec: A dictionary with the ORM model name as key, and as
        dictionary values a tuple with:

        * field name to be converted as attachment as first element.
        * SQL column name that contains actual data as second element. If
          the second element is None, then the column name is taken
          calling `get_legacy_name` method, which is the typical technique.
    """
    logger = logging.getLogger('OpenUpgrade')
    attachment_model = env['ir.attachment']
    for model_name in field_spec:
        model = env[model_name]
        for field, column in field_spec[model_name]:
            if column is None:
                # Typical technique: data was renamed to the legacy column
                # name in the pre-migration script.
                column = openupgrade.get_legacy_name(field)
            logger.info(
                "Converting to attachment field {} from model {} stored in "
                "column {}".format(field, model_name, column))
            last_id = 0
            while True:
                # Keyset pagination: fetch the next batch of at most 500
                # non-NULL rows after the last processed id.
                env.cr.execute(
                    """SELECT id, {0} FROM {1} WHERE {0} IS NOT NULL AND id > {2}
                    ORDER BY id LIMIT 500;
                    """.format(column, model._table, last_id))
                rows = env.cr.fetchall()
                if not rows:
                    break
                logger.info(
                    " converting {0} items starting after {1}..."
                    "".format(len(rows), last_id))
                for row in rows:
                    last_id = row[0]
                    data = bytes(row[1])
                    # NOTE(review): on Python 3, `data` is bytes and can
                    # never equal the str 'None' -- confirm this guard still
                    # filters the intended sentinel values.
                    if data and data != 'None':
                        attachment_model.create({
                            'name': field,
                            'res_model': model_name,
                            'res_field': field,
                            'res_id': last_id,
                            'type': 'binary',
                            'datas': data,
                        })
            # Remove source column for cleaning the room
            env.cr.execute(
                "ALTER TABLE {} DROP COLUMN {}".format(model._table, column,))
def _calc_b ( w , aod700 ) :
"""Calculate the b coefficient .""" | b1 = 0.00925 * aod700 ** 2 + 0.0148 * aod700 - 0.0172
b0 = - 0.7565 * aod700 ** 2 + 0.5057 * aod700 + 0.4557
b = b1 * np . log ( w ) + b0
return b |
def select_resources(self, *args, **kwargs):
    """Copy the query and add filtering by resource labels.

    Examples::

        query = query.select_resources(zone='us-central1-a')
        query = query.select_resources(zone_prefix='europe-')
        query = query.select_resources(resource_type='gce_instance')

    A keyword argument ``<label>=<value>`` ordinarily generates a filter
    expression of the form::

        resource.label.<label> = "<value>"

    Adding ``"_prefix"`` or ``"_suffix"`` to the keyword specifies a
    partial match: ``<label>_prefix=<value>`` generates
    ``resource.label.<label> = starts_with("<value>")`` and
    ``<label>_suffix=<value>`` generates
    ``resource.label.<label> = ends_with("<value>")``.

    As a special case, ``"resource_type"`` is treated as a pseudo-label
    corresponding to the filter object ``resource.type``:
    ``resource_type=<value>`` generates ``resource.type = "<value>"``.
    See the `defined resource types`_.

    .. note::

        The label ``"instance_name"`` is a metric label, not a resource
        label. You would filter on it using ``select_metrics(instance_name=...)``.

    :type args: tuple
    :param args: Raw filter expression strings to include in the
        conjunction. If just one is provided and no keyword arguments
        are provided, it can be a disjunction.

    :type kwargs: dict
    :param kwargs: Label filters to include in the conjunction as
        described above.

    :rtype: :class:`Query`
    :returns: The new query object.

    .. _defined resource types:
        https://cloud.google.com/monitoring/api/v3/monitored-resources
    """
    # Queries are immutable from the caller's perspective: mutate a deep
    # copy and hand that back.
    duplicate = copy.deepcopy(self)
    duplicate._filter.select_resources(*args, **kwargs)
    return duplicate
def _Pcn(x, dsz, Nv, dimN=2, dimC=1):
    """Projection onto dictionary update constraint set: support
    projection and normalisation. The result has the full spatial
    dimensions of the input.

    Parameters
    ----------
    x : array_like
        Input array
    dsz : tuple
        Filter support size(s), specified using the same format as the
        `dsz` parameter of :func:`bcrop`
    Nv : tuple
        Sizes of problem spatial indices
    dimN : int, optional (default 2)
        Number of problem spatial indices
    dimC : int, optional (default 1)
        Number of problem channel indices

    Returns
    -------
    y : ndarray
        Projection of input onto constraint set
    """
    # Crop to the filter support, zero-pad back to the full spatial size,
    # then normalise over the combined spatial+channel axes.
    cropped = bcrop(x, dsz, dimN)
    padded = zpad(cropped, Nv)
    return normalise(padded, dimN + dimC)
def by_skills(queryset, skill_string=None):
    """Filter queryset by a comma delimited skill list."""
    if not skill_string:
        # Nothing to filter on -- return the queryset untouched.
        return queryset
    operator, items = get_operator_and_items(skill_string)
    combined = SQ()
    for skill in items:
        if skill:
            combined.add(SQ(skills=skill), operator)
    return queryset.filter(combined)
def exec_command(self, cmd):
    """Executes the given command.

    This method executes a command by calling the DLL's exec method.
    Direct API methods should be prioritized over calling this method.

    Args:
        self (JLink): the ``JLink`` instance
        cmd (str): the command to run

    Returns:
        The return code of running the command.

    Raises:
        JLinkException: if the command is invalid or fails.

    See Also:
        For a full list of the supported commands, please see the SEGGER
        J-Link documentation,
        `UM08001 <https://www.segger.com/downloads/jlink>`__.
    """
    err_buf = (ctypes.c_char * self.MAX_BUF_SIZE)()
    res = self._dll.JLINKARM_ExecCommand(cmd.encode(), err_buf, self.MAX_BUF_SIZE)
    error_message = ctypes.string_at(err_buf).decode()
    # The SEGGER documentation reports errors through the error buffer
    # rather than the return code, so check the buffer contents.
    if error_message:
        raise errors.JLinkException(error_message.strip())
    return res
def digest(self, **args):
    """Calculate a digest based on the hash of the XML content."""
    # Canonicalize first so logically-equal XML yields the same digest.
    canonical = XML.canonicalized_string(self.root)
    return String(canonical).digest(**args)
def encode_quopri(msg):
    """Encode the message's payload in quoted-printable.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    encoded = _qencode(msg.get_payload())
    msg.set_payload(encoded)
    msg['Content-Transfer-Encoding'] = 'quoted-printable'
def get_bool_attr(self, name):
    """Returns the value of a boolean HTML attribute like `checked` or
    `disabled`: True when the attribute is present and equal (case-
    insensitively) to "true" or to its own name, else False."""
    value = self.get_attr(name)
    if value is None:
        return False
    return value.lower() in ("true", name)
def create_config_files(directory):
    """Initialize directory ready for vpn walker.

    Downloads the PureVPN Linux config zip, extracts it one level above
    ``directory``, copies the CA certificate and key, renames each
    ``.ovpn`` config after its resolved server IP, appends DNS update
    hooks to every file, and writes a ``servers.txt`` file mapping
    IP -> country.

    NOTE(review): this is Python 2 code (``print`` statement and
    ``urllib.URLopener``); it will not run under Python 3 as-is.

    :param directory: the path where you want this to happen
    :return:
    """
    # Some constant strings
    config_zip_url = "https://s3-us-west-1.amazonaws.com/heartbleed/linux/linux-files.zip"
    if not os.path.exists(directory):
        os.makedirs(directory)
    logging.info("Starting to download PureVPN config file zip")
    url_opener = urllib.URLopener()
    # The zip and its extracted tree live one level above `directory` so
    # they do not mix with the generated per-IP configs.
    zip_path = os.path.join(directory, '../linux_files.zip')
    url_opener.retrieve(config_zip_url, zip_path)
    logging.info("Extracting zip file")
    unzip(zip_path, os.path.join(directory, '../'))
    # remove zip file
    os.remove(zip_path)
    # copy ca and key to root path
    shutil.copyfile(os.path.join(directory, '../Linux OpenVPN Updated files', 'ca.crt'),
                    os.path.join(directory, '../ca.crt'))
    shutil.copyfile(os.path.join(directory, '../Linux OpenVPN Updated files', 'Wdc.key'),
                    os.path.join(directory, '../Wdc.key'))
    # move all config files to /vpns
    orig_path = os.path.join(directory, '../Linux OpenVPN Updated files/TCP')
    server_country = {}
    for filename in os.listdir(orig_path):
        if filename.endswith('.ovpn'):
            # The country is encoded before the first '-' of the filename;
            # '(V)' appears to mark virtual locations and is stripped.
            country = filename.split('-')[0]
            if '(V)' in country:
                country = country[:country.find('(V)')]
            file_path = os.path.join(orig_path, filename)
            lines = [line.rstrip('\n') for line in open(file_path)]
            # get ip address for this vpn: resolve the hostname on the
            # first 'remote' line of the config.
            ip = ""
            for line in lines:
                if line.startswith('remote'):
                    hostname = line.split(' ')[1]
                    ip = socket.gethostbyname(hostname)
                    break
            if len(ip) > 0:
                # Configs are renamed to '<ip>.ovpn' inside `directory`.
                new_path = os.path.join(directory, ip + '.ovpn')
                shutil.copyfile(file_path, new_path)
                server_country[ip] = country
    # remove extracted folder
    shutil.rmtree(os.path.join(directory, '../Linux OpenVPN Updated files'))
    # add dns update options to each file
    logging.info("Appending DNS update options")
    for filename in os.listdir(directory):
        file_path = os.path.join(directory, filename)
        with open(file_path, 'a') as f:
            f.write("\n")
            f.write("up /etc/openvpn/update-resolv-conf\n")
            f.write("down /etc/openvpn/update-resolv-conf\n")
    print os.path.join(directory, 'servers.txt'), len(server_country)
    with open(os.path.join(directory, 'servers.txt'), 'w') as f:
        for ip in server_country:
            f.write('|'.join([ip, server_country[ip]]) + '\n')
def create_migration_ctx(**kwargs):
    """Create an alembic migration context bound to the application's
    database engine."""
    env_ctx = EnvironmentContext(Config(), None)
    connection = db.engine.connect()
    env_ctx.configure(connection=connection, sqlalchemy_module_prefix='db.', **kwargs)
    return env_ctx.get_context()
def _parse_tile_part_bit_stream(self, fptr, sod_marker, tile_length):
    """Parse the tile part bit stream for SOP, EPH marker segments."""
    read_buffer = fptr.read(tile_length)
    # The tile length could possibly be too large and extend past
    # the end of file. We need to be a bit resilient.
    count = min(tile_length, len(read_buffer))
    packet = np.frombuffer(read_buffer, dtype=np.uint8, count=count)

    # All marker candidates begin with 0xff; scan for those positions.
    indices = np.where(packet == 0xff)
    for idx in indices[0]:
        try:
            if packet[idx + 1] == 0x91 and (idx < (len(packet) - 5)):
                # 0xff 0x91: SOP (start of packet) marker segment.
                offset = sod_marker.offset + 2 + idx
                length = 4
                # Packet sequence number: two bytes starting 4 bytes past
                # the marker, stored big-endian -- byteswap on
                # little-endian hosts.
                nsop = packet[(idx + 4):(idx + 6)].view('uint16')[0]
                if sys.byteorder == 'little':
                    nsop = nsop.byteswap()
                segment = SOPsegment(nsop, length, offset)
                self.segment.append(segment)
            elif packet[idx + 1] == 0x92:
                # 0xff 0x92: EPH (end of packet header) marker segment.
                offset = sod_marker.offset + 2 + idx
                length = 0
                segment = EPHsegment(length, offset)
                self.segment.append(segment)
        except IndexError:
            # Marker candidate too close to the end of the buffer; skip it.
            continue
async def start(self):
    """Enter the transaction or savepoint block."""
    self.__check_state_base('start')
    if self._state is TransactionState.STARTED:
        raise apg_errors.InterfaceError(
            'cannot start; the transaction is already started')
    con = self._connection
    if con._top_xact is None:
        # This is the outermost transaction block on this connection.
        if con._protocol.is_in_transaction():
            raise apg_errors.InterfaceError(
                'cannot use Connection.transaction() in '
                'a manually started transaction')
        con._top_xact = self
    else:  # Nested transaction block
        top_xact = con._top_xact
        # Isolation level is a property of the whole transaction; nested
        # blocks must agree with the outermost one.
        if self._isolation != top_xact._isolation:
            raise apg_errors.InterfaceError(
                'nested transaction has a different isolation level: '
                'current {!r} != outer {!r}'.format(
                    self._isolation, top_xact._isolation))
        self._nested = True
    if self._nested:
        # Nested blocks are implemented with SAVEPOINTs.
        self._id = con._get_unique_id('savepoint')
        query = 'SAVEPOINT {};'.format(self._id)
    else:
        if self._isolation == 'read_committed':
            query = 'BEGIN;'
        elif self._isolation == 'repeatable_read':
            query = 'BEGIN ISOLATION LEVEL REPEATABLE READ;'
        else:
            # NOTE(review): READ ONLY / DEFERRABLE appear to apply only to
            # the serializable branch (the missing ';' above suggests so)
            # -- confirm against upstream.
            query = 'BEGIN ISOLATION LEVEL SERIALIZABLE'
            if self._readonly:
                query += ' READ ONLY'
            if self._deferrable:
                query += ' DEFERRABLE'
            query += ';'
    try:
        await self._connection.execute(query)
    except BaseException:
        # Any failure (including cancellation) leaves the block FAILED.
        self._state = TransactionState.FAILED
        raise
    else:
        self._state = TransactionState.STARTED
def audio_set_format(self, format, rate, channels):
    '''Set decoded audio format.

    This only works in combination with L{audio_set_callbacks}(),
    and is mutually exclusive with L{audio_set_format_callbacks}().

    @param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
    @param rate: sample rate (expressed in Hz).
    @param channels: channels count.
    @version: LibVLC 2.0.0 or later.
    '''
    # The C API expects a byte string for the sample-format FourCC.
    encoded_format = str_to_bytes(format)
    return libvlc_audio_set_format(self, encoded_format, rate, channels)
# The namedtuple type is created once at import time instead of being
# rebuilt on every call (the original recreated the class per invocation).
_PkgInfo = collections.namedtuple(
    'PkgInfo',
    ('name', 'version', 'arch', 'repoid', 'install_date', 'install_date_time_t'))


def pkginfo(name, version, arch, repoid, install_date=None, install_date_time_t=None):
    '''Build and return a pkginfo namedtuple.

    :param name: package name
    :param version: package version string
    :param arch: package architecture
    :param repoid: repository the package came from
    :param install_date: optional human-readable install date
    :param install_date_time_t: optional install date as epoch seconds
    :return: a ``PkgInfo`` namedtuple with the given fields
    '''
    return _PkgInfo(name, version, arch, repoid, install_date, install_date_time_t)
def create_datapoint(value, timestamp=None, **tags):
    """Creates a single datapoint dict with a value, timestamp and tags.

    :param value: Value of the datapoint. Type depends on the id's MetricType
    :param timestamp: Optional timestamp of the datapoint. Uses client current
        time if not set. Millisecond accuracy. Can be a datetime instance also.
    :param tags: Optional datapoint tags. Not to be confused with metric
        definition tags.
    :return: dict with 'timestamp', 'value' and, when tags were given, 'tags'
    """
    if timestamp is None:
        timestamp = time_millis()
    # BUG FIX: use isinstance instead of `type(...) is datetime` so that
    # datetime subclasses are converted too.
    if isinstance(timestamp, datetime):
        timestamp = datetime_to_time_millis(timestamp)
    item = {'timestamp': timestamp, 'value': value}
    # BUG FIX: **tags is always a dict (never None), so the old
    # `if tags is not None` check attached an empty 'tags': {} entry to
    # every datapoint. Only include 'tags' when tags were actually given.
    if tags:
        item['tags'] = tags
    return item
def user_password_update(user_id=None, name=None, password=None, profile=None, **connection_args):
    '''Update a user's password (keystone user-password-update)

    CLI Examples:

    .. code-block:: bash

        salt '*' keystone.user_password_update c965f79c4f864eaaa9c3b41904e67082 password=12345
        salt '*' keystone.user_password_update user_id=c965f79c4f864eaaa9c3b41904e67082 password=12345
        salt '*' keystone.user_password_update name=nova password=12345
    '''
    kstone = auth(profile, **connection_args)
    if name:
        # Resolve the user id from the given name.
        match = next((user for user in kstone.users.list() if user.name == name), None)
        if match is not None:
            user_id = match.id
    if not user_id:
        return {'Error': 'Unable to resolve user id'}
    # The password-update call differs between Identity API v2 and v3.
    if _OS_IDENTITY_API_VERSION > 2:
        kstone.users.update(user=user_id, password=password)
    else:
        kstone.users.update_password(user=user_id, password=password)
    ret = 'Password updated for user ID {0}'.format(user_id)
    if name:
        ret += ' ({0})'.format(name)
    return ret
def _on_namreply(self, connection, event):
    """Handle a NAMES reply (RPL_NAMREPLY).

    event.arguments[0] == "@" for secret channels,
        "*" for private channels,
        "=" for others (public channels)
    event.arguments[1] == channel
    event.arguments[2] == nick list
    """
    ch_type, channel, nick_list = event.arguments
    if channel == '*':
        # User is not in any visible channel
        # http://tools.ietf.org/html/rfc2812#section-3.2.5
        return
    prefix_map = self.connection.features.prefix
    for nick in nick_list.split():
        modes = []
        # A leading prefix character (e.g. '@' or '+') encodes a mode.
        if nick[0] in prefix_map:
            modes.append(prefix_map[nick[0]])
            nick = nick[1:]
        for mode in modes:
            self.channels[channel].set_mode(mode, nick)
        self.channels[channel].add_user(nick)
def convert_missing_indexer(indexer):
    """Reverse-convert a missing indexer, which is a dict.

    Return the scalar indexer and a boolean indicating if we converted.
    """
    if not isinstance(indexer, dict):
        return indexer, False
    # a missing key (but not a tuple indexer)
    scalar = indexer['key']
    if isinstance(scalar, bool):
        raise KeyError("cannot use a single bool to index into setitem")
    return scalar, True
def new_canvas(self, figure=None, col=1, row=1, projection='2d', xlabel=None, ylabel=None, zlabel=None, title=None, xlim=None, ylim=None, zlim=None, **kwargs):
    """Return a canvas, kwargupdate for your plotting library.

    If figure is not None, create a canvas in the figure at subplot
    position (col, row). This method does two things: it creates an empty
    canvas and updates the kwargs (deletes the unnecessary kwargs) for
    further usage in normal plotting. The kwargs are plotting-library
    specific kwargs!

    :param {'2d'|'3d'} projection: The projection to use.
    :param xlabel: the label to put on the xaxis
    :param ylabel: the label to put on the yaxis
    :param zlabel: the label to put on the zaxis (if plotting in 3d)
    :param title: the title of the plot
    :param legend: if True, plot a legend; if int, make that many rows in the legend
    :param (float, float) xlim: the limits for the xaxis
    :param (float, float) ylim: the limits for the yaxis
    :param (float, float) zlim: the limits for the zaxis (if plotting in 3d)
    """
    # Abstract hook: concrete plotting backends must override this.
    raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def missing_parameter_values(self, parameter_values):
    """Checks if the given input contains values for all parameters used by
    this template.

    :param dict parameter_values: Dictionary of values for each parameter
        used in the template
    :return list: List of names of parameters that are missing.
    :raises InvalidParameterValues: When parameter values is not a valid dictionary
    """
    if not self._is_valid_parameter_values(parameter_values):
        raise InvalidParameterValues(
            "Parameter values are required to process a policy template")
    required = set(self.parameters.keys())
    provided = set(parameter_values.keys())
    return list(required - provided)
def register(self, subject, avro_schema):
    """POST /subjects/(string: subject)/versions

    Register a schema with the registry under the given subject and
    receive a schema id. avro_schema must be a parsed schema from the
    python avro library. Multiple instances of the same schema will
    result in cache misses.

    :param str subject: subject name
    :param schema avro_schema: Avro schema to be registered
    :returns: schema_id
    :rtype: int
    """
    # Fast path: return the locally cached id when we've seen this
    # exact schema object for this subject before.
    cached_id = self.subject_to_schema_ids[subject].get(avro_schema, None)
    if cached_id is not None:
        return cached_id

    # Not cached -- register with the remote schema registry.
    url = '/'.join([self.url, 'subjects', subject, 'versions'])
    body = {'schema': json.dumps(avro_schema.to_json())}
    result, code = self._send_request(url, method='POST', body=body)

    if code in (401, 403):
        raise ClientError("Unauthorized access. Error code:" + str(code))
    if code == 409:
        raise ClientError("Incompatible Avro schema:" + str(code))
    if code == 422:
        raise ClientError("Invalid Avro schema:" + str(code))
    if not 200 <= code <= 299:
        raise ClientError("Unable to register schema. Error code:" + str(code))

    schema_id = result['id']
    self._cache_schema(avro_schema, schema_id, subject)
    return schema_id
def pkcs_mgf1(mgfSeed, maskLen, h):
    """Implements generic MGF1 Mask Generation function as described in
    Appendix B.2.1 of RFC 3447. The hash function is passed by name.
    Valid values are 'md2', 'md4', 'md5', 'sha1', 'tls', 'sha256',
    'sha384' and 'sha512'. Returns None on error.

    Input:
        mgfSeed: seed from which mask is generated, an octet string
        maskLen: intended length in octets of the mask, at most
                 2^32 * hLen (see below)
        h:       hash function name. hLen denotes the length in octets
                 of the hash function output.

    Output:
        an octet string of length maskLen
    """
    # steps are those of Appendix B.2.1
    if h not in _hashFuncParams:
        # BUG FIX: the format string had a %s placeholder but no argument,
        # which would raise at warning time instead of naming the bad hash.
        warning("pkcs_mgf1: invalid hash (%s) provided" % h)
        return None
    hLen = _hashFuncParams[h][0]
    hFunc = _hashFuncParams[h][1]
    if maskLen > 2 ** 32 * hLen:
        warning("pkcs_mgf1: maskLen > 2**32 * hLen")
        return None
    # T = T || Hash(mgfSeed || C) for counter = 0 .. ceil(maskLen/hLen)-1
    T = ""
    maxCounter = math.ceil(float(maskLen) / float(hLen))
    counter = 0
    while counter < maxCounter:
        C = pkcs_i2osp(counter, 4)
        T += hFunc(mgfSeed + C)
        counter += 1
    return T[:maskLen]
def _get_or_create_group_parent(message_body, user_id):
    """Determine if the given task belongs to a group or not, and if so,
    get or create a status record for the group.

    Arguments:
        message_body (dict): The body of the before_task_publish signal for
            the task in question
        user_id (int): The primary key of the user model record for the user
            who triggered the task. (If using a custom user model, this may
            not be an integer.)

    Returns:
        UserTaskStatus: The status record for the containing group, or
        ``None`` if there isn't one
    """
    parent_id = message_body.get('taskset', None)
    if not parent_id:
        # Task is not part of a group.
        return None
    parent_name = message_body['kwargs'].get('user_task_name', '')
    parent, _ = UserTaskStatus.objects.get_or_create(
        task_id=parent_id,
        defaults={
            'is_container': True,
            'name': parent_name,
            'task_class': 'celery.group',
            'total_steps': 0,
            'user_id': user_id,
        })
    # An earlier record may have been created without a name; fill it in.
    if parent_name and not parent.name:
        parent.name = parent_name
        parent.save(update_fields={'name', 'modified'})
    return parent
def _onWhat(self, name, line, pos, absPosition):
    """Memorizes an imported item on the most recently seen import."""
    item = ImportWhat(name, line, pos, absPosition)
    self.__lastImport.what.append(item)
def find(self, *tags):
    """Find containers that match the given set of tags.

    :param tags: tag values (converted to strings before matching)
    :return: result of the ``corex.find`` client command
    """
    tag_strings = [str(tag) for tag in tags]
    return self._client.json('corex.find', {'tags': tag_strings})
def first(values, axis, skipna=None):
    """Return the first non-NA elements in this array along the given axis"""
    # Only dtypes that can actually hold NaN (not int/bytes/str) take the
    # NaN-skipping path.
    wants_skipna = skipna or skipna is None
    if wants_skipna and values.dtype.kind not in 'iSU':
        _fail_on_dask_array_input_skipna(values)
        return nanfirst(values, axis)
    return take(values, 0, axis=axis)
def start_task(self, task):
    """Begin logging of a task.

    Stores the time this task was started in order to return time lapsed
    when `complete_task` is called.

    Parameters
    ----------
    task : str
        Name of the task to be started
    """
    message = "Calculating {}...".format(task)
    self.info(message)
    self.tasks[task] = self.timer()
def add(self, packet):
    """Add the given Packet to this PacketHistory.

    Only fields that are present (non-None) on the packet are recorded;
    previously stored values for absent fields are preserved.
    """
    for field in self._names:
        field_value = getattr(packet, field)
        if field_value is not None:
            self._dict[field] = field_value
def description(tag):
    """Gets a list of descriptions given the tag.

    :param str tag: (hyphen-separated) tag.
    :return: list of string descriptions. The return list can be empty.
    """
    tag_object = Tag(tag)
    # Start with the tag's own descriptions, then append each subtag's.
    results = list(tag_object.descriptions)
    for subtag in tag_object.subtags:
        results += subtag.description
    return results
def exp(computation: BaseComputation, gas_per_byte: int) -> None:
    """Exponentiation opcode: pop base and exponent, push
    base ** exponent mod 2**256, charging gas per exponent byte."""
    base, exponent = computation.stack_pop(num_items=2, type_hint=constants.UINT256)

    # Gas cost scales with the byte length of the exponent.
    exponent_bytes = ceil8(exponent.bit_length()) // 8

    if exponent == 0:
        result = 1
    elif base == 0:
        result = 0
    else:
        result = pow(base, exponent, constants.UINT_256_CEILING)

    computation.consume_gas(
        gas_per_byte * exponent_bytes,
        reason="EXP: exponent bytes",
    )
    computation.stack_push(result)
def __load_all(self, config_filename):
    """Loads all stored routines into the RDBMS instance.

    Runs the full pipeline: read configuration, connect, discover source
    files and metadata, load the routines, drop obsolete ones, persist
    the updated metadata, and disconnect.

    :param str config_filename: string The filename of the configuration file.
    """
    self._read_configuration_file(config_filename)
    self.connect()
    # Gather inputs before touching the database schema.
    self.__find_source_files()
    self._get_column_type()
    self.__read_stored_routine_metadata()
    self.__get_constants()
    self._get_old_stored_routine_info()
    self._get_correct_sql_mode()
    # Apply changes, then clean up anything no longer backed by a source file.
    self.__load_stored_routines()
    self._drop_obsolete_routines()
    self.__remove_obsolete_metadata()
    self.__write_stored_routine_metadata()
    self.disconnect()
def replies(self, delegate, params=None, extra_args=None):
    """Get the most recent replies for the authenticating user.

    See search for example of how results are returned.

    :param delegate: callback receiving the parsed results
    :param params: optional dict of query parameters
    :param extra_args: passed through to the underlying request
    """
    # BUG FIX: the default was a mutable dict ({}), shared across calls.
    if params is None:
        params = {}
    return self.__get('/statuses/replies.atom', delegate, params, extra_args=extra_args)
def get_model_fields(model, add_reserver_flag=True):
    """Creating fields suit for model_config, id will be skipped.

    Builds a list of plain-dict field descriptions from a uliweb ORM
    model's properties.

    NOTE(review): Python 2 code (uses the ``unicode`` builtin).
    NOTE(review): ``add_reserver_flag`` is accepted but never consulted;
    ``_reserved`` is unconditionally set to True -- confirm intended.

    :param model: uliweb ORM model whose properties are inspected.
    :return: list of dicts describing each field (``id`` excluded).
    """
    import uliweb.orm as orm
    fields = []
    # Direct copies: output key -> property attribute name.
    m = {'type': 'type_name', 'hint': 'hint', 'default': 'default', 'required': 'required'}
    # Boolean flags taken from the property's kwargs.
    m1 = {'index': 'index', 'unique': 'unique'}
    for name, prop in model.properties.items():
        if name == 'id':
            # id is implicit and never part of the generated config.
            continue
        d = {}
        for k, v in m.items():
            d[k] = getattr(prop, v)
        for k, v in m1.items():
            d[k] = bool(prop.kwargs.get(v))
        d['name'] = prop.fieldname or name
        d['verbose_name'] = unicode(prop.verbose_name)
        d['nullable'] = bool(prop.kwargs.get('nullable', orm.__nullable__))
        # Length only applies to string-like column types.
        if d['type'] in ('VARCHAR', 'CHAR', 'BINARY', 'VARBINARY'):
            d['max_length'] = prop.max_length
        if d['type'] in ('Reference', 'OneToOne', 'ManyToMany'):
            d['reference_class'] = prop.reference_class
            # collection_name is taken from _collection_name, i.e. the
            # original (unmodified) value.
            d['collection_name'] = prop._collection_name
        d['server_default'] = prop.kwargs.get('server_default')
        d['_reserved'] = True
        fields.append(d)
    return fields
def _initURL(self, org_url, referer_url):
    """Derive and cache the org URL, sharing-REST URL, secure URL, referer,
    and (when the portal reports one) the signed-in username for AGOL."""
    if org_url is not None and org_url != '':
        # Default to HTTPS when no scheme was supplied.
        if not org_url.startswith(('http://', 'https://')):
            org_url = 'https://' + org_url
        self._org_url = org_url
    if '/sharing/rest' in self._org_url.lower():
        self._url = self._org_url
    else:
        self._url = self._org_url + "/sharing/rest"
    # Always keep a secure (https) variant of the REST endpoint.
    if self._url.startswith('http://'):
        self._surl = self._url.replace('http://', 'https://')
    else:
        self._surl = self._url
    pieces = urlparse(self._org_url)
    self._parsed_org_url = urlunparse((pieces[0], pieces[1], "", "", "", ""))
    if referer_url is None:
        self._referer_url = urlparse(self._org_url).netloc
    portal_info = self._post(url='{}/portals/self'.format(self._url),
                             param_dict={'f': 'json'},
                             securityHandler=self,
                             proxy_port=self._proxy_port,
                             proxy_url=self._proxy_url)
    if 'user' in portal_info and 'username' in portal_info['user']:
        self._username = portal_info['user']['username']
def modify_handle_value(self, handle, ttl=None, add_if_not_exist=True, **kvpairs):
    '''Modify entries (key-value-pairs) in a handle record. If the key
    does not exist yet, it is created (unless ``add_if_not_exist`` is False).

    *Note:* We assume that a key exists only once. In case a key exists
    several times, an exception will be raised.
    *Note:* To modify 10320/LOC, please use
    :meth:`~b2handle.handleclient.EUDATHandleClient.add_additional_URL` or
    :meth:`~b2handle.handleclient.EUDATHandleClient.remove_additional_URL`.

    :param handle: Handle whose record is to be modified.
    :param ttl: Optional. Integer value, used when new entries are created.
    :param add_if_not_exist: If True (default), keys missing from the record
        are added instead of being silently skipped.
    :param all other args: Several key-value pairs naming the handle value
        types to modify (e.g. "URL") and the new values to store in "data".
        For 'HS_ADMIN' the value must be of the form
        {'handle': 'xyz', 'index': xyz}; default permissions are applied.
    :raises: :exc:`~b2handle.handleexceptions.HandleAuthenticationError`
    :raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
    :raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError`
    '''
    LOGGER.debug('modify_handle_value...')
    # Read the current handle record; we edit it locally and PUT it back.
    handlerecord_json = self.retrieve_handle_record_json(handle)
    if handlerecord_json is None:
        msg = 'Cannot modify unexisting handle'
        raise HandleNotFoundException(handle=handle, msg=msg)
    list_of_entries = handlerecord_json['values']
    # HS_ADMIN may only be touched when explicitly enabled on the client.
    if 'HS_ADMIN' in kvpairs.keys() and not self.__modify_HS_ADMIN:
        msg = 'You may not modify HS_ADMIN'
        raise IllegalOperationException(msg=msg, operation='modifying HS_ADMIN', handle=handle)
    nothingchanged = True
    new_list_of_entries = []
    list_of_old_and_new_entries = list_of_entries[:]
    keys = kvpairs.keys()
    for key, newval in kvpairs.items():
        # Change existing entry:
        changed = False
        for i in xrange(len(list_of_entries)):
            if list_of_entries[i]['type'] == key:
                if not changed:
                    list_of_entries[i]['data'] = newval
                    # Bug fix: pop with a default so a missing 'timestamp'
                    # does not raise KeyError. The old code popped twice for
                    # HS_ADMIN entries, so the second pop always raised.
                    list_of_entries[i].pop('timestamp', None)  # will be ignored anyway
                    if key == 'HS_ADMIN':
                        newval['permissions'] = self.__HS_ADMIN_permissions
                        list_of_entries[i]['data'] = {
                            'format': 'admin',
                            'value': newval
                        }
                        LOGGER.info('Modified' + ' "HS_ADMIN" of handle ' + handle)
                    changed = True
                    nothingchanged = False
                    new_list_of_entries.append(list_of_entries[i])
                    list_of_old_and_new_entries.append(list_of_entries[i])
                else:
                    msg = 'There is several entries of type "' + key + '".' + \
                          ' This can lead to unexpected behaviour.' + \
                          ' Please clean up before modifying the record.'
                    raise BrokenHandleRecordException(handle=handle, msg=msg)
        # If the entry doesn't exist yet, add it:
        if not changed:
            if add_if_not_exist:
                LOGGER.debug('modify_handle_value: Adding entry "' + key + '"' + ' to handle ' + handle)
                index = self.__make_another_index(list_of_old_and_new_entries)
                entry_to_add = self.__create_entry(key, newval, index, ttl)
                new_list_of_entries.append(entry_to_add)
                list_of_old_and_new_entries.append(entry_to_add)
                changed = True
                nothingchanged = False
    # Collect the indices of all modified or added entries.
    indices = []
    for i in xrange(len(new_list_of_entries)):
        indices.append(new_list_of_entries[i]['index'])
    # Write the updated entries back (overwriting by index).
    if nothingchanged:
        LOGGER.debug('modify_handle_value: There was no entries ' + str(kvpairs.keys()) + ' to be modified (handle ' + handle + ').' + ' To add them, set add_if_not_exist = True')
    else:
        op = 'modifying handle values'
        resp, put_payload = self.__send_handle_put_request(handle, new_list_of_entries, indices=indices, overwrite=True, op=op)
        if hsresponses.handle_success(resp):
            LOGGER.info('Handle modified: ' + handle)
        else:
            msg = 'Values: ' + str(kvpairs)
            raise GenericHandleError(operation=op, handle=handle, response=resp, msg=msg, payload=put_payload)
def get_atlas_zonefile_data(zonefile_hash, zonefile_dir, check=True):
    """Get a serialized cached zonefile from local disk.

    Both the current and the legacy on-disk layouts are consulted.
    Return the zonefile data, or None if not found (or when verification
    against the hash fails while ``check`` is True).
    """
    candidate_paths = [
        atlas_zonefile_path(zonefile_dir, zonefile_hash),
        atlas_zonefile_path_legacy(zonefile_dir, zonefile_hash),
    ]
    for path in candidate_paths:
        if not os.path.exists(path):
            continue
        # Verify against the expected hash only when requested.
        expected_hash = zonefile_hash if check else None
        data = _read_atlas_zonefile(path, expected_hash)
        if data:
            return data
    return None
def get_robust_background_threshold(image, mask=None, lower_outlier_fraction=0.05, upper_outlier_fraction=0.05, deviations_above_average=2.0, average_fn=np.mean, variance_fn=np.std):
    """Calculate a threshold from the trimmed mean and spread of an image.

    The pixels are sorted, the dimmest ``lower_outlier_fraction`` and the
    brightest ``upper_outlier_fraction`` are discarded, and the threshold is
    ``average_fn(rest) + deviations_above_average * variance_fn(rest)``.

    image - the image to threshold
    mask - mask of pixels to consider (default = all pixels)
    lower_outlier_fraction - fraction of the dimmest pixels to drop from the
        threshold calculation (default = .05)
    upper_outlier_fraction - fraction of the brightest pixels to drop
        (default = .05)
    deviations_above_average - multiplier for the spread added to the
        average to get the final threshold (default = 2)
    average_fn - function used to calculate the average intensity, e.g.
        np.mean, np.median or a mode function (default = np.mean)
    variance_fn - function used to calculate the amount of variance
        (default = np.std)

    Returns 0 for images with fewer than 3 considered pixels, and the
    constant value itself for perfectly flat images.
    """
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    # Bug fix: np.product is deprecated and removed in NumPy 2.0;
    # ndarray.size is the exact equivalent here.
    n_pixels = cropped_image.size
    if n_pixels < 3:
        return 0
    cropped_image.sort()
    if cropped_image[0] == cropped_image[-1]:
        # Flat image: no spread, threshold at the single intensity value.
        return cropped_image[0]
    low_chop = int(round(n_pixels * lower_outlier_fraction))
    hi_chop = n_pixels - int(round(n_pixels * upper_outlier_fraction))
    # NOTE(review): when low_chop rounds to 0 the upper trim is skipped
    # entirely (hi_chop is ignored) — preserved as-is, but confirm intent.
    im = cropped_image if low_chop == 0 else cropped_image[low_chop:hi_chop]
    return average_fn(im) + variance_fn(im) * deviations_above_average
def start_tensorboard(args):
    '''Launch tensorboard for the trial(s) of a running experiment.'''
    experiment_id = check_experiment_id(args)
    experiment_config = Experiments()
    experiment_dict = experiment_config.get_all_experiments()
    nni_config = Config(experiment_dict[experiment_id]['fileName'])
    rest_port = nni_config.get_config('restServerPort')
    rest_pid = nni_config.get_config('restServerPid')
    if not detect_process(rest_pid):
        print_error('Experiment is not running...')
        return
    running, response = check_rest_server_quick(rest_port)
    trial_content = None
    if not running:
        print_error('Restful server is not running...')
    else:
        # Fetch the trial job list from the REST server.
        response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT)
        if response and check_response(response):
            trial_content = json.loads(response.text)
        else:
            print_error('List trial failed...')
    if not trial_content:
        print_error('No trial information!')
        exit(1)
    if len(trial_content) > 1 and not args.trial_id:
        print_error('There are multiple trials, please set trial id!')
        exit(1)
    # Stage tensorboard log paths under a per-experiment temp directory.
    experiment_id = nni_config.get_config('experimentId')
    temp_nni_path = os.path.join(tempfile.gettempdir(), 'nni', experiment_id)
    os.makedirs(temp_nni_path, exist_ok=True)
    path_list = get_path_list(args, nni_config, trial_content, temp_nni_path)
    start_tensorboard_process(args, nni_config, path_list, temp_nni_path)
def create(vm_):
    '''Create a single Packet VM, optionally with attached block storage.'''
    name = vm_['name']
    if not is_profile_configured(vm_):
        return False
    # Announce that creation is starting before any remote work happens.
    __utils__['cloud.fire_event'](
        'event', 'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    log.info('Creating Packet VM %s', name)
    manager = packet.Manager(auth_token=vm_['token'])
    __utils__['cloud.fire_event'](
        'event', 'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    device = manager.create_device(project_id=vm_['project_id'],
                                   hostname=name,
                                   plan=vm_['size'],
                                   facility=vm_['location'],
                                   operating_system=vm_['image'])
    device = _wait_for_status('device', device.id, status="active")
    if device.state != "active":
        log.error('Error creating %s on PACKET\n\n'
                  'while waiting for initial ready status',
                  name, exc_info_on_loglevel=logging.DEBUG)
    # Pick the address the bootstrap process should ssh to, based on the
    # configured ssh_interface (public unless 'private_ips' was requested).
    want_public = _get_ssh_interface(vm_) != 'private_ips'
    for ip in device.ip_addresses:
        if ip['public'] is want_public:
            vm_['ssh_host'] = ip['address']
            break
    key_filename = config.get_cloud_config_value('private_key', vm_, __opts__, search_global=False, default=None)
    vm_['key_filename'] = key_filename
    vm_['private_key'] = key_filename
    # Bootstrap the new node over ssh.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update({'device': device.__dict__})
    if vm_.get('storage_tier') and vm_.get('storage_size'):
        # Create a storage volume and attach it to the device.
        volume = manager.create_volume(vm_['project_id'],
                                       "{0}_storage".format(name),
                                       vm_.get('storage_tier'),
                                       vm_.get('storage_size'),
                                       vm_.get('location'),
                                       snapshot_count=vm_.get('storage_snapshot_count', 0),
                                       snapshot_frequency=vm_.get('storage_snapshot_frequency'))
        volume.attach(device.id)
        volume = _wait_for_status('volume', volume.id, status="active")
        if volume.state != "active":
            log.error('Error creating %s on PACKET\n\n'
                      'while waiting for initial ready status',
                      name, exc_info_on_loglevel=logging.DEBUG)
        ret.update({'volume': volume.__dict__})
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(device.__dict__))
    __utils__['cloud.fire_event'](
        'event', 'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    return ret
def convert_value(value, source_currency, target_currency):
    """Convert ``value`` from one currency to another using exchange rates.

    :param value: the amount to convert
    :param source_currency: source ISO-4217 currency code
    :param target_currency: target ISO-4217 currency code
    :returns: the converted amount
    """
    # Identity conversion needs no rate lookup.
    if source_currency == target_currency:
        return value
    return value * get_rate(source_currency, target_currency)
def eye(n, d=None):
    """Create an identity TT-matrix.

    ``n`` is either the full list of mode sizes, or a single size repeated
    ``d`` times when ``d`` is given.
    """
    ident = _matrix.matrix()
    ident.tt = _vector.vector()
    if d is None:
        modes = _np.asanyarray(n, dtype=_np.int32)
        ident.tt.d = modes.size
    else:
        modes = _np.asanyarray([n] * d, dtype=_np.int32)
        ident.tt.d = d
    ident.n = modes.copy()
    ident.m = modes.copy()
    ident.tt.n = (ident.n) * (ident.m)
    # All TT ranks of an identity matrix are 1.
    ident.tt.r = _np.ones((ident.tt.d + 1,), dtype=_np.int32)
    ident.tt.get_ps()
    ident.tt.alloc_core()
    # Each core is the flattened n_i x n_i identity block.
    for i in xrange(ident.tt.d):
        ident.tt.core[ident.tt.ps[i] - 1:ident.tt.ps[i + 1] - 1] = _np.eye(ident.n[i]).flatten()
    return ident
def start_app(self, bundle_id):
    '''Start an app on the device by its bundle id.

    Args:
        - bundle_id (string): e.g. com.netease.my
    Returns:
        idevicedebug subprocess instance (running in background)
    '''
    idevicedebug = must_look_exec('idevicedebug')
    popen_kwargs = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}
    # NOTE(review): close_fds is skipped on darwin — presumably for
    # compatibility with piped stdio on older macOS pythons; confirm.
    if sys.platform != 'darwin':
        popen_kwargs['close_fds'] = True
    cmd = [idevicedebug, "--udid", self.udid, 'run', bundle_id]
    return subprocess.Popen(cmd, **popen_kwargs)
def text(self, paths, wholetext=False, lineSep=None):
    """Loads text files and returns a :class:`DataFrame` whose schema starts
    with a string column named "value", followed by partitioned columns if
    there are any.

    The text files must be encoded as UTF-8. By default each line in a text
    file becomes one row; with ``wholetext`` each whole file becomes a
    single row.

    :param paths: string, or list of strings, for input path(s).
    :param wholetext: if true, read each file from input path(s) as a single row.
    :param lineSep: defines the line separator that should be used for
        parsing; ``None`` covers all of ``\\r``, ``\\r\\n`` and ``\\n``.

    >>> df = spark.read.text('python/test_support/sql/text-test.txt')
    >>> df.collect()
    [Row(value=u'hello'), Row(value=u'this')]
    >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True)
    >>> df.collect()
    [Row(value=u'hello\\nthis')]
    """
    self._set_opts(wholetext=wholetext, lineSep=lineSep)
    # Normalize a single path into a one-element list.
    if isinstance(paths, basestring):
        paths = [paths]
    java_paths = self._spark._sc._jvm.PythonUtils.toSeq(paths)
    return self._df(self._jreader.text(java_paths))
def validate_header(cls, header: BlockHeader, parent_header: BlockHeader, check_seal: bool = True) -> None:
    """Validate ``header`` against its parent header.

    :raise eth.exceptions.ValidationError: if the header is not valid
    """
    if parent_header is None:
        # A genesis header must instead be compared with the canonical
        # header at block number 0; without a parent we cannot validate.
        raise ValidationError("Must have access to parent header to validate current header")
    validate_length_lte(header.extra_data, 32, title="BlockHeader.extra_data")
    validate_gas_limit(header.gas_limit, parent_header.gas_limit)
    if header.block_number != parent_header.block_number + 1:
        raise ValidationError(
            "Blocks must be numbered consecutively. Block number #{} has parent #{}".format(
                header.block_number,
                parent_header.block_number,
            )
        )
    # Timestamps must be strictly increasing along the chain.
    if header.timestamp <= parent_header.timestamp:
        raise ValidationError(
            "timestamp must be strictly later than parent, but is {} seconds before.\n"
            "- child : {}\n"
            "- parent : {}. ".format(
                parent_header.timestamp - header.timestamp,
                header.timestamp,
                parent_header.timestamp,
            )
        )
    if check_seal:
        cls.validate_seal(header)
def _decode16(self, offset):
    """Decode a UTF-16 string at the given offset.

    :param offset: offset of the string inside the data
    :return: str
    """
    str_len, skip = self._decode_length(offset, 2)
    offset += skip
    # str_len counts UTF-16 code units, i.e. 2 bytes each.
    n_bytes = str_len * 2
    raw = self.m_charbuff[offset:offset + n_bytes]
    terminator = self.m_charbuff[offset + n_bytes:offset + n_bytes + 2]
    assert terminator == b"\x00\x00", \
        "UTF-16 String is not null terminated! At offset={}".format(offset)
    return self._decode_bytes(raw, 'utf-16', str_len)
def find_video_by_url(self, video_url):
    """Look up basic video info by its URL.

    doc: http://open.youku.com/docs/doc?id=44
    """
    response = requests.get(
        'https://openapi.youku.com/v2/videos/show_basic.json',
        params={'client_id': self.client_id, 'video_url': video_url})
    check_error(response)
    return response.json()
def parse_name_altree(record):
    """Parse a NAME structure assuming the ALTREE dialect.

    In ALTREE a maiden name (if present) is saved in a SURN sub-record and
    is also appended to the family name in parentheses, e.g.::

        1 NAME Jane /Smith (Ivanova)/
        2 GIVN Jane
        2 SURN Ivanova

    Because a family name can itself contain parentheses we rely on the
    SURN record rather than guessing from the family name alone. ALTREE
    also writes '?' for empty names, which is undone here.

    :param record: NAME record
    :return: tuple with 3 or 4 elements; the first three are the same as
        returned from :py:meth:`split_name`, the optional fourth element
        denotes the maiden name.
    """
    parts = split_name(record.value)
    if parts[1] == '?':
        parts = (parts[0], '', parts[2])
    maiden = record.sub_tag_value("SURN")
    if maiden:
        # Strip the trailing "(maiden)" from the family name.
        suffix = '(' + maiden + ')'
        family = parts[1]
        if family.endswith(suffix):
            family = family[:-len(suffix)].rstrip()
        if family == '?':
            family = ''
        parts = (parts[0], family, parts[2], maiden)
    return parts
def find_multiplicity(knot, knot_vector, **kwargs):
    """Find knot multiplicity over the knot vector.

    Keyword Arguments:
        * ``tol``: tolerance (delta) value for equality checking

    :param knot: knot or parameter, :math:`u`
    :type knot: float
    :param knot_vector: knot vector, :math:`U`
    :type knot_vector: list, tuple
    :return: knot multiplicity, :math:`s`
    :rtype: int
    """
    tol = kwargs.get('tol', 10e-8)
    # Count knot-vector entries equal to the knot within tolerance.
    return sum(1 for kv in knot_vector if abs(knot - kv) <= tol)
def find_model_by_table_name(name):
    """Return the declarative model mapped to table ``name``, or None."""
    matches = (
        candidate for candidate in ModelBase._decl_class_registry.values()
        if hasattr(candidate, '__table__') and candidate.__table__.fullname == name
    )
    return next(matches, None)
async def container(self, container=None, container_type=None, params=None):
    """Load or dump a container, depending on the archiver direction.

    :return: the (de)serialized container
    """
    # Types that implement blob_serialize handle their own framing.
    if hasattr(container_type, 'blob_serialize'):
        if container is None:
            container = container_type()
        return await container.blob_serialize(self, elem=container, elem_type=container_type, params=params)
    # Container entry version + container
    if self.writing:
        return await self.container_dump(container, container_type, params)
    return await self.container_load(container_type, params=params, container=container)
def from_fields(cls, **kwargs):
    '''Create an `Atom` instance from a set of fields; a slightly faster
    way to initialize an Atom than the regular constructor.

    **Example**

    >>> Atom.from_fields(type='Ar',
                         r_array=np.array([0.0, 0.0, 0.0]),
                         mass=39.948,
                         export={})
    '''
    # Bypass __init__ and fill only the fields that were supplied.
    instance = cls.__new__(cls)
    for field_name, field in instance.__fields__.items():
        if field_name in kwargs:
            field.value = kwargs[field_name]
    return instance
def _predicate ( self , i ) :
"""Given a valid datetime or slace , return the predicate portion
of the SQL query , a boolean indicating whether multiple items are
expected from the result , and a dictionary of parameters for the query""" | if isinstance ( i , slice ) :
if i . step is not None :
raise TypeError ( "Slice step not permitted" )
if ( ( i . start is not None and not isinstance ( i . start , datetime ) ) or ( i . stop is not None and not isinstance ( i . stop , datetime ) ) ) :
raise TypeError ( "Slice indices must be {} or None" . format ( datetime ) )
if i . start is not None and i . stop is not None :
if i . start > i . stop :
raise ValueError ( "Start index is greater than the End index" )
else : # Substitution of the key coloumn , but the
# parameters themselves will be substituted by sqlite3
predicate = "WHERE {} BETWEEN :start AND :stop" . format ( self . _keycol )
elif i . start is not None : # i . stop will also be None
predicate = "WHERE {} >= :start" . format ( self . _keycol )
elif i . stop is not None : # i . start will also be None
predicate = "WHERE {} <= :stop" . format ( self . _keycol )
else : # both are None , so equivelent to wanting everything
predicate = ""
multi = True
pred = { "start" : i . start , "stop" : i . stop }
elif isinstance ( i , datetime ) : # Substitution of the key coloumn , but the
# parameters themselves will be substituted by sqlite3
predicate = "WHERE {} = :key" . format ( self . _keycol )
multi = False
pred = { "key" : i }
else : # not a slice or a datetime object
raise TypeError ( "List indices must be {}" . format ( datetime ) )
# predicate is the end of the query string .
# multi is a boolean indicating whether the result should be iterable
# or not . pred is a dict of the parameters for substitution
return ( predicate , multi , pred ) |
def combination_step(self):
    """Build the next update by a momentum combination of previous updates
    (standard FISTA :cite:`beck-2009-fast`).
    """
    # Momentum parameter update: t_{k+1} = (1 + sqrt(1 + 4 t_k^2)) / 2
    t_prev = self.t
    self.t = 0.5 * float(1. + np.sqrt(1. + 4. * t_prev ** 2))
    # Extrapolated point: Y = X + ((t_k - 1) / t_{k+1}) * (X - X_prev)
    if not self.opt['FastSolve']:
        self.Yprv = self.Y.copy()
    self.Y = self.X + ((t_prev - 1.) / self.t) * (self.X - self.Xprv)
def parse_stack_refs(stack_references: List[str]) -> List[str]:
    '''Resolve a mix of stack names and Senza definition file paths into
    stack names. Paths to existing definition files are replaced by the
    StackName declared inside them; other items pass through unchanged.'''
    resolved = []
    pending = list(stack_references)
    pending.reverse()
    while pending:
        item = pending.pop()
        # The item might be a path to a definition file.
        candidate = os.path.abspath(item)
        if os.path.exists(candidate) and os.path.isfile(candidate):
            try:
                with open(candidate) as fd:
                    definition = yaml.safe_load(fd)
                item = definition['SenzaInfo']['StackName']
            except (KeyError, TypeError, YAMLError):
                raise click.UsageError('Invalid senza definition {}'.format(item))
        resolved.append(item)
    return resolved
def format_raw_data(self, tpe, raw_data):
    """Dispatch on the data type to turn raw input into a dictionary
    usable by the mapper."""
    if tpe == 'text':
        return self.parse_text_to_dict(raw_data)
    if tpe == 'file':
        return self.parse_file_to_dict(raw_data)
    # Unknown type: wrap the raw payload with an error marker.
    return {'ERROR': 'unknown data type', 'data': [raw_data]}
def tryload(self, cfgstr=None):
    """Like ``load``, but returns None when the cached value cannot be read."""
    if cfgstr is None:
        cfgstr = self.cfgstr
    if cfgstr is None:
        import warnings
        warnings.warn('No cfgstr given in Cacher constructor or call')
        cfgstr = ''
    if not self.enabled:
        if self.verbose > 0:
            print('[cache] ... %s Cacher disabled' % (self.fname))
        return None
    try:
        if self.verbose > 1:
            print('[cache] tryload fname=%s' % (self.fname,))
        return self.load(cfgstr)
    except IOError:
        # Cache miss: fall through and implicitly return None.
        if self.verbose > 0:
            print('[cache] ... %s Cacher miss' % (self.fname))
def _read_ascii(self):
    """Read binned data and metadata from the ASCII file at ``self.path``."""
    # NOTE: ascii files store binned data using C-like ordering;
    # dimensions are iterated like x, y, z (so z changes fastest).
    header_lines = []
    with open(self.path) as f:
        for line in f:
            if line.startswith('#'):
                header_lines.append(line)
    self._read_header(''.join(header_lines))
    raw = np.loadtxt(self.path, delimiter=',', unpack=True, ndmin=1)
    # Separate the columns by statistic, skipping the leading bin columns.
    n_dim = len(self.dimensions)
    by_stat = {stat: raw[n_dim + i] for i, stat in enumerate(self.statistics)}
    # Reshape each statistic's column according to the binning.
    shape = [dim.n_bins for dim in self.dimensions]
    self.data = {k: v.reshape(shape) for k, v in by_stat.items()}
def cos_distance(t1, t2, epsilon=1e-12, name=None):
    """Cos distance between t1 and t2, with the normalizer clamped so the
    gradient of the square root stays finite.

    Args:
      t1: A tensor.
      t2: A tensor that can be multiplied by t1.
      epsilon: A lower bound value for the distance; the square root is used
        as the normalizer.
      name: Optional name for this op.

    Returns:
      The cos distance between t1 and t2.
    """
    with tf.name_scope(name, 'cos_distance', [t1, t2]) as scope:
        t1 = tf.convert_to_tensor(t1, name='t1')
        t2 = tf.convert_to_tensor(t2, name='t2')
        # 1 / sqrt(|t1|^2 * |t2|^2), clamped away from zero by epsilon.
        inv_norm = tf.rsqrt(tf.maximum(length_squared(t1) * length_squared(t2), epsilon))
        return tf.subtract(1.0, dot_product(t1, t2) * inv_norm, name=scope)
def createRole(self, name, description="", privileges=None):
    """Creates a new role in the portal and applies privileges to it.

    Args:
        name (str): The name of the new role.
        description (str): The description of the new role. Defaults to ``""``.
        privileges (str): A comma delimited list of privileges to apply to
            the new role. Defaults to ``None``.
    Returns:
        None in every path; outcomes are reported via ``print``. (The
        underlying calls are
        :py:func:`arcrest.manageorg._portals.Portal.createRole` and
        :py:func:`arcrest.manageorg._portals.Roles.setPrivileges`.)
    Raises:
        common.ArcRestHelperError: on unexpected failures outside the
        inner role-creation step.
    """
    admin = None
    portal = None
    setPrivResults = None
    roleID = None
    createResults = None
    try:
        admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
        portal = admin.portals.portalSelf
        try:
            roleID = portal.roles.findRoleID(name)
            if roleID is None:
                createResults = portal.createRole(name=name, description=description)
                if 'success' in createResults and createResults['success'] == True:
                    setPrivResults = portal.roles.setPrivileges(createResults['id'], privileges)
                    if 'success' in setPrivResults:
                        print("%s role created" % name)
                    else:
                        print(setPrivResults)
                else:
                    print(createResults)
            else:
                print("'%s' role already exists" % name)
        except Exception as e:
            print(e)
            return None
    except:
        line, filename, synerror = trace()
        # Bug fix: the error report previously named "createGroup",
        # which misattributed failures to the wrong helper.
        raise common.ArcRestHelperError({
            "function": "createRole",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        admin = None
        portal = None
        setPrivResults = None
        roleID = None
        createResults = None
        del admin
        del portal
        del setPrivResults
        del roleID
        del createResults
        gc.collect()
def map(self, timeout=None, max_concurrency=64, auto_batch=None):
    """Returns a context manager for a map operation. This runs multiple
    queries in parallel and then joins in the end to collect all results.

    In the context manager the client available is a
    :class:`MappingClient`. Example usage::

        results = {}
        with cluster.map() as client:
            for key in keys_to_fetch:
                results[key] = client.get(key)
        for key, promise in results.iteritems():
            print '%s => %s' % (key, promise.value)
    """
    mapping_client = self.get_mapping_client(max_concurrency, auto_batch)
    return MapManager(mapping_client, timeout=timeout)
def _sign_data ( secret , data ) :
"""Sign data .
: param data : the string to sign
: return : string base64 encoding of the HMAC - SHA1 hash of the data parameter using { @ code secretKey } as cipher key .""" | sha1_hash = hmac . new ( secret . encode ( ) , data . encode ( ) , sha1 )
return binascii . b2a_base64 ( sha1_hash . digest ( ) ) [ : - 1 ] . decode ( 'utf8' ) |
def phase2t(self, psi):
    """Given phase -pi < psi <= pi, return the t value such that
    exp(1j*psi) = self.u1transform(self.point(t)).
    """
    def _deg(rads, domain_lower_limit):
        # Convert radians to degrees in [domain_lower_limit,
        # domain_lower_limit + 360).
        degs = degrees(rads % (2 * pi))
        k = domain_lower_limit // 360
        degs += k * 360
        if degs < domain_lower_limit:
            degs += 360
        return degs
    # Cleanup: the original if/else on the sign of self.delta executed
    # identical code in both branches, so it is collapsed here.
    # NOTE(review): some upstream implementations use a different lower
    # limit for delta < 0 (e.g. theta + delta); confirm intent if negative
    # sweeps behave unexpectedly.
    degs = _deg(psi, domain_lower_limit=self.theta)
    return (degs - self.theta) / self.delta
def pprint_arg(vnames, value):
    """Pretty-print paired argument names and values as 'name=value;...'.

    :param vnames: iterable of argument names
    :param value: iterable of corresponding values
    :return: concatenated 'name=value;' string (empty when no pairs)
    """
    return ''.join('%s=%s;' % (name, str(v)) for name, v in zip(vnames, value))
def of(cls, key: SearchKey, params: SearchParams) -> 'SearchCriteria':
    """Factory method for producing a search criteria sub-class from a
    search key.

    Args:
        key: The search key defining the criteria.
        params: The parameters that may be used by some searches.

    Raises:
        SearchNotAllowed: The key is disabled or not recognized.
    """
    key_name = key.value
    if key_name in params.disabled:
        raise SearchNotAllowed(key_name)
    elif key.inverse:
        # A NOT-prefixed key wraps the criteria built from the plain key.
        return InverseSearchCriteria(key.not_inverse, params)
    elif key_name == b'SEQSET':
        return SequenceSetSearchCriteria(key.filter_sequence_set, params)
    elif key_name == b'KEYSET':
        return SearchCriteriaSet(key.filter_key_set, params)
    elif key_name == b'ALL':
        return AllSearchCriteria(params)
    elif key_name == b'OR':
        left_key, right_key = key.filter_key_or
        return OrSearchCriteria(left_key, right_key, params)
    # Flag-based keys: the boolean selects presence (True) or absence
    # (False) of the system flag on the message.
    elif key_name == b'ANSWERED':
        return HasFlagSearchCriteria(Answered, True, params)
    elif key_name == b'UNANSWERED':
        return HasFlagSearchCriteria(Answered, False, params)
    elif key_name == b'DELETED':
        return HasFlagSearchCriteria(Deleted, True, params)
    elif key_name == b'UNDELETED':
        return HasFlagSearchCriteria(Deleted, False, params)
    elif key_name == b'DRAFT':
        return HasFlagSearchCriteria(Draft, True, params)
    elif key_name == b'UNDRAFT':
        return HasFlagSearchCriteria(Draft, False, params)
    elif key_name == b'FLAGGED':
        return HasFlagSearchCriteria(Flagged, True, params)
    elif key_name == b'UNFLAGGED':
        return HasFlagSearchCriteria(Flagged, False, params)
    elif key_name == b'RECENT':
        return HasFlagSearchCriteria(Recent, True, params)
    elif key_name == b'OLD':
        return HasFlagSearchCriteria(Recent, False, params)
    elif key_name == b'SEEN':
        return HasFlagSearchCriteria(Seen, True, params)
    elif key_name == b'UNSEEN':
        return HasFlagSearchCriteria(Seen, False, params)
    # KEYWORD/UNKEYWORD test an arbitrary (user-defined) flag.
    elif key_name == b'KEYWORD':
        return HasFlagSearchCriteria(key.filter_flag, True, params)
    elif key_name == b'UNKEYWORD':
        return HasFlagSearchCriteria(key.filter_flag, False, params)
    elif key_name == b'NEW':
        return NewSearchCriteria(params)
    # Date keys: BEFORE/ON/SINCE use the internal date,
    # SENT* use the Date: header.
    elif key_name == b'BEFORE':
        return DateSearchCriteria(key.filter_datetime, '<', params)
    elif key_name == b'ON':
        return DateSearchCriteria(key.filter_datetime, '=', params)
    elif key_name == b'SINCE':
        return DateSearchCriteria(key.filter_datetime, '>=', params)
    elif key_name == b'SENTBEFORE':
        return HeaderDateSearchCriteria(key.filter_datetime, '<', params)
    elif key_name == b'SENTON':
        return HeaderDateSearchCriteria(key.filter_datetime, '=', params)
    elif key_name == b'SENTSINCE':
        return HeaderDateSearchCriteria(key.filter_datetime, '>=', params)
    # Size comparisons against the message size in octets.
    elif key_name == b'SMALLER':
        return SizeSearchCriteria(key.filter_int, '<', params)
    elif key_name == b'LARGER':
        return SizeSearchCriteria(key.filter_int, '>', params)
    # Text searches over envelope fields, arbitrary headers, or body text.
    elif key_name in (b'BCC', b'CC', b'FROM', b'SUBJECT', b'TO'):
        return EnvelopeSearchCriteria(key_name, key.filter_str, params)
    elif key_name == b'HEADER':
        name, value = key.filter_header
        return HeaderSearchCriteria(name, value, params)
    elif key_name in (b'BODY', b'TEXT'):
        return BodySearchCriteria(key.filter_str, params)
    # Unknown keys are rejected the same way as disabled ones.
    raise SearchNotAllowed(key_name)
def custom_modify_user_view(request, targetUsername):
    '''The page to modify a user.

    Renders (and, on POST, processes) three independent forms for the
    target user: profile update, admin password change, and deletion.
    Which form is bound is decided by which submit key is present in
    request.POST.
    '''
    if targetUsername == ANONYMOUS_USERNAME:
        # Editing the shared anonymous account is allowed but warned about.
        messages.add_message(request, messages.WARNING, MESSAGES['ANONYMOUS_EDIT'])
    page_name = "Admin - Modify User"
    targetUser = get_object_or_404(User, username=targetUsername)
    targetProfile = get_object_or_404(UserProfile, user=targetUser)
    # Each form is bound (given POST data) only when its corresponding
    # submit key was sent; otherwise it renders unbound.
    update_user_form = UpdateUserForm(request.POST if "update_user_profile" in request.POST else None, instance=targetUser, profile=UserProfile.objects.get(user=request.user), prefix="user", )
    update_profile_form = FullProfileForm(request.POST if "update_user_profile" in request.POST else None, instance=targetProfile, prefix="profile", )
    change_user_password_form = AdminPasswordChangeForm(targetUser, request.POST if "change_user_password" in request.POST else None, )
    delete_user_form = DeleteUserForm(request.POST if "delete_user" in request.POST else None, user=targetUser, request=request, )
    # Post/redirect/get: each successful form save redirects back.
    if update_user_form.is_valid() and update_profile_form.is_valid():
        update_user_form.save()
        update_profile_form.save()
        messages.add_message(request, messages.SUCCESS, MESSAGES['USER_PROFILE_SAVED'].format(username=targetUser.username), )
        return HttpResponseRedirect(reverse('custom_modify_user', kwargs={'targetUsername': targetUsername}))
    if change_user_password_form.is_valid():
        change_user_password_form.save()
        messages.add_message(request, messages.SUCCESS, MESSAGES['USER_PW_CHANGED'].format(username=targetUser.username), )
        return HttpResponseRedirect(reverse('custom_modify_user', kwargs={'targetUsername': targetUsername}))
    if delete_user_form.is_valid():
        delete_user_form.save()
        messages.add_message(request, messages.SUCCESS, MESSAGES['USER_DELETED'].format(username=targetUser.username), )
        # The user no longer exists, so go back to the management list.
        return HttpResponseRedirect(reverse("custom_manage_users"))
    template_dict = {'targetUser': targetUser, 'targetProfile': targetProfile, 'page_name': page_name, 'update_user_form': update_user_form, 'update_profile_form': update_profile_form, 'change_user_password_form': change_user_password_form, 'delete_user_form': delete_user_form, }
    # Optional wiki integration: only count revisions when the app exists.
    if "wiki" in settings.INSTALLED_APPS:
        from wiki.models import Revision
        template_dict["revision_count"] = Revision.objects.filter(created_by=targetUser).count()
    # Per-user activity counts shown on the admin page.
    template_dict['thread_count'] = Thread.objects.filter(owner=targetProfile).count()
    template_dict['message_count'] = Message.objects.filter(owner=targetProfile).count()
    template_dict['request_count'] = Request.objects.filter(owner=targetProfile).count()
    template_dict['response_count'] = Response.objects.filter(owner=targetProfile).count()
    template_dict['announcement_count'] = Announcement.objects.filter(incumbent=targetProfile).count()
    template_dict['event_count'] = Event.objects.filter(owner=targetProfile).count()
    return render_to_response('custom_modify_user.html', template_dict, context_instance=RequestContext(request), )
def pid_from_context(_, context):
    """Get PID from marshmallow context."""
    pid = (context or {}).get('pid')
    if pid:
        return pid.pid_value
    # No PID in the context: signal marshmallow's "missing" sentinel.
    return missing
def latex(source: str):
    """Add a mathematical equation in latex math-mode syntax to the display.

    Instead of the traditional backslash escape character, the @ character
    is used to prevent backslash conflicts with Python strings. For
    example, ``\\delta`` would be ``@delta``.

    :param source:
        The string representing the latex equation to be rendered.
    """
    report = _get_report()
    # Make sure the katex renderer gets included exactly once.
    if 'katex' not in report.library_includes:
        report.library_includes.append('katex')
    rendered = render_texts.latex(source.replace('@', '\\'))
    report.append_body(rendered)
    report.stdout_interceptor.write_source('[ADDED] Latex equation\n')
def execute(self, input_data):
    '''Execute the VTQuery worker.

    Queries the VirusTotal file-report API for the sample's md5 and
    returns a dict of the report fields (minus ``self.exclude``) plus
    the five most common scan results; returns a ``not_found`` marker
    when the resource is unknown, or ``vt_error`` when the response is
    not valid JSON (e.g. the per-minute quota was exceeded).
    '''
    md5 = input_data['meta']['md5']
    response = requests.get('http://www.virustotal.com/vtapi/v2/file/report', params={'apikey': self.apikey, 'resource': md5, 'allinfo': 1})
    # Make sure we got a json blob back
    try:
        vt_output = response.json()
    except ValueError:
        return {'vt_error': 'VirusTotal Query Error, no valid response... past per min quota?'}
    # Just pull some of the fields
    output = {field: vt_output[field] for field in vt_output.keys() if field not in self.exclude}
    # An empty report (before file_type is added) means the hash is unknown.
    not_found = not output
    # Add in file_type
    output['file_type'] = input_data['meta']['file_type']
    # Toss back a not found
    if not_found:
        output['not_found'] = True
        return output
    # Organize the scans fields: tally the distinct detection names.
    scan_results = collections.Counter()
    for scan in vt_output['scans'].values():
        if scan.get('result'):
            scan_results[scan['result']] += 1
    output['scan_results'] = scan_results.most_common(5)
    return output
def close(self):
    """Closes the hid device"""
    # Closing twice is a harmless no-op.
    if not self._is_open:
        return
    self._is_open = False
    hidapi.hid_close(self._device)
def map_property_instances(original_part, new_part):
    """Map the id of the original part with the `Part` object of the newly created one.

    Updates the singleton mapping dictionary with the new mapping table values.

    :param original_part: `Part` object to be copied/moved
    :type original_part: :class:`Part`
    :param new_part: `Part` object copied/moved
    :type new_part: :class:`Part`
    :return: None
    """
    # Map the original part with the new one
    get_mapping_dictionary()[original_part.id] = new_part
    # Do the same for each Property of the original part instance: match old
    # and new properties through their shared 'model' id, which must already
    # be present in the mapping dictionary.
    for prop_original in original_part.properties:
        # NOTE(review): assumes exactly one new property matches each model
        # id; an empty match would raise IndexError here.
        get_mapping_dictionary()[prop_original.id] = [prop_new for prop_new in new_part.properties if get_mapping_dictionary()[prop_original._json_data['model']].id == prop_new._json_data['model']][0]
def run(self, **kwargs):
    """Runs this command.

    :param kwargs: Any extra keyword arguments to pass along to `subprocess.Popen`.
    :returns: A handle to the running command.
    :rtype: :class:`subprocess.Popen`
    """
    env, popen_kwargs = self._prepare_env(kwargs)
    logger.debug('Running command {}'.format(self.cmd))
    process = subprocess.Popen(self.cmd, env=env, **popen_kwargs)
    return process
def factors(n):
    """Computes the set of all positive integer factors of the number `n`.

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_alg import *  # NOQA
        >>> import utool as ut
        >>> result = sorted(ut.factors(10))
        >>> print(result)
        [1, 2, 5, 10]

    References:
        http://stackoverflow.com/questions/6800193/finding-all-the-factors
    """
    # Trial-divide only up to sqrt(n); each divisor i pairs with n // i.
    # A set comprehension replaces the original reduce(list.__add__, ...)
    # which built quadratic intermediate lists (and crashed on n == 0).
    return {f for i in range(1, int(n ** 0.5) + 1) if n % i == 0
            for f in (i, n // i)}
def screen(self):
    """PIL Image of current window screen. (the window must be on the top)
    reference: https://msdn.microsoft.com/en-us/library/dd183402(v=vs.85).aspx
    """
    # opengl windows cannot be captured from their own hwnd, so grab the
    # desktop window and copy this window's rectangle out of it.
    hwnd = win32gui.GetDesktopWindow()
    # get window size and offset
    left, top, right, bottom = self.rect
    width, height = right - left, bottom - top
    # the device context of the window
    hdcwin = win32gui.GetWindowDC(hwnd)
    # make a temporary dc
    hdcmem = win32gui.CreateCompatibleDC(hdcwin)
    # make a temporary bitmap in memory, this is a PyHANDLE object
    hbmp = win32gui.CreateCompatibleBitmap(hdcwin, width, height)
    # select bitmap for temporary dc
    win32gui.SelectObject(hdcmem, hbmp)
    # copy bits to temporary dc
    win32gui.BitBlt(hdcmem, 0, 0, width, height, hdcwin, left, top, win32con.SRCCOPY)
    # check the bitmap object infomation
    bmp = win32gui.GetObject(hbmp)
    bi = BITMAPINFOHEADER()
    bi.biSize = ctypes.sizeof(BITMAPINFOHEADER)
    bi.biWidth = bmp.bmWidth
    bi.biHeight = bmp.bmHeight
    bi.biPlanes = bmp.bmPlanes
    bi.biBitCount = bmp.bmBitsPixel
    bi.biCompression = 0  # BI_RGB
    bi.biSizeImage = 0
    bi.biXPelsPerMeter = 0
    bi.biYPelsPerMeter = 0
    bi.biClrUsed = 0
    bi.biClrImportant = 0
    # calculate total size for bits.  BUGFIX: use floor division so the
    # result is an int -- the original '/' produced a float on Python 3,
    # which breaks the ctypes array constructor below.
    pixel = bmp.bmBitsPixel
    size = ((bmp.bmWidth * pixel + pixel - 1) // pixel) * 4 * bmp.bmHeight
    buf = (ctypes.c_char * size)()
    # read bits into buffer
    # NOTE(review): hbmp is passed directly to SelectObject above but via
    # .handle here -- confirm both forms are valid for this pywin32 version.
    windll.gdi32.GetDIBits(hdcmem, hbmp.handle, 0, bmp.bmHeight, buf, ctypes.byref(bi), win32con.DIB_RGB_COLORS)
    # make a PIL Image
    img = Image.frombuffer('RGB', (bmp.bmWidth, bmp.bmHeight), buf, 'raw', 'BGRX', 0, 1)
    img = img.transpose(Image.FLIP_TOP_BOTTOM)
    # cleanup
    win32gui.DeleteObject(hbmp)
    win32gui.DeleteObject(hdcmem)
    win32gui.ReleaseDC(hwnd, hdcwin)
    return img
def find_elements(self, by=By.ID, value=None):
    """Find elements given a By strategy and locator. Prefer the
    find_elements_by_* methods when possible.

    :Usage:
        elements = driver.find_elements(By.CLASS_NAME, 'foo')
    :rtype: list of WebElement
    """
    if self.w3c:
        # The W3C protocol only understands CSS/XPath-style strategies,
        # so rewrite the legacy locators as CSS selectors.
        if by == By.ID:
            by, value = By.CSS_SELECTOR, '[id="%s"]' % value
        elif by == By.TAG_NAME:
            by = By.CSS_SELECTOR
        elif by == By.CLASS_NAME:
            by, value = By.CSS_SELECTOR, ".%s" % value
        elif by == By.NAME:
            by, value = By.CSS_SELECTOR, '[name="%s"]' % value
    # Return empty list if driver returns null
    # See https://github.com/SeleniumHQ/selenium/issues/4555
    found = self.execute(Command.FIND_ELEMENTS, {'using': by, 'value': value})['value']
    return found or []
def hicup_alignment_chart(self):
    """Generate the HiCUP Aligned reads plot"""
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['Unique_Alignments_Read'] = {'color': '#2f7ed8', 'name': 'Unique Alignments'}
    keys['Multiple_Alignments_Read'] = {'color': '#492970', 'name': 'Multiple Alignments'}
    keys['Failed_To_Align_Read'] = {'color': '#0d233a', 'name': 'Failed To Align'}
    keys['Too_Short_To_Map_Read'] = {'color': '#f28f43', 'name': 'Too short to map'}
    # Construct a data structure for the plot - one entry per sample per
    # read, pulling each metric's per-read counterpart (e.g.
    # 'Unique_Alignments_Read' from 'Unique_Alignments_Read_1').
    data = {}
    for s_name in self.hicup_data:
        for read in ('1', '2'):
            entry = {}
            for metric in keys:
                entry[metric] = self.hicup_data[s_name]['{}_{}'.format(metric, read)]
            data['{} Read {}'.format(s_name, read)] = entry
    # Config for the plot
    config = {'id': 'hicup_mapping_stats_plot', 'title': 'HiCUP: Mapping Statistics', 'ylab': '# Reads', 'cpswitch_counts_label': 'Number of Reads'}
    return bargraph.plot(data, keys, config)
def fallbackPackage(package_name, fallback_package_name):
    '''If an import cannot be resolved, import from the fallback package.

    example:
        from [package_name].doesnt_exist import Foo
    results in
        from [fallback_package_name].doesnt_exist import Foo
    '''
    # Register a meta-path finder that redirects failed imports.
    sys.meta_path.append(Finder(package_name, fallback_package_name))
def particles(self, include_ports=False):
    """Return all Particles of the Compound.

    Parameters
    ----------
    include_ports : bool, optional, default=False
        Include port particles

    Yields
    ------
    mb.Compound
        The next Particle in the Compound
    """
    # A leaf compound (no children) is itself a particle; otherwise
    # defer to the internal particle iterator.
    if self.children:
        yield from self._particles(include_ports)
    else:
        yield self
def get_consistent_resource(self):
    """:return a payment that you can trust.
    :rtype Payment
    """
    # Re-fetch the payment from the API so the data is authoritative.
    client = HttpClient()
    url = routes.url(routes.PAYMENT_RESOURCE, resource_id=self.id)
    response, _ = client.get(url)
    return Payment(**response)
def dump(obj, fp):
    """Write a GSFont object to a .glyphs file.

    'fp' should be a (writable) file object.
    """
    glyphs_writer = Writer(fp)
    logger.info("Writing .glyphs file")
    glyphs_writer.write(obj)
def subset_sum(x, R):
    """Subsetsum

    :param x: table of non negative values
    :param R: target value
    :returns bool: True if a subset of x sums to R
    :complexity: O(n*R)
    """
    # reachable[s] is True when some subset of the values seen so far sums to s.
    reachable = [False for _ in range(R + 1)]
    reachable[0] = True  # the empty subset
    for value in x:
        # Iterate downwards so each value is used at most once.
        for total in range(R, value - 1, -1):
            if reachable[total - value]:
                reachable[total] = True
    return reachable[R]
def ensure_ceph_keyring(service, user=None, group=None, relation='ceph', key=None):
    """Ensures a ceph keyring is created for a named service and optionally
    ensures user and group ownership.

    @returns boolean: Flag to indicate whether a key was successfully written
                      to disk based on either relation data or a supplied key
    """
    if not key:
        # No key supplied: take the first key found on any unit of the
        # relation.  BUGFIX: the original only broke out of the inner
        # loop, so a later relation id with no key could overwrite an
        # already-found key with None.
        for rid in relation_ids(relation):
            for unit in related_units(rid):
                key = relation_get('key', rid=rid, unit=unit)
                if key:
                    break
            if key:
                break
    if not key:
        return False
    add_key(service=service, key=key)
    keyring = _keyring_path(service)
    if user and group:
        # NOTE(review): 'user.group' chown syntax is deprecated in favour
        # of 'user:group' but preserved here for compatibility.
        check_call(['chown', '%s.%s' % (user, group), keyring])
    return True
def _iscomment ( line ) :
"""Determine if a line is a comment line . A valid line contains at least three
words , with the first two being integers . Note that Python 2 and 3 deal
with strings differently .""" | if line . isspace ( ) :
return True
elif len ( line . split ( ) ) >= 3 :
try : # python 3 str
if line . split ( ) [ 0 ] . isdecimal ( ) and line . split ( ) [ 1 ] . isdecimal ( ) :
return False
except : # python 2 str
if ( line . decode ( ) . split ( ) [ 0 ] . isdecimal ( ) and line . split ( ) [ 1 ] . decode ( ) . isdecimal ( ) ) :
return False
return True
else :
return True |
def parse_band_request_termination(self, message):
    """Service declares it should be terminated."""
    self.log.debug("Service requests termination")
    self._terminate_service()
    # A restartable service keeps the band running; otherwise shut down.
    if self.restart_service:
        return
    self.shutdown = True
def main(model=None, output_dir=None, n_iter=100):
    """Load the model, set up the pipeline and train the entity recognizer.

    :param model: name/path of an existing spaCy model to start from,
        or None to start from a blank English model.
    :param output_dir: directory to save the trained model to (optional).
    :param n_iter: number of training iterations over TRAIN_DATA.
    """
    if model is not None:
        nlp = spacy.load(model)  # load existing spaCy model
        print("Loaded model '%s'" % model)
    else:
        nlp = spacy.blank("en")  # create blank Language class
        print("Created blank 'en' model")
    # create the built-in pipeline components and add them to the pipeline
    # nlp.create_pipe works for built-ins that are registered with spaCy
    if "ner" not in nlp.pipe_names:
        ner = nlp.create_pipe("ner")
        nlp.add_pipe(ner, last=True)
    # otherwise, get it so we can add labels
    else:
        ner = nlp.get_pipe("ner")
    # add labels found in the training annotations
    for _, annotations in TRAIN_DATA:
        for ent in annotations.get("entities"):
            ner.add_label(ent[2])
    # get names of other pipes to disable them during training
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
    with nlp.disable_pipes(*other_pipes):  # only train NER
        # reset and initialize the weights randomly - but only if we're
        # training a new model
        if model is None:
            nlp.begin_training()
        for itn in range(n_iter):
            random.shuffle(TRAIN_DATA)
            losses = {}
            # batch up the examples using spaCy's minibatch
            batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
            for batch in batches:
                texts, annotations = zip(*batch)
                nlp.update(
                    texts,  # batch of texts
                    annotations,  # batch of annotations
                    drop=0.5,  # dropout - make it harder to memorise data
                    losses=losses,
                )
            print("Losses", losses)
    # test the trained model
    for text, _ in TRAIN_DATA:
        doc = nlp(text)
        print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
        print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
    # save model to output directory
    if output_dir is not None:
        output_dir = Path(output_dir)
        if not output_dir.exists():
            output_dir.mkdir()
        nlp.to_disk(output_dir)
        print("Saved model to", output_dir)
        # test the saved model by loading it back from disk
        print("Loading from", output_dir)
        nlp2 = spacy.load(output_dir)
        for text, _ in TRAIN_DATA:
            doc = nlp2(text)
            print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
            print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
def job_list():
    '''List all jobs.

    :param _limit: maximum number of jobs to show (default 100)
    :type _limit: int
    :param _offset: how many jobs to skip before showing the first one (default 0)
    :type _offset: int
    :param _status: filter jobs by status (complete, error)
    :type _status: string

    Also, you can filter the jobs by their metadata. Use the metadata key
    as parameter key and the value as value.

    :rtype: A list of job ids
    '''
    args = dict((key, value) for key, value in flask.request.args.items())
    limit = args.pop('_limit', 100)
    offset = args.pop('_offset', 0)
    select = sql.select(
        [db.JOBS_TABLE.c.job_id],
        from_obj=[db.JOBS_TABLE.outerjoin(
            db.METADATA_TABLE,
            db.JOBS_TABLE.c.job_id == db.METADATA_TABLE.c.job_id)]
    ).group_by(db.JOBS_TABLE.c.job_id).order_by(
        db.JOBS_TABLE.c.requested_timestamp.desc()
    ).limit(limit).offset(offset)
    status = args.pop('_status', None)
    if status:
        select = select.where(db.JOBS_TABLE.c.status == status)
    # Every remaining query argument is a metadata key/value filter; a job
    # matches only when ALL of them are present, enforced via the HAVING
    # count below.  BUGFIX: the original used the Python-2-only
    # args.iteritems() and unicode() here, which crash on Python 3 and
    # were inconsistent with the .items() call above.
    ors = []
    for key, value in args.items():
        ors.append(sql.and_(db.METADATA_TABLE.c.key == key,
                            db.METADATA_TABLE.c.value == value))
    if ors:
        select = select.where(sql.or_(*ors))
        select = select.having(sql.func.count(db.JOBS_TABLE.c.job_id) == len(ors))
    result = db.ENGINE.execute(select)
    listing = [flask.url_for('job_status', job_id=job_id) for (job_id,) in result]
    return flask.jsonify(list=listing)
def md5(self):
    """Return an md5 hex-digest key string based on chromosome, position,
    ref and alt."""
    # BUGFIX: hashlib.md5 requires bytes on Python 3, so encode the
    # joined key before hashing.
    key = '_'.join([self.CHROM, str(self.POS), self.REF, self.ALT])
    return hashlib.md5(key.encode('utf-8')).hexdigest()
def getBaseSpec(cls):
    """Return the base Spec for TemporalPoolerRegion.

    Doesn't include the pooler parameters.
    """
    # count=0 marks variable-length inputs/outputs; count=1 marks scalars.
    spec = dict(
        description=TemporalPoolerRegion.__doc__,
        singleNodeOnly=True,
        inputs=dict(
            # Active-cell SDR fed in every compute cycle.
            activeCells=dict(description="Active cells", dataType="Real32", count=0, required=True, regionLevel=False, isDefaultInput=True, requireSplitterMap=False),
            predictedActiveCells=dict(description="Predicted Active Cells", dataType="Real32", count=0, required=True, regionLevel=True, isDefaultInput=False, requireSplitterMap=False),
            resetIn=dict(description="""A boolean flag that indicates whether
             or not the input vector received in this compute cycle
             represents the start of a new temporal sequence.""", dataType='Real32', count=1, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False),
            sequenceIdIn=dict(description="Sequence ID", dataType='UInt64', count=1, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False),
        ),
        outputs=dict(
            mostActiveCells=dict(description="Most active cells in the pooler SDR having non-zero activation", dataType="Real32", count=0, regionLevel=True, isDefaultOutput=True),
        ),
        # Pooler-specific parameters are added by subclasses.
        parameters=dict(),
    )
    return spec
def write(self, s: str) -> None:
    """Append *s* to the internal bytes buffer and, when echo is enabled,
    also forward it to the inner stream."""
    if not isinstance(s, str):
        raise TypeError('write() argument must be str, not {}'.format(type(s)))
    should_store = not self.pause_storage
    if should_store:
        encoded = s.encode(encoding=self.encoding, errors=self.errors)
        self.buffer.byte_buf += encoded
    if self.echo:
        self.inner_stream.write(s)
def delete_project(self, project):
    """Deletes all versions of a project. First class, maps to Scrapyd's
    delete project endpoint."""
    endpoint = self._build_url(constants.DELETE_PROJECT_ENDPOINT)
    payload = {'project': project}
    self.client.post(endpoint, data=payload, timeout=self.timeout)
    return True
def _DateToEpoch ( date ) :
"""Converts python datetime to epoch microseconds .""" | tz_zero = datetime . datetime . utcfromtimestamp ( 0 )
diff_sec = int ( ( date - tz_zero ) . total_seconds ( ) )
return diff_sec * 1000000 |
def qteSplitApplet(self, applet: (QtmacsApplet, str) = None, splitHoriz: bool = True, windowObj: QtmacsWindow = None):
    """Reveal ``applet`` by splitting the space occupied by the
    current applet.

    If ``applet`` is already visible then the method does
    nothing. Furthermore, this method does not change the focus,
    ie. the currently active applet will remain active.

    If ``applet`` is **None** then the next invisible applet
    will be shown. If ``windowObj`` is **None** then the
    currently active window will be used.

    The ``applet`` parameter can either be an instance of
    ``QtmacsApplet`` or a string denoting an applet ID. In the
    latter case the ``qteGetAppletHandle`` method is used to fetch
    the respective applet instance.

    |Args|

    * ``applet`` (**QtmacsApplet**, **str**): the applet to reveal.
    * ``splitHoriz`` (**bool**): whether to split horizontally
      or vertically.
    * ``windowObj`` (**QtmacsWindow**): the window in which to
      reveal ``applet``.

    |Returns|

    * **bool**: if **True**, ``applet`` was revealed.

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    # If ``newAppObj`` was specified by its ID (ie. a string) then
    # fetch the associated ``QtmacsApplet`` instance. If
    # ``newAppObj`` is already an instance of ``QtmacsApplet``
    # then use it directly.
    if isinstance(applet, str):
        newAppObj = self.qteGetAppletHandle(applet)
    else:
        newAppObj = applet
    # Use the currently active window if none was specified.
    if windowObj is None:
        windowObj = self.qteActiveWindow()
        if windowObj is None:
            msg = 'Cannot determine the currently active window.'
            self.qteLogger.error(msg, stack_info=True)
            return
    # Convert ``splitHoriz`` to the respective Qt constant.
    if splitHoriz:
        splitOrientation = QtCore.Qt.Horizontal
    else:
        splitOrientation = QtCore.Qt.Vertical
    if newAppObj is None:
        # If no new applet was specified use the next available
        # invisible applet.
        newAppObj = self.qteNextApplet(skipVisible=True, skipInvisible=False)
    else:
        # Do nothing if the new applet is already visible.
        if newAppObj.qteIsVisible():
            return False
    # If we still have not found an applet then there are no
    # invisible applets left to show. Therefore, splitting makes
    # no sense.
    if newAppObj is None:
        self.qteLogger.warning('All applets are already visible.')
        return False
    # If the root splitter is empty then add the new applet and
    # return immediately.
    if windowObj.qteAppletSplitter.count() == 0:
        windowObj.qteAppletSplitter.qteAddWidget(newAppObj)
        windowObj.qteAppletSplitter.setOrientation(splitOrientation)
        return True
    # The root splitter contains at least one widget, if we got
    # this far.
    # Shorthand to last active applet in the current window. Query
    # this applet with qteNextApplet method because
    # self._qteActiveApplet may be a mini applet, and we are only
    # interested in genuine applets.
    curApp = self.qteNextApplet(numSkip=0, windowObj=windowObj)
    # Get a reference to the splitter in which the currently
    # active applet lives. This may be the root splitter, or one
    # of its child splitters.
    split = self._qteFindAppletInSplitter(curApp, windowObj.qteAppletSplitter)
    if split is None:
        msg = 'Active applet <b>{}</b> not in the layout.'
        msg = msg.format(curApp.qteAppletID())
        self.qteLogger.error(msg, stack_info=True)
        return False
    # If 'curApp' lives in the root splitter, and the root
    # splitter contains only a single element, then simply add the
    # new applet as the second element and return.
    if split is windowObj.qteAppletSplitter:
        if split.count() == 1:
            split.qteAddWidget(newAppObj)
            split.setOrientation(splitOrientation)
            return True
    # The splitter (root or not) contains two widgets, if we got
    # this far.
    # Determine the index of the applet inside the splitter.
    curAppIdx = split.indexOf(curApp)
    # Create a new splitter and populate it with 'curApp' and the
    # previously invisible ``newAppObj``. Then insert this new splitter at
    # the position where the old applet was taken from. Note: widgets are
    # inserted with ``qteAddWidget`` (because they are ``QtmacsApplet``
    # instances), whereas splitters are added with ``insertWidget``, NOT
    # ``qteInsertWidget``. The reason is that splitters do not require the
    # extra TLC necessary for applets in terms of how and where to show
    # them.
    newSplit = QtmacsSplitter(splitOrientation, windowObj)
    curApp.setParent(None)
    newSplit.qteAddWidget(curApp)
    newSplit.qteAddWidget(newAppObj)
    split.insertWidget(curAppIdx, newSplit)
    # Adjust the size of two widgets in ``split`` (ie. ``newSplit`` and
    # whatever other widget) to take up equal space. The same adjustment is
    # made for ``newSplit``, but there the ``qteAddWidget`` methods have
    # already taken care of it.
    split.qteAdjustWidgetSizes()
    return True
def set_sampled_topics(self, sampled_topics):
    """Allocate sampled topics to the documents rather than estimate them.

    Automatically generates the term-topic (``tt``) and document-topic
    (``dt``) matrices from the provided assignments.

    :param sampled_topics: integer array of topic assignments; either a
        single sample (1-d) or multiple samples stacked along axis 0 (2-d).
    """
    # BUGFIX: ``np.int`` was removed in NumPy 1.24; check against the
    # abstract integer dtype instead.
    assert np.issubdtype(sampled_topics.dtype, np.integer) and sampled_topics.ndim <= 2
    if sampled_topics.ndim == 1:
        # Promote a single sample to shape (1, n).
        self.sampled_topics = sampled_topics.reshape(1, sampled_topics.shape[0])
    else:
        self.sampled_topics = sampled_topics
    self.samples = self.sampled_topics.shape[0]
    self.tt = self.tt_comp(self.sampled_topics)
    self.dt = self.dt_comp(self.sampled_topics)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.