signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_tensors_inputs(placeholders, tensors, names):
    """Substitute a subset of placeholders with concrete tensors.

    Args:
        placeholders (list[Tensor]): the model's input placeholders.
        tensors (list[Tensor]): list of tf.Tensor to splice in.
        names (list[str]): names matching the given tensors.

    Returns:
        list[Tensor]: inputs to use for the tower function, with the
        corresponding placeholders replaced by tensors.
    """
    assert len(tensors) == len(names), \
        "Input tensors {} and input names {} have different length!".format(
            tensors, names)
    result = copy.copy(placeholders)
    known_names = [p.name for p in placeholders]
    for input_name, replacement in zip(names, tensors):
        tensorname = get_op_tensor_name(input_name)[1]
        try:
            position = known_names.index(tensorname)
        except ValueError:
            logger.error("Name {} is not a model input!".format(tensorname))
            raise
        result[position] = replacement
    return result
def main():
    """NAME
        change_case_magic.py

    DESCRIPTION
        picks out key and converts to upper or lower case

    SYNTAX
        change_case_magic.py [command line options]

    OPTIONS
        -h prints help message and quits
        -WD DIR: specify working directory, default is current directory
        -f FILE: specify input magic format file
        -F FILE: specify output magic format file, default is to overwrite input file
        -keys KEY1:KEY2 specify colon delimited list of keys to convert
        -[U,l]: specify [U]PPER or [l]ower case, default is lower
    """
    dir_path = "./"
    change = 'l'
    # -WD was accepted but missing from the help text above; now documented.
    if '-WD' in sys.argv:
        ind = sys.argv.index('-WD')
        dir_path = sys.argv[ind + 1]
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        magic_file = dir_path + '/' + sys.argv[ind + 1]
    else:
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        out_file = dir_path + '/' + sys.argv[ind + 1]
    else:
        out_file = magic_file
    if '-keys' in sys.argv:
        ind = sys.argv.index('-keys')
        grab_keys = sys.argv[ind + 1].split(":")
    else:
        print(main.__doc__)
        sys.exit()
    if '-U' in sys.argv:
        change = 'U'
    # get data read in
    Data, file_type = pmag.magic_read(magic_file)
    if len(Data) > 0:
        for grab_key in grab_keys:
            for rec in Data:
                if change == 'l':
                    rec[grab_key] = rec[grab_key].lower()
                else:
                    rec[grab_key] = rec[grab_key].upper()
    else:
        print('bad file name')
    pmag.magic_write(out_file, Data, file_type)
def vesting_balance_withdraw(self, vesting_id, amount=None, account=None, **kwargs):
    """Withdraw vesting balance.

    :param str vesting_id: Id of the vesting object
    :param bitshares.amount.Amount amount: Amount to withdraw
        ("all" if not provided)
    :param str account: (optional) the account to allow access to
        (defaults to ``default_account``)
    """
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, blockchain_instance=self)
    if not amount:
        # Withdraw everything that is currently claimable.
        amount = Vesting(vesting_id, blockchain_instance=self).claimable
    op = operations.Vesting_balance_withdraw(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "vesting_balance": vesting_id,
            "owner": account["id"],
            "amount": {"amount": int(amount), "asset_id": amount["asset"]["id"]},
            "prefix": self.prefix,
        }
    )
    return self.finalizeOp(op, account["name"], "active")
def make_regression(func, n_samples=100, n_features=1, bias=0.0, noise=0.0,
                    random_state=None):
    """Make dataset for a regression problem.

    Examples
    --------
    >>> f = lambda x: 0.5 * x + np.sin(2 * x)
    >>> X, y = make_regression(f, bias=.5, noise=1., random_state=1)
    >>> X.shape
    (100, 1)
    >>> y.shape
    (100,)
    """
    rng = check_random_state(random_state)
    X = rng.randn(n_samples, n_features)
    # Unpack the columns of X and apply the target function.
    y = func(*X.T) + bias
    if noise > 0.0:
        y = y + rng.normal(scale=noise, size=y.shape)
    return X, y
def getargspec ( obj ) :
"""An improved inspect . getargspec .
Has a slightly different return value from the default getargspec .
Returns a tuple of :
required , optional , args , kwargs
list , dict , bool , bool
Required is a list of required named arguments .
Optional is a dictionary mapping optional arguments to defaults .
Args and kwargs are True for the respective unlimited argument type .""" | argnames , varargs , varkw , _defaults = None , None , None , None
if inspect . isfunction ( obj ) or inspect . ismethod ( obj ) :
argnames , varargs , varkw , _defaults = inspect . getargspec ( obj )
elif inspect . isclass ( obj ) :
if inspect . ismethoddescriptor ( obj . __init__ ) :
argnames , varargs , varkw , _defaults = [ ] , False , False , None
else :
argnames , varargs , varkw , _defaults = inspect . getargspec ( obj . __init__ )
elif hasattr ( obj , '__call__' ) :
argnames , varargs , varkw , _defaults = inspect . getargspec ( obj . __call__ )
else :
raise TypeError ( "Object not callable?" )
# Need test case to prove this is even possible .
# if ( argnames , varargs , varkw , defaults ) is ( None , None , None , None ) :
# raise InspectionFailed ( )
if argnames and argnames [ 0 ] == 'self' :
del argnames [ 0 ]
if _defaults is None :
_defaults = [ ]
defaults = dict ( )
else : # Create a mapping dictionary of defaults ; this is slightly more useful .
defaults = dict ( )
_defaults = list ( _defaults )
_defaults . reverse ( )
argnames . reverse ( )
for i , default in enumerate ( _defaults ) :
defaults [ argnames [ i ] ] = default
argnames . reverse ( )
# del argnames [ - len ( _ defaults ) : ]
return argnames , defaults , True if varargs else False , True if varkw else False |
def create_dir(entry, section, domain, output):
    """Create the output directory for the entry if needed.

    Returns the full path ``output/section/domain/<assembly_accession>``,
    creating it when it does not already exist.
    """
    target = os.path.join(output, section, domain, entry['assembly_accession'])
    try:
        os.makedirs(target)
    except OSError as err:
        # A pre-existing directory is fine; anything else is re-raised.
        if not (err.errno == errno.EEXIST and os.path.isdir(target)):
            raise
    return target
def convert(self, targetunits):
    """Set new user unit, for either wavelength or flux.

    This effectively converts the spectrum wavelength or flux to the
    given unit.  Actual data are always kept in internal units
    (Angstrom and ``photlam``) and only converted to user units by
    :meth:`getArrays` during actual computation.  User units are stored
    in ``self.waveunits`` and ``self.fluxunits``.

    Parameters
    ----------
    targetunits : str
        New unit name, as accepted by `~pysynphot.units.Units`.
    """
    new_units = units.Units(targetunits)
    # Flux-like units update fluxunits; everything else is a wave unit.
    attr = 'fluxunits' if new_units.isFlux else 'waveunits'
    setattr(self, attr, new_units)
def clone(self, repo, ref, deps=()):
    """Clone the given url and checkout the specific ref."""
    if os.path.isdir(repo):
        repo = os.path.abspath(repo)

    def clone_strategy(directory):
        git_env = git.no_git_env()

        def run_git(*args):
            cmd_output('git', *args, cwd=directory, env=git_env)

        run_git('init', '.')
        run_git('remote', 'add', 'origin', repo)
        try:
            # Prefer a fast shallow clone; fall back to a full clone
            # when the remote/ref does not support it.
            self._shallow_clone(ref, run_git)
        except CalledProcessError:
            self._complete_clone(ref, run_git)

    return self._new_repo(repo, ref, deps, clone_strategy)
def _custom_response_edit(self, method, url, headers, body, response):
    """Allow a service to edit a response.

    If you want to do this, you probably really want to use
    ``_edit_mock_response`` -- this method will operate on live
    resources.
    """
    if not self.get_implementation().is_mock():
        return
    # Simulate network latency for mock data when configured.
    time.sleep(self.get_setting("MOCKDATA_DELAY", 0.0))
    self._edit_mock_response(method, url, headers, body, response)
def tsp(points, start=0):
    """Greedy nearest-neighbour traversal of a point set.

    Find an ordering of points where each is visited and the next point
    is the closest in euclidean distance (ties broken arbitrarily).
    Assumes every point is visitable from every other point, i.e. the
    travelling salesman problem on a fully connected graph.  It is not a
    MINIMUM traversal; rather it is a "not totally goofy traversal,
    quickly."  On random points this traversal is often ~20x shorter
    than random ordering.

    Parameters
    ----------
    points : (n, dimension) float
        ND points in space
    start : int
        The index of points we should start at

    Returns
    -------
    traversal : (n,) int
        Ordered traversal visiting every point
    distances : (n - 1,) float
        The euclidean distance between points in traversal
    """
    points = np.asanyarray(points, dtype=np.float64)
    if len(points.shape) != 2:
        raise ValueError('points must be (n, dimension)!')
    # start should be an index
    start = int(start)
    # a mask of unvisited points by index
    # (np.bool was removed in NumPy 1.24; use the builtin bool dtype)
    unvisited = np.ones(len(points), dtype=bool)
    unvisited[start] = False
    # traversal of points by index; -1 marks "not yet assigned"
    traversal = np.zeros(len(points), dtype=np.int64) - 1
    traversal[0] = start
    # squared distances are collected and square-rooted once at the end
    distances = np.zeros(len(points) - 1, dtype=np.float64)
    # a mask of indexes in order
    index_mask = np.arange(len(points), dtype=np.int64)
    # distances.sum(axis=1) is called a lot and is slow; a dot product
    # with ones is equivalent and ~2x faster
    sum_ones = np.ones(points.shape[1])
    for i in range(len(points) - 1):
        # which point are we currently on
        current = points[traversal[i]]
        # use dot instead of .sum(axis=1) or np.linalg.norm as it is
        # faster; don't square-root here
        dist = np.dot((points[unvisited] - current) ** 2, sum_ones)
        # successor is the closest unvisited point
        min_index = dist.argmin()
        successor = index_mask[unvisited][min_index]
        unvisited[successor] = False
        traversal[i + 1] = successor
        distances[i] = dist[min_index]
    # we were comparing distance^2 so take the square root
    distances **= 0.5
    return traversal, distances
def _run_dragonpy_cli(self, *args):
    """Run DragonPy cli with given args.

    Prepends "--verbosity" taken from the GUI settings.
    """
    verbosity = self.frame_settings.var_verbosity.get()
    verbosity_no = VERBOSITY_DICT2[verbosity]
    log.debug("Verbosity: %i (%s)" % (verbosity_no, verbosity))
    cli_args = ("--verbosity", "%s" % verbosity_no) + args
    click.echo("\n")
    run_dragonpy(*cli_args, verbose=True)
def pso(self, n_particles=10, n_iterations=10, lowerLimit=-0.2, upperLimit=0.2,
        threadCount=1, mpi=False, print_key='default'):
    """Return the best fit for the lense model on catalogue basis with
    particle swarm optimizer."""
    init_pos = self.chain.get_args(self.chain.kwargs_data_init)
    num_param = self.chain.num_param
    lower = [lowerLimit] * num_param
    upper = [upperLimit] * num_param
    if mpi is True:
        optimizer = MpiParticleSwarmOptimizer(
            self.chain, lower, upper, n_particles, threads=1)
    else:
        optimizer = ParticleSwarmOptimizer(
            self.chain, lower, upper, n_particles, threads=threadCount)
    if init_pos is not None:
        # Seed the global best with the initial guess.
        optimizer.gbest.position = init_pos
        optimizer.gbest.velocity = [0] * len(init_pos)
        optimizer.gbest.fitness, _ = self.chain.likelihood(init_pos)
    X2_list = []
    vel_list = []
    pos_list = []
    time_start = time.time()
    if optimizer.isMaster():
        print('Computing the %s ...' % print_key)
    num_iter = 0
    for _swarm in optimizer.sample(n_iterations):
        X2_list.append(optimizer.gbest.fitness * 2)
        vel_list.append(optimizer.gbest.velocity)
        pos_list.append(optimizer.gbest.position)
        num_iter += 1
        if optimizer.isMaster() and num_iter % 10 == 0:
            print(num_iter)
    if not mpi:
        result = optimizer.gbest.position
    else:
        # Broadcast the result from the master to all MPI workers.
        result = MpiUtil.mpiBCast(optimizer.gbest.position)
    kwargs_data = self.chain.update_data(result)
    # Only the master process reports timing/results under MPI.
    if not (mpi is True and not optimizer.isMaster()):
        time_end = time.time()
        print("Shifts found: ", result)
        print(time_end - time_start, 'time used for PSO', print_key)
    return kwargs_data, [X2_list, pos_list, vel_list, []]
def refresh(self):
    """Refresh a service."""
    refresh_url = self._url + "/refresh"
    res = self._get(url=refresh_url,
                    param_dict={"f": "json"},
                    securityHandler=self._securityHandler,
                    proxy_port=self._proxy_port,
                    proxy_url=self._proxy_url)
    # Re-initialise cached service state after the refresh.
    # NOTE(review): this calls the name-mangled private helper __init,
    # not __init__ -- confirm that is intentional.
    self.__init()
    return res
def run_fsm(self, name, command, events, transitions, timeout, max_transitions=20):
    """Wrap the FSM code.

    Sends ``command`` to the controller, then drives the FSM to
    completion and returns its result.
    """
    self.ctrl.send_command(command)
    machine = FSM(name, self, events, transitions,
                  timeout=timeout, max_transitions=max_transitions)
    return machine.run()
def convert_model_to_external_data(model, all_tensors_to_one_file=True, location=None):
    # type: (ModelProto, bool, Optional[Text]) -> None
    """Call to set all tensors as external data.

    ``save_model`` saves all the tensors data as external data after
    calling this function.

    @params
    model: ModelProto to be converted.
    all_tensors_to_one_file: If true, save all tensors to one external
        file specified by location.  If false, save each tensor to a
        file named with the tensor name.
    location: specify the external file that all tensors to save to.
        If not specified, will use the model name.
    """
    if all_tensors_to_one_file:
        # One shared file: explicit location wins, otherwise a fresh UUID.
        file_name = location if location else Text(uuid.uuid1())
        for tensor in _get_all_tensors(model):
            set_external_data(tensor, file_name)
    else:
        for tensor in _get_all_tensors(model):
            set_external_data(tensor, tensor.name)
def OutputDocumentFor(objs, apply_theme=None, always_new=False):
    '''Find or create a (possibly temporary) Document to use for serializing
    Bokeh content.

    Typical usage is similar to:

    .. code-block:: python

        with OutputDocumentFor(models):
            (docs_json, [render_item]) = standalone_docs_json_and_render_items(models)

    Inside the context manager, the models will be considered to be part of
    a single Document, with any theme specified, which can thus be
    serialized as a unit.  Where possible an existing Document is used;
    a new bare Document (only available inside the context manager) is
    created when the models have no document, belong to different
    documents, or are only a subset of their document's roots.  Document
    validation is performed before yielding if
    ``settings.perform_document_validation()`` is True.

    objs (seq[Model]) :
        a sequence of Models that will be serialized, and need a common
        document

    apply_theme (Theme or FromCurdoc or None, optional) :
        Sets the theme for the doc while inside this context manager.
        (default: None)

    always_new (bool, optional) :
        Always return a new document, even in cases where it is otherwise
        possible to use an existing document on models.

    Yields:
        Document
    '''
    # NOTE: Comms handling relies on the fact that the new_doc returned
    # has models with the same IDs as they were started with.
    if not isinstance(objs, collections_abc.Sequence) or len(objs) == 0 or \
            not all(isinstance(x, Model) for x in objs):
        raise ValueError("OutputDocumentFor expects a sequence of Models")

    # Whether we re-homed the models into a throwaway document that must
    # be disposed of after the yield.
    temporary = False

    docs = set(x.document for x in objs)
    docs.discard(None)

    if always_new:
        doc = _create_temp_doc(objs)
        temporary = True
    elif len(docs) == 0:
        # No models have a document: create one and keep it afterwards.
        doc = Document()
        for model in objs:
            doc.add_root(model)
    elif len(docs) == 1:
        doc = docs.pop()
        if set(objs) != set(doc.roots):
            # Only a subset of the roots: re-home into a temp document.
            doc = _create_temp_doc(objs)
            temporary = True
        # else: using all the roots of a single doc, use doc as-is.
    else:
        # Models have mixed docs: re-home into a temp document.
        doc = _create_temp_doc(objs)
        temporary = True

    if settings.perform_document_validation():
        doc.validate()

    _set_temp_theme(doc, apply_theme)
    yield doc
    _unset_temp_theme(doc)

    if temporary:
        _dispose_temp_doc(objs)
def _common_scan ( self , values_function , cursor = '0' , match = None , count = 10 , key = None ) :
"""Common scanning skeleton .
: param key : optional function used to identify what ' match ' is applied to""" | if count is None :
count = 10
cursor = int ( cursor )
count = int ( count )
if not count :
raise ValueError ( 'if specified, count must be > 0: %s' % count )
values = values_function ( )
if cursor + count >= len ( values ) : # we reached the end , back to zero
result_cursor = 0
else :
result_cursor = cursor + count
values = values [ cursor : cursor + count ]
if match is not None :
regex = re . compile ( b'^' + re . escape ( self . _encode ( match ) ) . replace ( b'\\*' , b'.*' ) + b'$' )
if not key :
key = lambda v : v
values = [ v for v in values if regex . match ( key ( v ) ) ]
return [ result_cursor , values ] |
def are_symmetrically_equivalent(self, sites1, sites2, symm_prec=1e-3):
    """Given two sets of PeriodicSites, test if they are actually
    symmetrically equivalent under this space group.

    Useful, for example, if you want to test if selecting atoms 1 and 2
    out of a set of 4 atoms are symmetrically the same as selecting
    atoms 3 and 4, etc.  One use is in PartialRemoveSpecie
    transformation to return only symmetrically distinct arrangements
    of atoms.

    Args:
        sites1 ([Site]): 1st set of sites
        sites2 ([Site]): 2nd set of sites
        symm_prec (float): Tolerance in atomic distance to test if atoms
            are symmetrically similar.

    Returns:
        (bool): Whether the two sets of sites are symmetrically
        equivalent.
    """
    def matches_reference(candidate):
        # True if candidate coincides with any site of sites1 within tol.
        return any(ref.is_periodic_image(candidate, symm_prec, False)
                   for ref in sites1)

    for op in self:
        mapped = [PeriodicSite(site.species, op.operate(site.frac_coords),
                               site.lattice) for site in sites2]
        # If every transformed site has a match, the sets are equivalent.
        if all(matches_reference(site) for site in mapped):
            return True
    return False
def can_resume(self):
    """Test if the generator can be resumed, i.e. is not running or closed."""
    # Equivalent to `inspect.getgeneratorstate(self.generator) in
    # (inspect.GEN_CREATED, inspect.GEN_SUSPENDED)` (available since
    # Python 3.2); relies on generator.gi_frame becoming None once the
    # generator is exhausted or closed.
    gen = self.generator
    if gen is None:
        return False
    return not gen.gi_running and gen.gi_frame is not None
def Instance(expected, message="Not an instance of {}"):
    """Creates a validator that checks if the given value is an instance
    of ``expected``.

    A custom message can be specified with ``message``.
    """
    @wraps(Instance)
    def built(value):
        if isinstance(value, expected):
            return value
        raise Error(message.format(expected.__name__))
    return built
def get_slotname(slot, host=None, admin_username=None, admin_password=None):
    '''Get the name of a slot number in the chassis.

    slot
        The number of the slot for which to obtain the name.
    host
        The chassis host.
    admin_username
        The username used to access the chassis.
    admin_password
        The password used to access the chassis.

    CLI Example:

    .. code-block:: bash

        salt-call --local dracr.get_slotname 0 host=111.222.333.444
        admin_username=root admin_password=secret
    '''
    slots = list_slotnames(host=host,
                           admin_username=admin_username,
                           admin_password=admin_password)
    # The slots dictionary is keyed by strings, not integers, so convert
    # the argument to a string before looking it up.
    return slots[six.text_type(slot)]['slotname']
def getinputfile(self, outputfile, loadmetadata=True, client=None, requiremetadata=False):
    """Grab one input file for the specified output filename.

    Raises a KeyError exception if there is no such output, StopIteration
    if there are no input files for it.  Shortcut for getinputfiles().
    """
    if isinstance(outputfile, CLAMOutputFile):
        # Strip the project output prefix to recover the bare filename.
        prefix = os.path.join(self.projectpath, 'output/')
        outputfilename = str(outputfile).replace(prefix, '')
    else:
        outputfilename = outputfile
    if outputfilename not in self:
        raise KeyError("No such outputfile " + outputfilename)
    try:
        return next(self.getinputfiles(outputfile, loadmetadata, client, requiremetadata))
    except StopIteration:
        raise StopIteration("No input files for outputfile " + outputfilename)
def encode(self, b64=False, always_bytes=True):
    """Encode the packet for transmission."""
    # Packet type: a raw byte for binary packets, otherwise text
    # (base64-encoded binary packets are prefixed with 'b').
    if self.binary and not b64:
        encoded = six.int2byte(self.packet_type)
    else:
        encoded = six.text_type(self.packet_type)
        if self.binary:
            encoded = 'b' + encoded
    # Payload.
    if self.binary:
        if b64:
            encoded += base64.b64encode(self.data).decode('utf-8')
        else:
            encoded += self.data
    elif isinstance(self.data, six.string_types):
        encoded += self.data
    elif isinstance(self.data, (dict, list)):
        encoded += self.json.dumps(self.data, separators=(',', ':'))
    elif self.data is not None:
        encoded += str(self.data)
    if always_bytes and not isinstance(encoded, binary_types):
        encoded = encoded.encode('utf-8')
    return encoded
def _parse(self, html):
    """Parse given string as HTML and return its etree representation."""
    if self._has_body_re.search(html):
        tree = lxml.html.document_fromstring(html).find('.//body')
        self.has_body = True
    else:
        tree = lxml.html.fragment_fromstring(html, create_parent=self._root_tag)
    if tree.tag != self._root_tag:
        # Ensure the root element exists even if not really needed,
        # so the tree always has the same structure.
        root = lxml.html.HtmlElement()
        root.tag = self._root_tag
        root.append(tree)
        return root
    return tree
def _show_list_message(resolved_config):
    """Show the message for when a user has passed in --list."""
    # Show what's available.
    supported_programs = util.get_list_of_all_supported_commands(resolved_config)
    preamble = [
        'Legend: ',
        ' ' + util.FLAG_ONLY_CUSTOM + ' only custom files',
        ' ' + util.FLAG_CUSTOM_AND_DEFAULT + ' custom and default files',
        ' ' + ' only default files (no symbol)',
        '',
        'Programs supported by eg: ',
    ]
    complete_message = '\n'.join(preamble)
    complete_message += '\n' + '\n'.join(supported_programs)
    # Page the output so long program lists remain readable.
    pydoc.pager(complete_message)
def destroy(self):
    """Delete this object from the server.

    :rtype: None
    """
    if not self.id:
        # Never saved to the server, so nothing to delete remotely.
        return
    client.delete(
        '/classes/{0}/{1}'.format(self._class_name, self.id),
        self._flags)
def kmer_counter(seq, k=4):
    """Count the unique substrings (k-mers / q-grams) within a short
    (<128 symbol) string.

    Used for algorithms like UniqTag for genome unique identifier
    locality sensitive hashing (jellyfish is a C implementation of
    k-mer counting).  Default k=4.

    >>> kmer_counter('AGATAGATAGACACAGAAATGGGACCACAC')['ACAC']
    2
    """
    # NOTE(review): only string inputs are handled; any other sequence
    # type falls through and returns None -- confirm that is intended.
    if isinstance(seq, basestring):
        return Counter(generate_kmers(seq, k))
def _tile_read(src_dst, bounds, tilesize, indexes=None, nodata=None,
               resampling_method="bilinear"):
    """Read data and mask.

    Attributes
    ----------
    src_dst : rasterio.io.DatasetReader
        rasterio.io.DatasetReader object
    bounds : list
        Mercator tile bounds (left, bottom, right, top)
    tilesize : int
        Output image size
    indexes : list of ints or a single int, optional (defaults: None)
        If `indexes` is a list, the result is a 3D array, but is a 2D
        array if it is a band index number.
    nodata : int or float, optional (defaults: None)
    resampling_method : str, optional (default: "bilinear")
        Resampling algorithm

    Returns
    -------
    out : array, int
        returns pixel value.
    """
    if isinstance(indexes, int):
        indexes = [indexes]
    elif isinstance(indexes, tuple):
        indexes = list(indexes)

    resampling = Resampling[resampling_method]
    vrt_params = dict(add_alpha=True, crs="epsg:3857", resampling=resampling)
    vrt_transform, vrt_width, vrt_height = get_vrt_transform(src_dst, bounds)
    vrt_params.update(dict(transform=vrt_transform,
                           width=vrt_width,
                           height=vrt_height))

    if indexes is None:
        indexes = src_dst.indexes
    out_shape = (len(indexes), tilesize, tilesize)

    if nodata is None:
        nodata = src_dst.nodata
    if nodata is not None:
        # With an explicit nodata value the alpha band is redundant.
        vrt_params.update(dict(nodata=nodata, add_alpha=False, src_nodata=nodata))
    if has_alpha_band(src_dst):
        vrt_params.update(dict(add_alpha=False))

    with WarpedVRT(src_dst, **vrt_params) as vrt:
        data = vrt.read(out_shape=out_shape, indexes=indexes,
                        resampling=resampling)
        mask = vrt.dataset_mask(out_shape=(tilesize, tilesize))
    return data, mask
def crop_to_ratio(self):
    "Get crop coordinates and perform the crop if we get any."
    crop_box = self.get_crop_box()
    if not crop_box:
        return
    crop_box = self.center_important_part(crop_box)
    iw, ih = self.image.size
    # Does the crop box reach outside of the image?
    out_of_photo = (min(crop_box[0], crop_box[1]) < 0
                    or crop_box[2] > iw or crop_box[3] > ih)
    # Is there transparency information in the image?
    transparent = self.image.mode in ('RGBA', 'LA')
    if photos_settings.DEFAULT_BG_COLOR != 'black' and out_of_photo and not transparent:
        # Crop only the portion of the image that will be visible...
        visible_box = (max(0, crop_box[0]), max(0, crop_box[1]),
                       min(iw, crop_box[2]), min(ih, crop_box[3]))
        cropped = self.image.crop(visible_box)
        # ...then paste it at the proper position onto a canvas of the
        # requested size filled with the default background color.
        self.image = Image.new(
            'RGB',
            (crop_box[2] - crop_box[0], crop_box[3] - crop_box[1]),
            photos_settings.DEFAULT_BG_COLOR)
        self.image.paste(cropped,
                         (abs(min(crop_box[0], 0)), abs(min(crop_box[1], 0))))
    else:
        # Crop normally if not the case.
        self.image = self.image.crop(crop_box)
    return crop_box
def write_psts(self, prefix, existing_jco=None, noptmax=None):
    """Write parameter and optionally observation realizations to a
    series of pest control files.

    Parameters
    ----------
    prefix : str
        pest control file prefix
    existing_jco : str
        filename of an existing jacobian matrix to add to the pest++
        options in the control file.  This is useful for NSMC since this
        jco can be used to get the first set of parameter upgrades for
        free!  Needs to be the path to the jco file as seen from the
        location where pest++ will be run.
    noptmax : int
        value of NOPTMAX to set in new pest control files

    Example
    -------
    ``>>> import pyemu``
    ``>>> mc = pyemu.MonteCarlo(jco="pest.jcb")``
    ``>>> mc.draw(1000, obs=True)``
    ``>>> mc.write_psts("mc_", existing_jco="pest.jcb", noptmax=1)``
    """
    self.log("writing realized pest control files")
    # get a copy of the pest control file
    pst = self.pst.get(par_names=self.pst.par_names, obs_names=self.pst.obs_names)
    if noptmax is not None:
        # BUGFIX: this assignment was duplicated in the original code.
        pst.control_data.noptmax = noptmax
    if existing_jco is not None:
        pst.pestpp_options["BASE_JACOBIAN"] = existing_jco
    # set the indices so realizations can be assigned by name
    pst.parameter_data.index = pst.parameter_data.parnme
    pst.observation_data.index = pst.observation_data.obsnme
    if self.parensemble.istransformed:
        par_en = self.parensemble._back_transform(inplace=False)
    else:
        par_en = self.parensemble
    for i in range(self.num_reals):
        pst_name = prefix + "{0:d}.pst".format(i)
        self.log("writing realized pest control file " + pst_name)
        pst.parameter_data.loc[par_en.columns, "parval1"] = par_en.iloc[i, :].T
        # add the obs noise realization if needed
        if self.obsensemble.shape[0] == self.num_reals:
            pst.observation_data.loc[self.obsensemble.columns, "obsval"] = \
                self.obsensemble.iloc[i, :].T
        # write
        pst.write(pst_name)
        self.log("writing realized pest control file " + pst_name)
    self.log("writing realized pest control files")
def _check_is_max_context ( doc_spans , cur_span_index , position ) :
"""Check if this is the ' max context ' doc span for the token .""" | # Because of the sliding window approach taken to scoring documents , a single
# token can appear in multiple documents . E . g .
# Doc : the man went to the store and bought a gallon of milk
# Span A : the man went to the
# Span B : to the store and bought
# Span C : and bought a gallon of
# Now the word ' bought ' will have two scores from spans B and C . We only
# want to consider the score with " maximum context " , which we define as
# the * minimum * of its left and right context ( the * sum * of left and
# right context will always be the same , of course ) .
# In the example the maximum context for ' bought ' would be span C since
# it has 1 left context and 3 right context , while span B has 4 left context
# and 0 right context .
best_score = None
best_span_index = None
for ( span_index , doc_span ) in enumerate ( doc_spans ) :
end = doc_span . start + doc_span . length - 1
if position < doc_span . start :
continue
if position > end :
continue
num_left_context = position - doc_span . start
num_right_context = end - position
score = min ( num_left_context , num_right_context ) + 0.01 * doc_span . length
if best_score is None or score > best_score :
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index |
def check_permissions(self, request):
    """Retrieve the controlled object and perform the permissions check."""
    # Resolve the object under permission control, trying the most specific
    # accessor first and falling through on falsy results (same semantics as
    # a chained and/or expression).
    obj = None
    if hasattr(self, 'get_controlled_object'):
        obj = self.get_controlled_object()
    if not obj and hasattr(self, 'get_object'):
        obj = self.get_object()
    if not obj:
        obj = getattr(self, 'object', None)
    user = request.user
    # Gather the permissions to check, then evaluate them.
    required_perms = self.get_required_permissions(self)
    allowed = self.perform_permissions_check(user, obj, required_perms)
    if not allowed and not user.is_authenticated:
        # Anonymous users are redirected to the login page with a
        # "next" parameter pointing back at the current URL.
        return HttpResponseRedirect('{}?{}={}'.format(
            resolve_url(self.login_url),
            self.redirect_field_name,
            urlquote(request.get_full_path())))
    elif not allowed:
        raise PermissionDenied
def importRootCertificate(self, alias, rootCACertificate):
    """Import a certificate authority (CA) root or intermediate certificate
    into the keystore.

    :param alias: name under which the certificate is stored
    :param rootCACertificate: the certificate file to upload
    :return: parsed JSON response of the import operation
    """
    url = self._url + "/sslcertificates/importRootOrIntermediate"
    params = {"f": "json", "alias": alias}
    files = {"rootCACertificate": rootCACertificate}
    return self._post(url=url,
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      files=files,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
def count_children(obj, type=None):
    """Return the number of children of obj, optionally restricting by class.

    NOTE: the parameter name ``type`` shadows the builtin, but is kept to
    preserve the public keyword name for existing callers.
    """
    if type is None:
        return len(obj)
    # There doesn't appear to be any hdf5 function for getting this
    # information without inspecting each child, which makes this
    # somewhat slow.
    matches = 0
    for child in obj:
        if obj.get(child, getclass=True) is type:
            matches += 1
    return matches
def preview_unmount(self):
    """Return the commands that :func:`unmount` would execute.

    Note: any system changes between calling this method and calling
    :func:`unmount` aren't reflected in the returned list.
    """
    commands = []
    # Bind mounts are simply unmounted.
    for mountpoint in self.find_bindmounts():
        commands.append('umount {0}'.format(mountpoint))
    # Regular mounts are unmounted and their mountpoints removed.
    for mountpoint in self.find_mounts():
        commands.append('umount {0}'.format(mountpoint))
        commands.append('rm -Rf {0}'.format(mountpoint))
    # LVM volume groups are deactivated and their loop devices detached.
    for vgname, pvname in self.find_volume_groups():
        commands.append('lvchange -a n {0}'.format(vgname))
        commands.append('losetup -d {0}'.format(pvname))
    # Remaining loopback devices are detached as well.
    for device in self.find_loopbacks():
        commands.append('losetup -d {0}'.format(device))
    # FUSE-mounted base images are unmounted and their mountpoints removed.
    for mountpoint in self.find_base_images():
        commands.append('fusermount -u {0}'.format(mountpoint))
        commands.append('rm -Rf {0}'.format(mountpoint))
    # Clean-up directories, skipping removals already scheduled above.
    for folder in self.find_clean_dirs():
        cmd = 'rm -Rf {0}'.format(folder)
        if cmd not in commands:
            commands.append(cmd)
    return commands
def printer(data, depth=0):
    """Prepare data for printing.

    :param data: a data value that will be processed by this function
    :param int depth: recursion indicator, used to maintain proper indent
    :returns: string with formatted config
    :rtype: str
    """
    indent = _INDENT * depth
    # Nested (non top-level) values open with ':' and a newline.
    rendered = ':\n' if depth else ''
    if isinstance(data, dict):
        for key, val in data.items():
            entry = '{0}{1}'.format(indent, key)
            nested = printer(val, depth + 1)
            if not nested.count('\n'):
                # Single-line values stay on the same line as the key.
                nested = ': {0}'.format(nested.lstrip())
            rendered += '{0}{1}\n'.format(entry, nested)
    elif isinstance(data, list):
        for item in data:
            rendered += '{0} - {1}\n'.format(indent, item)
    else:
        # Scalars are rendered together with their type name.
        rendered = '{0}{1} ({2})'.format(indent, data, data.__class__.__name__)
    return rendered.rstrip('\n')
def get_protocol(handle: Handle, want_v2: bool) -> Protocol:
    """Make a Protocol instance for the given handle.

    Each transport can have a preference for using a particular protocol
    version.  This preference is overridable through the
    `TREZOR_PROTOCOL_V1` environment variable, which forces the library to
    use V1 anyways.

    As of 11/2018, no devices support V2, so we enforce V1 here.  It is
    still possible to set `TREZOR_PROTOCOL_V1=0` and thus enable V2 protocol
    for transports that ask for it (i.e., USB transports for Trezor T).
    """
    force_v1 = int(os.environ.get("TREZOR_PROTOCOL_V1", 1))
    # V2 only when the transport asks for it AND the override is disabled.
    if force_v1 or not want_v2:
        return ProtocolV1(handle)
    return ProtocolV2(handle)
def delete_session(sid_s):
    """Delete entries in the data- and kvsession stores with the given sid_s.

    On a successful deletion, the flask-kvsession store returns 1 while the
    sqlalchemy datastore returns None.

    :param sid_s: The session ID.
    :returns: ``1`` if deletion was successful.
    """
    # Drop the session payload from the kvsession store.
    _sessionstore.delete(sid_s)
    # Drop the matching SessionActivity row inside a nested transaction.
    with db.session.begin_nested():
        SessionActivity.query.filter_by(sid_s=sid_s).delete()
    return 1
def fix_pdf(pdf_file, destination):
    """Fix malformed PDF files when data are present after '%%EOF'.

    Copies ``pdf_file`` line by line up to (and including) the first line
    containing the ``%%EOF`` marker, then writes the result to
    ``destination``.

    .. note::
        Originally from sciunto, https://github.com/sciunto/tear-pages

    :param pdf_file: PDF filepath
    :param destination: destination filepath
    """
    # BUGFIX: the original leaked the NamedTemporaryFile handle (it was
    # never closed); use it as a context manager and write through it
    # directly, flushing before the copy so all bytes hit disk.
    with tempfile.NamedTemporaryFile() as tmp:
        with open(pdf_file, "rb") as fh:
            for line in fh:
                tmp.write(line)
                if b'%%EOF' in line:
                    # Drop everything after the first EOF marker.
                    break
        tmp.flush()
        shutil.copy(tmp.name, destination)
def as_text(self):
    '''Fetch and render all regions.

    For search and test purposes; just a prototype.
    '''
    from leonardo.templatetags.leonardo_tags import _render_content
    request = get_anonymous_request(self)
    rendered = ''
    try:
        region_keys = [region.key for region in self._feincms_all_regions]
        for region_key in region_keys:
            # Render every content item of the region and concatenate.
            pieces = [_render_content(item, request=request, context={})
                      for item in getattr(self.content, region_key)]
            rendered += ''.join(pieces)
    except PermissionDenied:
        # Anonymous rendering may legitimately be forbidden; return what
        # we have so far.
        pass
    except Exception as e:
        LOG.exception(e)
    return rendered
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken, knowledge_base):
    """Parse the yum repolist output, yielding one PackageRepository per repo."""
    _ = stderr, time_taken, args, knowledge_base
    # Unused.
    self.CheckReturn(cmd, return_val)
    output = iter(stdout.decode("utf-8").splitlines())
    # Attribute name -> regex that extracts the attribute's value.
    repo_regexes = {
        "name": self._re_compile("Repo-name"),
        "revision": self._re_compile("Repo-revision"),
        "last_update": self._re_compile("Repo-updated"),
        "num_packages": self._re_compile("Repo-pkgs"),
        "size": self._re_compile("Repo-size"),
        "baseurl": self._re_compile("Repo-baseurl"),
        "timeout": self._re_compile("Repo-expire")}
    repo_id_re = self._re_compile("Repo-id")
    for line in output:
        match = repo_id_re.match(line)
        if match:
            repo_info = rdf_client.PackageRepository()
            setattr(repo_info, "id", match.group(1).strip())
            # Consume the repo's attribute lines until a blank line or EOF.
            while line:
                for attr, regex in iteritems(repo_regexes):
                    match = regex.match(line)
                    if match:
                        setattr(repo_info, attr, match.group(1).strip())
                        break
                # BUGFIX: use a default instead of letting next() raise
                # StopIteration, which PEP 479 converts to RuntimeError
                # inside this generator when the output ends mid-block.
                line = next(output, "")
            yield repo_info
def get_package_dir(self, package):
    """Return the directory, relative to the top of the source distribution,
    where package ``package`` should be found (at least according to the
    :attr:`package_dir` option, if any).

    Copied from :meth:`distutils.command.build_py.get_package_dir()`.
    """
    parts = package.split('.')
    if not self.package_dir:
        # No mapping configured: the path simply mirrors the dotted name.
        return os.path.join(*parts) if parts else ''
    tail = []
    # Walk up the dotted name until a prefix has an explicit mapping.
    while parts:
        candidate = '.'.join(parts)
        if candidate in self.package_dir:
            tail.insert(0, self.package_dir[candidate])
            return os.path.join(*tail)
        # No entry for this prefix: peel off the last component and retry.
        tail.insert(0, parts.pop())
    # Nothing matched; fall back to the default ('') entry, if present.
    root = self.package_dir.get('')
    if root is not None:
        tail.insert(0, root)
    return os.path.join(*tail) if tail else ''
def run_band_structure(self, paths, with_eigenvectors=False,
                       with_group_velocities=False, is_band_connection=False,
                       path_connections=None, labels=None,
                       is_legacy_plot=False):
    """Run phonon band structure calculation.

    Parameters
    ----------
    paths : List of array_like
        Sets of qpoints that can be passed to phonopy.set_band_structure().
        Numbers of qpoints can be different; shape of each array_like is
        (qpoints, 3).
    with_eigenvectors : bool, optional
        Whether eigenvectors are calculated. Default is False.
    with_group_velocities : bool, optional
        Whether group velocities are calculated. Default is False.
    is_band_connection : bool, optional
        Whether each band is connected, achieved by comparing similarity of
        eigenvectors of neighboring points. Sometimes this fails. Default is
        False.
    path_connections : List of bool, optional
        Only used in graphical plots; gives whether each path is connected
        to the next one (False means a jump of q-points). Same length as
        paths. Default is None.
    labels : List of str, optional
        Only used in graphical plots; labels of the end points of each path.
        The number of labels equals (2 - np.array(path_connections)).sum().
    is_legacy_plot : bool, optional
        Produce the old-style band structure plot. Default is False.
    """
    if self._dynamical_matrix is None:
        raise RuntimeError("Dynamical matrix has not yet built.")
    # Group velocities are only computed when explicitly requested.
    if with_group_velocities:
        if self._group_velocity is None:
            self._set_group_velocity()
        group_velocity = self._group_velocity
    else:
        group_velocity = None
    self._band_structure = BandStructure(
        paths,
        self._dynamical_matrix,
        with_eigenvectors=with_eigenvectors,
        is_band_connection=is_band_connection,
        group_velocity=group_velocity,
        path_connections=path_connections,
        labels=labels,
        is_legacy_plot=is_legacy_plot,
        factor=self._factor)
def writeline(self, data):
    """Writes data to serial port.

    A newline is appended to ``data``.  In chunked mode (``ch_mode``) the
    payload is written in ``ch_mode_chunk_size``-sized pieces with a delay of
    ``ch_mode_ch_delay`` seconds between them.

    :param data: Data to write
    :return: Nothing
    :raises: RuntimeError if a SerialException occurs while writing.
    """
    try:
        if self.ch_mode:
            data += "\n"
            # Split the payload into fixed-size chunks and pace the writes
            # so slow receivers can keep up.
            parts = split_by_n(data, self.ch_mode_chunk_size)
            for split_str in parts:
                self.port.write(split_str.encode())
                time.sleep(self.ch_mode_ch_delay)
        else:
            self.port.write((data + "\n").encode())
    except SerialException as err:
        self.logger.exception("SerialError occured while trying to write data {}.".format(data))
        raise RuntimeError(str(err))
def QA_fetch_stock_block_adv(code=None, blockname=None, collections=DATABASE.stock_block):
    '''Fetch stock block (sector) data.

    :param code: stock code(s); when given (and ``blockname`` is None) the
        blocks that contain these codes are returned
    :param blockname: block name; when given (and ``code`` is None) all
        stocks belonging to that block are returned
    :param collections: database collection, defaults to
        ``DATABASE.stock_block``
    :return: QA_DataStruct_Stock_block
    '''
    if code is not None and blockname is None:
        # Return the blocks this stock code belongs to.
        data = pd.DataFrame([item for item in collections.find({'code': {'$in': code}})])
        data = data.drop(['_id'], axis=1)
        return QA_DataStruct_Stock_block(data.set_index(['blockname', 'code'], drop=True).drop_duplicates())
    elif blockname is not None and code is None:
        # 🛠 todo finished: return all stocks that belong to this block
        # (matched by regex on the block name).
        items_from_collections = [item for item in collections.find({'blockname': re.compile(blockname)})]
        data = pd.DataFrame(items_from_collections).drop(['_id'], axis=1)
        data_set_index = data.set_index(['blockname', 'code'], drop=True)
        return QA_DataStruct_Stock_block(data_set_index)
    else:
        # 🛠 todo: decide whether the stock belongs to the given block;
        # for now return the complete block table.
        data = pd.DataFrame([item for item in collections.find()]).drop(['_id'], axis=1)
        data_set_index = data.set_index(['blockname', 'code'], drop=True)
        return QA_DataStruct_Stock_block(data_set_index)
def get_search_schema(self, schema):
    """Fetch a Solr schema from Yokozuna.

    :param schema: name of Solr schema
    :type schema: string
    :rtype: dict
    :raises NotImplementedError: when the server lacks Search 2.0 support
    :raises RiakError: when the schema cannot be retrieved
    """
    if not self.yz_wm_schema:
        raise NotImplementedError("Search 2.0 administration is not "
                                  "supported for this version")
    url = self.search_schema_path(schema)
    # Run the request...
    status, _, body = self._request('GET', url)
    if status != 200:
        raise RiakError('Error getting Search 2.0 schema.')
    return {'name': schema, 'content': bytes_to_str(body)}
def replace_by_key(pif, key, subs, new_key=None, remove=False):
    """Replace values that match a key.

    Deeply traverses the pif object, looking for ``key`` and replacing
    values in accordance with ``subs``. If ``new_key`` is set, the replaced
    values are assigned to that key. If ``remove`` is ``True``, the old
    ``key`` pairs are removed.
    """
    if not new_key:
        # Without a distinct target key there is nothing to remove.
        new_key, remove = key, False
    as_dict = pif.as_dictionary()
    replaced = _recurse_replace(as_dict, to_camel_case(key),
                                to_camel_case(new_key), subs, remove)
    # Round-trip through JSON to rebuild a proper pif object.
    return pypif.pif.loads(json.dumps(replaced))
def _format_metric_name ( self , m_name , cfunc ) :
'''Format a cacti metric name into a Datadog - friendly name''' | try :
aggr = CFUNC_TO_AGGR [ cfunc ]
except KeyError :
aggr = cfunc . lower ( )
try :
m_name = CACTI_TO_DD [ m_name ]
if aggr != 'avg' :
m_name += '.{}' . format ( aggr )
return m_name
except KeyError :
return "cacti.{}.{}" . format ( m_name . lower ( ) , aggr ) |
def check_page(fn):
    "Decorator to protect drawing methods"
    @wraps(fn)
    def guarded(self, *args, **kwargs):
        # Drawing needs an open page, unless we are only splitting text.
        if self.page or kwargs.get('split_only'):
            return fn(self, *args, **kwargs)
        self.error("No page open, you need to call add_page() first")
    return guarded
def sys_prefix(self):
    """The prefix run inside the context of the environment.

    :return: The python prefix inside the environment
    :rtype: :data:`sys.prefix`
    """
    # BUGFIX: the original relied on accidental implicit string literal
    # concatenation ("-c" "code" -> "-ccode"); pass the option and the
    # program as separate argv entries.
    command = [self.python, "-c", "import sys; print(sys.prefix)"]
    c = vistir.misc.run(command, return_object=True, block=True,
                        nospin=True, write_to_stdout=False)
    sys_prefix = vistir.compat.Path(vistir.misc.to_text(c.out).strip()).as_posix()
    return sys_prefix
def get_connection(cls, pid, connection):
    """Return the specified :class:`~queries.pool.Connection` from the
    pool.

    :param str pid: The pool ID
    :param connection: The connection to return for
    :type connection: psycopg2.extensions.connection
    :rtype: queries.pool.Connection
    """
    # Hold the class-level lock so the pool mapping cannot change while
    # the connection handle is being resolved.
    with cls._lock:
        return cls._pools[pid].connection_handle(connection)
def str2int(self, str_value):
    """Conversion class name string => integer."""
    str_value = tf.compat.as_text(str_value)
    if self._str2int:
        # An explicit name table takes precedence over parsing.
        return self._str2int[str_value]
    # No names provided; try to interpret the label as an integer index.
    int_value = None
    try:
        int_value = int(str_value)
    except ValueError:
        pass
    # Reject unparseable labels and out-of-range indices alike.
    if int_value is None or not 0 <= int_value < self._num_classes:
        raise ValueError("Invalid string class label %s" % str_value)
    return int_value
def encode(val):
    """Encode a string assuming the encoding is UTF-8.

    :param: a unicode or bytes object, or a list/tuple of them
    :returns: bytes (or a list of bytes for list/tuple input)
    """
    if isinstance(val, (list, tuple)):
        # Recursively encode every element; always returns a list.
        return [encode(item) for item in val]
    if isinstance(val, str):
        return val.encode('utf-8')
    # Assume it is an already-encoded object.
    return val
def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
    """Connect to Google Cloud Storage.

    @type gs_access_key_id: string
    @param gs_access_key_id: Your Google Cloud Storage Access Key ID
    @type gs_secret_access_key: string
    @param gs_secret_access_key: Your Google Cloud Storage Secret Access Key
    @rtype: L{GSConnection<boto.gs.connection.GSConnection>}
    @return: A connection to Google's Storage service
    """
    # Imported lazily so boto.gs is only required when actually used.
    from boto.gs.connection import GSConnection
    return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
def get_option_names(self):
    """Return a list of fully qualified option names.

    returns:
        a list of strings representing the Options in the source Namespace
        list.  Each item is fully qualified with dot-delimited Namespace
        names.
    """
    names = []
    # keys_breadth_first yields both Options and sub-Namespaces; keep
    # only the entries that are actual Options.
    for qualified_name in self.option_definitions.keys_breadth_first():
        if isinstance(self.option_definitions[qualified_name], Option):
            names.append(qualified_name)
    return names
def __fa_process_sequence(self, sequence, avoid, initial_state, execution_state, trace_current, next_addr):
    """Process a REIL sequence.

    Args:
        sequence (ReilSequence): A REIL sequence to process.
        avoid (list): List of addresses to avoid.
        initial_state: Initial state.
        execution_state: Execution state queue.
        trace_current (list): Current trace.
        next_addr: Address of the next instruction following the current one.

    Returns:
        Returns the next instruction to execute in case there is one,
        otherwise returns None.
    """
    # TODO: Process execution intra states.
    ip = sequence.address
    next_ip = None
    while ip:
        # Fetch the next instruction in the sequence.
        try:
            instr = sequence.fetch(ip)
        except ReilSequenceInvalidAddressError:
            # The address is outside this sequence.  At this point, ip
            # should be a native instruction address, therefore the index
            # part should be zero.
            assert split_address(ip)[1] == 0x0
            next_ip = ip
            break
        try:
            target_addr = sequence.get_next_address(ip)
        except ReilSequenceInvalidAddressError:
            # We reached the end of the sequence.  Execution continues on
            # the next native instruction (it's a REIL address).
            target_addr = next_addr
        next_ip = self.__process_instr(instr, avoid, target_addr, initial_state, execution_state, trace_current)
        # Update instruction pointer: prefer the explicit next ip, else
        # fall through to the sequentially-next address.
        try:
            ip = next_ip if next_ip else sequence.get_next_address(ip)
        except ReilSequenceInvalidAddressError:
            # No more instructions in the sequence; stop processing.
            break
    return next_ip
def increase_and_check_counter(self):
    '''Increase the counter by one and report whether a period has ended.

    :returns: True when the counter wraps around to zero, False otherwise.
    '''
    self.counter = (self.counter + 1) % self.period
    # The period is complete exactly when the counter wraps to zero.
    return self.counter == 0
def save_conf(fn=None):
    """Save current configuration to file as YAML.

    If not given, uses the current config directory, ``confdir``, which can
    be set by INTAKE_CONF_DIR.
    """
    target = cfile() if fn is None else fn
    # Best-effort creation of the parent directory; it may already exist
    # or be unwritable, in which case the open() below will surface it.
    try:
        os.makedirs(os.path.dirname(target))
    except (OSError, IOError):
        pass
    with open(target, 'w') as fh:
        yaml.dump(conf, fh)
def answerShippingQuery(self, shipping_query_id, ok, shipping_options=None, error_message=None):
    """See: https://core.telegram.org/bots/api#answershippingquery"""
    # NOTE: _strip(locals()) collects this method's arguments by name, so
    # the parameter names must match the Bot API field names exactly and
    # no extra local variables may be introduced before this call.
    p = _strip(locals())
    return self._api_request('answerShippingQuery', _rectify(p))
def _determine_tool ( files ) :
"""Yields tuples in the form of ( linker file , tool the file links for""" | for file in files :
linker_ext = file . split ( '.' ) [ - 1 ]
if "sct" in linker_ext or "lin" in linker_ext :
yield ( str ( file ) , "uvision" )
elif "ld" in linker_ext :
yield ( str ( file ) , "make_gcc_arm" )
elif "icf" in linker_ext :
yield ( str ( file ) , "iar_arm" ) |
def decrypt(self, encryption_key, iv, encrypted_data):
    """Decrypt encrypted subtitle data.

    @param str encryption_key: key used for AES decryption
    @param str iv: initialization vector
    @param str encrypted_data: the encrypted, zlib-compressed payload
    @return str: the decompressed plaintext subtitles
    """
    logger.info('Decrypting subtitles with length (%d bytes), key=%r', len(encrypted_data), encryption_key)
    # Payload is AES-encrypted and then zlib-compressed, so reverse both.
    return zlib.decompress(aes_decrypt(encryption_key, iv, encrypted_data))
def _ISO8601_to_UNIXtime ( iso ) :
"""Converts an ISO8601 - formatted string in the format
` ` YYYY - MM - DD HH : MM : SS + 00 ` ` to the correspondant UNIXtime
: param iso : the ISO8601 - formatted string
: type iso : string
: returns : an int UNIXtime
: raises : * TypeError * when bad argument types are provided , * ValueError *
when the ISO8601 string is badly formatted""" | try :
d = datetime . strptime ( iso , '%Y-%m-%d %H:%M:%S+00' )
except ValueError :
raise ValueError ( __name__ + ": bad format for input ISO8601 string, ' \
'should have been: YYYY-MM-DD HH:MM:SS+00" )
return _datetime_to_UNIXtime ( d ) |
def update(self, weight=values.unset, priority=values.unset, enabled=values.unset, friendly_name=values.unset, sip_url=values.unset):
    """Update the OriginationUrlInstance

    :param unicode weight: The value that determines the relative load the URI should receive compared to others with the same priority
    :param unicode priority: The relative importance of the URI
    :param bool enabled: Whether the URL is enabled
    :param unicode friendly_name: A string to describe the resource
    :param unicode sip_url: The SIP address you want Twilio to route your Origination calls to

    :returns: Updated OriginationUrlInstance
    :rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
    """
    # Delegate to the context proxy; values.unset sentinels mark fields
    # that should be left unchanged by the API call.
    return self._proxy.update(weight=weight, priority=priority, enabled=enabled, friendly_name=friendly_name, sip_url=sip_url, )
def collect_garbage():
    '''Completely remove all currently 'uninstalled' packages in the nix
    store.

    Tells the user how many store paths were removed and how much space was
    freed.

    :return: How much space was freed and how many derivations were removed
    :rtype: str

    .. warning::
        This is a destructive action on the nix store.

    .. code-block:: bash

        salt '*' nix.collect_garbage
    '''
    # Also drop old generations so their paths become collectable.
    cmd = _nix_collect_garbage() + ['--delete-old']
    return _run(cmd)['stdout'].splitlines()
def openflow_controller_connection_address_active_controller_vrf(self, **kwargs):
    """Auto Generated Code"""
    # Build the brocade-openflow config subtree selecting the VRF used for
    # the active controller connection.
    config = ET.Element("config")
    controller = ET.SubElement(config, "openflow-controller", xmlns="urn:brocade.com:mgmt:brocade-openflow")
    name_key = ET.SubElement(controller, "controller-name")
    name_key.text = kwargs.pop('controller_name')
    address = ET.SubElement(controller, "connection-address")
    vrf = ET.SubElement(address, "active-controller-vrf")
    vrf.text = kwargs.pop('active_controller_vrf')
    # The callback (defaulting to self._callback) dispatches the config.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def add_timedelta(self, delta):
    """Add a timedelta duration to the instance.

    :param delta: The timedelta instance
    :type delta: datetime.timedelta
    :rtype: Time
    :raises TypeError: when the timedelta spans one day or more
    """
    if delta.days:
        # A time-of-day value cannot absorb whole days.
        raise TypeError("Cannot add timedelta with days to Time.")
    return self.add(seconds=delta.seconds, microseconds=delta.microseconds)
def get_game(self, name):
    """Get the game instance for a game name.

    :param name: the name of the game
    :type name: :class:`str`
    :returns: the game instance, or None when no exact match exists
    :rtype: :class:`models.Game` | None
    :raises: None
    """
    # The search may return fuzzy matches; keep only the exact name.
    candidates = self.search_games(query=name, live=False)
    exact = (game for game in candidates if game.name == name)
    return next(exact, None)
def main():
    """Main method."""
    args = parse_cmd_arguments()
    html_file = args.file
    # Validate the JSON options up front, before doing any real work.
    try:
        json.loads(args.add_tags or '{}')
        json.loads(args.exc_tags or '{}')
    except ValueError:
        msg = ('Invalid json string: please provide a valid json '
               'string e.g {}').format('\'{"img": "data-url"}\'')
        # Red ANSI escape codes around the error message.
        print('\033[91m' + msg + '\033[0m')
        sys.exit(1)
    staticfied = staticfy(html_file, args=args).encode('utf-8')
    file_ops(staticfied, args=args)
def main(gi, ranges):
    """Print the features of the genbank entry given by gi.  If ranges is
    non-empty, only print features that include the ranges.

    gi: either a hit from a BLAST record, in the form
    'gi|63148399|gb|DQ011818.1|' or a gi number (63148399 in this example).
    ranges: a possibly empty list of ranges to print information for.  Each
    range is a non-descending (start, end) pair of integers.
    """
    # TODO: Make it so we can pass a 'db' argument to getSequence.
    record = getSequence(gi)
    if record is None:
        print("Looks like you're offline.")
        sys.exit(3)
    if not ranges:
        # No ranges given: print every feature.
        for feature in record.features:
            print(feature)
        return
    # Print each feature overlapping any requested range, at most once.
    printed = set()
    for start, end in ranges:
        for index, feature in enumerate(record.features):
            overlaps = (start < int(feature.location.end)
                        and end > int(feature.location.start))
            if overlaps and index not in printed:
                print(feature)
                printed.add(index)
def _split_variables ( variables ) :
"""Split variables into always passed ( std ) and specified ( file ) .
We always pass some variables to each step but need to
explicitly define file and algorithm variables so they can
be linked in as needed .""" | file_vs = [ ]
std_vs = [ ]
for v in variables :
cur_type = v [ "type" ]
while isinstance ( cur_type , dict ) :
if "items" in cur_type :
cur_type = cur_type [ "items" ]
else :
cur_type = cur_type [ "type" ]
if ( cur_type in [ "File" , "null" , "record" ] or ( isinstance ( cur_type , ( list , tuple ) ) and ( "File" in cur_type or { 'items' : 'File' , 'type' : 'array' } in cur_type ) ) ) :
file_vs . append ( v )
elif v [ "id" ] in ALWAYS_AVAILABLE :
std_vs . append ( v )
else :
file_vs . append ( v )
return file_vs , std_vs |
def update_metric(self, metric, labels, pre_sliced=False):
    """Update evaluation metric with labels and current outputs."""
    for exec_index, (texec, islice) in enumerate(zip(self.train_execs, self.slices)):
        if pre_sliced:
            # Labels were already partitioned per executor.
            labels_slice = labels[exec_index]
        else:
            # Slice each label array to this executor's data range.
            labels_slice = [label[islice] for label in labels]
        metric.update(labels_slice, texec.outputs)
def sent_tokenize(self, text, **kwargs):
    """NLTK's sentence tokenizer (currently PunktSentenceTokenizer).

    Uses an unsupervised algorithm to build a model for abbreviation words,
    collocations, and words that start sentences, then uses that to find
    sentence boundaries.
    """
    # Boundary realignment is on unless the caller disables it.
    realign = kwargs.get("realign_boundaries", True)
    return self.sent_tok.tokenize(text, realign_boundaries=realign)
def cli_certify_complex_dict(config, schema, key_certifier, value_certifier, allow_extra, include_collections, value, ):
    """Console script for certify_dict."""
    # The schema and the certifier callables arrive serialized (JSON or
    # pickle); deserialize them before building the actual certifiers.
    schema = load_json_pickle(schema, config)
    key_certifier = create_certifier(load_json_pickle(key_certifier, config))
    value_certifier = create_certifier(load_json_pickle(value_certifier, config))
    # Hand off to the shared CLI driver, which deserializes the value and
    # runs certify_dict with the options assembled above.
    execute_cli_command('dict', config, lambda x: load_json_pickle(x, config), certify_dict, value, allow_extra=allow_extra, include_collections=include_collections, key_certifier=key_certifier, required=config['required'], schema=schema, value_certifier=value_certifier, )
def data_and_labels(self):
    """Dataset features and labels in a matrix form for learning.

    Also returns sample_ids in the same order.

    Returns
    -------
    data_matrix : ndarray
        2D array of shape [num_samples, num_features]
        with features corresponding row-wise to sample_ids
    labels : ndarray
        Array of numeric labels for each sample corresponding row-wise to
        sample_ids
    sample_ids : list
        List of sample ids
    """
    sample_ids = np.array(self.keys)
    label_dict = self.labels
    # Pre-fill with NaN so any missing entry stays detectable.
    matrix = np.full([self.num_samples, self.num_features], np.nan)
    labels = np.full([self.num_samples, 1], np.nan)
    for row, sid in enumerate(sample_ids):
        matrix[row, :] = self.__data[sid]
        labels[row] = label_dict[sid]
    return matrix, np.ravel(labels), sample_ids
def get_num_shards(num_samples: int, samples_per_shard: int, min_num_shards: int) -> int:
    """Returns the number of shards.

    :param num_samples: Number of training data samples.
    :param samples_per_shard: Samples per shard.
    :param min_num_shards: Minimum number of shards.
    :return: Number of shards.
    """
    needed = int(math.ceil(num_samples / samples_per_shard))
    # Never go below the configured minimum.
    return needed if needed > min_num_shards else min_num_shards
def PopupGetFile(message, default_path='', default_extension='', save_as=False, file_types=(("ALL Files", "*.*"),), no_window=False, size=(None, None), button_color=None, background_color=None, text_color=None, icon=DEFAULT_WINDOW_ICON, font=None, no_titlebar=False, grab_anywhere=False, keep_on_top=False, location=(None, None), initial_folder=None):
    """Display popup with text entry field and browse button. Browse for file.

    :param message: text shown above the input field (also the window title)
    :param default_path: initial value of the input field
    :param default_extension: extension appended by the native dialog
    :param save_as: use a "save as" dialog/button instead of "open"
    :param file_types: tuple of (description, pattern) filter pairs
    :param no_window: skip the PySimpleGUI window, call the tk dialog directly
    :param size: size of the input element
    :param button_color: color of the buttons
    :param background_color: window background color
    :param text_color: message text color
    :param icon: window icon
    :param font: window font
    :param no_titlebar: hide the window title bar
    :param grab_anywhere: allow dragging the window from anywhere
    :param keep_on_top: keep the window above others
    :param location: screen location for the window
    :param initial_folder: folder the browse dialog starts in
    :return: string representing the path chosen, None if cancelled or window closed with X
    """
    global _my_windows
    if no_window:
        # Use the native tk file dialog directly, without building a
        # PySimpleGUI window. A hidden root window is still required.
        if _my_windows.NumOpenWindows:
            root = tk.Toplevel()
        else:
            root = tk.Tk()
        try:
            root.attributes('-alpha', 0)
            # hide window while building it. makes for smoother 'paint'
        except:
            pass
        if save_as:
            filename = tk.filedialog.asksaveasfilename(filetypes=file_types, defaultextension=default_extension)
            # show the 'get file' dialog box
        else:
            filename = tk.filedialog.askopenfilename(filetypes=file_types, defaultextension=default_extension)
            # show the 'get file' dialog box
        root.destroy()
        return filename
    # Build a small window: message, input + browse button, Ok/Cancel.
    browse_button = SaveAs(file_types=file_types, initial_folder=initial_folder) if save_as else FileBrowse(file_types=file_types, initial_folder=initial_folder)
    layout = [[Text(message, auto_size_text=True, text_color=text_color, background_color=background_color)], [InputText(default_text=default_path, size=size), browse_button], [Button('Ok', size=(6, 1), bind_return_key=True), Button('Cancel', size=(6, 1))]]
    window = Window(title=message, icon=icon, auto_size_text=True, button_color=button_color, font=font, background_color=background_color, no_titlebar=no_titlebar, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, location=location)
    (button, input_values) = window.Layout(layout).Read()
    window.Close()
    if button != 'Ok':
        # Cancelled or closed with the X button.
        return None
    else:
        # The first input element holds the chosen path.
        path = input_values[0]
        return path
def _send_packet ( self , sid , pkt ) :
"""Send a Socket . IO packet to a client .""" | encoded_packet = pkt . encode ( )
if isinstance ( encoded_packet , list ) :
binary = False
for ep in encoded_packet :
self . eio . send ( sid , ep , binary = binary )
binary = True
else :
self . eio . send ( sid , encoded_packet , binary = False ) |
def pmap(func, args, processes=None, callback=lambda *_, **__: None, **kwargs):
    """pmap(func, args, processes=None, callback=do_nothing, **kwargs)

    Parallel equivalent of ``map(func, args)``, with the additional ability
    of providing keyword arguments to func, and a callback function which is
    applied to each element in the returned list. Unlike map, the output is
    a non-lazy list. If *processes* is 1, no process pool is used.

    **Parameters**

    func: function
        The function to map.
    args: iterable
        The arguments to map *func* over.
    processes: int or None, optional
        The number of processes in the pool. If only 1, no pool is used to
        avoid useless overhead. If None, the number is chosen based on your
        system by :class:`multiprocessing.Pool` (default None).
    callback: function, optional
        Function to call on the return value of ``func(arg)`` for each *arg*
        in *args* (default do_nothing).
    kwargs: dict
        Extra keyword arguments are unpacked in each call of *func*.

    **Returns**

    results: list
        A list equivalent to ``[func(x, **kwargs) for x in args]``.
    """
    # BUGFIX: compare with == instead of `is`; identity comparison with an
    # int literal relies on CPython small-int caching and raises a
    # SyntaxWarning on Python 3.8+.
    if processes == 1:
        results = []
        for arg in args:
            result = func(arg, **kwargs)
            results.append(result)
            callback(result)
        return results
    with Pool() if processes is None else Pool(processes) as p:
        # apply_async signature: (func, args, kwds, callback).
        async_results = [p.apply_async(func, (arg,), kwargs, callback) for arg in args]
        return [r.get() for r in async_results]
def extender(path=None, cache=None):
    """A context that temporarily extends sys.path and reverts it after the
    context is complete.

    :param path: path (or paths) to add, forwarded to ``extend``.
    :param cache: cache argument forwarded to ``extend``.
    """
    # Snapshot before mutating so the finally block can restore it exactly.
    old_path = sys.path[:]
    # Bug fix: forward the caller's *cache* instead of a hard-coded None,
    # which silently discarded the argument.
    extend(path, cache=cache)
    try:
        yield
    finally:
        # Always restore the original sys.path, even if the body raised.
        sys.path = old_path
def check_for_completion(self):
    """Check once for completion of the job and return completion status
    and result if it has completed.

    If the job completed in error, an :exc:`~zhmcclient.HTTPError`
    exception is raised.

    Returns:
      : A tuple (status, result) where status is the job status string
        from the "Query Job Status" HMC operation (``"complete"`` for a
        finished job, anything else for one still running), and result is
        the ``job-results`` object for successfully completed jobs, else
        `None`.

    Raises:
      :exc:`~zhmcclient.HTTPError`: The job completed in error, or the job
        status cannot be retrieved, or the job cannot be deleted.
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.ClientAuthError`
      :exc:`~zhmcclient.ServerAuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    job_info = self.session.get(self.uri)
    status = job_info['status']
    if status != 'complete':
        return status, None
    # The job is finished; remove it from the HMC before inspecting results.
    self.session.delete(self.uri)
    status_code = job_info['job-status-code']
    if status_code in (200, 201):
        return status, job_info.get('job-results', None)
    if status_code == 204:  # No content
        return status, None
    # The underlying operation failed; surface it as an HTTPError with as
    # much detail as the job results provide.
    results = job_info.get('job-results', None)
    if not results:
        message = None
    elif 'message' in results:
        message = results['message']
    elif 'error' in results:
        message = results['error']
    else:
        message = None
    raise HTTPError({
        'http-status': status_code,
        'reason': job_info['job-reason-code'],
        'message': message,
        'request-method': self.op_method,
        'request-uri': self.op_uri,
    })
def _fw_rule_update(self, drvr_name, data):
    """Handle a firewall rule update notification.

    Decodes the updated rule from *data* and, if the tenant and rule are
    known, stores the new rule and triggers the device-level update.
    """
    LOG.debug("FW Update %s", data)
    fw_rule = data.get('firewall_rule')
    tenant_id = fw_rule.get('tenant_id')
    rule = self._fw_rule_decode_store(data)
    rule_id = fw_rule.get('id')
    # Reject updates for tenants or rules we have never seen.
    if tenant_id not in self.fwid_attr:
        LOG.error("Incorrect update info for tenant %s", tenant_id)
        return
    tenant_attr = self.fwid_attr[tenant_id]
    if not tenant_attr.is_rule_present(rule_id):
        LOG.error("Incorrect update info for tenant %s", tenant_id)
        return
    tenant_attr.rule_update(rule_id, rule)
    self._check_update_fw(tenant_id, drvr_name)
def pretty_xml(data):
    """Return *data* (UTF-8 encoded XML bytes) re-serialized as a
    pretty-printed, tab-indented UTF-8 byte string."""
    text = data.decode('utf-8')
    document = minidom.parseString(text)
    pretty = document.toprettyxml(indent='\t', encoding='utf-8')
    return pretty
def get_or_create_home_repo(reset=False):
    """Check to make sure we never operate with a non-existing local repo.

    Creates the ontospy home folder structure on first run. With
    ``reset=True`` the user is asked to confirm wiping the existing local
    library first. Returns True; exits the process when the configured
    library location is missing.
    """
    needs_setup = True
    if os.path.exists(ONTOSPY_LOCAL):
        needs_setup = False
        if reset:
            import shutil
            answer = input("Delete the local library and all of its contents? (y/n) ")
            if answer == "y":
                shutil.rmtree(ONTOSPY_LOCAL)
                needs_setup = True
    # (Re)create each piece of the folder hierarchy that is missing.
    if needs_setup or not os.path.exists(ONTOSPY_LOCAL):
        os.mkdir(ONTOSPY_LOCAL)
    if needs_setup or not os.path.exists(ONTOSPY_LOCAL_CACHE):
        os.makedirs(ONTOSPY_LOCAL_CACHE)
    if needs_setup or not os.path.exists(ONTOSPY_LOCAL_VIZ):
        os.mkdir(ONTOSPY_LOCAL_VIZ)
    if needs_setup or not os.path.exists(ONTOSPY_LIBRARY_DEFAULT):
        os.mkdir(ONTOSPY_LIBRARY_DEFAULT)
    # Resolve the library home (from the init file, or the default) and
    # verify it is still reachable; otherwise prompt the user to reset it.
    LIBRARY_HOME = get_home_location()
    if not os.path.exists(LIBRARY_HOME):
        printDebug("Warning: the local library at '%s' has been deleted or is not accessible anymore." % LIBRARY_HOME, "important")
        printDebug("Please reset the local library by running 'ontospy-manager -u <a-valid-path>'", "comment")
        raise SystemExit(1)
    if needs_setup:
        print(Fore.GREEN + "Setup successfull: local library created at <%s>" % LIBRARY_HOME + Style.RESET_ALL)
    return True
def close_file(self):
    """Closes the editor file, prompting to save unsaved modifications.

    :return: Method success.
    :rtype: bool
    """
    # Unmodified documents close immediately.
    if not self.is_modified():
        LOGGER.debug("> Closing '{0}' file.".format(self.__file))
        self.file_closed.emit()
        return True
    choice = message_box.message_box(
        "Warning",
        "Warning",
        "'{0}' document has been modified!\nWould you like to save your changes?".format(self.get_file_short_name()),
        buttons=QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)
    if choice == QMessageBox.Save:
        if self.save_file():
            LOGGER.debug("> Closing '{0}' file.".format(self.__file))
            return True
        # NOTE(review): a failed save falls through without emitting
        # file_closed and returns None (falsy), matching prior behavior.
        return None
    if choice == QMessageBox.Discard:
        LOGGER.debug("> Discarding '{0}' file.".format(self.__file))
        self.file_closed.emit()
        return True
    return False
def _create_update_tracking_related_event(instance):
    """Create a TrackingEvent and TrackedFieldModification for an UPDATE event
    for each related model.

    For every non-many-to-many tracked field of *instance* whose value has
    changed (compared to ``instance._original_fields``), an UPDATE event is
    recorded on each related instance that tracks that field.
    """
    events = {}
    # Create a dict mapping related model field to modified fields
    for field, related_fields in instance._tracked_related_fields.items():
        if not isinstance(instance._meta.get_field(field), ManyToManyField):
            if isinstance(instance._meta.get_field(field), ForeignKey):
                # Compare pk (the "<field>_id" column) to avoid a DB fetch.
                value = getattr(instance, '{0}_id'.format(field))
            else:
                value = getattr(instance, field)
            if instance._original_fields[field] != value:
                for related_field in related_fields:
                    events.setdefault(related_field, []).append(field)
    # Create the events from the events dict
    for related_field, fields in events.items():
        try:
            # related_field is a pair; element [1] is the accessor name on
            # the instance, element [0] the name used in the event label.
            related_instances = getattr(instance, related_field[1])
        except ObjectDoesNotExist:
            continue
        # FIXME: isinstance(related_instances, RelatedManager?)
        if hasattr(related_instances, 'all'):
            # A manager: expand to every related row.
            related_instances = related_instances.all()
        else:
            related_instances = [related_instances]
        for related_instance in related_instances:
            event = _create_event(related_instance, UPDATE)
            for field in fields:
                fieldname = '{0}__{1}'.format(related_field[0], field)
                _create_tracked_field(event, instance, field, fieldname=fieldname)
def is_expired_token(self, client):
    """Return True when the given client object's token has expired.

    This is for testing a client object and does not look up from
    client_id. You can use _get_client_from_cache() to look up a client
    from client_id. A client with no 'expires' entry counts as expired.
    """
    if 'expires' not in client:
        return True
    expiry = dateutil.parser.parse(client['expires'])
    return expiry < datetime.datetime.now()
def utf8(value):
    r"""Check that *value* is valid UTF-8 and return it as text.

    Byte strings are decoded; text (non-bytes) values are returned
    unchanged.  (The original docstring claimed an *encoded* bytestring
    was returned, which contradicted the code.)

    :param value: a ``bytes`` or ``str`` object.
    :return: the decoded text.
    :raises ValueError: if *value* is a byte string that is not valid UTF-8.

    >>> utf8(b'\xe0')  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ValueError: Not UTF-8: ...
    """
    if not isinstance(value, bytes):
        return value
    try:
        return value.decode('utf-8')
    except UnicodeDecodeError:
        # Narrowed from a bare ``except Exception`` which could mask
        # unrelated errors; only a decode failure means "not UTF-8".
        raise ValueError('Not UTF-8: %r' % value)
def fetch_job(config):
    '''Fetch any available work from the OpenSubmit server and
    return an according job object.

    Returns None if no work is available (HTTP 404), if the server is
    incompatible or unknown, or if preparation of the job failed.
    Errors are reported by this function directly.
    '''
    url = "%s/jobs/?Secret=%s&UUID=%s" % (config.get("Server", "url"),
                                          config.get("Server", "secret"),
                                          config.get("Server", "uuid"))
    try:
        # Fetch information from server
        result = urlopen(url)
        headers = result.info()
        logger.debug("Raw job data: " + str(result.headers).replace('\n', ', '))
        if not compatible_api_version(headers["APIVersion"]):
            # No proper reporting possible, so only logging.
            logger.error("Incompatible API version. Please update OpenSubmit.")
            return None
        if headers["Action"] == "get_config":
            # The server does not know us,
            # so it demands registration before hand.
            logger.info("Machine unknown on server, sending registration ...")
            send_hostinfo(config)
            return None
        # Create job object with information we got
        from .job import Job
        job = Job(config)
        job.submitter_name = headers['SubmitterName']
        job.author_names = headers['AuthorNames']
        job.submitter_studyprogram = headers['SubmitterStudyProgram']
        job.course = headers['Course']
        job.assignment = headers['Assignment']
        job.action = headers["Action"]
        job.file_id = headers["SubmissionFileId"]
        job.sub_id = headers["SubmissionId"]
        job.file_name = headers["SubmissionOriginalFilename"]
        job.submitter_student_id = headers["SubmitterStudentId"]
        if "Timeout" in headers:
            job.timeout = int(headers["Timeout"])
        if "PostRunValidation" in headers:
            # Ignore server-given host + port and use the configured one instead
            # This fixes problems with the arbitrary Django LiveServer port choice
            # It would be better to return relative URLs only for this property,
            # but this is a Bernhard-incompatible API change
            from urllib.parse import urlparse
            relative_path = urlparse(headers["PostRunValidation"]).path
            job.validator_url = config.get("Server", "url") + relative_path
        job.working_dir = create_working_dir(config, job.sub_id)
        # Store submission in working directory
        submission_fname = job.working_dir + job.file_name
        with open(submission_fname, 'wb') as target:
            target.write(result.read())
        assert (os.path.exists(submission_fname))
        # Store validator package in working directory
        validator_fname = job.working_dir + 'download.validator'
        fetch(job.validator_url, validator_fname)
        try:
            prepare_working_directory(job, submission_fname, validator_fname)
        except JobException as e:
            # Preparation failed: report to the student/tutor and give up.
            job.send_fail_result(e.info_student, e.info_tutor)
            return None
        logger.debug("Got job: " + str(job))
        return job
    except HTTPError as e:
        # 404 is the server's way of saying "no work available".
        # NOTE(review): any other HTTP error falls through and returns an
        # implicit None without logging — confirm this is intended.
        if e.code == 404:
            logger.debug("Nothing to do.")
            return None
    except URLError as e:
        logger.error("Error while contacting {0}: {1}".format(url, str(e)))
        return None
def apply(self, word, ctx=None):
    """Check *word* for the adjacent-vowels sequence rule.

    The *ctx* argument is accepted but currently ignored.
    """
    letters = AdjacentVowels.uyir_letters
    reason = AdjacentVowels.reason
    return Sequential.in_sequence(word, letters, reason)
def split_for_transport(orig_pkt, transport_proto):
    """Split an IP(v6) packet in the correct location to insert an ESP or AH
    header.

    @param orig_pkt: the packet to split. Must be an IP or IPv6 packet
    @param transport_proto: the IPsec protocol number that will be inserted
                            at the split position.
    @return: a tuple (header, nh, payload) where nh is the protocol number of
             payload.
    """
    # force resolution of default fields to avoid padding errors
    header = orig_pkt.__class__(raw(orig_pkt))
    next_hdr = header.payload
    nh = None
    if header.version == 4:
        # IPv4: split right after the IP header; record the original
        # protocol number and patch in the IPsec one.
        nh = header.proto
        header.proto = transport_proto
        header.remove_payload()
        # Delete length/checksum so scapy recomputes them on rebuild.
        del header.chksum
        del header.len
        return header, nh, next_hdr
    else:
        found_rt_hdr = False
        prev = header
        # Since the RFC 4302 is vague about where the ESP/AH headers should be
        # inserted in IPv6, I chose to follow the linux implementation.
        while isinstance(next_hdr, (IPv6ExtHdrHopByHop, IPv6ExtHdrRouting, IPv6ExtHdrDestOpt)):  # noqa: E501
            if isinstance(next_hdr, IPv6ExtHdrHopByHop):
                pass
            if isinstance(next_hdr, IPv6ExtHdrRouting):
                found_rt_hdr = True
            elif isinstance(next_hdr, IPv6ExtHdrDestOpt) and found_rt_hdr:
                # A Destination Options header that follows a Routing header
                # marks the split point: stop before consuming it.
                break
            prev = next_hdr
            next_hdr = next_hdr.payload
        # Patch the last extension header (or the IPv6 header itself) to
        # point at the IPsec protocol and detach the remaining payload.
        nh = prev.nh
        prev.nh = transport_proto
        prev.remove_payload()
        del header.plen
        return header, nh, next_hdr
def read_config(contents):
    """Reads pylintrc config into native ConfigParser object.

    Args:
        contents (str): The contents of the file containing the INI config.

    Returns:
        ConfigParser.ConfigParser: The parsed configuration.
    """
    file_obj = io.StringIO(contents)
    config = six.moves.configparser.ConfigParser()
    # readfp() was deprecated in Python 3.2 and removed in 3.12; use
    # read_file() when available, falling back for old interpreters.
    reader = getattr(config, 'read_file', None) or config.readfp
    reader(file_obj)
    return config
def _calc_q_h0(n, x, h, nt, n_jobs=1, verbose=0, random_state=None):
    """Calculate q under the null hypothesis of whiteness.

    Builds *n* surrogate datasets by randomly permuting the samples of
    ``x`` and computes the q statistic for each, optionally in parallel.
    """
    rng = check_random_state(random_state)
    run, task = parallel_loop(_calc_q_statistic, n_jobs, verbose)
    surrogates = (rng.permutation(x.T).T for _ in range(n))
    q = run(task(surrogate, h, nt) for surrogate in surrogates)
    return np.array(q)
def timesince(d, now=None, pos=True, flag=False):
    """Return a human-readable description of the delta between *d* and *now*.

    pos means calculate which direction: pos=True -> now - d,
    pos=False -> d - now.
    flag means return value type: True will return (since, message) and
    False returns just the message.

    >>> d = datetime.datetime(2009, 10, 1, 12, 23, 19)
    >>> timesince(d, d, True)
    >>> now = datetime.datetime(2009, 10, 1, 12, 24, 19)
    >>> timesince(d, now, True)
    u'1 minute ago'
    >>> now = datetime.datetime(2009, 10, 1, 12, 24, 30)
    >>> timesince(d, now, True)
    u'1 minute ago'
    >>> now = datetime.datetime(2009, 9, 28, 12, 24, 30)
    >>> timesince(d, now, True)
    u'2 days, 23 hours later'
    >>> now = datetime.datetime(2009, 10, 3, 12, 24, 30)
    >>> timesince(d, now, True)
    u'2 days ago'
    """
    if not d:
        # No date at all: empty message (and a zero delta when flag is set).
        if flag:
            return 0, ''
        else:
            return ''
    # (seconds-per-unit, pluralizing label) pairs, largest unit first.
    chunks = (
        (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
        (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
        (60 * 60 * 24 * 7, lambda n: ungettext('week', 'weeks', n)),
        (60 * 60 * 24, lambda n: ungettext('day', 'days', n)),
        (60 * 60, lambda n: ungettext('hour', 'hours', n)),
        (60, lambda n: ungettext('minute', 'minutes', n)),
    )
    if not now:
        now = date.now()
    else:
        now = date.to_datetime(now)
    d = date.to_datetime(d)
    # Drop sub-second precision from d so the delta is whole seconds.
    delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
    oldsince = since = delta.days * 24 * 60 * 60 + delta.seconds
    suffix = ''
    if pos:
        if since >= 0:
            suffix = ugettext(' ago')
        elif since < 0:
            suffix = ugettext(' later')
            # Work with the magnitude for unit decomposition below.
            since *= -1
    # Find the largest unit with a non-zero count.
    for i, (seconds, name) in enumerate(chunks):
        count = since // seconds
        if count != 0:
            break
    s = ('%(number)d %(type)s') % {'number': count, 'type': name(count)}
    if i + 1 < len(chunks):
        # Now get the second item
        seconds2, name2 = chunks[i + 1]
        count2 = (since - (seconds * count)) // seconds2
        if count2 != 0:
            s += (', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
    # if flag == True, then return two elements (since, message)
    if flag:
        return oldsince, s + suffix
    else:
        return s + suffix
def qdc(self):
    '''Return the qdc tag for the term.'''
    term = self.get_term_display().lower()
    if self.qualifier:
        start_tag = '<{0} q="{1}">'.format(term, self.qualifier)
    else:
        start_tag = '<{0}>'.format(term)
    body = saxutils.escape(self.content)
    return mark_safe('{0}{1}</{2}>'.format(start_tag, body, term))
def tpl_single_send(self, param, must=[APIKEY, MOBILE, TPL_ID, TPL_VALUE]):
    '''Send a single SMS using the specified template (v2 only, deprecated).

    Expected keys in *param*:
        apikey (str, required): unique user key,
            e.g. 9b11127a9701975c734b8aee81ee3526.
        mobile (str, required): recipient phone number. For international
            SMS the number is automatically normalized to E.164 format, so
            the number in later status reports may differ from the one
            passed in (see https://en.wikipedia.org/wiki/E.164).
        tpl_id (long, required): template id, e.g. 1.
        tpl_value (str, required): template variable name/value pairs;
            each name and value must be url-encoded individually, and
            neither may be empty. For a template like
            "[#company#] your verification code is #code#", build:
            tpl_value = urlencode("#code#") + "=" + urlencode("1234") +
                "&" + urlencode("#company#") + "=" + urlencode("Yunpian");
            when sending the raw request yourself, url-encode the whole
            string once more.
        extend (str, optional): extension number; disabled by default,
            contact support to enable, e.g. 001.
        uid (str, optional): caller-defined unique id, up to 256 chars;
            disabled by default, contact support to enable, e.g. 10001.

    Args:
        param: request parameters as described above.
    Returns:
        Result
    '''
    # Validate that all required keys are present before calling out.
    r = self.verify_param(param, must)
    if not r.is_succ():
        return r
    h = CommonResultHandler(lambda rsp: {VERSION_V2: rsp}[self.version()])
    return self.path('tpl_single_send.json').post(param, h, r)
def accept_reject_or_neither(self, url, parent_page=None):
    '''Returns `True` (accepted), `False` (rejected), or `None` (no decision).

    `None` usually means rejected, unless `max_hops_off` comes into play.
    '''
    # Normalize to a canonicalized URL before matching any rules.
    if not isinstance(url, urlcanon.ParsedUrl):
        url = urlcanon.semantic(url)
    if not url.scheme in (b'http', b'https'):
        # XXX doesn't belong here maybe (where? worker ignores unknown
        # schemes?)
        return False
    # Candidate parent URLs for rules that take the link source into
    # account: the parent page itself and, if present, its redirect target.
    try_parent_urls = []
    if parent_page:
        try_parent_urls.append(urlcanon.semantic(parent_page.url))
        if parent_page.redirect_url:
            try_parent_urls.append(urlcanon.semantic(parent_page.redirect_url))
    # enforce max_hops
    if (parent_page and "max_hops" in self.scope
            and parent_page.hops_from_seed >= self.scope["max_hops"]):
        return False
    # enforce reject rules (these take precedence over accepts)
    if "blocks" in self.scope:
        for block_rule in self.scope["blocks"]:
            rule = urlcanon.MatchRule(**block_rule)
            if try_parent_urls:
                for parent_url in try_parent_urls:
                    if rule.applies(url, parent_url):
                        return False
            else:
                if rule.applies(url):
                    return False
    # honor accept rules
    for accept_rule in self.scope["accepts"]:
        rule = urlcanon.MatchRule(**accept_rule)
        if try_parent_urls:
            for parent_url in try_parent_urls:
                if rule.applies(url, parent_url):
                    return True
        else:
            if rule.applies(url):
                return True
    # no decision if we reach here
    return None
def get_transactions(self, include_investment=False):
    """Returns the transaction data as a Pandas DataFrame.

    Column names are lower-cased with spaces replaced by underscores, the
    'Date' column is parsed to datetimes, and the literal category
    'uncategorized' is normalized to NaN.
    """
    assert_pd()
    s = StringIO(self.get_transactions_csv(include_investment=include_investment))
    s.seek(0)
    df = pd.read_csv(s, parse_dates=['Date'])
    df.columns = [c.lower().replace(' ', '_') for c in df.columns]
    # Fix: the ``pd.np`` alias was removed in pandas 1.0; a plain float NaN
    # is equivalent.  Also assign via [] rather than attribute access, which
    # is the supported way to set a column.
    df['category'] = df['category'].str.lower().replace('uncategorized', float('nan'))
    return df
def config_add(self, key, value, **kwargs):
    """Add a value to a key.

    Returns a list of warnings Conda may have emitted.
    """
    arguments = ['config', '--add', key, value]
    arguments += self._setup_config_from_kwargs(kwargs)
    return self._call_and_parse(
        arguments,
        abspath=kwargs.get('abspath', True),
        callback=lambda stdout, stderr: stdout.get('warnings', []),
    )
def linear_deform(template, displacement, out=None):
    """Linearized deformation of a template with a displacement field.

    The function maps a given template ``I`` and a given displacement
    field ``v`` to the new function ``x --> I(x + v(x))``.

    Parameters
    ----------
    template : `DiscreteLpElement`
        Template to be deformed by a displacement field.
    displacement : element of power space of ``template.space``
        Vector field (displacement field) used to deform the template.
    out : `numpy.ndarray`, optional
        Array to which the function values of the deformed template
        are written. It must have the same shape as ``template`` and
        a data type compatible with ``template.dtype``.

    Returns
    -------
    deformed_template : `numpy.ndarray`
        Function values of the deformed template. If ``out`` was given,
        the returned object is a reference to it.

    Examples
    --------
    Create a simple 1D template to initialize the operator and
    apply it to a displacement field. Where the displacement is zero,
    the output value is the same as the input value.
    In the 4-th point, the value is taken from 0.2 (one cell) to the
    left, i.e. 1.0.

    >>> space = odl.uniform_discr(0, 1, 5)
    >>> disp_field_space = space.tangent_bundle
    >>> template = space.element([0, 0, 1, 0, 0])
    >>> displacement_field = disp_field_space.element([[0, 0, 0, -0.2, 0]])
    >>> linear_deform(template, displacement_field)
    array([ 0.,  0.,  1.,  1.,  0.])

    The result depends on the chosen interpolation. With 'linear'
    interpolation and an offset equal to half the distance between two
    points, 0.1, one gets the mean of the values.

    >>> space = odl.uniform_discr(0, 1, 5, interp='linear')
    >>> disp_field_space = space.tangent_bundle
    >>> template = space.element([0, 0, 1, 0, 0])
    >>> displacement_field = disp_field_space.element([[0, 0, 0, -0.1, 0]])
    >>> linear_deform(template, displacement_field)
    array([ 0.,  0.,  1.,  0.5,  0.])
    """
    image_pts = template.space.points()
    # Shift every sample point by its displacement, one axis at a time.
    for i, vi in enumerate(displacement):
        image_pts[:, i] += vi.asarray().ravel()
    # Evaluate the template at the displaced points, i.e. I(x + v(x)).
    values = template.interpolation(image_pts.T, out=out, bounds_check=False)
    return values.reshape(template.space.shape)
def state_reachable(subsystem):
    """Raise ``StateUnreachableError`` if the subsystem's state cannot be
    reached according to the network's TPM; otherwise return None.

    If there is a row ``r`` in the TPM such that all entries of
    ``r - state`` are strictly between -1 and 1, then the given state has
    a nonzero probability of being reached from some state.
    """
    # Restrict the conditioned TPM to the nodes actually in the subsystem.
    relevant_tpm = subsystem.tpm[..., subsystem.node_indices]
    # Per-row difference from the subsystem state; a row whose entries all
    # lie in (-1, 1) witnesses reachability.
    delta = relevant_tpm - np.array(subsystem.proper_state)
    within_open_interval = np.logical_and(delta > -1, delta < 1)
    if not np.any(within_open_interval.all(-1)):
        raise exceptions.StateUnreachableError(subsystem.state)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.