signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def cli(env, limit, closed=False, get_all=False):
    """Invoices and all that mess"""
    manager = AccountManager(env.client)
    invoices = manager.get_invoices(limit, closed, get_all)
    table = formatting.Table(["Id", "Created", "Type", "Status", "Starting Balance", "Ending Balance", "Invoice Amount", "Items"])
    # Left-align the monetary / count columns.
    for column in ('Starting Balance', 'Ending Balance', 'Invoice Amount', 'Items'):
        table.align[column] = 'l'
    # A single invoice comes back as a bare dict; normalize to a list.
    if isinstance(invoices, dict):
        invoices = [invoices]
    for invoice in invoices:
        table.add_row([
            invoice.get('id'),
            utils.clean_time(invoice.get('createDate'), out_format="%Y-%m-%d"),
            invoice.get('typeCode'),
            invoice.get('statusCode'),
            invoice.get('startingBalance'),
            invoice.get('endingBalance'),
            invoice.get('invoiceTotalAmount'),
            invoice.get('itemCount'),
        ])
    env.fout(table)
def add_node_element(self, node):
    """Add a (syntax category) <node> to the document graph.

    Parameters
    ----------
    node : etree.Element
        etree representation of a <node> element. A <node> describes an
        element of a syntax tree. The root <node> of a sentence has no
        'parent' attribute, while non-root nodes do.

    Example: <node xml:id="s1_505" cat="SIMPX" func="--">
    """
    node_id = self.get_element_id(node)
    is_tree_root = 'parent' not in node.attrib
    if is_tree_root:
        # Root of the sentence's syntax tree: it may be embedded in an
        # <edu> or <edu-range>, but we attach it directly to the
        # <sentence> element (there can be children outside the tree,
        # e.g. <word> elements representing quotation marks).
        parent_id = self.get_sentence_id(node)
    else:
        parent_id = self.get_parent_id(node)
    self.add_node(
        node_id,
        layers={self.ns, self.ns + ':syntax'},
        attr_dict=self.element_attribs_to_dict(node),
        label=node.attrib['cat'])
    self.add_edge(parent_id, node_id, edge_type=dg.EdgeTypes.dominance_relation)
def randpos(self):
    '''random initial position'''
    # Start at the configured home location, then move a random distance
    # (up to region_width) in a random direction.
    self.setpos(gen_settings.home_lat, gen_settings.home_lon)
    heading = random.uniform(0, 360)
    distance = random.uniform(0, gen_settings.region_width)
    self.move(heading, distance)
def barycentric_coords(coords, simplex):
    """Converts a list of coordinates to barycentric coordinates, given a
    simplex with d+1 points. Only works for d >= 2.

    Args:
        coords: list of n coords to transform, shape should be (n, d)
        simplex: list of coordinates that form the simplex, shape should be
            (d+1, d)

    Returns:
        a LIST of barycentric coordinates (even if the original input was 1d)
    """
    coords = np.atleast_2d(coords)
    # Columns of `edges` are the simplex edge vectors relative to the last vertex.
    edges = np.transpose(simplex[:-1, :]) - np.transpose(simplex[-1, :])[:, None]
    shifted = np.transpose(coords - simplex[-1])
    # Solve for the first d barycentric coordinates.
    partial = np.transpose(np.linalg.solve(edges, shifted))
    # The last coordinate is fixed by the constraint that all of them sum to 1.
    remainder = 1 - np.sum(partial, axis=-1)[:, None]
    return np.append(partial, remainder, axis=-1)
def users(self):
    """:class:`~zhmcclient.UserManager`: Access to the :term:`Users <User>` in
    this Console."""
    # Lazily create the manager on first access and memoize it.
    if self._users:
        return self._users
    self._users = UserManager(self)
    return self._users
async def create_vm(self, preset_name, image, flavor, security_groups=None, userdata=None, key_name=None, availability_zone=None, subnets=None):
    '''Create VM

    :arg preset_name: string
    :arg image: string image id
    :arg flavor: string flavor id
    :arg security_groups: list (optional; None means no extra groups)
    :arg userdata: string
    :arg key_name: string (currently accepted but not forwarded -- TODO confirm)
    :arg availability_zone: string
    :arg subnets: list
    :returns list Vm objects
    @TODO
    1. returns image id
    '''
    image_id = self.images_map.inv.get(image)
    flavor_id = self.flavors_map.inv.get(flavor)
    # Bug fix: with the default security_groups=None the comprehension
    # raised TypeError; treat None as "no security groups".
    spec = {
        "name": preset_name,
        "flavorRef": flavor_id,
        "imageRef": image_id,
        "security_groups": [{"name": group} for group in (security_groups or [])],
        "user_data": userdata,
    }
    if availability_zone is not None:
        spec.update({"availability_zone": availability_zone})
    if subnets is not None:
        spec.update({"networks": [{'uuid': subnet['net-id']} for subnet in subnets]})
    if userdata is not None:
        # Nova expects user_data to be base64-encoded text.
        userdata = base64.b64encode(userdata.encode('utf-8')).decode('utf-8')
        spec.update({"user_data": userdata})
    result = await self.nova.servers.create(server=spec)
    return result["server"]
def contains_vasp_input(dir_name):
    """Checks if a directory contains valid VASP input.

    Args:
        dir_name: Directory name to check.

    Returns:
        True if directory contains all four VASP input files (INCAR, POSCAR,
        KPOINTS and POTCAR), either as-is or with a ".orig" suffix.
    """
    required = ("INCAR", "POSCAR", "POTCAR", "KPOINTS")
    return all(
        os.path.exists(os.path.join(dir_name, f))
        or os.path.exists(os.path.join(dir_name, f + ".orig"))
        for f in required
    )
def setEditor(self, name):
    """Sets the editor class for this Stimulus"""
    # Resolve the editor class by name and keep the stim type in sync.
    self.editor = get_stimulus_editor(name)
    self._stim.setStimType(name)
def get_suffixes(arr):
    """Returns all possible suffixes of an array (lazy evaluated)

    Args:
        arr: input array

    Returns:
        Generator of all possible suffixes (as tuples), longest first.
    """
    arr = tuple(arr)
    # Bug fix: a stray `return [arr]` made the generator below unreachable,
    # so only the full array was ever returned.
    return (arr[i:] for i in range(len(arr)))
def replace_filehandler(logname, new_file, level=None, frmt=None):
    """Remove a Logger's previous FileHandler (if any) and add a new one.

    Parameters:
      logname
        The name of the log to reconfigure, 'openaccess_epub' for example
      new_file
        The file location for the new FileHandler
      level
        Optional. Level of FileHandler logging; if not used then the new
        FileHandler will have the same level as the old. Pass in name
        strings, 'INFO' for example
      frmt
        Optional string format of Formatter for the FileHandler; if not used
        then the new FileHandler will inherit the Formatter of the old. Pass
        in format strings, '%(message)s' for example

    It is best practice to use the optional level and frmt arguments to
    account for the case where a previous FileHandler does not exist. If they
    are not used and no previous FileHandler is found, the level falls back
    to logging.DEBUG and frmt to STANDARD_FORMAT as a matter of safety.
    """
    # Call up the Logger to get reconfigured
    log = logging.getLogger(logname)
    # Set up defaults and whether explicit for level
    if level is not None:
        level = get_level(level)
        explicit_level = True
    else:
        level = logging.DEBUG
        explicit_level = False
    # Set up defaults and whether explicit for frmt
    if frmt is not None:
        frmt = logging.Formatter(frmt)
        explicit_frmt = True
    else:
        frmt = logging.Formatter(STANDARD_FORMAT)
        explicit_frmt = False
    # Look for a FileHandler to replace; inherit its level/frmt unless the
    # caller supplied them explicitly.
    old_filehandler = None
    for handler in log.handlers:
        # Exact type check (not isinstance) so FileHandler subclasses such
        # as RotatingFileHandler are deliberately left untouched.
        if type(handler) == logging.FileHandler:
            old_filehandler = handler
            if not explicit_level:
                level = handler.level
            if not explicit_frmt:
                frmt = handler.formatter
            break
    # Set up the new FileHandler
    new_filehandler = logging.FileHandler(new_file)
    new_filehandler.setLevel(level)
    new_filehandler.setFormatter(frmt)
    # Add the new FileHandler first so the logger is never left without one.
    log.addHandler(new_filehandler)
    # Remove the old FileHandler if we found one
    if old_filehandler is not None:
        old_filehandler.close()
        log.removeHandler(old_filehandler)
def add_file(self, file_grp, content=None, **kwargs):
    """Add an output file. Creates an :class:`OcrdFile` to pass around and adds that to the
    OcrdMets OUTPUT section.

    Args:
        file_grp: fileGrp to add the file to.
        content: optional payload (str or bytes) to write to 'local_filename'.
        **kwargs: forwarded to ``self.mets.add_file`` (e.g. 'local_filename',
            'url'). 'local_filename' is required when 'content' is given.

    Raises:
        Exception: if 'content' is set without a 'local_filename'.
    """
    log.debug('outputfile file_grp=%s local_filename=%s content=%s', file_grp, kwargs.get('local_filename'), content is not None)
    if content is not None and 'local_filename' not in kwargs:
        raise Exception("'content' was set but no 'local_filename'")
    # All paths are interpreted relative to the workspace directory;
    # the original cwd is restored in the finally block.
    oldpwd = os.getcwd()
    try:
        os.chdir(self.directory)
        if 'local_filename' in kwargs:
            # Make sure the target file's parent directory exists.
            local_filename_dir = kwargs['local_filename'].rsplit('/', 1)[0]
            if not os.path.isdir(local_filename_dir):
                os.makedirs(local_filename_dir)
            # Default the METS url to the local path when not given.
            if 'url' not in kwargs:
                kwargs['url'] = kwargs['local_filename']
        # print(kwargs)
        ret = self.mets.add_file(file_grp, **kwargs)
        # Write the payload after registering the file in the METS.
        if content is not None:
            with open(kwargs['local_filename'], 'wb') as f:
                if isinstance(content, str):
                    content = bytes(content, 'utf-8')
                f.write(content)
    finally:
        os.chdir(oldpwd)
    return ret
def scan(self, t, dt=None, aggfunc=None):
    """Returns the spectrum from a specific time.

    Parameters
    ----------
    t : float
        Time to sample at (nearest index is used).
    dt : float, optional
        If given, aggregate all spectra in [t, t + dt].
    aggfunc : callable, optional
        Aggregation function for the window; defaults to a sum over rows.
    """
    start = (np.abs(self.index - t)).argmin()
    if dt is None:
        # Single spectrum at the nearest time point.
        mz_abn = self.values[start, :].copy()
    else:
        # Aggregate every spectrum in the requested window.
        stop = (np.abs(self.index - t - dt)).argmin()
        lo, hi = sorted((start, stop))
        window = self.values[lo:hi + 1, :].copy()
        mz_abn = window.sum(axis=0) if aggfunc is None else aggfunc(window)
    if isinstance(mz_abn, scipy.sparse.spmatrix):
        mz_abn = mz_abn.toarray()[0]
    return Scan(self.columns, mz_abn)
def update_layers_geonode_wm(service, num_layers=None):
    """Update layers for a WorldMap instance.

    Sample endpoint: http://localhost:8000/
    Pages through the WorldMap layer API, creating/updating Layer and
    LayerWM records, then marks layers reported as deleted.
    """
    wm_api_url = urlparse.urljoin(service.url, 'worldmap/api/2.8/layer/?format=json')
    if num_layers:
        total = num_layers
    else:
        # Ask the API for the total layer count so we can page through all of them.
        response = requests.get(wm_api_url)
        data = json.loads(response.content)
        total = data['meta']['total_count']
    # set srs
    # WorldMap supports only 4326, 900913, 3857
    for crs_code in ['EPSG:4326', 'EPSG:900913', 'EPSG:3857']:
        srs, created = SpatialReferenceSystem.objects.get_or_create(code=crs_code)
        service.srs.add(srs)
    service.update_validity()
    layer_n = 0
    limit = 10
    # Page through the layer list, newest first (order_by=-date).
    for i in range(0, total, limit):
        try:
            url = ('%s&order_by=-date&offset=%s&limit=%s' % (wm_api_url, i, limit))
            LOGGER.debug('Fetching %s' % url)
            response = requests.get(url)
            data = json.loads(response.content)
            for row in data['objects']:
                typename = row['typename']
                # name = typename.split(':')[1]
                name = typename
                uuid = row['uuid']
                LOGGER.debug('Updating layer %s' % name)
                title = row['title']
                abstract = row['abstract']
                bbox = row['bbox']
                page_url = urlparse.urljoin(service.url, 'data/%s' % name)
                category = ''
                if 'topic_category' in row:
                    category = row['topic_category']
                username = ''
                if 'owner_username' in row:
                    username = row['owner_username']
                temporal_extent_start = ''
                if 'temporal_extent_start' in row:
                    temporal_extent_start = row['temporal_extent_start']
                temporal_extent_end = ''
                if 'temporal_extent_end' in row:
                    temporal_extent_end = row['temporal_extent_end']
                # we use the geoserver virtual layer getcapabilities for wm endpoint
                # TODO we should port make geoserver port configurable some way...
                # endpoint = urlparse.urljoin(service.url, 'geoserver/geonode/%s/wms?' % name)
                endpoint = urlparse.urljoin(service.url, 'geoserver/wms?')
                endpoint = endpoint.replace('8000', '8080')
                # Python 2 print statement (this module is Python 2 code).
                print endpoint
                # NOTE(review): if 'is_public' is absent from the row,
                # `is_public` stays unbound and the assignment below raises
                # NameError (swallowed by the outer except) -- confirm intent.
                if 'is_public' in row:
                    is_public = row['is_public']
                layer, created = Layer.objects.get_or_create(service=service, catalog=service.catalog, name=name, uuid=uuid)
                if created:
                    LOGGER.debug('Added a new layer in registry: %s, %s' % (name, uuid))
                if layer.active:
                    links = [['Hypermap:WorldMap', endpoint]]
                    # update fields
                    layer.type = 'Hypermap:WorldMap'
                    layer.title = title
                    layer.abstract = abstract
                    layer.is_public = is_public
                    layer.url = endpoint
                    layer.page_url = page_url
                    # category and owner username
                    layer_wm, created = LayerWM.objects.get_or_create(layer=layer)
                    layer_wm.category = category
                    layer_wm.username = username
                    layer_wm.temporal_extent_start = temporal_extent_start
                    layer_wm.temporal_extent_end = temporal_extent_end
                    layer_wm.save()
                    # bbox [x0, y0, x1, y1]
                    # check if it is a valid bbox (TODO improve this check)
                    # bbox = bbox.replace('-inf', 'None')
                    # bbox = bbox.replace('inf', 'None')
                    # if bbox.count(',') == 3:
                    #     bbox_list = bbox[1:-1].split(',')
                    # else:
                    #     bbox_list = [None, None, None, None]
                    x0 = format_float(bbox[0])
                    x1 = format_float(bbox[1])
                    y0 = format_float(bbox[2])
                    y1 = format_float(bbox[3])
                    # In many cases for some reason to be fixed GeoServer has x coordinates flipped in WM.
                    x0, x1 = flip_coordinates(x0, x1)
                    y0, y1 = flip_coordinates(y0, y1)
                    layer.bbox_x0 = x0
                    layer.bbox_y0 = y0
                    layer.bbox_x1 = x1
                    layer.bbox_y1 = y1
                    # keywords
                    keywords = []
                    for keyword in row['keywords']:
                        keywords.append(keyword['name'])
                    layer.keywords.all().delete()
                    for keyword in keywords:
                        layer.keywords.add(keyword)
                    layer.wkt_geometry = bbox2wktpolygon([x0, y0, x1, y1])
                    layer.xml = create_metadata_record(identifier=str(layer.uuid), source=endpoint, links=links, format='Hypermap:WorldMap', type=layer.csw_type, relation=service.id_string, title=layer.title, alternative=name, abstract=layer.abstract, keywords=keywords, wkt_geometry=layer.wkt_geometry)
                    layer.anytext = gen_anytext(layer.title, layer.abstract, keywords)
                    layer.save()
                    # dates
                    add_mined_dates(layer)
                    add_metadata_dates_to_layer([layer_wm.temporal_extent_start, layer_wm.temporal_extent_end], layer)
                layer_n = layer_n + 1
                # exits if DEBUG_SERVICES
                LOGGER.debug("Updated layer n. %s/%s" % (layer_n, total))
                if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER:
                    return
        except Exception as err:
            LOGGER.error('Error! %s' % err)
    # update deleted layers. For now we check the whole set of deleted layers
    # we should optimize it if the list will grow
    # TODO implement the actions application
    url = urlparse.urljoin(service.url, 'worldmap/api/2.8/actionlayerdelete/?format=json')
    LOGGER.debug('Fetching %s for detecting deleted layers' % url)
    try:
        response = requests.get(url)
        data = json.loads(response.content)
        for deleted_layer in data['objects']:
            if Layer.objects.filter(uuid=deleted_layer['args']).count() > 0:
                layer = Layer.objects.get(uuid=deleted_layer['args'])
                layer.was_deleted = True
                layer.save()
                LOGGER.debug('Layer %s marked as deleted' % layer.uuid)
    except Exception as err:
        LOGGER.error('Error! %s' % err)
def each_step(graph):
    """Returns an iterator that yields each step and its direct
    dependencies."""
    # Walk the topological order from the leaves back to the roots.
    for step in reversed(graph.topological_sort()):
        yield (step, graph.downstream(step.name))
def charmap(prefixed_name):
    """Return the character map entry used for a given font.

    Returns
    -------
    return_value : dict
        The unicode character mapped to the given 'prefix.name' icon name.
    """
    # 'prefixed_name' must be exactly '<prefix>.<name>'.
    parts = prefixed_name.split('.')
    prefix, name = parts
    return _instance().charmap[prefix][name]
def invalid_type_error(method_name, arg_name, got_value, expected_type, version='0.13.0'):
    """Raise a CompilationException when an adapter method available to macros
    has changed."""
    template = ("As of {version}, 'adapter.{method_name}' expects argument "
                "'{arg_name}' to be of type '{expected_type}', instead got "
                "{got_value} ({got_type})")
    raise_compiler_error(template.format(
        version=version,
        method_name=method_name,
        arg_name=arg_name,
        expected_type=expected_type,
        got_value=got_value,
        got_type=type(got_value)))
def check_password_readable(self, section, fields):
    """Check if there is a readable configuration file and print a warning."""
    # Which configuration file contributed which option is unknown, so to
    # avoid false positives we only warn when exactly one file was read.
    if not fields or len(self.read_ok) != 1:
        return
    fn = self.read_ok[0]
    if not fileutil.is_accessable_by_others(fn):
        return
    log.warn(LOG_CHECK, "The configuration file %s contains password information (in section [%s] and options %s) and the file is readable by others. Please make the file only readable by you.", fn, section, fields)
    if os.name == 'posix':
        log.warn(LOG_CHECK, _("For example execute 'chmod go-rw %s'.") % fn)
    elif os.name == 'nt':
        log.warn(LOG_CHECK, _("See http://support.microsoft.com/kb/308419 for more info on setting file permissions."))
def migrate_abci_chain(self):
    """Generate and record a new ABCI chain ID. New blocks are not
    accepted until we receive an InitChain ABCI request with
    the matching chain ID and validator set.

    Chain ID is generated based on the current chain and height:
    `chain-X` => `chain-X-migrated-at-height-5`.
    `chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`.

    If there is no known chain (we are at genesis), the function returns.
    """
    latest_chain = self.get_latest_abci_chain()
    if latest_chain is None:
        # At genesis there is nothing to migrate.
        return
    block = self.get_latest_block()
    suffix = '-migrated-at-height-'
    # Strip any previous migration suffix before appending the new height.
    base_id = latest_chain['chain_id'].split(suffix)[0]
    new_chain_id = base_id + suffix + str(block['height'])
    self.store_abci_chain(block['height'] + 1, new_chain_id, False)
def ex_varassign(name, expr):
    """Assign an expression into a single variable. The expression may
    either be an `ast.expr` object or a value to be used as a literal."""
    # Wrap plain values as literal AST nodes.
    value = expr if isinstance(expr, ast.expr) else ex_literal(expr)
    return ast.Assign([ex_lvalue(name)], value)
def _set_contn_src_dst(self, v, load=False):
    """Setter method for contn_src_dst, mapped from YANG variable
    /overlay_class_map/cmap_seq/match/contn_src_dst (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_contn_src_dst is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_contn_src_dst() directly.
    """
    # NOTE: 'load' is part of the generated setter signature; unused here.
    # Unwrap values that carry their underlying YANG type before validating.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated container class; raises on
        # incompatible input.
        t = YANGDynClass(v, base=contn_src_dst.contn_src_dst, is_container='container', presence=False, yang_name="contn-src-dst", rest_name="", parent=self, choice=(u'overlay-match-ip', u'case-overlay-ip-src-dest'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-flatten-container': None}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """contn_src_dst must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=contn_src_dst.contn_src_dst, is_container='container', presence=False, yang_name="contn-src-dst", rest_name="", parent=self, choice=(u'overlay-match-ip', u'case-overlay-ip-src-dest'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-flatten-container': None}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='container', is_config=True)""", })
    self.__contn_src_dst = t
    # Notify the parent object (if it supports change notification).
    if hasattr(self, '_set'):
        self._set()
def _get_resources(self, args, params):
    """Finds the list of resources from the keyword parameters and pops
    them out of the params dictionary."""
    enabled = []
    disabled = []
    for name in ['holoviews'] + list(Store.renderers.keys()):
        # Positional mention enables the resource outright.
        if name in args:
            enabled.append(name)
        # Keyword mention enables (True) or disables (False) it; the entry
        # is popped from params either way.
        if name in params:
            setting = params.pop(name)
            if setting is True and name != 'matplotlib' and name not in enabled:
                enabled.append(name)
            if setting is False:
                disabled.append(name)
    unknown = set(args) - set(enabled)
    if unknown:
        display(HTML('<b>Warning:</b> Unrecognized resources %s' % ', '.join(unknown)))
    enabled = [name for name in enabled if name not in disabled]
    # 'holoviews' is always on unless explicitly disabled.
    if 'holoviews' not in disabled and 'holoviews' not in enabled:
        enabled = ['holoviews'] + enabled
    return enabled
def load_metadata_csv_single_user(csv_in, header, tags_idx):
    """Return the metadata as requested for a single user.

    :param csv_in: This field is the csv file to return metadata from.
    :param header: This field contains the headers in the csv file
    :param tags_idx: This field contains the index of the tags in the csv
        file.
    :raises ValueError: if a row has an empty filename or a column count
        that does not match the header.
    """
    metadata = {}
    n_headers = len(header)
    # Row numbers are reported 1-based counting the header row, hence start=2.
    for index, row in enumerate(csv_in, 2):
        if row[0] == "":
            raise ValueError('Error: In row number ' + str(index) + ':' + ' "filename" must not be empty.')
        # A terminator row has filename 'None' and every other column 'NA'.
        # Bug fix: the old check built a list comprehension, which is always
        # truthy, so ANY row named 'None' ended the scan prematurely.
        if row[0] == 'None' and all(x == 'NA' for x in row[1:]):
            break
        if len(row) != n_headers:
            raise ValueError('Error: In row number ' + str(index) + ':' + ' Number of columns (' + str(len(row)) + ') doesnt match Number of headings (' + str(n_headers) + ')')
        metadata[row[0]] = {header[i]: row[i] for i in range(1, len(header)) if i != tags_idx}
        metadata[row[0]]['tags'] = [t.strip() for t in row[tags_idx].split(',') if t.strip()]
    return metadata
def _get_pasteas_data ( self , dim , obj ) :
"""Returns list of lists of obj than has dimensionality dim
Parameters
dim : Integer
\t Dimensionality of obj
obj : Object
\t Iterable object of dimensionality dim""" | if dim == 0 :
return [ [ repr ( obj ) ] ]
elif dim == 1 :
return [ [ repr ( o ) ] for o in obj ]
elif dim == 2 :
return [ map ( repr , o ) for o in obj ] |
def get_desc2nts(self, **kws_usr):
    """Return grouped, sorted namedtuples in either format: flat, sections."""
    # desc2nts contains: (sections hdrgo_prt sortobj) or (flat hdrgo_prt sortobj)
    # Forward only the namedtuple-related keyword args (keys_nts:
    # hdrgo_prt section_prt top_n use_sections).
    filtered = {key: val for key, val in kws_usr.items() if key in self.keys_nts}
    return self.get_desc2nts_fnc(**filtered)
def prepare_encoder(inputs, hparams, attention_type="local_1d"):
    """Prepare encoder for images."""
    x = prepare_image(inputs, hparams, name="enc_channels")
    # Add position signals.
    x = add_pos_signals(x, hparams, "enc_pos")
    shape = common_layers.shape_list(x)
    if attention_type == "local_1d":
        # Flatten the spatial dimensions into a single sequence axis.
        x = tf.reshape(x, [shape[0], shape[1] * shape[2], hparams.hidden_size])
        x.set_shape([None, None, hparams.hidden_size])
    elif attention_type == "local_2d":
        x.set_shape([None, None, None, hparams.hidden_size])
    return x
def _ge_from_lt ( self , other ) :
"""Return a > = b . Computed by @ total _ ordering from ( not a < b ) .""" | op_result = self . __lt__ ( other )
if op_result is NotImplemented :
return NotImplemented
return not op_result |
def modify_dict(data, key, value, create_if_missing=False):
    """Change (or add) a json key/value pair.

    Args:
        data (dict): The original data. This will not be modified.
        key (list): A list of keys and subkeys specifying the key to change
            (list can be one).
        value (str): The value to change for the above key.
        create_if_missing (bool): Set to true to create the key if the last
            key in the list is not found; otherwise a KeyError is raised.

    Returns:
        (dict): the final modified dict
    """
    result = copy.deepcopy(data)
    path = copy.deepcopy(key)
    node = result
    prev_name = "Root"
    # Descend until `node` is the dict that should contain the final key.
    while len(path) > 1:
        head = path[0]
        if head not in node:
            raise KeyError("ModifyJsonStep Key Couldn't find Subkey {} in {}.".format(head, prev_name))
        if len(path) > 2 and not isinstance(node[head], dict):
            raise ValueError("ModifyJsonStep The Value of {} is a {}, not a dict".format(head, type(node[head])))
        prev_name = head
        node = node[head]
        path.pop(0)
    if path[0] not in node and not create_if_missing:
        raise KeyError("ModifyJsonStep Key Couldn't find Subkey {} in {}.".format(path[0], prev_name))
    node[path[0]] = value
    return result
def validate_and_copy_one_submission(self, submission_path):
    """Validates one submission and copies it to target directory.

    Args:
      submission_path: path in Google Cloud Storage of the submission file
    """
    # Start from clean scratch directories for download and validation.
    if os.path.exists(self.download_dir):
        shutil.rmtree(self.download_dir)
    os.makedirs(self.download_dir)
    if os.path.exists(self.validate_dir):
        shutil.rmtree(self.validate_dir)
    os.makedirs(self.validate_dir)
    logging.info('\n' + ('#' * 80) + '\n# Processing submission: %s\n' + '#' * 80, submission_path)
    local_path = self.copy_submission_locally(submission_path)
    metadata = self.base_validator.validate_submission(local_path)
    # Empty/None metadata means validation failed.
    if not metadata:
        logging.error('Submission "%s" is INVALID', submission_path)
        self.stats.add_failure()
        return
    submission_type = metadata['type']
    container_name = metadata['container_gpu']
    logging.info('Submission "%s" is VALID', submission_path)
    self.list_of_containers.add(container_name)
    self.stats.add_success(submission_type)
    if self.do_copy:
        # Assign a sequential zero-padded id to the copied submission and
        # remember where it came from.
        submission_id = '{0:04}'.format(self.cur_submission_idx)
        self.cur_submission_idx += 1
        self.copy_submission_to_destination(submission_path, TYPE_TO_DIR[submission_type], submission_id)
        self.id_to_path_mapping[submission_id] = submission_path
def get_subject(self, identifier):
    """Build a Subject XML block for a SAML 1.1
    AuthenticationStatement or AttributeStatement."""
    subject = etree.Element('Subject')
    name_id = etree.SubElement(subject, 'NameIdentifier')
    name_id.text = identifier
    confirmation = etree.SubElement(subject, 'SubjectConfirmation')
    etree.SubElement(confirmation, 'ConfirmationMethod').text = self.confirmation_method
    return subject
def has_colors(fp):
    """Test if given file is an ANSI color enabled tty."""
    # is_tty() ensures that we do not colorize redirected streams,
    # as this is almost never what we want.
    if not is_tty(fp):
        return False
    if os.name == 'nt':
        return True
    if has_curses:
        import curses
        try:
            curses.setupterm(os.environ.get("TERM"), fp.fileno())
            # More than 8 colors are good enough.
            return curses.tigetnum("colors") >= 8
        except curses.error:
            return False
    return False
def display_annotations(self):
    """Mark all the bookmarks/events, on top of first plot."""
    # Remove previously drawn markers and labels from the scene.
    for item in self.idx_annot:
        self.scene.removeItem(item)
    self.idx_annot = []
    for item in self.idx_annot_labels:
        self.scene.removeItem(item)
    self.idx_annot_labels = []
    self.highlight = None
    # Currently visible time window.
    window_start = self.parent.value('window_start')
    window_length = self.parent.value('window_length')
    window_end = window_start + window_length
    y_distance = self.parent.value('y_distance')
    bookmarks = []
    events = []
    if self.parent.notes.annot is not None:
        if self.parent.value('annot_show'):
            bookmarks = self.parent.notes.annot.get_bookmarks()
            events = self.parent.notes.get_selected_events((window_start, window_end))
    annotations = bookmarks + events
    for annot in annotations:
        # Only draw annotations overlapping the visible window; clip marker
        # extent to the window edges.
        if window_start <= annot['end'] and window_end >= annot['start']:
            mrk_start = max((annot['start'], window_start))
            mrk_end = min((annot['end'], window_end))
            # Events take precedence over bookmarks for the marker color.
            if annot in bookmarks:
                color = QColor(self.parent.value('annot_bookmark_color'))
            if annot in events:
                color = convert_name_to_color(annot['name'])
            # Channel-less annotations (chan == ['']) or cross-channel mode:
            # draw one full-height marker plus a rotated name label.
            if logical_or(annot['chan'] == [''], self.action['cross_chan_mrk'].isChecked()):
                h_annot = len(self.idx_label) * y_distance
                item = TextItem_with_BG(color.darker(200))
                item.setText(annot['name'])
                item.setPos(annot['start'], len(self.idx_label) * y_distance)
                item.setFlag(QGraphicsItem.ItemIgnoresTransformations)
                item.setRotation(-90)
                self.scene.addItem(item)
                self.idx_annot_labels.append(item)
                # Enforce a minimum visible duration for very short markers.
                mrk_dur = amax((mrk_end - mrk_start, self.parent.value('min_marker_display_dur')))
                item = RectMarker(mrk_start, 0, mrk_dur, h_annot, zvalue=-8, color=color.lighter(120))
                self.scene.addItem(item)
                self.idx_annot.append(item)
            if annot['chan'] != ['']:
                # find indices of channels with annotations
                chan_idx_in_mrk = in1d(self.chan, annot['chan'])
                y_annot = asarray(self.chan_pos)[chan_idx_in_mrk]
                y_annot -= y_distance / 2
                mrk_dur = amax((mrk_end - mrk_start, self.parent.value('min_marker_display_dur')))
                # One channel-height marker per annotated channel.
                for y in y_annot:
                    item = RectMarker(mrk_start, y, mrk_dur, y_distance, zvalue=-7, color=color)
                    self.scene.addItem(item)
                    self.idx_annot.append(item)
def _write_to_cache(self, expr, res):
    """Store the cached result without indentation, and without the
    keyname"""
    # Strip common leading whitespace before delegating to the base class.
    super()._write_to_cache(expr, dedent(res))
def allow_access_to_instance(self, _, ip_address):
    '''Allow access to instance.

    Opens MySQL port 3306 in the default EC2 security group for the given
    ip_address (as a /32 CIDR). Returns True on success or when the rule
    already exists, False otherwise.
    Note: the first positional argument is accepted but unused.
    '''
    if not self.connect_to_aws_rds():
        return False
    try:
        conn = boto.ec2.connect_to_region(self.region, aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key)
        sgs = conn.get_all_security_groups('default')
        default_sg = sgs[0]
        default_sg.authorize(ip_protocol='tcp', from_port=3306, to_port=3306, cidr_ip=str(ip_address) + '/32')
    # Python 2 except syntax (this module is Python 2 code).
    except EC2ResponseError, exception:
        if exception.error_code == "InvalidPermission.Duplicate":
            # ok, the rule already exists -- treat as success
            return True
        else:
            return False
    else:
        return True
def json(self):
    """Return a JSON-serializable representation of this result.

    The output of this function can be converted to a serialized string
    with :any:`json.dumps`.
    """
    def maybe_json(obj):
        # None stays None; everything else is serialized via its own json().
        return None if obj is None else obj.json()

    return {
        "segment_models": [maybe_json(m) for m in self.segment_models],
        "model_lookup": {key: maybe_json(val) for key, val in self.model_lookup.items()},
        "prediction_segment_type": self.prediction_segment_type,
        "prediction_segment_name_mapping": self.prediction_segment_name_mapping,
        "prediction_feature_processor": self.prediction_feature_processor.__name__,
    }
def dephasing_operators(p):
    """Return the phase damping Kraus operators for probability *p*."""
    identity_part = np.sqrt(1 - p / 2) * np.eye(2)
    phase_part = np.sqrt(p / 2) * Z
    return identity_part, phase_part
def get_tags_of_port(self, port):
    """Return the sorted list of cluster tags that include *port*.

    This is the inverse lookup of the tag -> ports mapping stored in
    ``self.cluster_tags``.
    """
    matching = [tag for tag, ports in self.cluster_tags.items()
                if port in ports]
    return sorted(matching)
def loadTargetPatterns(self, filename, cols=None, everyNrows=1, delim=' ', checkEven=1):
    """Loads targets as patterns from file.

    Thin wrapper that forwards all arguments to the file-based loader.
    """
    # NOTE(review): the delegate name contains a double "s"
    # ("Patternss"); presumably it matches an existing method spelled
    # this way -- confirm it is not a typo.
    self.loadTargetPatternssFromFile(filename, cols, everyNrows, delim, checkEven)
def _is_non_public_numeric_address(host):
    """Return True if *host* is a numeric address that is not public."""
    # For numeric hostnames, skip RFC1918 addresses, since no Tor exit
    # node will be able to reach those. Likewise ignore IPv6 addresses.
    try:
        addr = ipaddress.ip_address(six.text_type(host))
    except ValueError:
        # Non-numeric hostname -- let Tor try to resolve it.
        return False
    unreachable = (addr.is_loopback or addr.is_multicast or addr.is_private
                   or addr.is_reserved or addr.is_unspecified)
    # Too weird to connect to if any of the above holds.
    return True if unreachable else False
def add_vectors(vec_a, vec_b, periodic):
    '''Return the sum of the vectors ``vec_a`` and ``vec_b`` subject to the
    periodic boundary conditions.

    :param vec_a: first vector
    :param vec_b: second vector
    :param periodic: periodic-cell specification understood by ``noperiodic``
    :return: element-wise sum of the two vectors after both have been
        unwrapped out of the periodic cell
    '''
    # Bug fix: the unwrapped coordinates were previously computed but never
    # used -- the function returned the raw sum, silently ignoring
    # ``periodic``.
    moved_a, moved_b = noperiodic(np.array([vec_a, vec_b]), periodic)
    return moved_a + moved_b
def close(self):
    """Close the stream.

    This performs a proper stream shutdown, except if the stream is
    currently performing a TLS handshake. In that case, calling
    :meth:`close` is equivalent to calling :meth:`abort`. Otherwise, the
    transport waits until all buffers are transmitted.
    """
    if self._state == _State.CLOSED:
        self._invalid_state("close() called")
        return
    if self._state == _State.TLS_HANDSHAKING:
        # hard-close: we cannot shut down cleanly mid-handshake
        self._force_close(None)
    elif self._state == _State.TLS_SHUTTING_DOWN:
        # shut down in progress, nothing to do
        pass
    elif self._buffer:
        # there is data left to be sent; first wait for it to transmit,
        # the actual shutdown happens once the buffer drains
        self._closing = True
    elif self._state.tls_started:
        # normal TLS state, nothing left to transmit: TLS shutdown
        self._tls_shutdown()
    else:
        # normal non-TLS state, nothing left to transmit: close
        self._raw_shutdown()
def is_nested_list_like(obj):
    """Check if the object is list-like, and that all of its elements are
    also list-like.

    .. versionadded:: 0.20.0

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_list_like : bool
        Whether `obj` has nested list-like properties.

    Examples
    --------
    >>> is_nested_list_like([[1, 2, 3]])
    True
    >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
    True
    >>> is_nested_list_like(["foo"])
    False
    >>> is_nested_list_like([])
    False
    >>> is_nested_list_like([[1, 2, 3], 1])
    False

    Notes
    -----
    This won't reliably detect whether a consumable iterator (e.g. a
    generator) is nested-list-like without consuming the iterator. To
    avoid consuming it, we always return False if the outer container
    doesn't define `__len__`.

    See Also
    --------
    is_list_like
    """
    if not is_list_like(obj) or not hasattr(obj, '__len__'):
        return False
    if len(obj) == 0:
        return False
    return all(is_list_like(element) for element in obj)
def run_step(context):
    """Run command, program or executable.

    Context is a dictionary or dictionary-like and must contain:

    cmd: <<cmd string>> (command + args to execute.)

    OR, as a dict:

    cmd:
        run: str. mandatory. <<cmd string>> command + args to execute.
        save: bool. defaults False. save output to cmdOut.

    Will execute the command string in the shell as a sub-process.
    Escape curly braces: if you want a literal curly brace, double it
    like {{ or }}.

    If save is True, will save the output to context as follows:

    cmdOut:
        returncode: 0
        stdout: 'stdout str here. None if empty.'
        stderr: 'stderr str here. None if empty.'

    cmdOut.returncode is the exit status of the called process. Typically
    0 means OK. A negative value -N indicates that the child was
    terminated by signal N (POSIX only).

    context['cmd'] will interpolate anything in curly braces for values
    found in context. So if your context looks like this:
        key1: value1
        key2: value2
        cmd: mything --arg1 {key1}

    The cmd passed to the shell will be "mything --arg value1"
    """
    logger.debug("started")
    # All of the behaviour documented above is implemented by the generic
    # cmd step; this wrapper only adds the enter/exit debug logging.
    pypyr.steps.cmd.run_step(context)
    logger.debug("done")
def bandnames(self, names):
    """Set the names of the raster bands.

    Parameters
    ----------
    names: list of str
        the names to assign; must be of same length as the number of bands

    Raises
    ------
    TypeError
        if *names* is not a list
    ValueError
        if the length of *names* differs from the band count
    """
    if not isinstance(names, list):
        raise TypeError('the names to be set must be of type list')
    band_count = self.bands
    if len(names) != band_count:
        raise ValueError(
            'length mismatch of names to be set ({}) and number of bands ({})'.format(
                len(names), band_count))
    self.__bandnames = names
def gaussian(h, Xi, x):
    """Gaussian Kernel for continuous variables.

    Parameters
    ----------
    h : 1-D ndarray, shape (K,)
        The bandwidths used to estimate the value of the kernel function.
    Xi : 1-D ndarray, shape (K,)
        The value of the training set.
    x : 1-D ndarray, shape (K,)
        The value at which the kernel density is being estimated.

    Returns
    -------
    kernel_value : ndarray, shape (nobs, K)
        The value of the kernel function at each training point for each var.
    """
    z = (Xi - x) / h
    return np.exp(-0.5 * z ** 2) / np.sqrt(2 * np.pi)
def get_substructure(data, path):
    """Tries to retrieve a sub-structure within some data. If the path does
    not match any sub-structure, returns None.

    >>> data = {'a': 5, 'b': {'c': [1, 2, [{'f': [57]}], 4], 'd': 'test'}}
    >>> get_substructure(data, "bc")
    [1, 2, [{'f': [57]}], 4]
    >>> get_substructure(data, ['b', 'c'])
    [1, 2, [{'f': [57]}], 4]
    >>> get_substructure(data, ['b', 'c', 2, 0, 'f', 0])
    57
    >>> get_substructure(data, ['b', 'c', 2, 0, 'f', 'd']) is None
    True

    @param data: a container
    @type data: str | dict | list | (an indexable container)
    @param path: location of the data
    @type path: list | str
    @rtype: *
    """
    # Doc fix: the examples previously referenced an undefined name
    # ``island`` instead of ``data``.
    if not len(path):
        return data
    try:
        # Recurse one level down; any non-indexable value or missing
        # key/index terminates the search with None.
        return get_substructure(data[path[0]], path[1:])
    except (TypeError, IndexError, KeyError):
        return None
def combine_quantities(data, units='units', magnitude='magnitude', list_of_dicts=False):
    """combine <unit, magnitude> pairs into pint.Quantity objects

    Parameters
    ----------
    data : dict
    units : str
        name of units key
    magnitude : str
        name of magnitude key
    list_of_dicts : bool
        treat list of dicts as additional branches

    Examples
    --------
    >>> from pprint import pprint
    >>> sdata = {'energy': {'magnitude': 1.602e-22, 'units': 'kilojoule'},
    ...          'meta': None,
    ...          'other': {'y': {'magnitude': [4, 5, 6], 'units': 'nanometer'}},
    ...          'x': {'magnitude': [1, 2, 3], 'units': 'nanometer'},
    ...          'y': {'magnitude': [8, 9, 10], 'units': 'meter'}}
    >>> combined_data = combine_quantities(sdata)
    >>> pprint(combined_data)
    {'energy': <Quantity(1.602e-22, 'kilojoule')>,
     'meta': None,
     'other': {'y': <Quantity([4 5 6], 'nanometer')>},
     'x': <Quantity([1 2 3], 'nanometer')>,
     'y': <Quantity([8 9 10], 'meter')>}
    """  # noqa: E501
    # pint is an optional dependency; fail with a clear message if absent.
    try:
        from pint import UnitRegistry
        ureg = UnitRegistry()
    except ImportError:
        raise ImportError('please install pint to use this module')
    # '__list__' is the sentinel understood by flatten2d/unflatten for
    # treating lists of dicts as branches.
    list_of_dicts = '__list__' if list_of_dicts else None
    data_flatten2d = flatten2d(data, list_of_dicts=list_of_dicts)
    new_dict = {}
    for key, val in list(data_flatten2d.items()):
        if units in val and magnitude in val:
            # Pop the pair so only non-quantity leftovers remain in val.
            quantity = ureg.Quantity(val.pop(magnitude), val.pop(units))
            # Drop the branch entirely if the pair was all it contained.
            if not val:
                data_flatten2d.pop(key)
            new_dict[key] = quantity
    final_dict = merge([data_flatten2d, new_dict])
    return unflatten(final_dict, list_of_dicts=list_of_dicts)
def draw_rect(self, x, y, width, height, string, fg=Ellipsis, bg=Ellipsis):
    """Draws a rectangle starting from x and y and extending to width and
    height.

    If width or height are None then it will extend to the edge of the
    console.

    Args:
        x (int): x-coordinate for the top side of the rect.
        y (int): y-coordinate for the left side of the rect.
        width (Optional[int]): The width of the rectangle.
            Can be None to extend to the bottom right of the console or
            can be a negative number to be sized relative to the total
            size of the console.
        height (Optional[int]): The height of the rectangle.
        string (Optional[Union[Text, int]]): An integer, single character
            string, or None.  You can set the string parameter as None if
            you only want to change the colors of an area.
        fg (Optional[Union[Tuple[int, int, int], int, Ellipsis]])
        bg (Optional[Union[Tuple[int, int, int], int, Ellipsis]])

    Raises:
        AssertionError: Having x or y values that can't be placed inside
            of the console will raise an AssertionError.  You can always
            use ``((x, y) in console)`` to check if a tile is drawable.

    .. seealso:: :any:`clear`, :any:`draw_frame`
    """
    # Resolve None/negative sizes against the console bounds.
    x, y, width, height = self._normalizeRect(x, y, width, height)
    # Ellipsis means "use the console's current default color".
    fg, bg = _format_color(fg, self._fg), _format_color(bg, self._bg)
    char = _format_char(string)
    # Use itertools to make an (x, y) grid covering every cell of the rect.
    grid = _itertools.product((x for x in range(x, x + width)), (y for y in range(y, y + height)))
    # Zip the single character in a batch variable, one entry per cell.
    batch = zip(grid, _itertools.repeat(char, width * height))
    self._set_batch(batch, fg, bg, nullChar=(char is None))
def member_create(self, params, member_id):
    """Start new mongod instances as part of replica set.

    Args:
        params - member params
        member_id - member index

    return member config
    """
    member_config = params.get('rsParams', {})
    server_id = params.pop('server_id', None)
    version = params.pop('version', self._version)
    # Every member must carry the replica set name.
    proc_params = {'replSet': self.repl_id}
    proc_params.update(params.get('procParams', {}))
    if self.enable_ipv6:
        enable_ipv6_single(proc_params)
    # Make sure that auth isn't set the first time we start the servers.
    proc_params = self._strip_auth(proc_params)
    # Don't pass in auth_key the first time we start the servers.
    server_id = self._servers.create(name='mongod', procParams=proc_params, sslParams=self.sslParams, version=version, server_id=server_id)
    # Record the member id and resolved host in the replica set config.
    member_config.update({"_id": member_id, "host": self._servers.hostname(server_id)})
    return member_config
def to_yaml(obj, stream=None, dumper_cls=yaml.Dumper, default_flow_style=False, **kwargs):
    """Serialize a Python object into a YAML stream with OrderedDict and
    default_flow_style defaulted to False.

    If stream is None, return the produced string instead.

    OrderedDict reference: http://stackoverflow.com/a/21912744
    default_flow_style reference: http://stackoverflow.com/a/18210750

    :param obj: python object to be serialized
    :param stream: stream to be serialized to
    :param dumper_cls: base Dumper class to extend
    :param default_flow_style: passed through to :func:`yaml.dump`
    :param kwargs: arguments to pass to to_dict
    :return: stream if provided, string if stream is None
    """
    # Doc fix: the docstring previously documented parameters named
    # ``data`` and ``Dumper`` which do not exist in the signature.

    # Subclass so the OrderedDict representer does not leak onto the
    # caller's Dumper class.
    class OrderedDumper(dumper_cls):
        pass

    def dict_representer(dumper, data):
        return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())

    OrderedDumper.add_representer(OrderedDict, dict_representer)
    obj_dict = to_dict(obj, **kwargs)
    return yaml.dump(obj_dict, stream, OrderedDumper, default_flow_style=default_flow_style)
def run(self, batch: mx.io.DataBatch) -> List[mx.nd.NDArray]:
    """Run a forward pass (inference mode) and return the outputs.

    :param batch: The batch to run.
    :return: The grouped symbol (probs and target dists) and lists
        containing the data names and label names.
    """
    # is_train=False: inference only, no gradient bookkeeping.
    self.module.forward(batch, is_train=False)
    outputs = self.module.get_outputs()
    return outputs
def get_is_property(value, is_bytes=False):
    """Get shortcut for `SC` or `Binary` property."""
    # An optional leading '^' negates the property.
    if value.startswith('^'):
        negate = '^'
        body = value[1:]
    else:
        negate = ''
        body = value
    prefix, rest = body[:2], body[2:]
    if prefix != 'is':
        raise ValueError("Does not start with 'is'!")
    # Pick ASCII or full Unicode tables depending on the pattern mode.
    script_obj = unidata.ascii_script_extensions if is_bytes else unidata.unicode_script_extensions
    bin_obj = unidata.ascii_binary if is_bytes else unidata.unicode_binary
    # Try script extensions first; fall back to binary properties.
    key = negate + unidata.unicode_alias['script'].get(rest, rest)
    if key in script_obj:
        return script_obj[key]
    key = negate + unidata.unicode_alias['binary'].get(rest, rest)
    return bin_obj[key]
def append(self, item):
    """Add an item to the end of the list."""
    # Serialize all writers, then insert with the next list_index computed
    # inside the INSERT itself so index allocation and insert are one
    # statement.
    with self.lock:
        with self._closeable_cursor() as cursor:
            cursor.execute('''INSERT INTO list (list_index, value) VALUES ((SELECT MAX(list_index) FROM list) + 1, ?)''', (self._coder(item),))
            # Flush/persist the change while still holding the lock.
            self._do_write()
def get_account_config(self, id_or_name):
    """Return a dictionary of account configuration for the account with
    the specified ID or name.

    :param id_or_name: ID or name of account
    :type id_or_name: str
    :return: configuration for specified account
    :rtype: dict
    :raises RuntimeError: if no account matches
    """
    # Direct ID lookup first.
    try:
        return self._config[id_or_name]
    except KeyError:
        pass
    # Fall back to resolving a name to its ID.
    if id_or_name in self._acct_name_to_id:
        return self._config[self._acct_name_to_id[id_or_name]]
    raise RuntimeError('ERROR: Unknown account ID or name')
def inverse(self):
    """Inverse of this operator.

    For example, if A and B are operators::

        [[A, 0],
         [0, B]]

    the inverse is given by::

        [[A^-1, 0],
         [0, B^-1]]

    This is only well defined if each sub-operator has an inverse.

    Returns
    -------
    inverse : `DiagonalOperator`
        The inverse operator

    See Also
    --------
    ProductSpaceOperator.inverse
    """
    # Note the swapped domain/range: the inverse maps range -> domain.
    return DiagonalOperator(*(op.inverse for op in self.operators),
                            domain=self.range, range=self.domain)
def _build_torrent(self, row):
    """Builds and returns a Torrent object for the given parsed row."""
    # Scrape, strip and build!!!
    # Split the row into its columns.
    cols = row.findall('.//td')
    # The first column contains the category links.
    [category, sub_category] = [c.text for c in cols[0].findall('.//a')]
    # The second column carries all the important info.
    links = cols[1].findall('.//a')
    # get 4 a tags from this columns
    title = unicode(links[0].text)
    url = self.url.build().path(links[0].get('href'))
    magnet_link = links[1].get('href')
    # the magnet download link
    try:
        torrent_link = links[2].get('href')
        # the torrent download link; anything else in that slot is not a
        # .torrent file
        if not torrent_link.endswith('.torrent'):
            torrent_link = None
    except IndexError:
        torrent_link = None
    comments = 0
    has_cover = 'No'
    # Comment count and cover presence are encoded in icon title texts.
    images = cols[1].findall('.//img')
    for image in images:
        image_title = image.get('title')
        if image_title is None:
            continue
        if "comments" in image_title:
            comments = int(image_title.split(" ")[3])
        if "cover" in image_title:
            has_cover = 'Yes'
    user_status = "MEMBER"
    # A /user/ link means the uploader status icon is present.
    if links[-2].get('href').startswith("/user/"):
        user_status = links[-2].find('.//img').get('title')
    # Created date, size and uploader are packed into one <font> blob.
    meta_col = cols[1].find('.//font').text_content()
    # don't need user
    match = self._meta.match(meta_col)
    created = match.groups()[0].replace('\xa0', ' ')
    size = match.groups()[1].replace('\xa0', ' ')
    user = match.groups()[2]
    # uploaded by user
    # last 2 columns for seeders and leechers
    seeders = int(cols[2].text)
    leechers = int(cols[3].text)
    t = Torrent(title, url, category, sub_category, magnet_link, torrent_link, comments, has_cover, user_status, created, size, user, seeders, leechers)
    return t
def inits_t(wrap):
    """Transformation for Sequence.inits.

    :param wrap: wrap children values with this
    :return: transformation
    """
    def all_inits(sequence):
        # Longest prefix first, down to (and including) the empty prefix.
        return [wrap(sequence[:size])
                for size in reversed(range(len(sequence) + 1))]
    return Transformation('inits', all_inits, {ExecutionStrategies.PRE_COMPUTE})
def addresses(self, fields=None):
    """List of addresses associated with the postcode.

    Addresses are dict objects which look like the following::

        "uprn": "10033544614",
        "organisation_name": "BUCKINGHAM PALACE",
        "department_name": "",
        "po_box_number": "",
        "building_name": "",
        "sub_building_name": "",
        "building_number": null,
        "thoroughfare_name": "",
        "dependent_thoroughfare_name": "",
        "dependent_locality": "",
        "double_dependent_locality": "",
        "post_town": "LONDON",
        "postcode": "SW1A 1AA",
        "postcode_type": "L",
        "formatted_address": "Buckingham Palace\\nLondon\\nSW1A 1AA",
        "point": {
            "type": "Point",
            "coordinates": [
                -0.141587558526369,
                51.50100893654096
    """
    # Lazily fetch on first access and memoise on the instance.
    cached = getattr(self, '_addresses', None)
    if cached is None:
        if fields is None:
            fields = ','.join(DEFAULT_ADDRESS_FIELDS)
        cached = self._client._query_api('addresses/', postcode=self.normalised, fields=fields)
        self._addresses = cached
    return cached
def l2traceroute_input_protocolType_IP_src_ip(self, **kwargs):
    """Auto Generated Code

    Builds the <l2traceroute>/<input>/<protocolType>/<IP>/<src-ip>
    request element and passes it to the callback.
    """
    config = ET.Element("l2traceroute")
    input_elem = ET.SubElement(config, "input")
    protocol_elem = ET.SubElement(input_elem, "protocolType")
    ip_elem = ET.SubElement(protocol_elem, "IP")
    src_ip_elem = ET.SubElement(ip_elem, "src-ip")
    src_ip_elem.text = kwargs.pop('src_ip')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _resource_prefix ( self , resource = None ) :
"""Get elastic prefix for given resource .
Resource can specify ` ` elastic _ prefix ` ` which behaves same like ` ` mongo _ prefix ` ` .""" | px = 'ELASTICSEARCH'
if resource and config . DOMAIN [ resource ] . get ( 'elastic_prefix' ) :
px = config . DOMAIN [ resource ] . get ( 'elastic_prefix' )
return px |
async def create_payment_address(seed: str = None) -> str:
    """Creates a payment address inside the wallet.

    :param seed: String
    Example:
        address = await Wallet.create_payment_address('000001234567')
    :return: String
    """
    logger = logging.getLogger(__name__)
    # The FFI callback is created once and memoised on the function object
    # so it stays referenced (and is not garbage-collected) between calls.
    if not hasattr(Wallet.create_payment_address, "cb"):
        logger.debug("vcx_wallet_create_payment_address: Creating callback")
        Wallet.create_payment_address.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
    # A falsy seed is passed through as NULL to the native layer.
    if seed:
        c_seed = c_char_p(seed.encode('utf-8'))
    else:
        c_seed = None
    result = await do_call('vcx_wallet_create_payment_address', c_seed, Wallet.create_payment_address.cb)
    logger.debug("vcx_wallet_create_payment_address completed")
    return result
def _learn(  # mutated args
        permanences, rng,  # activity
        activeCells, activeInput, growthCandidateInput,  # configuration
        sampleSize, initialPermanence, permanenceIncrement,
        permanenceDecrement, connectedPermanence):
    """For each active cell, reinforce active synapses, punish inactive
    synapses, and grow new synapses to a subset of the active input bits
    that the cell isn't already connected to.

    Parameters:
    @param permanences (SparseMatrix)
      Matrix of permanences, with cells as rows and inputs as columns
    @param rng (Random)
      Random number generator
    @param activeCells (sorted sequence)
      Sorted list of the cells that are learning
    @param activeInput (sorted sequence)
      Sorted list of active bits in the input
    @param growthCandidateInput (sorted sequence)
      Sorted list of active bits in the input that the activeCells may
      grow new synapses to

    For remaining parameters, see the __init__ docstring.
    """
    # Reinforce synapses onto active inputs, punish all other existing
    # synapses of the learning cells, then clip permanences into [0, 1].
    permanences.incrementNonZerosOnOuter(activeCells, activeInput, permanenceIncrement)
    permanences.incrementNonZerosOnRowsExcludingCols(activeCells, activeInput, -permanenceDecrement)
    permanences.clipRowsBelowAndAbove(activeCells, 0.0, 1.0)
    if sampleSize == -1:
        # Unlimited sample size: grow a synapse to every active input.
        permanences.setZerosOnOuter(activeCells, activeInput, initialPermanence)
    else:
        # Grow only enough new synapses per cell to reach sampleSize.
        existingSynapseCounts = permanences.nNonZerosPerRowOnCols(activeCells, activeInput)
        maxNewByCell = numpy.empty(len(activeCells), dtype="int32")
        numpy.subtract(sampleSize, existingSynapseCounts, out=maxNewByCell)
        permanences.setRandomZerosOnOuter(activeCells, growthCandidateInput, maxNewByCell, initialPermanence, rng)
def set_last_row_idx(self, last_row_idx):
    '''Set the index of the last row.

    Parameters
    ----------
    last_row_idx : int
        number of rows; must not be smaller than the current maximum

    Returns
    -------
    self, to allow chaining.

    Raises
    ------
    ValueError
        if *last_row_idx* is smaller than the current maximum row index.
    '''
    # Raise explicitly instead of using ``assert`` so the validation
    # survives ``python -O``.
    if last_row_idx < self._max_row:
        raise ValueError(
            'last_row_idx ({}) must be >= current max row ({})'.format(
                last_row_idx, self._max_row))
    self._max_row = last_row_idx
    return self
def get_psms(self):
    """Creates iterator to write to new tsv. Contains input tsv lines plus
    quant data for these."""
    # Build the output header from the old one, inserting the quant
    # columns at the spectra column position.
    self.header = actions.create_header(self.oldheader, self.spectracol)
    # Iterator over PSM rows joined with their spectra quant data.
    self.psms = actions.generate_psms_spectradata(self.lookup, self.fn, self.oldheader)
def accpro_results(self):
    """Parse the ACCpro output file and return a dict of secondary
    structure compositions.

    The output file is FASTA-formatted; presumably the keys are the
    record headers (sequence IDs) -- confirm against the loader.
    """
    return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_accpro)
def check_for_message_exception(cls, message_result):
    """Re-raise any exception captured while sending the message.

    Kafka will silently catch exceptions and not bubble them up, so
    surface them from the future explicitly.

    Parameters
    ----------
    message_result : FutureRecordMetadata
    """
    failure = message_result.exception
    if isinstance(failure, BaseException):
        raise failure
def _translate_src_mode_to_dst_mode(self, src_mode):
    # type: (SyncCopy, blobxfer.models.azure.StorageModes) -> blobxfer.models.azure.StorageModes
    """Translate the source mode into the destination mode.

    Doc fix: the type comment previously declared ``-> bool`` although a
    StorageModes value is returned.

    :param SyncCopy self: this
    :param blobxfer.models.azure.StorageModes src_mode: source mode
    :rtype: blobxfer.models.azure.StorageModes
    :return: destination mode
    """
    # Auto means "mirror the source mode"; anything else is an explicit
    # override configured on the destination.
    if (self._spec.options.dest_mode == blobxfer.models.azure.StorageModes.Auto):
        return src_mode
    else:
        return self._spec.options.dest_mode
def toposort(data):
    """Topologically sort dependency data.

    Dependencies are expressed as a dictionary whose keys are items and
    whose values are a set of dependent items. Output is a generator of
    sets in topological order. The first set consists of items with no
    dependences, each subsequent set consists of items that depend upon
    items in the preceding sets.

    NOTE: *data* is modified in place.

    Doc fix: the example previously called an undefined ``toposort2``.

    >>> print '\\n'.join(repr(sorted(x)) for x in toposort({
    ...     2: set([11]),
    ...     9: set([11, 8]),
    ...     10: set([11, 3]),
    ...     11: set([7, 5]),
    ...     8: set([7, 3])}))
    [3, 5, 7]
    [8, 11]
    [2, 9, 10]
    """
    # Ignore self dependencies.
    for k, v in iteritems(data):
        v.discard(k)
    # Find all items that don't depend on anything.
    extra_items_in_deps = reduce(set.union, itervalues(data)) - set(data.keys())
    # Add empty dependences where needed.
    data.update({item: set() for item in extra_items_in_deps})
    while True:
        ordered = set(item for item, dep in iteritems(data) if not dep)
        if not ordered:
            break
        yield ordered
        # Keep only unsatisfied items, with satisfied deps removed.
        data = {item: (dep - ordered)
                for item, dep in iteritems(data) if item not in ordered}
    assert not data, 'Cyclic dependencies exist among these items:\n%s' % '\n'.join(repr(x) for x in list(data.items()))
def name_targets(func):
    """Wrap a function such that returning ``'a', 'b', 'c', [1, 2, 3]``
    transforms the value into ``dict(a=1, b=2, c=3)``.

    This is useful in the case where the last parameter is an SCons
    command.
    """
    def named(*args, **kwargs):
        result = func(*args, **kwargs)
        # All elements but the last are keys; the last holds the values.
        return dict(zip(result[:-1], result[-1]))
    return named
def load(self, rawdata):
    """Load cookies from a string (presumably HTTP_COOKIE) or from a
    dictionary.

    Loading cookies from a dictionary 'd' is equivalent to calling::

        map(Cookie.__setitem__, d.keys(), d.values())
    """
    if isinstance(rawdata, str):
        self.__parse_string(rawdata)
        return
    # self.update() wouldn't call our custom __setitem__, so assign
    # key by key.
    for key, value in rawdata.items():
        self[key] = value
def build_machine(lines):
    """Build machine from list of lines.

    The first line defines the machine itself; every subsequent non-blank
    line adds one state. Raises SyntaxError for an empty file.
    """
    if lines == []:
        raise SyntaxError('Empty file')
    machine = Machine(lines[0].split())
    for state_line in lines[1:]:
        if state_line.strip():
            machine.add_state(state_line)
    # Validate the assembled machine before returning it.
    machine.check()
    return machine
def Start(self):
    """Start the minimum number of worker threads (idempotent)."""
    if self.started:
        return
    self.started = True
    for _ in range(self.min_threads):
        self._AddWorker()
def tags(self):
    """Returns a dictionary that lists all available tags that can be used
    for further filtering."""
    # These meta fields are not useful as filter tags.
    skipped = {'uniqueid', 'plugin', 'feedback', 'fitting', 'history',
               'twig', 'uniquetwig'}
    result = {}
    for typ in _meta_fields_twig:
        if typ in skipped:
            continue
        plural = '{}s'.format(typ)
        result[plural] = getattr(self, plural)
    return result
def get_torrent(self, torrent_id):
    """Gets the `.torrent` data for the given `torrent_id`.

    :param torrent_id: the ID of the torrent to download
    :raises TorrentNotFoundError: if the torrent does not exist
    :returns: :class:`Torrent` of the associated torrent
    """
    response = requests.get(
        self.base_url,
        params={'page': 'download', 'tid': torrent_id})
    # A non-bittorrent content type means the torrent does not exist.
    if response.headers.get('content-type') != 'application/x-bittorrent':
        raise TorrentNotFoundError(TORRENT_NOT_FOUND_TEXT)
    return Torrent(torrent_id, response.content)
def add_origin_info(self, packet):
    """Add optional Origin-* headers describing this machine to *packet*."""
    origin_headers = (
        ('Origin-Machine-Name', platform.node()),
        ('Origin-Software-Name', 'gntp.py'),
        ('Origin-Software-Version', __version__),
        ('Origin-Platform-Name', platform.system()),
        ('Origin-Platform-Version', platform.platform()),
    )
    for header_name, header_value in origin_headers:
        packet.add_header(header_name, header_value)
def linsrgb_to_srgb(linsrgb):
    """Convert physically linear RGB values into sRGB ones. The transform
    is uniform in the components, so *linsrgb* can be of any shape.

    *linsrgb* values should range between 0 and 1, inclusively.
    """
    # Piecewise sRGB transfer function: a linear segment below the
    # 0.0031308 threshold, a gamma curve above it (from Wikipedia).
    gamma_branch = 1.055 * linsrgb ** (1. / 2.4) - 0.055
    linear_branch = 12.92 * linsrgb
    return np.where(linsrgb > 0.0031308, gamma_branch, linear_branch)
def extract_component_doi(tag, nodenames):
    """Used to get component DOI from a tag and confirm it is actually for
    that tag and it is not for one of its children in the list of
    nodenames."""
    component_doi = None
    if (tag.name == "sub-article"):
        # Sub-articles carry their DOI directly in an article-id element.
        component_doi = doi_uri_to_doi(node_text(first(raw_parser.article_id(tag, pub_id_type="doi"))))
    else:
        object_id_tag = first(raw_parser.object_id(tag, pub_id_type="doi"))
        # Tweak: if it is media and has no object_id_tag then it is not a
        # "component"
        if tag.name == "media" and not object_id_tag:
            component_doi = None
        else:
            # Check the object id is for this tag and not one of its
            # children. This happens for example when boxed text has a
            # child figure: the boxed text does not have a DOI, the
            # figure does have one.
            if object_id_tag and first_parent(object_id_tag, nodenames).name == tag.name:
                component_doi = doi_uri_to_doi(node_text(object_id_tag))
    return component_doi
def get_table(self, tablename):
    """Look up a registered table by name.

    :param str tablename: Name of table to find.
    :return: A :py:class:`xltable.Table` instance from the table name.
    """
    # Entries are stored as (table, (row, col)); the position is unused
    # here but the unpack validates the entry shape.
    table, (_row, _col) = self.__tables[tablename]
    return table
def clear_sessions(hyperstream, inactive_only=True, clear_history=False):
    """Clear all (inactive) sessions and (optionally) their history.

    :param hyperstream: The hyperstream object
    :param inactive_only: Whether to only clear inactive sessions (active
        sessions may be owned by another process)
    :param clear_history: Whether to clear the history of the session.
        Note that this will only clear the history if the creator is the
        same process: there could feasibly be a history stored in a file
        channel that is not accessible by this process.
    """
    query = dict()
    if inactive_only:
        query['active'] = False
    # Never delete the session owned by this process.
    if hyperstream.current_session is not None:
        query['session_id__ne'] = hyperstream.current_session.session_id
    with switch_db(SessionModel, "hyperstream"):
        for s in SessionModel.objects(**query):
            if clear_history:
                channel = hyperstream.channel_manager[s.history_channel]
                stream_id = StreamId("session", meta_data=(('uuid', str(s.session_id)),))
                try:
                    channel.purge_stream(stream_id, remove_definition=True, sandbox=None)
                except StreamNotFoundError:
                    # History stream already gone -- nothing to purge.
                    pass
            s.delete()
def isrchi(value, ndim, array):
    """Search for a given value within an integer array. Return the index
    of the first matching array entry, or -1 if the key value was not
    found.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/isrchi_c.html

    :param value: Key value to be found in array.
    :type value: int
    :param ndim: Dimension of array.
    :type ndim: int
    :param array: Integer array to search.
    :type array: Array of ints
    :return: The index of the first matching array element or -1 if the
        value is not found.
    :rtype: int
    """
    # Convert the Python arguments to the C representations CSPICE expects.
    c_value = ctypes.c_int(value)
    c_ndim = ctypes.c_int(ndim)
    c_array = stypes.toIntVector(array)
    return libspice.isrchi_c(c_value, c_ndim, c_array)
def bus_inspector(self, bus, message):
    """Inspect the bus for screensaver messages of interest."""
    # We only care about stuff on this interface. We did filter for it
    # above, but even so we still hear from ourselves (hamster messages).
    if message.get_interface() != self.screensaver_uri:
        return True
    member = message.get_member()
    if member in ("SessionIdleChanged", "ActiveChanged"):
        logger.debug("%s -> %s" % (member, message.get_args_list()))
        idle_state = message.get_args_list()[0]
        if idle_state:
            self.idle_from = dt.datetime.now()
            # From gnome screensaver 2.24 to 2.28 they have switched
            # configuration keys and signal types. Luckily we can
            # determine the key by the signal type.
            if member == "SessionIdleChanged":
                delay_key = "/apps/gnome-screensaver/idle_delay"
            else:
                delay_key = "/desktop/gnome/session/idle_delay"
            client = gconf.Client.get_default()
            self.timeout_minutes = client.get_int(delay_key)
        else:
            self.screen_locked = False
            self.idle_from = None
        if member == "ActiveChanged":
            # ActiveChanged comes before SessionIdleChanged signal; as a
            # workaround for pre 2.26, we will wait a second -- maybe the
            # SessionIdleChanged signal kicks in.
            def dispatch_active_changed(idle_state):
                if not self.idle_was_there:
                    self.emit('idle-changed', idle_state)
                self.idle_was_there = False
            gobject.timeout_add_seconds(1, dispatch_active_changed, idle_state)
        else:
            # Dispatch idle status change to interested parties.
            self.idle_was_there = True
            self.emit('idle-changed', idle_state)
    elif member == "Lock":
        # In case of lock, the Lock signal will be sent first, followed
        # by ActiveChanged and SessionIdle signals.
        logger.debug("Screen Lock Requested")
        self.screen_locked = True
    return
def ReadCodonAlignment(fastafile, checknewickvalid):
    """Reads codon alignment from file.

    *fastafile* is the name of an existing FASTA file (or a readable
    file-like object accepted by ``Bio.SeqIO.parse``).

    *checknewickvalid* : if *True*, we require that names are unique and do
    **not** contain spaces, commas, colons, semicolons, parentheses, square
    brackets, or single or double quotation marks.  If any of these
    disallowed characters are present, raises an Exception.

    Reads the alignment from the *fastafile* and returns the aligned
    sequences as a list of 2-tuples of strings *(header, sequence)* where
    *sequence* is upper case.

    If the terminal codon is a stop codon for **all** sequences, then this
    terminal codon is trimmed.  Raises an exception if the sequences are
    not aligned codon sequences that are free of stop codons (with the
    exception of a shared terminal stop) and free of ambiguous nucleotides.
    """
    codonmatch = re.compile(r'^[ATCG]{3}$')
    # Fixed: the original pattern was '^-+^' -- a second start-of-string
    # anchor instead of '$' -- which can never match, so sequences made
    # entirely of gaps were never detected.
    gapmatch = re.compile(r'^-+$')
    seqs = [(seq.description.strip(), str(seq.seq).upper()) for seq in Bio.SeqIO.parse(fastafile, 'fasta')]
    assert seqs, "{0} failed to specify any sequences".format(fastafile)
    seqlen = len(seqs[0][1])
    if not all([len(seq) == seqlen for (head, seq) in seqs]):
        # Fixed wording: "must not be properly aligned" -> "may not".
        raise ValueError(("All sequences in {0} are not of the same length; " "they may not be properly aligned").format(fastafile))
    if (seqlen < 3) or (seqlen % 3 != 0):
        raise ValueError(("The length of the sequences in {0} is {1} which " "is not divisible by 3; they are not valid codon sequences").format(fastafile, seqlen))
    # One entry per sequence: translation of its terminal codon
    # ('*' for stop, '-' for gap, otherwise the amino acid).
    terminalcodon = []
    codons_by_position = dict([(icodon, []) for icodon in range(seqlen // 3)])
    for (head, seq) in seqs:
        assert len(seq) % 3 == 0
        for icodon in range(seqlen // 3):
            codon = seq[3 * icodon: 3 * icodon + 3]
            codons_by_position[icodon].append(codon)
            if codonmatch.search(codon):
                aa = str(Bio.Seq.Seq(codon).translate())
                if aa == '*':
                    if icodon + 1 != len(seq) // 3:
                        raise ValueError(("In {0}, sequence {1}, non-terminal " "codon {2} is stop codon: {3}").format(fastafile, head, icodon + 1, codon))
            elif codon == '---':
                aa = '-'
            else:
                raise ValueError(("In {0}, sequence {1}, codon {2} is invalid: " "{3}").format(fastafile, head, icodon + 1, codon))
        terminalcodon.append(aa)
    for (icodon, codonlist) in codons_by_position.items():
        if all([codon == '---' for codon in codonlist]):
            raise ValueError(("In {0}, all codons are gaps at position {1}").format(fastafile, icodon + 1))
    if all([aa in ['*', '-'] for aa in terminalcodon]):
        # Fixed: compare the alignment length rather than the loop
        # variable `seq` leaked from the loop above.
        if seqlen == 3:
            raise ValueError(("The only codon is a terminal stop codon for " "the sequences in {0}").format(fastafile))
        # Shared terminal stop codon: trim it from every sequence.
        seqs = [(head, seq[: -3]) for (head, seq) in seqs]
    elif any([aa == '*' for aa in terminalcodon]):
        raise ValueError(("Only some sequences in {0} have a terminal stop " "codon. All or none must have terminal stop.").format(fastafile))
    if any([gapmatch.search(seq) for (head, seq) in seqs]):
        raise ValueError(("In {0}, at least one sequence is entirely composed " "of gaps.").format(fastafile))
    if checknewickvalid:
        if len(set([head for (head, seq) in seqs])) != len(seqs):
            raise ValueError("Headers in {0} not all unique".format(fastafile))
        disallowedheader = re.compile(r'[\s\:\;\(\)\[\]\,\'\"]')
        for (head, seq) in seqs:
            if disallowedheader.search(head):
                # Fixed: format string referenced {2} with only two
                # arguments, raising IndexError instead of the intended
                # error message.
                raise ValueError(("Invalid character in header in {0}:" "\n{1}").format(fastafile, head))
    return seqs
def get_multiple(self, fields=None, limit=None, order_by=None, offset=None):
    """Wrapper method that takes whatever was returned by the _all_inner() generators and chains it in one result.

    The response can be sorted by passing a list of fields to order_by.

    Example:
        get_multiple(order_by=['category', '-created_on']) would sort the category field in ascending order,
        with a secondary sort by created_on in descending order.

    :param fields: List of fields to return in the result
    :param limit: Limits the number of records returned
    :param order_by: Sort response based on certain fields
    :param offset: A number of records to skip before returning records (for pagination)
    :return: Iterable chain object
    """
    # Fixed: mutable default arguments (fields=list(), order_by=list())
    # are evaluated once at definition time and shared across all calls;
    # use None sentinels and normalize to fresh lists here instead.
    fields = [] if fields is None else fields
    order_by = [] if order_by is None else order_by
    return itertools.chain.from_iterable(self._all_inner(fields, limit, order_by, offset))
def _choose_width_fn ( has_invisible , enable_widechars , is_multiline ) :
"""Return a function to calculate visible cell width .""" | if has_invisible :
line_width_fn = _visible_width
elif enable_widechars : # optional wide - character support if available
line_width_fn = wcwidth . wcswidth
else :
line_width_fn = len
if is_multiline :
def width_fn ( s ) :
return _multiline_width ( s , line_width_fn )
else :
width_fn = line_width_fn
return width_fn |
def build_index(self, filename, indexfile):
    """Build an interval index for a MAF alignment file.

    Recipe from Brad Chapman's blog
    <http://bcbio.wordpress.com/2009/07/26/sorting-genomic-alignments-using-python/>

    :param filename: path of the MAF file to index.
    :param indexfile: path the index is written to.
    """
    indexes = interval_index_file.Indexes()
    # Fixed: the input and output handles were never closed; use context
    # managers so they are released even if reading or writing raises.
    with open(filename) as in_handle:
        reader = maf.Reader(in_handle)
        while True:
            # Remember the file offset of the record about to be read so it
            # can be stored in the index.
            pos = reader.file.tell()
            rec = next(reader)
            if rec is None:
                # bx-python MAF readers signal EOF with None.
                break
            for c in rec.components:
                indexes.add(c.src, c.forward_strand_start, c.forward_strand_end, pos, max=c.src_size)
    with open(indexfile, "w") as index_handle:
        indexes.write(index_handle)
def check(self):
    """Checks values.

    Recolours each entry widget: the normal background when its value is
    valid, the warning colour otherwise.  Returns True only if every
    widget's value is valid.
    """
    g = get_root(self).globals
    status = True
    # The three fields share identical validate-and-recolour logic.
    for widget in (self.mag, self.airmass, self.seeing):
        if widget.ok():
            widget.config(bg=g.COL['main'])
        else:
            widget.config(bg=g.COL['warn'])
            status = False
    return status
def mark_job_as_failed(self, job_id, exception, traceback):
    """Mark the job as failed, and record the traceback and exception.

    Args:
        job_id: The job_id of the job that failed.
        exception: The exception object thrown by the job.
        traceback: The traceback, if any. Note (aron): Not implemented yet. We
            need to find a way for the concurrent.futures workers to throw
            back the error to us.

    Returns: None
    """
    session = self.sessionmaker()
    job, orm_job = self._update_job_state(job_id, State.FAILED, session=session)
    # Note (aron): looks like SQLAlchemy doesn't automatically
    # save any pickletype fields even if we re-set (orm_job.obj = job) that
    # field. My hunch is that it's tracking the id of the object,
    # and if that doesn't change, then SQLAlchemy doesn't repickle the object
    # and save to the DB.
    # Our hack here is to just copy the job object, and then set the specific
    # field we want to edit, in this case the job.state. That forces
    # SQLAlchemy to re-pickle the object, thus setting it to the correct state.
    job = copy(job)
    job.exception = exception
    job.traceback = traceback
    orm_job.obj = job
    session.add(orm_job)
    session.commit()
    session.close()
def c32checkEncode(version, data):
    """Encode hex *data* as a c32check string with a leading version character.

    *version* must index into the C32 alphabet and *data* must be a hex
    string; a checksum over version + data is appended before Crockford
    base32 encoding.

    >>> c32checkEncode(22, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
    'P2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7'
    """
    if not 0 <= version < len(C32):
        raise ValueError('Invalid version -- must be between 0 and {}'.format(len(C32) - 1))
    if re.match(r'^[0-9a-fA-F]*$', data) is None:
        raise ValueError('Invalid data -- must be hex')
    normalized = data.lower()
    if len(normalized) % 2:
        # Left-pad to an even number of hex digits (whole bytes).
        normalized = '0{}'.format(normalized)
    version_hex = '{:02x}'.format(version)
    checksum_hex = c32checksum('{}{}'.format(version_hex, normalized))
    encoded = c32encode('{}{}'.format(normalized, checksum_hex))
    return '{}{}'.format(C32[version], encoded)
def decode_to_shape(inputs, shape, scope):
    """Decode the given tensor to match the channel size of *shape*.

    Flattens *inputs*, projects it with a dense layer to ``shape[2]``
    units, and re-inserts a singleton axis at position 1.
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        flat = tfl.flatten(inputs)
        projected = tfl.dense(flat, shape[2], activation=None, name="dec_dense")
        return tf.expand_dims(projected, axis=1)
def runExperiment():
    """Experiment 1: Calculate error rate as a function of training sequence numbers.

    Trains an LSTM on increasing numbers of training sequences, repeating
    each condition several times, saves the hit/miss/false-positive rates
    to ``result/reberSequenceLSTM.npz`` and a summary figure to
    ``result/ReberSequence_LSTMperformance.pdf``.

    :return: None
    """
    trainSeqN = [5, 10, 20, 50, 100, 200]
    rptPerCondition = 5
    correctRateAll = np.zeros((len(trainSeqN), rptPerCondition))
    missRateAll = np.zeros((len(trainSeqN), rptPerCondition))
    fpRateAll = np.zeros((len(trainSeqN), rptPerCondition))
    # Fixed: use range() instead of the Python-2-only xrange() so this
    # also runs under Python 3.
    for i in range(len(trainSeqN)):
        for rpt in range(rptPerCondition):
            train_seed = 1
            numTrainSequence = trainSeqN[i]
            net = initializeLSTMnet()
            net = trainLSTMnet(net, numTrainSequence, seedSeq=train_seed)
            # NOTE(review): numTestSequence is not defined in this function;
            # presumably a module-level constant -- confirm it exists.
            (correctRate, missRate, fpRate) = testLSTMnet(net, numTestSequence, seedSeq=train_seed + rpt)
            correctRateAll[i, rpt] = correctRate
            missRateAll[i, rpt] = missRate
            fpRateAll[i, rpt] = fpRate
    np.savez('result/reberSequenceLSTM.npz', correctRateAll=correctRateAll, missRateAll=missRateAll, fpRateAll=fpRateAll, trainSeqN=trainSeqN)
    plt.figure()
    plt.subplot(2, 2, 1)
    plt.semilogx(trainSeqN, 100 * np.mean(correctRateAll, 1), '-*')
    plt.xlabel(' Training Sequence Number')
    plt.ylabel(' Hit Rate - Best Match (%)')
    plt.subplot(2, 2, 2)
    plt.semilogx(trainSeqN, 100 * np.mean(missRateAll, 1), '-*')
    plt.xlabel(' Training Sequence Number')
    plt.ylabel(' Miss Rate (%)')
    plt.subplot(2, 2, 3)
    plt.semilogx(trainSeqN, 100 * np.mean(fpRateAll, 1), '-*')
    plt.xlabel(' Training Sequence Number')
    plt.ylabel(' False Positive Rate (%)')
    plt.savefig('result/ReberSequence_LSTMperformance.pdf')
def encode_as_simple(name, value):
    """Creates an etree element following the simple field convention.

    Values are assumed to be strs, unicodes, ints, floats, or Decimals:

    >>> element = encode_as_simple('foo', '5')
    >>> element.tag == 'foo'
    True
    >>> element.text == '5'
    True
    >>> element = encode_as_simple('bar', 8)
    >>> element.tag == 'bar'
    True
    >>> element.text == '8'
    True
    """
    if isinstance(value, objectify.ObjectifiedDataElement):
        # Unwrap lxml objectify values to their text form and re-encode.
        return encode_as_simple(name, unicode(value))
    # Exact type membership (not isinstance) mirrors the registered set
    # of directly stringable types.
    converted = str(value) if type(value) in _stringable_types else value
    return elements.field(name, converted)
def agg_single_func(values, agg_field, agg_func, group_by=None):
    """Aggregates single function.

    :param values: list of objects (dict)
    :param agg_field: target field to calculate aggregate
    :param agg_func: aggregate function
    :param group_by: field used to determine group
    :return: aggregated value (a {group: value} dict when *group_by* is
        given, ``None`` for empty input)
    """
    if len(values) == 0:
        return None
    frame = pd.DataFrame(values)
    if group_by:
        grouped = frame.groupby(group_by).agg({agg_field: [agg_func]})
        # itertuples rows are (group_key, aggregated_value).
        return {row[0]: row[1] for row in grouped.itertuples()}
    return frame.agg({agg_field: [agg_func]})[agg_field][agg_func]
def dry_run(func):
    """Dry run: simulate sql execution.

    Decorator: after calling *func*, commits the session on a real run
    and rolls it back on a dry run.
    """
    @wraps(func)
    def inner(dry_run, *args, **kwargs):
        result = func(dry_run=dry_run, *args, **kwargs)
        if dry_run:
            db.session.rollback()
        else:
            db.session.commit()
        return result
    return inner
def bucket(things, key):
    """Return a map of key -> list of things sharing that key value."""
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
def outgoing_connections(self):
    """Returns a list of all outgoing connections for this peer.

    The previous implementation used ``dropwhile`` and relied on outgoing
    connections being grouped at the right-hand end of
    ``self.connections``; it would also keep any non-outgoing connection
    appearing after the first outgoing one.  Filtering by direction
    returns exactly the outgoing connections regardless of ordering,
    and is identical when the ordering invariant holds.
    """
    return [conn for conn in self.connections if conn.direction == OUTGOING]
def from_json(cls, data, result=None):
    """Create new Node element from JSON data.

    :param data: Element data from JSON
    :type data: Dict
    :param result: The result this element belongs to
    :type result: overpy.Result
    :return: New instance of Node
    :rtype: overpy.Node
    :raises overpy.exception.ElementDataWrongType: If type value of the
        passed JSON data does not match.
    """
    if data.get("type") != cls._type_value:
        raise exception.ElementDataWrongType(type_expected=cls._type_value, type_provided=data.get("type"))
    # Everything except the well-known keys is kept as extra attributes.
    known_keys = ("type", "id", "lat", "lon", "tags")
    attributes = {k: v for k, v in data.items() if k not in known_keys}
    return cls(
        node_id=data.get("id"),
        lat=data.get("lat"),
        lon=data.get("lon"),
        tags=data.get("tags", {}),
        attributes=attributes,
        result=result,
    )
def _get_file_list(orig_files, out_file, regions, ref_file, config):
    """Create file with region sorted list of non-empty VCFs for concatenating.

    Returns the path of a text file (written next to *out_file*) listing
    one bgzipped, indexed VCF per line in region order.
    """
    sorted_files = _sort_by_region(orig_files, regions, ref_file, config)
    # Keep only (region, path) pairs whose file exists and has variants.
    exist_files = [(c, x) for c, x in sorted_files if os.path.exists(x) and vcf_has_variants(x)]
    if len(exist_files) == 0:  # no non-empty inputs, merge the empty ones
        exist_files = [x for c, x in sorted_files if os.path.exists(x)]
    elif len(exist_files) > 1:
        # NOTE(review): _fix_gatk_header appears to accept the (region, path)
        # pairs and return plain paths -- confirm against its definition.
        exist_files = _fix_gatk_header(exist_files, out_file, config)
    else:
        # Single non-empty input: drop the region component.
        exist_files = [x for c, x in exist_files]
    # bgzip and tabix-index each input in parallel before concatenation.
    ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config)
    input_file_list = "%s-files.list" % utils.splitext_plus(out_file)[0]
    with open(input_file_list, "w") as out_handle:
        for fname in ready_files:
            out_handle.write(fname + "\n")
    return input_file_list
def default_downloader(directory, urls, filenames, url_prefix=None, clear=False):
    """Downloads or clears files from URLs and filenames.

    Parameters
    ----------
    directory : str
        The directory in which downloaded files are saved.
    urls : list
        A list of URLs to download.
    filenames : list
        A list of file names for the corresponding URLs.  Falsy entries are
        filled in from the URL; note this list is modified in place.
    url_prefix : str, optional
        If provided, this is prepended to filenames that
        lack a corresponding URL.
    clear : bool, optional
        If `True`, delete the given filenames from the given
        directory rather than download them.
    """
    # Parse file names from URL if not provided
    for i, url in enumerate(urls):
        filename = filenames[i]
        if not filename:
            filename = filename_from_url(url)
        if not filename:
            raise ValueError("no filename available for URL '{}'".format(url))
        filenames[i] = filename
    files = [os.path.join(directory, f) for f in filenames]
    if clear:
        # Clear mode: remove whichever target files currently exist.
        for f in files:
            if os.path.isfile(f):
                os.remove(f)
    else:
        print('Downloading ' + ', '.join(filenames) + '\n')
        ensure_directory_exists(directory)
        for url, f, n in zip(urls, files, filenames):
            if not url:
                # No URL given: build one from url_prefix + filename.
                if url_prefix is None:
                    raise NeedURLPrefix
                url = url_prefix + n
            with open(f, 'wb') as file_handle:
                download(url, file_handle)
def launch_minecraft(port, installdir="MalmoPlatform", replaceable=False):
    """Launch Minecraft listening for malmoenv connections.

    Args:
        port: the TCP port to listen on.
        installdir: the install dir name. Defaults to MalmoPlatform.
            Must be same as given (or defaulted) in download call if used.
        replaceable: whether or not to automatically restart Minecraft (default is false).
    """
    # Windows uses the batch launcher; everything else the shell script.
    launch_script = 'launchClient.bat' if os.name == 'nt' else './launchClient.sh'
    previous_dir = os.getcwd()
    os.chdir(os.path.join(installdir, "Minecraft"))
    try:
        cmd = [launch_script, '-port', str(port), '-env']
        if replaceable:
            cmd.append('-replaceable')
        subprocess.check_call(cmd)
    finally:
        # Always restore the caller's working directory.
        os.chdir(previous_dir)
def initialize_library(self, library, lib_type=VERSION_STORE, **kwargs):
    """Create an Arctic Library of a particular type.

    Parameters
    ----------
    library : `str`
        The name of the library. e.g. 'library' or 'user.library'
    lib_type : `str`
        The type of the library. e.g. arctic.VERSION_STORE or arctic.TICK_STORE
        Or any type registered with register_library_type
        Default: arctic.VERSION_STORE
    kwargs :
        Arguments passed to the Library type for initialization.
        ``check_library_count`` (default True) can be passed to disable
        the namespace-count safety check below.
    """
    lib = ArcticLibraryBinding(self, library)
    # check that we don't create too many namespaces
    # can be disabled with check_library_count=False
    check_library_count = kwargs.pop('check_library_count', True)
    if len(self._conn[lib.database_name].list_collection_names()) > 5000 and check_library_count:
        raise ArcticException("Too many namespaces %s, not creating: %s" % (len(self._conn[lib.database_name].list_collection_names()), library))
    lib.set_library_type(lib_type)
    LIBRARY_TYPES[lib_type].initialize_library(lib, **kwargs)
    # Add a 10G quota just in case the user is calling this with API.
    if not lib.get_quota():
        lib.set_quota(10 * 1024 * 1024 * 1024)
    self._cache.append('list_libraries', self._sanitize_lib_name(library))
def scan(self):
    """Start a thread for each registered scan function to scan proxy lists"""
    self.logger.info('{0} registered scan functions, starting {0} threads ' 'to scan candidate proxy lists...'.format(len(self.scan_funcs)))
    # Pair each scan function with its keyword arguments; run each on a
    # daemon thread so scanning never blocks interpreter shutdown.
    for func, kwargs in zip(self.scan_funcs, self.scan_kwargs):
        worker = threading.Thread(name=func.__name__, target=func, kwargs=kwargs)
        worker.daemon = True
        self.scan_threads.append(worker)
        worker.start()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.