| signature (string, 29–44.1k chars) | implementation (string, 0–85.2k chars) |
|---|---|
def get_parent_gradebooks(self, gradebook_id):
    """Gets the parents of the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of a gradebook
    return: (osid.grading.GradebookList) - the parents of the gradebook
    raise:  NotFound - ``gradebook_id`` is not found
    raise:  NullArgument - ``gradebook_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.resource.BinHierarchySession.get_parent_bins
    if self._catalog_session is not None:
        # Delegate to the underlying catalog session when one is configured.
        return self._catalog_session.get_parent_catalogs(catalog_id=gradebook_id)
    parent_ids = list(self.get_parent_gradebook_ids(gradebook_id))
    lookup_session = GradebookLookupSession(self._proxy, self._runtime)
    return lookup_session.get_gradebooks_by_ids(parent_ids)
def instance_for_arguments ( self , arguments : { Prior : float } ) :
"""Create an instance of the associated class for a set of arguments
Parameters
arguments : { Prior : float }
Dictionary mapping _ matrix priors to attribute analysis _ path and value pairs
Returns
An instance of the class""" | for prior , value in arguments . items ( ) :
prior . assert_within_limits ( value )
model_arguments = { t . name : arguments [ t . prior ] for t in self . direct_prior_tuples }
constant_arguments = { t . name : t . constant . value for t in self . direct_constant_tuples }
for tuple_prior in self . tuple_prior_tuples :
model_arguments [ tuple_prior . name ] = tuple_prior . prior . value_for_arguments ( arguments )
for prior_model_tuple in self . direct_prior_model_tuples :
model_arguments [ prior_model_tuple . name ] = prior_model_tuple . prior_model . instance_for_arguments ( arguments )
return self . cls ( ** { ** model_arguments , ** constant_arguments } ) |
def process_soundcloud(vargs):
    """Main SoundCloud path.

    Resolves the CLI arguments in ``vargs`` to a SoundCloud URL (artist,
    group, single track, playlist, or likes page), fetches track metadata
    via the API client, downloads the tracks, and optionally opens the
    resulting files.  Falls back to HTML scraping when the API refuses.
    """
    artist_url = vargs['artist_url']
    track_permalink = vargs['track']
    keep_previews = vargs['keep']
    folders = vargs['folders']
    id3_extras = {}
    one_track = False
    likes = False
    client = get_client()
    # Build a full soundcloud.com URL when only a bare name was given.
    if 'soundcloud' not in artist_url.lower():
        if vargs['group']:
            artist_url = 'https://soundcloud.com/groups/' + artist_url.lower()
        elif len(track_permalink) > 0:
            one_track = True
            track_url = 'https://soundcloud.com/' + artist_url.lower() + '/' + track_permalink.lower()
        else:
            artist_url = 'https://soundcloud.com/' + artist_url.lower()
    if vargs['likes'] or 'likes' in artist_url.lower():
        likes = True
    if 'likes' in artist_url.lower():
        # Trim the '/likes' suffix so the artist URL resolves cleanly.
        artist_url = artist_url[0:artist_url.find('/likes')]
        likes = True
    if one_track:
        num_tracks = 1
    else:
        num_tracks = vargs['num_tracks']
    try:
        if one_track:
            resolved = client.get('/resolve', url=track_url, limit=200)
        elif likes:
            userId = str(client.get('/resolve', url=artist_url).id)
            resolved = client.get('/users/' + userId + '/favorites', limit=200, linked_partitioning=1)
            # Follow linked partitioning pages and accumulate the collection.
            next_href = False
            if (hasattr(resolved, 'next_href')):
                next_href = resolved.next_href
            while (next_href):
                resolved2 = requests.get(next_href).json()
                if ('next_href' in resolved2):
                    next_href = resolved2['next_href']
                else:
                    next_href = False
                resolved2 = soundcloud.resource.ResourceList(resolved2['collection'])
                resolved.collection.extend(resolved2)
            resolved = resolved.collection
        else:
            resolved = client.get('/resolve', url=artist_url, limit=200)
    except Exception as e:  # likely an HTTPError from the API client
        # SoundScrape is trying to prevent us from downloading this.
        # We're going to have to stop trusting the API/client and
        # do all our own scraping. Boo.
        if '404 Client Error' in str(e):
            puts(colored.red("Problem downloading [404]: ") + colored.white("Item Not Found"))
            return None
        # Pull the numeric item id out of the error message URL.
        message = str(e)
        item_id = message.rsplit('/', 1)[-1].split('.json')[0].split('?client_id')[0]
        hard_track_url = get_hard_track_url(item_id)
        track_data = get_soundcloud_data(artist_url)
        puts_safe(colored.green("Scraping") + colored.white(": " + track_data['title']))
        filenames = []
        filename = sanitize_filename(track_data['artist'] + ' - ' + track_data['title'] + '.mp3')
        if folders:
            name_path = join(vargs['path'], track_data['artist'])
            if not exists(name_path):
                mkdir(name_path)
            filename = join(name_path, filename)
        else:
            filename = join(vargs['path'], filename)
        if exists(filename):
            puts_safe(colored.yellow("Track already downloaded: ") + colored.white(track_data['title']))
            return None
        filename = download_file(hard_track_url, filename)
        tagged = tag_file(filename, artist=track_data['artist'], title=track_data['title'],
                          year='2018', genre='', album='', artwork_url='')
        if not tagged:
            # Tagging failed: presumably not MP3 data; rename to .wav.
            wav_filename = filename[:-3] + 'wav'
            os.rename(filename, wav_filename)
            filename = wav_filename
        filenames.append(filename)
    else:
        aggressive = False
        # No 'kind' attribute: this is likely a 'likes' page (a plain list).
        if not hasattr(resolved, 'kind'):
            tracks = resolved
        else:
            if resolved.kind == 'artist':
                artist = resolved
                artist_id = str(artist.id)
                tracks = client.get('/users/' + artist_id + '/tracks', limit=200)
            elif resolved.kind == 'playlist':
                id3_extras['album'] = resolved.title
                if resolved.tracks != []:
                    tracks = resolved.tracks
                else:
                    # Empty API playlist: fall back to the v2 API data and
                    # download directly ("aggressive" path).
                    tracks = get_soundcloud_api_playlist_data(resolved.id)['tracks']
                    tracks = tracks[:num_tracks]
                    aggressive = True
                    for track in tracks:
                        download_track(track, resolved.title, keep_previews, folders,
                                       custom_path=vargs['path'])
            elif resolved.kind == 'track':
                tracks = [resolved]
            elif resolved.kind == 'group':
                group = resolved
                group_id = str(group.id)
                tracks = client.get('/groups/' + group_id + '/tracks', limit=200)
            else:
                artist = resolved
                artist_id = str(artist.id)
                tracks = client.get('/users/' + artist_id + '/tracks', limit=200)
                if tracks == [] and artist.track_count > 0:
                    # API returned nothing although tracks exist: scrape
                    # the v2 API directly.  NOTE(review): "this might be
                    # buggy" per the original author.
                    aggressive = True
                    filenames = []
                    data = get_soundcloud_api2_data(artist_id)
                    for track in data['collection']:
                        if len(filenames) >= num_tracks:
                            break
                        if track['type'] == 'playlist':
                            track['playlist']['tracks'] = track['playlist']['tracks'][:num_tracks]
                            for playlist_track in track['playlist']['tracks']:
                                album_name = track['playlist']['title']
                                filename = download_track(playlist_track, album_name, keep_previews,
                                                          folders, filenames, custom_path=vargs['path'])
                                if filename:
                                    filenames.append(filename)
                        else:
                            d_track = track['track']
                            filename = download_track(d_track, custom_path=vargs['path'])
                            if filename:
                                filenames.append(filename)
        if not aggressive:
            filenames = download_tracks(client, tracks, num_tracks, vargs['downloadable'],
                                        vargs['folders'], vargs['path'], id3_extras=id3_extras)
    if vargs['open']:
        open_files(filenames)
def tabSeparatedSummary(self, sortOn=None):
    """Summarize all the alignments for this title as a multi-line string
    with TAB-separated values on each line.

    @param sortOn: A C{str} attribute to sort titles on. One of 'length',
        'maxScore', 'medianScore', 'readCount', or 'title'.
    @raise ValueError: If an unknown C{sortOn} value is given.
    @return: A newline-separated C{str}, each line a TAB-separated summary
        of one title.
    """
    # Field order is deliberate: the variable-length subject titles come
    # last so the fixed-width columns are easy to read.  Post-processing
    # scripts rely on this ordering -- only append new fields, carefully.
    # The output is awk-friendly: awk 'BEGIN {FS = "\t"} ...'
    lineFormat = '\t'.join((
        '%(coverage)f',
        '%(medianScore)f',
        '%(bestScore)f',
        '%(readCount)d',
        '%(hspCount)d',
        '%(subjectLength)d',
        '%(subjectTitle)s',
    ))
    return '\n'.join(
        lineFormat % titleSummary for titleSummary in self.summary(sortOn))
def fit(self, X, y=None):
    """Fit every transformer in the pipeline mapping on its columns.

    :param X (DataSet): the data to fit
    :param y: unused; kept for scikit-learn API compatibility
    :return: self, to allow chaining
    """
    for columns, transformer in self.mapping:
        if transformer is None:
            # Column groups without a transformer are passed through untouched.
            continue
        transformer.fit(self._get_columns(X, columns))
    return self
def _geocode(self, pq):
    """Geocode a place query against the ESRI findAddressCandidates endpoint.

    :arg PlaceQuery pq: PlaceQuery object to use for geocoding
    :returns: list of location Candidates
    """
    # Desired output fields; see the ESRI geocode-addresses REST docs
    # (https://developers.arcgis.com/rest/geocode/api-reference/).  Many
    # further fields (Shape, Rank, AddNumFrom/To, extents, LangCode, ...)
    # are available but intentionally not requested.
    outFields = ('Loc_name',
                 'Score',
                 'Match_addr',  # based on address standards for the country
                 'Addr_type',
                 'AddNum', 'StPreDir', 'StPreType', 'StName', 'StType', 'StDir',
                 'City', 'Subregion', 'Region', 'Postal', 'Country',
                 'DisplayX', 'DisplayY',
                 )
    outFields = ','.join(outFields)
    query = dict(f='json',  # response format; default is HTML
                 outFields=outFields,
                 maxLocations=20,  # default 1; max is 20
                 )
    # Postal-code-only searches work in the single-line but not the
    # multipart geocoder.  Note the default postprocessors eliminate
    # postcode-level results.
    if pq.query == pq.address == '' and pq.postal != '':
        pq.query = pq.postal
    if pq.query == '':  # multipart search
        query = dict(query,
                     Address=pq.address,  # house number + street name
                     Neighborhood=pq.neighborhood,
                     City=pq.city,
                     Subregion=pq.subregion,
                     Region=pq.state,
                     Postal=pq.postal,
                     CountryCode=pq.country,  # name or ISO 3166-1 2/3-digit code
                     )
    else:  # single-line search
        magic_key = pq.key if hasattr(pq, 'key') else ''
        query = dict(query,
                     singleLine=pq.query,  # address, place name, postal code, or POI
                     sourceCountry=pq.country,
                     )
        if magic_key:
            # Lookup key previously returned from the suggest endpoint.
            query['magicKey'] = magic_key
    if pq.bounded and pq.viewbox is not None:
        query = dict(query, searchExtent=pq.viewbox.to_esri_wgs_json())
    if self._authenticated:
        # Refresh the auth token when missing or expired.
        if self._token is None or self._token_expiration < datetime.utcnow():
            expiration = timedelta(hours=2)
            self._token = self.get_token(expiration)
            self._token_expiration = datetime.utcnow() + expiration
        query['token'] = self._token
        if getattr(pq, 'for_storage', False):
            query['forStorage'] = 'true'
    endpoint = self._endpoint + '/findAddressCandidates'
    response_obj = self._get_json_obj(endpoint, query)
    returned_candidates = []  # this will be the list returned
    try:
        locations = response_obj['candidates']
        for location in locations:
            c = Candidate()
            attributes = location['attributes']
            c.match_addr = attributes['Match_addr']
            c.locator = attributes['Loc_name']
            c.locator_type = attributes['Addr_type']
            c.score = attributes['Score']
            # DisplayX/Y represent the actual location of the address.
            c.x = attributes['DisplayX']
            c.y = attributes['DisplayY']
            c.wkid = response_obj['spatialReference']['wkid']
            c.geoservice = self.__class__.__name__
            # Optional address component fields.
            for in_key, out_key in [('City', 'match_city'), ('Subregion', 'match_subregion'),
                                    ('Region', 'match_region'), ('Postal', 'match_postal'),
                                    ('Country', 'match_country')]:
                setattr(c, out_key, attributes.get(in_key, ''))
            setattr(c, 'match_streetaddr', self._street_addr_from_response(attributes))
            returned_candidates.append(c)
    except KeyError:
        # Malformed/empty response: return whatever was collected so far.
        pass
    return returned_candidates
def js_reverse_inline(context):
    """Outputs a string of javascript that can generate URLs via the use
    of the names given to those URLs.
    """
    # Use the request's urlconf when one is attached, else the default.
    if 'request' in context:
        urlconf = getattr(context['request'], 'urlconf', None)
    else:
        urlconf = None
    resolver = get_resolver(urlconf)
    return mark_safe(generate_js(resolver))
def stripe_to_db(self, data):
    """Convert the raw Stripe integer (cents) value to its decimal representation."""
    raw = data.get(self.name)
    if raw is None:
        # Missing field: propagate None.  Note 0 is a valid ('falseish')
        # value and must still be converted below.
        return None
    return raw / decimal.Decimal("100")
def u2open(self, u2request):
    """Open a connection.

    @param u2request: A urllib2 request.
    @type u2request: urllib2.Request.
    @return: The opened file-like urllib2 object.
    @rtype: fp
    """
    timeout = self.options.timeout
    opener = build_opener(HTTPSClientAuthHandler(self.context))
    if self.u2ver() < 2.6:
        # Older urllib2 has no per-call timeout argument; fall back to
        # setting the process-wide socket default.
        socket.setdefaulttimeout(timeout)
        return opener.open(u2request)
    return opener.open(u2request, timeout=timeout)
def save(self, force=False, uuid=False, **kwargs):
    """REPLACES the object in DB. This is forbidden with objects from find() methods unless force=True is given."""
    if not self._initialized_with_doc and not force:
        raise Exception("Cannot save a document not initialized from a Python dict. This might remove fields from the DB!")
    # A save consumes the "full document" guarantee; a later save must
    # pass force=True again.
    self._initialized_with_doc = False
    if uuid and '_id' not in self:
        self['_id'] = str("%s-%s" % (self.mongokat_collection.__class__.__name__, uuid4()))
    return self.mongokat_collection.save(self, **kwargs)
def write_keys(self, records_in, clean=True):
    """Write the keywords to the header.

    parameters
    ----------
    records_in : FITSHDR or list or dict
        One of: a FITSHDR object; a list of dicts with 'name', 'value' and
        optionally 'comment' fields (order preserved); or a plain dict of
        keyword-value pairs (no comments, arbitrary order).
    clean : boolean
        If True, trim out the standard fits header keywords created on HDU
        creation (EXTEND, SIMPLE, STTYPE, TFORM, TDIM, XTENSION, BITPIX,
        NAXIS, etc.).

    Notes
    -----
    Keys named COMMENT and HISTORY are written via the write_comment and
    write_history methods.
    """
    if isinstance(records_in, FITSHDR):
        hdr = records_in
    else:
        hdr = FITSHDR(records_in)
    if clean:
        # Table HDUs carry a _table_type_str attribute; hdr.clean needs to
        # know which flavor of reserved keywords to strip.
        hdr.clean(is_table=hasattr(self, '_table_type_str'))
    for record in hdr.records():
        keyname = record['name'].upper()
        value = record['value']
        if keyname == 'COMMENT':
            self.write_comment(value)
        elif keyname == 'HISTORY':
            self.write_history(value)
        elif keyname == 'CONTINUE':
            self._write_continue(value)
        else:
            self.write_key(keyname, value, comment=record.get('comment', ''))
def task(func):
    """Decorator to run the decorated function as a Task.

    The wrapper returns the object produced by ``spawn`` rather than the
    function's own return value.
    """
    import functools

    # functools.wraps preserves __name__/__doc__/etc. of the wrapped
    # function; the original decorator discarded them.
    @functools.wraps(func)
    def task_wrapper(*args, **kwargs):
        return spawn(func, *args, **kwargs)
    return task_wrapper
def add_to_stmts_rules(stmts, rules):
    """Use by plugins to add extra rules to the existing rules for
    a statement.

    New rules are inserted so that non-prefixed rules stay ahead of
    prefixed ones, and prefixed rules stay ordered by module name.
    """
    def is_rule_less_than(ra, rb):
        # Compare by rule keyword: non-prefixed < prefixed < (prefixed,
        # ordered by module name).
        rka = ra[0]
        rkb = rb[0]
        if not util.is_prefixed(rkb):
            # existing rule is non-prefixed; new rule goes after it
            return False
        if not util.is_prefixed(rka):
            # existing rule prefixed but new rule is not: insert before
            return True
        # both prefixed: order by module name
        return rka[0] < rkb[0]

    for s in stmts:
        (arg, rules0) = stmt_map[s]
        for r in rules:
            for i, existing in enumerate(rules0):
                if is_rule_less_than(r, existing):
                    rules0.insert(i, r)
                    break
            else:
                # No smaller position found: append at the end.
                rules0.append(r)
def moments_match_ep(self, data_i, tau_i, v_i, Y_metadata_i=None):
    """Moments match of the marginal approximation in EP algorithm.

    :param data_i: observation value (float)
    :param tau_i: precision of the cavity distribution (float)
    :param v_i: mean/variance of the cavity distribution (float)
    :returns: tuple (Z_hat, mu_hat, sigma2_hat) -- normalizing constant,
        posterior mean and posterior variance of the tilted distribution.
    """
    noise = self.variance
    sigma2_hat = 1. / (1. / noise + tau_i)
    mu_hat = sigma2_hat * (data_i / noise + v_i)
    total_var = noise + 1. / tau_i
    residual = data_i - v_i / tau_i
    Z_hat = 1. / np.sqrt(2. * np.pi * total_var) * np.exp(-.5 * residual ** 2. / total_var)
    return Z_hat, mu_hat, sigma2_hat
def _build_dependent_model_list ( self , obj_schema ) :
'''Helper function to build the list of models the given object schema is referencing .''' | dep_models_list = [ ]
if obj_schema :
obj_schema [ 'type' ] = obj_schema . get ( 'type' , 'object' )
if obj_schema [ 'type' ] == 'array' :
dep_models_list . extend ( self . _build_dependent_model_list ( obj_schema . get ( 'items' , { } ) ) )
else :
ref = obj_schema . get ( '$ref' )
if ref :
ref_obj_model = ref . split ( "/" ) [ - 1 ]
ref_obj_schema = self . _models ( ) . get ( ref_obj_model )
dep_models_list . extend ( self . _build_dependent_model_list ( ref_obj_schema ) )
dep_models_list . extend ( [ ref_obj_model ] )
else : # need to walk each property object
properties = obj_schema . get ( 'properties' )
if properties :
for _ , prop_obj_schema in six . iteritems ( properties ) :
dep_models_list . extend ( self . _build_dependent_model_list ( prop_obj_schema ) )
return list ( set ( dep_models_list ) ) |
def pivot_table(self, index, columns, values='value', aggfunc='count',
                fill_value=None, style=None):
    """Returns a pivot table.

    Parameters
    ----------
    index : str or list of strings
        rows for Pivot table
    columns : str or list of strings
        columns for Pivot table
    values : str, default 'value'
        dataframe column to aggregate or count
    aggfunc : str or function, default 'count'
        function used for aggregation; accepts 'count', 'mean', and 'sum'
    fill_value : scalar, default None
        value to replace missing values with
    style : str, default None
        output style for pivot table formatting;
        accepts 'highlight_not_max', 'heatmap'
    """
    # Normalize single-string arguments to one-element lists.
    index = [index] if isstr(index) else index
    columns = [columns] if isstr(columns) else columns
    df = self.data
    # allow 'aggfunc' to be passed as string for easier user interface
    if isstr(aggfunc):
        if aggfunc == 'count':
            df = self.data.groupby(index + columns, as_index=False).count()
            fill_value = 0
        elif aggfunc == 'mean':
            df = self.data.groupby(index + columns, as_index=False).mean().round(2)
            # NOTE(review): after pre-computing group means, the pivot then
            # *sums* them; groups are already unique per index+columns so
            # this presumably just places the values -- confirm intent.
            aggfunc = np.sum
            fill_value = 0 if style == 'heatmap' else ""
        elif aggfunc == 'sum':
            aggfunc = np.sum
            fill_value = 0 if style == 'heatmap' else ""
    df = df.pivot_table(values=values, index=index, columns=columns,
                        aggfunc=aggfunc, fill_value=fill_value)
    return df
def constants_pyx():
    """generate CONST = ZMQ_CONST and __all__ for constants.pxi"""
    # NULL would collide with Cython's builtin NULL, so it is assigned
    # through globals() instead of a plain name binding.
    assign_lines = [
        "globals()['NULL'] = ZMQ_NULL" if name == "NULL"
        else '{0} = ZMQ_{0}'.format(name)
        for name in all_names
    ]
    all_lines = [' "{0}",'.format(name) for name in all_names]
    return dict(ASSIGNMENTS='\n'.join(assign_lines), ALL='\n'.join(all_lines))
def _parse_broadcast ( self , msg ) :
"""Given a broacast message , returns the message that was broadcast .""" | # get message , remove surrounding quotes , and unescape
return self . _unescape ( self . _get_type ( msg [ self . broadcast_prefix_len : ] ) ) |
def image_summary(tag, image):
    """Outputs a `Summary` protocol buffer with image(s).

    Parameters
    ----------
    tag : str
        A name for the generated summary. Will also serve as a series name
        in TensorBoard.
    image : MXNet `NDArray` or `numpy.ndarray`
        Image data in one of the layouts (H, W), (C, H, W), (N, C, H, W),
        with pixel values assumed normalized to [0, 1]; it is rescaled to
        [0, 255] and cast to `np.uint8` before creating the image protobuf.

    Returns
    -------
    A `Summary` protobuf of the image.
    """
    cleaned_tag = _clean_tag(tag)
    image_proto = _make_image(_prepare_image(image))
    return Summary(value=[Summary.Value(tag=cleaned_tag, image=image_proto)])
def abstracts(self, key, value):
    """Populate the ``abstracts`` key."""
    # A single '9' subfield gives the source for all 'a' subfield values.
    source = force_single_element(value.get('9'))
    return [{'source': source, 'value': a_value}
            for a_value in force_list(value.get('a'))]
def get_artifact_info(self):
    """Returns a tuple composed of a :class:`pants.java.jar.JarDependency`
    describing the jar for this target and a bool indicating if this target
    is exportable.
    """
    exported = bool(self.provides)
    if exported:
        org, name = self.provides.org, self.provides.name
    else:
        # Non-exported targets get a synthetic internal coordinate.
        org, name = 'internal', self.identifier
    # TODO(John Sirois): This should return something less than a
    # JarDependency encapsulating just the org and name.  Perhaps a
    # JarFamily?
    return JarDependency(org=org, name=name, rev=None), exported
def next(self):
    """Returns the next (object, nextPageToken) pair.

    Maintains a one-element lookahead (_currentObject/_nextObject) so that
    the page token emitted with each object points at the *following*
    object's position.
    """
    if self._currentObject is None:
        raise StopIteration()
    nextPageToken = None
    if self._nextObject is not None:
        start = self._getStart(self._nextObject)
        # If start > the search anchor, move the search anchor. Otherwise,
        # increment the distance from the anchor (ties on the same start
        # position are disambiguated by distance).
        if start > self._searchAnchor:
            self._searchAnchor = start
            self._distanceFromAnchor = 0
        else:
            self._distanceFromAnchor += 1
        nextPageToken = "{}:{}".format(self._searchAnchor, self._distanceFromAnchor)
    ret = self._extractProtocolObject(self._currentObject), nextPageToken
    # Advance the lookahead window.
    self._currentObject = self._nextObject
    self._nextObject = next(self._searchIterator, None)
    return ret
def line_spacing(self):
    """The spacing between baselines of successive lines in this paragraph.

    A float value indicates a number of lines. A |Length| value indicates a
    fixed spacing. Value is contained in `./a:lnSpc/a:spcPts/@val` or
    `./a:lnSpc/a:spcPct/@val`. Value is |None| if no element is present.
    """
    lnSpc = self.lnSpc
    if lnSpc is None:
        return None
    # Fixed point spacing takes precedence over percentage spacing.
    spcPts = lnSpc.spcPts
    return spcPts.val if spcPts is not None else lnSpc.spcPct.val
def compute_equiv_class(atom):
    """(atom) -> Computes a unique integer for an atom.

    Combines atomic number, charge, hydrogen count and weight into one
    integer equivalence class.

    :raises ValueError: if any of the atom attributes is not numeric.
    """
    try:
        equiv_class = (atom.number
                       + 1000 * (atom.charge + 10)
                       + 100000 * atom.hcount
                       + 1000000 * atom.weight)
    except TypeError:
        # Python 3 raise syntax (the original used the Python 2
        # `raise ValueError, msg` form, a SyntaxError under Python 3).
        raise ValueError(
            "Can't compute number from atom.number %s atom.charge %s atom.hcount %s"
            " atom.weight %s" % (atom.number, atom.charge, atom.hcount, atom.weight))
    return equiv_class
def walk(textRoot, currentTag, level, prefix=None, postfix=None, unwrapUntilPara=False):
    '''Recursive traverser converting parsed doxygen xml into raw reStructuredText.

    .. note::
       This method does not cover all possible doxygen input types!  When an
       unsupported / unrecognized doxygen tag appears, the **raw xml will
       appear on the file page being documented**.  See the Doxygen ALIASES
       section of the docs for how to bypass invalid documentation.

    Supported doxygen xml markup tags: ``para``, ``orderedlist``,
    ``itemizedlist``, ``verbatim`` (specifically
    ``embed:rst:leading-asterisk``), ``formula``, ``ref``, ``emphasis``,
    ``computeroutput``, and ``bold``.

    The strategy is to "explode" every non-``para`` tag into ``para`` text:
    list markup is rebuilt from child nodes, after which
    :meth:`bs4.BeautifulSoup.get_text` strips the remaining ``para`` tags to
    produce the final reStructuredText.

    **Parameters**
        ``textRoot`` (:class:`~exhale.graph.ExhaleRoot`)
            Used to resolve doxygen ``\\ref`` tags to node pages; not
            modified by this method.
        ``currentTag`` (:class:`bs4.element.Tag`)
            The xml tag currently being processed (modified or unraveled).
        ``level`` (int)
            The **block** level of indentation (NOT recursion depth); tracks
            nesting such as a list within a list so ``indent = " " * level``
            prefixes nested items correctly.
    '''
    if not currentTag:
        return
    if prefix:
        currentTag.insert_before(prefix)
    if postfix:
        currentTag.insert_after(postfix)
    children = currentTag.findChildren(recursive=False)
    indent = " " * level
    if currentTag.name == "orderedlist":
        # Rebuild "N. item" markers, then dissolve the list wrappers.
        idx = 1
        for child in children:
            walk(textRoot, child, level + 1, "\n{0}{1}. ".format(indent, idx), None, True)
            idx += 1
            child.unwrap()
        currentTag.unwrap()
    elif currentTag.name == "itemizedlist":
        # Rebuild "- item" markers, then dissolve the list wrappers.
        for child in children:
            walk(textRoot, child, level + 1, "\n{0}- ".format(indent), None, True)
            child.unwrap()
        currentTag.unwrap()
    elif currentTag.name == "verbatim":
        # TODO: find relevant section in breathe.sphinxrenderer and include
        # the versions for both leading /// as well as plain embed:rst.
        leading_asterisk = "embed:rst:leading-asterisk\n*"
        if currentTag.string.startswith(leading_asterisk):
            cont = currentTag.string.replace(leading_asterisk, "")
            cont = textwrap.dedent(cont.replace("\n*", "\n"))
            currentTag.string = cont
    elif currentTag.name == "formula":
        # Strip the surrounding $ delimiters and wrap in :math:.
        currentTag.string = ":math:`{0}`".format(currentTag.string[1:-1])
    elif currentTag.name == "ref":
        # Resolve doxygen cross references to :ref: links when possible.
        signal = None
        if "refid" not in currentTag.attrs:
            signal = "No 'refid' in `ref` tag attributes of file documentation. Attributes were: {0}".format(currentTag.attrs)
        else:
            refid = currentTag.attrs["refid"]
            if refid not in textRoot.node_by_refid:
                signal = "Found unknown 'refid' of [{0}] in file level documentation.".format(refid)
            else:
                currentTag.string = ":ref:`{0}`".format(textRoot.node_by_refid[refid].link_name)
        if signal:
            # only emitted when verboseBuild is on
            utils.verbose_log(signal, utils.AnsiColors.BOLD_YELLOW)
    elif currentTag.name == "emphasis":
        currentTag.string = "*{0}*".format(currentTag.string)
    elif currentTag.name == "computeroutput":
        currentTag.string = "``{0}``".format(currentTag.string)
    elif currentTag.name == "bold":
        currentTag.string = "**{0}**".format(currentTag.string)
    else:
        # Unknown / container tag: recurse into children, separating
        # successive para children with a newline at this indent level.
        ctr = 0
        for child in children:
            c_prefix = None
            c_postfix = None
            if ctr > 0 and child.name == "para":
                c_prefix = "\n{0}".format(indent)
            walk(textRoot, child, level, c_prefix, c_postfix)
            ctr += 1
def start_tree(self, tree, name):
    """Skip this fixer if "__future__.division" is already imported."""
    super(FixDivisionSafe, self).start_tree(tree, name)
    # When true division is already in effect there is nothing to fix.
    self.skip = "division" in tree.future_features
def _slice_bam(in_bam, region, tmp_dir, config):
    """Use sambamba to slice a bam region.

    :param in_bam: path to the input BAM file
    :param region: region object/tuple understood by _to_str/_to_sambamba
    :param tmp_dir: directory for the sliced output
    :param config: bcbio configuration used to locate the sambamba binary
    :return: path to the sliced BAM file
    """
    name_file = os.path.splitext(os.path.basename(in_bam))[0]
    # The original joined tmp_dir twice (os.path.join(tmp_dir,
    # os.path.join(tmp_dir, ...))), which duplicates the directory for a
    # relative tmp_dir; a single join is the intent.
    out_file = os.path.join(tmp_dir, name_file + _to_str(region) + ".bam")
    sambamba = config_utils.get_program("sambamba", config)
    region = _to_sambamba(region)
    with file_transaction(out_file) as tx_out_file:
        cmd = ("{sambamba} slice {in_bam} {region} -o {tx_out_file}")
        do.run(cmd.format(**locals()), "Slice region", {})
    return out_file
def log_update(self, service, to_update, status, count):
    """Log the outcome of an update run.

    :param service: service object
    :param to_update: boolean, whether an update was attempted
    :param status: boolean, whether everything worked fine
    :param count: integer, number of data items updated
    """
    if not to_update:
        logger.debug("{} nothing new ".format(service))
        return
    if status:
        msg = "{} - {} new data".format(service, count)
        update_result(service.id, msg="OK", status=status)
        logger.info(msg)
    else:
        msg = "{} AN ERROR OCCURS ".format(service)
        update_result(service.id, msg=msg, status=status)
        logger.warning(msg)
def _weighted_formula(form, weight_func):
    """Yield (element, count, weight) for each formula element.

    Hydrogen entries are skipped.
    """
    for element, count in form.items():
        if element == Atom.H:
            # Hydrogen is excluded from the weighting.
            continue
        yield element, count, weight_func(element)
def _identify_eds_ing ( first , second ) :
"""Find nodes connecting adjacent edges .
Args :
first ( Edge ) : Edge object representing the first edge .
second ( Edge ) : Edge object representing the second edge .
Returns :
tuple [ int , int , set [ int ] ] : The first two values represent left and right node
indicies of the new edge . The third value is the new dependence set .""" | A = set ( [ first . L , first . R ] )
A . update ( first . D )
B = set ( [ second . L , second . R ] )
B . update ( second . D )
depend_set = A & B
left , right = sorted ( list ( A ^ B ) )
return left , right , depend_set |
def get_file_descriptor(self):
    """Returns the file descriptor used for passing to the select call when
    listening on the message queue, or None when there is no connection."""
    connection = self._subscription.connection
    # `and` short-circuits to None/falsy connection when not connected.
    return connection and connection._sock.fileno()
def spmatrix(self, reordered=True, symmetric=False):
    """Convert this :py:class:`cspmatrix` A to a CVXOPT sparse matrix.

    A reordered matrix is returned if `reordered` is True (default);
    otherwise the inverse permutation is applied.  Only the default
    options are allowed when A represents a Cholesky factor.

    :param reordered: boolean (default: True)
    :param symmetric: boolean (default: False)
    """
    n = self.symb.n
    snptr = self.symb.snptr
    snode = self.symb.snode
    relptr = self.symb.relptr
    snrowidx = self.symb.snrowidx
    sncolptr = self.symb.sncolptr
    blkptr = self.symb.blkptr
    blkval = self.blkval

    if self.is_factor:
        if symmetric:
            raise ValueError("'symmetric = True' not implemented for Cholesky factors")
        if not reordered:
            raise ValueError("'reordered = False' not implemented for Cholesky factors")
        snpost = self.symb.snpost
        blkval = +blkval  # work on a copy; do not overwrite the factor
        for k in snpost:
            j = snode[snptr[k]]              # representative vertex
            nn = snptr[k + 1] - snptr[k]     # |Nk|
            na = relptr[k + 1] - relptr[k]   # |Ak|
            if na == 0:
                continue
            nj = na + nn
            if nn == 1:
                blas.scal(blkval[blkptr[k]], blkval, offset=blkptr[k] + 1, n=na)
            else:
                blas.trmm(blkval, blkval, transA="N", diag="N", side="R", uplo="L",
                          m=na, n=nn, ldA=nj, ldB=nj,
                          offsetA=blkptr[k], offsetB=blkptr[k] + nn)

    # Count the number of nonzeros in each column.
    cc = matrix(0, (n, 1))
    for k in range(self.symb.Nsn):
        nn = snptr[k + 1] - snptr[k]
        na = relptr[k + 1] - relptr[k]
        nj = nn + na
        for i in range(nn):
            j = snode[snptr[k] + i]
            cc[j] = nj - i

    # Build the column pointer.
    cp = [0]
    for i in range(n):
        cp.append(cp[-1] + cc[i])
    cp = matrix(cp)

    # Copy data and row indices supernode by supernode.
    val = matrix(0.0, (cp[-1], 1))
    ri = matrix(0, (cp[-1], 1))
    for k in range(self.symb.Nsn):
        nn = snptr[k + 1] - snptr[k]
        na = relptr[k + 1] - relptr[k]
        nj = nn + na
        for i in range(nn):
            j = snode[snptr[k] + i]
            blas.copy(blkval, val, offsetx=blkptr[k] + nj * i + i,
                      offsety=cp[j], n=nj - i)
            ri[cp[j]:cp[j + 1]] = snrowidx[sncolptr[k] + i:sncolptr[k + 1]]

    I = []
    J = []
    for i in range(n):
        I += list(ri[cp[i]:cp[i + 1]])
        J += (cp[i + 1] - cp[i]) * [i]

    tmp = spmatrix(val, I, J, (n, n))  # reordered and lower triangular
    if reordered or self.symb.p is None:
        # Reordered matrix (do not apply inverse permutation).
        if not symmetric:
            return tmp
        return symmetrize(tmp)
    # Apply the inverse permutation.
    tmp = perm(symmetrize(tmp), self.symb.ip)
    if symmetric:
        return tmp
    return tril(tmp)
def _write(self, f):
    """Serialize this NDEF record to a file-like object."""
    log.debug("writing ndef record at offset {0}".format(f.tell()))
    record_type = self.type
    record_name = self.name
    record_data = self.data
    # Map the record type onto a TNF (type name format) value, stripping
    # any well-known / external URN prefix from the written type field.
    if record_type == '':
        header_flags = 0
        record_name = ''
        record_data = ''
    elif record_type.startswith("urn:nfc:wkt:"):
        header_flags = 1
        record_type = record_type[12:]
    elif re.match(r'[a-zA-Z0-9-]+/[a-zA-Z0-9-+.]+', record_type):
        header_flags = 2  # media type, written as-is
    elif re.match(r'[a-zA-Z][a-zA-Z0-9+-.]*://', record_type):
        header_flags = 3  # absolute URI, written as-is
    elif record_type.startswith("urn:nfc:ext:"):
        header_flags = 4
        record_type = record_type[12:]
    elif record_type == 'unknown':
        header_flags = 5
        record_type = ''
    elif record_type == 'unchanged':
        header_flags = 6
        record_type = ''
    type_length = len(record_type)
    data_length = len(record_data)
    name_length = len(record_name)
    if self._message_begin:
        header_flags |= 0x80
    if self._message_end:
        header_flags |= 0x40
    if data_length < 256:
        header_flags |= 0x10  # short record
    if name_length > 0:
        header_flags |= 0x08  # ID field present
    if data_length < 256:
        f.write(struct.pack(">BBB", header_flags, type_length, data_length))
    else:
        f.write(struct.pack(">BBL", header_flags, type_length, data_length))
    if name_length > 0:
        f.write(struct.pack(">B", name_length))
    f.write(record_type)
    f.write(record_name)
    f.write(record_data)
def fetch(self, minutes=values.unset, start_date=values.unset, end_date=values.unset, task_channel=values.unset):
    """Fetch a WorkerStatisticsInstance.

    :param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past.
    :param datetime start_date: Filter cumulative statistics by a start date.
    :param datetime end_date: Filter cumulative statistics by an end date.
    :param unicode task_channel: Filter cumulative statistics by TaskChannel.
    :returns: Fetched WorkerStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_statistics.WorkerStatisticsInstance
    """
    return self._proxy.fetch(
        minutes=minutes,
        start_date=start_date,
        end_date=end_date,
        task_channel=task_channel,
    )
def get_group_for_col(self, table_name, col_name):
    """Look up the group name for a column header in the data model.

    Parameters
    ----------
    table_name : str
    col_name : str

    Returns
    -------
    group_name : str
        The group, or '' when the column is not in the data model.
    """
    table_df = self.dm[table_name]
    try:
        return table_df.loc[col_name, 'group']
    except KeyError:
        return ''
def _read_para_reg_failed(self, code, cbit, clen, *, desc, length, version):
    """Read a HIP REG_FAILED parameter [RFC 8003].

    The parameter carries a min/max lifetime byte pair followed by
    ``clen - 2`` one-byte registration failure type codes, then padding
    up to ``length``.

    Returns a dict with keys: type, critical, length, lifetime
    (a ``Lifetime(min, max)`` namedtuple) and reg_type (tuple of names).
    """
    Lifetime = collections.namedtuple('Lifetime', ('min', 'max'))
    min_lifetime = self._read_unpack(1)
    max_lifetime = self._read_unpack(1)
    reg_types = []
    for _ in range(clen - 2):
        type_code = self._read_unpack(1)
        kind = _REG_FAILURE_TYPE.get(type_code)
        if kind is None:
            # Codes outside the registry fall into the two reserved ranges.
            if 0 <= type_code <= 200:
                kind = 'Unassigned (IETF Review)'
            elif 201 <= type_code <= 255:
                kind = 'Unassigned (Reserved for Private Use)'
            else:
                raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')
        reg_types.append(kind)
    reg_failed = dict(
        type=desc,
        critical=cbit,
        length=clen,
        lifetime=Lifetime(min_lifetime, max_lifetime),
        reg_type=tuple(reg_types),
    )
    # Consume the padding that rounds the parameter up to `length`.
    padding = length - clen
    if padding:
        self._read_fileng(padding)
    return reg_failed
def get_pickup_time_estimates(self, start_latitude, start_longitude, product_id=None):
    """Get pickup time estimates for products at a given location.

    Parameters
        start_latitude (float): latitude component of the start location.
        start_longitude (float): longitude component of the start location.
        product_id (str): unique ID of the requested product; defaults to
            the cheapest product for the location when omitted.

    Returns
        (Response): a Response containing each product's pickup time estimates.
    """
    args = OrderedDict([
        ('start_latitude', start_latitude),
        ('start_longitude', start_longitude),
        ('product_id', product_id),
    ])
    return self._api_call('GET', 'v1.2/estimates/time', args=args)
def dragMoveEvent(self, event):
    """Process the drag-move event using the filter from setDragDropFilter.

    :param event: <QDragEvent>
    """
    drop_filter = self.dragDropFilter()
    # A truthy filter result means the event was fully handled.
    if drop_filter and drop_filter(self, event):
        return
    super(XTableWidget, self).dragMoveEvent(event)
def convert_epoch_to_timestamp(cls, timestamp, tsformat):
    """Convert UNIX-epoch seconds into a formatted (UTC) timestamp string.

    :param float timestamp: timestamp as UNIX epochs.
    :param string tsformat: strftime-style output format (see
        :py:func:`time.strptime` for valid directives).
    :return: the timestamp rendered in the requested format.
    :rtype: string
    """
    return time.strftime(tsformat, time.gmtime(timestamp))
def set_auth_request(self, interface_id, address=None):
    """Set the authentication request field for the specified engine
    interface, then refresh the engine state."""
    self.interface.set_auth_request(interface_id, address)
    self._engine.update()
def get_crumbs(self):
    """Get crumbs for navigation links.

    Returns:
        tuple: the crumbs of the parent classes (walked via ``__mro__``,
        most distant ancestor first) concatenated with this instance's
        own crumbs.
    """
    crumbs = []
    for ancestor in reversed(type(self).__mro__[1:]):
        crumbs.extend(getattr(ancestor, 'crumbs', ()))
    crumbs.extend(list(self.crumbs))
    return tuple(crumbs)
def properties(self, name=None, pk=None, category=Category.INSTANCE, **kwargs):
    # type: (Optional[str], Optional[str], Optional[str], **Any) -> List[Property]
    """Retrieve properties.

    Additional ``keyword=value`` arguments are forwarded as request
    parameters; see the KE-chain API documentation for supported query
    parameters.

    :param name: name to limit the search for.
    :param pk: primary key or id (UUID) of the property to search for.
    :param category: filter by category; defaults to INSTANCE (also MODEL or None).
    :param kwargs: (optional) additional search keyword arguments.
    :return: list of :class:`models.Property`
    :raises NotFoundError: when no `Property` is found
    """
    request_params = {'name': name, 'id': pk, 'category': category}
    if kwargs:
        request_params.update(**kwargs)
    response = self._request('GET', self._build_url('properties'), params=request_params)
    if response.status_code != requests.codes.ok:  # pragma: no cover
        raise NotFoundError("Could not retrieve properties")
    data = response.json()
    return [Property.create(p, client=self) for p in data['results']]
def stop_event_loop(self, event=None):
    """Stop a blocking event loop.

    Used so that interactive functions such as ginput and
    waitforbuttonpress can stop waiting for events.  A no-op when no
    event loop has been started.
    """
    if hasattr(self, '_event_loop'):
        if self._event_loop.IsRunning():
            self._event_loop.Exit()
        del self._event_loop
def on_headers(self, response, exc=None):
    """Perform the websocket upgrade as the ``on_headers`` event.

    Only acts on a 101 (Switching Protocols) response; anything else is
    ignored.
    """
    if response.status_code != 101:
        return
    connection = response.connection
    request = response.request
    handler = request.websocket_handler or WS()
    parser = request.client.frame_parser(kind=1)
    consumer = partial(WebSocketClient.create, response, handler, parser)
    connection.upgrade(consumer)
    response.event('post_request').fire()
    websocket = connection.current_consumer()
    response.request_again = lambda r: websocket
def get_query(self, q, request):
    """Return a queryset of Groups whose name or description contains ``q``.

    Either implement this method yourself or set ``search_field`` in the
    LookupChannel class definition.
    """
    return Group.objects.filter(Q(name__icontains=q) | Q(description__icontains=q))
def generate_plates(seed, world_name, output_dir, width, height, num_plates=10):
    """Run the plate simulation step only and write plate images.

    Eventually this should be expressed as full generation stopped at the
    "plates" step rather than a separate operation.

    :param seed: RNG seed
    :param world_name: name of the generated world
    :param output_dir: directory for the output images
    :param width: map width
    :param height: map height
    :param num_plates: number of tectonic plates
    """
    elevation, plates = generate_plates_simulation(seed, width, height,
                                                   num_plates=num_plates)
    world = World(world_name, Size(width, height), seed,
                  GenerationParameters(num_plates, -1.0, "plates"))
    world.elevation = (numpy.array(elevation).reshape(height, width), None)
    world.plates = numpy.array(plates, dtype=numpy.uint16).reshape(height, width)
    # Plain elevation image.
    filename = '%s/plates_%s.png' % (output_dir, world_name)
    draw_simple_elevation_on_file(world, filename, None)
    print("+ plates image generated in '%s'" % filename)
    # Land-centered version of the same image.
    geo.center_land(world)
    filename = '%s/centered_plates_%s.png' % (output_dir, world_name)
    draw_simple_elevation_on_file(world, filename, None)
    print("+ centered plates image generated in '%s'" % filename)
def evaluate(self, input_data, targets, return_cache=False, prediction=True):
    """Evaluate the loss function without computing gradients.

    **Parameters:**

    input_data : GPUArray
        Data to evaluate.
    targets : GPUArray
        Targets.
    return_cache : bool, optional
        Whether to also return intermediary variables and hidden
        activations from the computation.
    prediction : bool, optional
        Whether to use the prediction model (relevant for dropout: weights
        are scaled by 1 - dropout in layers that use it).

    **Returns:**

    loss : float
        Value of the loss function (plus any L1/L2 penalties).
    hidden_cache, activations : list, only when ``return_cache == True``
        As returned by :meth:`hebel.models.NeuralNet.feed_forward`.
    """
    # Forward pass.
    activations, hidden_cache = self.feed_forward(
        input_data, return_cache=True, prediction=prediction)
    loss = self.top_layer.train_error(
        None, targets, average=False, cache=activations, prediction=prediction)
    # Add regularization penalties from every layer that uses them.
    for hidden_layer in self.hidden_layers:
        if hidden_layer.l1_penalty_weight:
            loss += hidden_layer.l1_penalty
        if hidden_layer.l2_penalty_weight:
            loss += hidden_layer.l2_penalty
    if self.top_layer.l1_penalty_weight:
        loss += self.top_layer.l1_penalty
    if self.top_layer.l2_penalty_weight:
        loss += self.top_layer.l2_penalty
    if not return_cache:
        return loss
    return loss, hidden_cache, activations
def augment_usage_errors(ctx, param=None):
    """Context manager that attaches extra information to exceptions that
    fly: fills in the context (and, for BadParameter, the parameter) when
    they are missing, then re-raises."""
    try:
        yield
    except BadParameter as e:
        if e.ctx is None:
            e.ctx = ctx
        if param is not None and e.param is None:
            e.param = param
        raise
    except UsageError as e:
        if e.ctx is None:
            e.ctx = ctx
        raise
def windows_df(self):
    """Return the windows (e.g. loaded with :meth:`block_windows`) as a dataframe.

    Returns:
        [dataframe] -- window information indexed by (row, col) with a
        ``window_index`` column reflecting the original window order.
    """
    import pandas as pd
    if self.windows is None:
        raise Exception("You need to call the block_windows or windows before.")
    frames = [
        pd.DataFrame({"row": [row], "col": [col], "Window": [win]})
        for row, col, win in zip(self.windows_row, self.windows_col, self.windows)
    ]
    result = pd.concat(frames).set_index(["row", "col"])
    # window_index reflects the original (pre-sort) ordering.
    result["window_index"] = range(result.shape[0])
    return result.sort_index()
def map_to_array(pairs):
    """Map (tuid, line) pairs to a single array of tuids indexed by line.

    :param pairs: iterable of (tuid, line) pairs
    :return: list where position line-1 holds the tuid, or None when
        there are no pairs
    """
    if not pairs:
        return None
    tuid_maps = [TuidMap(*p) for p in pairs]
    max_line = max(m.line for m in tuid_maps)
    tuids = [None] * max_line
    for m in tuid_maps:
        # line == 0 is a placeholder for files that do not exist
        if m.line:
            tuids[m.line - 1] = m.tuid
    return tuids
def _build_model(self):
    """Build the model: LSTM modules plus a final sparse linear layer."""
    # NOTE(review): multiplying the one-element list repeats the *same*
    # RNN instance for every relation slot, i.e. all slots share weights —
    # confirm this sharing is intended rather than independent modules.
    self.lstms = nn.ModuleList(
        [
            RNN(
                num_classes=0,
                num_tokens=self.word_dict.s,
                emb_size=self.settings["emb_dim"],
                lstm_hidden=self.settings["hidden_dim"],
                attention=self.settings["attention"],
                dropout=self.settings["dropout"],
                bidirectional=self.settings["bidirectional"],
                use_cuda=self.settings["host_device"] in self._gpu,
            )
        ]
        * self.settings["relation_arity"]
    )
    if "input_dim" not in self.settings:
        raise ValueError("Model parameter input_dim cannot be None.")
    # Set up the final linear layer.
    self.sparse_linear = SparseLinear(
        self.settings["input_dim"], self.cardinality, self.settings["bias"])
def morpho(self, m):
    """Return the morphology string of rank *m* from data/morphos.la.

    :param m: morphology index
    :type m: int
    :return: the morphology description of rank m; "-" for the sentinel
        index equal to the list length
    :rtype: str
    :raises KeyError: when m is out of range
    """
    lang = "fr"
    # TODO: if translation languages are added, derive `lang` from the
    # requested target locale (see the original C++ logic).
    if m < 0 or m > len(self._morphos[lang]):
        raise KeyError("Morphology %s requested but not found" % m)
    if m == len(self._morphos[lang]):
        return "-"
    return self._morphos[lang][m]
def Emulation_setVisibleSize(self, width, height):
    """Function path: Emulation.setVisibleSize (Experimental).

    Resizes the frame/viewport of the page without affecting its
    container (e.g. the browser window); useful for fixed-size
    screenshots.  Not supported on Android.

    :param width: frame width (DIP), integer.
    :param height: frame height (DIP), integer.
    No return value from the protocol; returns the command result.
    """
    assert isinstance(width, (int,)), "Argument 'width' must be of type '['int']'. Received type: '%s'" % type(width)
    assert isinstance(height, (int,)), "Argument 'height' must be of type '['int']'. Received type: '%s'" % type(height)
    result = self.synchronous_command('Emulation.setVisibleSize', width=width, height=height)
    return result
def com_google_fonts_check_metadata_match_weight_postscript(font_metadata):
    """METADATA.pb weight matches postScriptName."""
    WEIGHTS = {
        "Thin": 100, "ThinItalic": 100,
        "ExtraLight": 200, "ExtraLightItalic": 200,
        "Light": 300, "LightItalic": 300,
        "Regular": 400, "Italic": 400,
        "Medium": 500, "MediumItalic": 500,
        "SemiBold": 600, "SemiBoldItalic": 600,
        "Bold": 700, "BoldItalic": 700,
        "ExtraBold": 800, "ExtraBoldItalic": 800,
        "Black": 900, "BlackItalic": 900,
    }
    # Every valid weight value maps to exactly two style names
    # (upright + italic), collected here in insertion order.
    pair = [(style, value) for style, value in WEIGHTS.items()
            if value == font_metadata.weight]
    if not pair:
        yield FAIL, ("METADATA.pb: Font weight value ({})"
                     " is invalid.").format(font_metadata.weight)
    elif not (font_metadata.post_script_name.endswith('-' + pair[0][0])
              or font_metadata.post_script_name.endswith('-' + pair[1][0])):
        yield FAIL, ("METADATA.pb: Mismatch between postScriptName (\"{}\")"
                     " and weight value ({}). The name must be"
                     " ended with \"{}\" or \"{}\"."
                     "").format(font_metadata.post_script_name,
                                pair[0][1], pair[0][0], pair[1][0])
    else:
        yield PASS, "Weight value matches postScriptName."
def dipole(self):
    """Calculate the dipole of the Slab along the surface normal.

    The Slab must be oxidation-state decorated for this to work properly;
    otherwise (missing ``oxi_state`` defaults to 0) the dipole is always 0.
    """
    dipole = np.zeros(3)
    centroid = np.sum(self.cart_coords, axis=0) / len(self)
    normal = self.normal
    for site in self:
        charge = sum(getattr(sp, "oxi_state", 0) * amt
                     for sp, amt in site.species.items())
        dipole += charge * np.dot(site.coords - centroid, normal) * normal
    return dipole
def findTargetNS(self, node):
    """Return the targetNamespace URI defined for the given node.

    Walks up the DOM tree from *node* until an element carrying a
    ``targetNamespace`` attribute is found.

    :raises DOMException: when the document root is reached without
        finding a targetNamespace.
    """
    attrget = self.getAttr
    # Removed the unused `attrkey = (self.NS_XMLNS, 'xmlns')` local — it
    # was dead code left over from an attribute-key lookup variant.
    DOCUMENT_NODE = node.DOCUMENT_NODE
    ELEMENT_NODE = node.ELEMENT_NODE
    while True:
        if node.nodeType != ELEMENT_NODE:
            node = node.parentNode
            continue
        result = attrget(node, 'targetNamespace', default=None)
        if result is not None:
            return result
        node = node.parentNode
        if node.nodeType == DOCUMENT_NODE:
            raise DOMException('Cannot determine target namespace.')
def to_dict(self):
    """Convert this embed object into a dict."""
    # Start from the slot-backed raw attributes, stripping the leading '_'.
    result = {
        key[1:]: getattr(self, key)
        for key in self.__slots__
        if key[0] == '_' and hasattr(self, key)
    }
    # Convert the convenience wrappers into their wire representation.
    try:
        colour = result.pop('colour')
    except KeyError:
        pass
    else:
        if colour:
            result['color'] = colour.value
    try:
        timestamp = result.pop('timestamp')
    except KeyError:
        pass
    else:
        if timestamp:
            result['timestamp'] = timestamp.isoformat()
    # Add the plain (non-slot) attributes when they are set.
    if self.type:
        result['type'] = self.type
    if self.description:
        result['description'] = self.description
    if self.url:
        result['url'] = self.url
    if self.title:
        result['title'] = self.title
    return result
def _remove_layer_and_reconnect ( self , layer ) :
"""Remove the layer , and reconnect each of its predecessor to each of
its successor""" | successors = self . get_successors ( layer )
predecessors = self . get_predecessors ( layer )
# remove layer ' s edges
for succ in successors :
self . _remove_edge ( layer , succ )
for pred in predecessors :
self . _remove_edge ( pred , layer )
# connect predecessors and successors
for pred in predecessors :
for succ in successors :
self . _add_edge ( pred , succ )
# remove layer in the data structures
self . layer_list . remove ( layer )
self . keras_layer_map . pop ( layer )
# re - assign input and output layers if layer happens to be an
# input / output layer
if layer in self . input_layers :
idx = self . input_layers . index ( layer )
self . input_layers . pop ( idx )
for pred in predecessors :
self . input_layers . insert ( idx , pred )
idx += 1
if layer in self . output_layers :
idx = self . output_layers . index ( layer )
self . output_layers . pop ( idx )
for succ in successors :
self . output_layers . insert ( idx , succ )
idx += 1 |
def gen_ref_docs(gen_index=False):
    # type: (bool) -> None
    """Generate reference documentation for the project with sphinx-refdoc.

    Args:
        gen_index (bool):
            Set to **True** to generate an index file listing the
            top-level packages.  Off by default since most projects have a
            single package and link to it directly (and sphinx would
            complain about a file not included in any toctree).
    """
    try:
        from refdoc import generate_docs
    except ImportError as ex:
        msg = ("You need to install sphinx-refdoc if you want to generate "
               "code reference docs.")
        print(msg, file=sys.stderr)
        log.err("Exception: {}".format(ex))
        sys.exit(-1)
    pretend = context.get('pretend', False)
    docs_dir = conf.get_path('docs.path', 'docs')
    docs_ref_dir = os.path.join(docs_dir, 'ref')
    refdoc_paths = conf.get('docs.reference', [])
    # Wipe any previously generated reference docs.
    if os.path.exists(docs_ref_dir):
        if not pretend:
            log.info('Removing existing reference docs')
            shutil.rmtree(docs_ref_dir)
        else:
            log.info('Would remove old reference docs')
    args = {
        'out_dir': docs_ref_dir,
        'verbose': context.get('verbose', 0),
    }
    if gen_index:
        args['gen_index'] = True
    pkg_paths = [conf.proj_path(p) for p in refdoc_paths]
    if not pretend:
        log.info('Generating reference documentation')
        generate_docs(pkg_paths, **args)
    else:
        log.info("Would generate reference docs with the following params")
        shell.cprint('<90>{}', util.yaml_dump(args).rstrip())
        shell.cprint('<90>paths:\n<34>{}', util.yaml_dump(pkg_paths).rstrip())
def get_uses_implied_permission_list(self):
    """Return all permissions implied by the target SDK or other permissions.

    :rtype: list of [permission_name, maxSdkVersion] pairs
    """
    target_sdk_version = self.get_effective_target_sdk_version()
    READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
    READ_CONTACTS = 'android.permission.READ_CONTACTS'
    READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
    READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
    WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
    WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
    WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'
    implied = []
    implied_write_external = False
    # Before SDK 4, external-storage write and phone-state read were implicit.
    if target_sdk_version < 4:
        if WRITE_EXTERNAL_STORAGE not in self.permissions:
            implied.append([WRITE_EXTERNAL_STORAGE, None])
            implied_write_external = True
        if READ_PHONE_STATE not in self.permissions:
            implied.append([READ_PHONE_STATE, None])
    # WRITE external storage implies READ, bounded by WRITE's maxSdkVersion.
    if ((WRITE_EXTERNAL_STORAGE in self.permissions or implied_write_external)
            and READ_EXTERNAL_STORAGE not in self.permissions):
        max_sdk_version = None
        for perm_name, perm_version in self.uses_permissions:
            if perm_name == WRITE_EXTERNAL_STORAGE:
                max_sdk_version = perm_version
                break
        implied.append([READ_EXTERNAL_STORAGE, max_sdk_version])
    # Before SDK 16, contact permissions implied call-log permissions.
    if target_sdk_version < 16:
        if READ_CONTACTS in self.permissions and READ_CALL_LOG not in self.permissions:
            implied.append([READ_CALL_LOG, None])
        if WRITE_CONTACTS in self.permissions and WRITE_CALL_LOG not in self.permissions:
            implied.append([WRITE_CALL_LOG, None])
    return implied
def kill_current_session(ctx: Context_T) -> None:
    """Force-kill the current session of the given context,
    regardless of whether it is running.

    :param ctx: message context
    """
    session_key = context_id(ctx)
    # pop with default: removing an absent session is a no-op.
    _sessions.pop(session_key, None)
def write_grid_tpl(name, tpl_file, suffix, zn_array=None, shape=None,
                   spatial_reference=None, longnames=False):
    """Write a grid-based template file.

    Parameters
    ----------
    name : str
        the base parameter name
    tpl_file : str
        the template file to write - include path
    suffix : str
        suffix used in long parameter names and the parameter group name
    zn_array : numpy.ndarray, optional
        an array used to skip inactive cells (values < 1)
    shape : tuple, optional
        (nrow, ncol) of the grid; required when zn_array is None
    spatial_reference : optional
        grid spatial reference exposing xcentergrid/ycentergrid
    longnames : bool
        if True, write long descriptive parameter names

    Returns
    -------
    df : pandas.DataFrame
        a dataframe with parameter information
    """
    if shape is None and zn_array is None:
        raise Exception("must pass either zn_array or shape")
    elif shape is None:
        shape = zn_array.shape
    parnme, x, y = [], [], []
    with open(tpl_file, 'w') as f:
        f.write("ptf ~\n")
        for i in range(shape[0]):
            for j in range(shape[1]):
                if zn_array is not None and zn_array[i, j] < 1:
                    # inactive cell: fixed value, no parameter
                    pname = ' 1.0 '
                else:
                    if longnames:
                        # bug fix: format was "{0}_i:{0}_j:{1}_{2}", which
                        # repeated `name`, dropped `i` and shifted j/suffix
                        pname = "{0}_i:{1}_j:{2}_{3}".format(name, i, j, suffix)
                        if spatial_reference is not None:
                            # bug fix: referenced undefined name `sr`
                            pname += "_x:{0:10.2E}_y:{1:10.2E}".format(
                                spatial_reference.xcentergrid[i, j],
                                spatial_reference.ycentergrid[i, j])
                    else:
                        pname = "{0}{1:03d}{2:03d}".format(name, i, j)
                        if len(pname) > 12:
                            # bug fix: was `raise ("...")`, which raises a
                            # TypeError (str is not an exception)
                            raise Exception("grid pname too long:{0}".format(pname))
                    parnme.append(pname)
                    pname = ' ~ {0} ~ '.format(pname)
                    if spatial_reference is not None:
                        x.append(spatial_reference.xcentergrid[i, j])
                        y.append(spatial_reference.ycentergrid[i, j])
                f.write(pname)
            f.write("\n")
    df = pd.DataFrame({"parnme": parnme}, index=parnme)
    if spatial_reference is not None:
        df.loc[:, 'x'] = x
        df.loc[:, 'y'] = y
    df.loc[:, "pargp"] = "{0}{1}".format(suffix.replace('_', ''), name)
    df.loc[:, "tpl"] = tpl_file
    return df
def daemonize(pid_file=None, cwd=None):
    """Detach from the controlling terminal and run as a background daemon.

    Classic double-fork; modified version of
    code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/
    author = "Chad J. Schroeder"
    copyright = "Copyright (C) 2005 Chad J. Schroeder"

    :param pid_file: file to record the daemon pid in (default: <pid>.pid)
    :param cwd: working directory for the daemon (default: '/')
    :return: 0 in the daemon process; the parent processes never return
        (they ``os._exit``).
    """
    cwd = cwd or '/'
    try:
        pid = os.fork()
    except OSError as e:
        raise Exception("%s [%d]" % (e.strerror, e.errno))
    if pid == 0:
        # First child: become session leader, then fork again so the
        # daemon can never reacquire a controlling terminal.
        os.setsid()
        try:
            pid = os.fork()
        except OSError as e:
            raise Exception("%s [%d]" % (e.strerror, e.errno))
        if pid == 0:
            # Second child: the actual daemon process.
            os.chdir(cwd)
            os.umask(0)
        else:
            # Exit the first child (parent of the second child).
            os._exit(0)
    else:
        # Exit the original parent.
        os._exit(0)
    # Close every inherited file descriptor.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = 1024
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:
            # fd wasn't open to begin with (ignored)
            pass
    # Reopen stdin on /dev/null, then point stdout/stderr at it too.
    os.open('/dev/null', os.O_RDWR)  # standard input (0)
    os.dup2(0, 1)                    # standard output (1)
    os.dup2(0, 2)                    # standard error (2)
    pid_file = pid_file or '%s.pid' % os.getpid()
    write_file(pid_file, os.getpid())
    return 0
def set_dict_value(dictionary, keys, value):
    """Set a value in a (nested) dictionary by a list of keys.

    .. note:: Side-effects
        The dictionary is edited in place (no copy is made) and also
        returned.  Missing intermediate dicts are created on the way.

    Parameters
    ----------
    dictionary : dict
    keys : List[Any]
    value : object

    Returns
    -------
    dictionary : dict

    Examples
    --------
    >>> d = {'a': {'b': 'c', 'd': 'e'}}
    >>> set_dict_value(d, ['a', 'b'], 'foobar') == {'a': {'b': 'foobar', 'd': 'e'}}
    True
    """
    current = dictionary
    for key in keys[:-1]:
        current = current.setdefault(key, {})
    current[keys[-1]] = value
    return dictionary
def findGlyph(self, glyphName):
    """Return a ``list`` of the group(s) associated with **glyphName**.

    **glyphName** is a :ref:`type-string`.  An empty ``list`` is
    returned when no group contains **glyphName**. ::

        >>> font.groups.findGlyph("A")
        ["A_accented"]
    """
    glyphName = normalizers.normalizeGlyphName(glyphName)
    found = self._findGlyph(glyphName)
    return [self.keyNormalizer.__func__(groupName) for groupName in found]
def _get_current_migration_state ( self , loader , apps ) :
"""Extract the most recent migrations from the relevant apps .
If no migrations have been performed , return ' zero ' as the most recent migration for the app .
This should only be called from list _ migrations ( ) .""" | # Only care about applied migrations for the passed - in apps .
apps = set ( apps )
relevant_applied = [ migration for migration in loader . applied_migrations if migration [ 0 ] in apps ]
# Sort them by the most recent migration and convert to a dictionary ,
# leaving apps as keys and most recent migration as values .
# NB : this is a dirty trick
most_recents = dict ( sorted ( relevant_applied , key = lambda m : m [ 1 ] ) )
# Fill in the apps with no migrations with ' zero ' .
# NOTE : Unicode Django application names are unsupported .
most_recents = [ [ app , 'zero' if app not in most_recents else str ( most_recents [ app ] ) ] for app in apps ]
return most_recents |
def get_hostfirmware(self, callb=None):
    """Convenience method to request the device firmware info from the device.

    This method checks whether the value has already been retrieved from the
    device; if so, it simply returns it. If not, it requests the information
    from the device and arranges for ``callb`` to be executed when a response
    is received. The default callback simply caches the value.

    :param callb: Callable to be used when the response is received. If not
        set, only the caching callback (resp_set_hostfirmware) is used.
    :type callb: callable
    :returns: The cached values. Note: this is a 2-tuple of
        (host_firmware_version, host_firmware_build_timestamp), not the
        plain string the original docstring claimed; either element may
        still be None if no response has arrived yet.
    :rtype: tuple
    """
    if self.host_firmware_version is None:
        mypartial = partial(self.resp_set_hostfirmware)
        # Chain the caching callback with the caller-supplied one, if any.
        if callb:
            mycallb = lambda x, y: (mypartial(y), callb(x, y))
        else:
            mycallb = lambda x, y: mypartial(y)
        response = self.req_with_resp(GetHostFirmware, StateHostFirmware, mycallb)
    return (self.host_firmware_version, self.host_firmware_build_timestamp)
def clean(cls, cpf):
    u"""Retorna apenas os dígitos do CPF, zero-padded to 11 characters.

    >>> CPF.clean('581.194.436-59')
    '58119443659'

    :param cpf: a CPF, either as a formatted string or as an int.
    :return: an 11-digit string.
    """
    # ``six.string_types`` is just (str,) on Python 3 (this codebase uses
    # argument-less super(), so it is Python-3-only); plain ``str`` drops
    # the six dependency for this method.
    if isinstance(cpf, str):
        cpf = int(re.sub('[^0-9]', '', cpf))
    return '{0:011d}'.format(cpf)
def generate_version_file(self, schema_filename, binding_filename):
    """Given a DataONE schema, generate a file that contains version
    information about the schema.

    If the SVN version lookup yields nothing usable (a TypeError on
    unpacking), no version file is written.
    """
    version_path = os.path.join(self.binding_dir, binding_filename + '_version.txt')
    schema_path = os.path.join(self.schema_dir, schema_filename)
    try:
        tstamp, svnpath, svnrev, version = self.get_version_info_from_svn(schema_path)
    except TypeError:
        # Lookup returned something non-unpackable; skip silently.
        return
    self.write_version_file(version_path, tstamp, svnpath, svnrev, version)
def get_objects(self, queryset=None):
    """Return an iterator of Django model objects in Elasticsearch order,
    optionally using the given Django queryset. If no queryset is
    given, a default queryset (Model.objects.all) is used.

    Each yielded object is annotated with ``_es`` (its search result) and
    ``_score`` (the ES score, or None when unavailable).

    :param queryset: Optional queryset to filter in.
    :return: generator of model instances, in search-result order.
    """
    # ``self`` is a (possibly empty) sequence of search results.
    if not self:
        return
    if not queryset:
        queryset = self[0].django_model.objects.all()
    # Only results whose model matches the queryset's model participate.
    pks = [res.pk for res in self if res.django_model == queryset.model]
    # NOTE(review): object_map keys are text_type(pk) while ``pks`` holds
    # raw result pks - the .get(pk) below presumably relies on result pks
    # already being strings; confirm against the search-result type.
    object_map = dict((text_type(obj.pk), obj) for obj in queryset.filter(pk__in=pks))
    result_map = dict((res.pk, res) for res in self if res.pk in object_map)
    # Yield in Elasticsearch order, skipping pks the DB no longer holds.
    for pk in pks:
        obj = object_map.get(pk)
        if obj:
            obj._es = result_map.get(pk)
            try:
                obj._score = obj._es._meta.score
            except AttributeError:
                obj._score = None
            yield obj
def get_timeseries(self, child_agg_count=0, dataframe=False):
    """Get time series data for the specified fields and period of analysis.

    :param child_agg_count: the child aggregation count to be used
        default=0
    :param dataframe: if dataframe=True, return a pandas.DataFrame object
    :returns: dictionary containing "date", "value" and "unixtime" keys
        with lists as values containing data from each bucket in the
        aggregation
    :raises RuntimeError: when the parent aggregation has no time buckets.
    """
    res = self.fetch_aggregation_results()
    ts = {"date": [], "value": [], "unixtime": []}
    # The parent aggregation is addressed by its (stringified) counter index.
    if 'buckets' not in res['aggregations'][str(self.parent_agg_counter - 1)]:
        raise RuntimeError("Aggregation results have no buckets in time series results.")
    for bucket in res['aggregations'][str(self.parent_agg_counter - 1)]['buckets']:
        ts['date'].append(parser.parse(bucket['key_as_string']).date())
        if str(child_agg_count) in bucket:
            # We have a subaggregation with the value
            # If it is percentiles we get the median
            if 'values' in bucket[str(child_agg_count)]:
                val = bucket[str(child_agg_count)]['values']['50.0']
                if val == 'NaN':
                    # ES returns NaN. Convert to None for matplotlib graph
                    val = None
                ts['value'].append(val)
            else:
                ts['value'].append(bucket[str(child_agg_count)]['value'])
        else:
            # No subaggregation: fall back to the bucket document count.
            ts['value'].append(bucket['doc_count'])
        # unixtime comes in ms from ElasticSearch
        ts['unixtime'].append(bucket['key'] / 1000)
    if dataframe:
        df = pd.DataFrame.from_records(ts, index="date")
        # Missing values are zero-filled for plotting convenience.
        return df.fillna(0)
    return ts
async def connect_to_endpoints(self, *endpoints: ConnectionConfig) -> None:
    """Connect to the given endpoints and await until all connections are
    established.

    Raises whatever ``_throw_if_already_connected`` raises when any endpoint
    is already connected.
    """
    self._throw_if_already_connected(*endpoints)
    # The explicit ``loop=`` argument was dropped: it has been deprecated
    # since Python 3.8 and removed in 3.10, and inside a coroutine
    # asyncio.gather() always uses the running event loop anyway.
    await asyncio.gather(*(self._await_connect_to_endpoint(endpoint) for endpoint in endpoints))
def download(ctx):
    """Download blobs or files from Azure Storage."""
    action = settings.TransferAction.Download
    settings.add_cli_options(ctx.cli_options, action)
    ctx.initialize(action)
    download_specs = settings.create_download_specifications(ctx.cli_options, ctx.config)
    # CLI options are fully consumed at this point.
    del ctx.cli_options
    for download_spec in download_specs:
        downloader = blobxfer.api.Downloader(ctx.general_options, ctx.credentials, download_spec)
        downloader.start()
def _propagate_packages(self):
    r"""Propagate packages.

    Make sure that all the packages included in the previous containers
    are part of the full list of packages.
    """
    super()._propagate_packages()
    # Collect packages from every item already placed in the preamble.
    for item in (self.preamble):
        if isinstance(item, LatexObject):
            if isinstance(item, Container):
                # Containers must first gather their own children's packages.
                item._propagate_packages()
            for p in item.packages:
                self.packages.add(p)
def is_local(self, hadoop_conf=None, hadoop_home=None):
    """Is Hadoop configured to run in local mode?

    By default, it is. [pseudo-]distributed mode must be explicitly
    configured.
    """
    conf = self.hadoop_params(hadoop_conf, hadoop_home)
    framework_keys = (
        'mapreduce.framework.name',
        'mapreduce.jobtracker.address',
        'mapred.job.tracker',
    )
    # Local unless any known framework/jobtracker key says otherwise.
    return all(conf.get(key, 'local').lower() == 'local' for key in framework_keys)
def fastq_info(path):
    """Compute summary statistics for a gzip-compressed FASTQ file.

    Found some info about how to ignore warnings in code blocks here:
    - http://stackoverflow.com/questions/14463277/how-to-disable-python-warnings

    :param path: path to a gzipped FASTQ file.
    :return: dict with keys "numBases", "numReads", "numGCBases",
        "portionGC" and "avgReadLen".
    """
    numBases = 0
    numReads = 0
    readLengths = Counter()
    GCTot = 0
    with warnings.catch_warnings():
        # Silence Biopython's parser warnings while reading.
        warnings.simplefilter("ignore")
        handle = gzip.open(path, "rt")
        for record in SeqIO.parse(handle, "fastq"):
            numBases += len(record)
            numReads += 1
            readLengths[len(record)] += 1
            # Count G/C in either case, plus the S (G-or-C) ambiguity code.
            GCTot += sum(record.seq.count(x) for x in ['G', 'C', 'g', 'c', 'S', 's'])
        handle.close()
    # NOTE(review): an empty FASTQ file makes both divisions below raise
    # ZeroDivisionError - confirm callers never pass empty files.
    GCPer = (GCTot / numBases)
    avgReadLen = (sum(value * count for value, count in readLengths.items()) / numReads)
    return {"numBases": numBases, "numReads": numReads, "numGCBases": GCTot, "portionGC": GCPer, "avgReadLen": avgReadLen}
def stop():
    """Stop the server, invalidating any viewer URLs.

    This allows any previously-referenced data arrays to be garbage collected
    if there are no other references to them.
    """
    global global_server
    if global_server is not None:
        # Capture the ioloop in a local: ``global_server`` is cleared below,
        # but the scheduled callback still needs the loop object.
        ioloop = global_server.ioloop
        def stop_ioloop():
            ioloop.stop()
            ioloop.close()
        # Schedule the shutdown to run on the loop's own thread.
        global_server.ioloop.add_callback(stop_ioloop)
        global_server = None
def _packed_data(self):
    '''Returns the bit-packed data extracted from the data file. This is not
    so useful to analyze. Use the complex_data method instead.

    :return: 1-D numpy int8 vector of packed samples with per-half-frame
        headers removed and half-frame order flipped (so it is
        time-ordered).
    '''
    header = self.header()
    # View the raw bytes as one row per half frame.
    packed_data = np.frombuffer(self.data, dtype=np.int8).reshape((header['number_of_half_frames'], header['half_frame_bytes']))
    # create array of half frames
    packed_data = packed_data[::-1, constants.header_offset:]
    # slice out header and flip half frame order to reverse time ordering
    packed_data = packed_data.reshape((header['number_of_half_frames'] * (header['half_frame_bytes'] - constants.header_offset)))
    # compact into vector
    return packed_data
def assimilate(self, path):
    """Parse vasp runs, insert the result into the db, and return the
    task_id or doc of the insertion.

    Returns:
        If in simulate_mode, the entire doc is returned for debugging
        purposes. Else, only the task_id of the inserted doc is returned.
        On any failure the traceback is logged and False is returned.
    """
    try:
        d = self.get_task_doc(path)
        if self.mapi_key is not None and d["state"] == "successful":
            self.calculate_stability(d)
        return self._insert_doc(d)
    except Exception:
        # logger.exception() records the full traceback; this replaces the
        # former manual ``import traceback`` / ``format_exc()`` dance and
        # the unused ``as ex`` binding.
        logger.exception("assimilate failed for %s", path)
        return False
def _run_program(self, bin, fastafile, params=None):
    """Run XXmotif and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)
    # XXmotif writes its PWM output next to the input basename, in tmpdir.
    outfile = os.path.join(self.tmpdir, os.path.basename(fastafile.replace(".fa", ".pwm")))
    stdout = ""
    stderr = ""
    cmd = "%s %s %s --localization --batch %s %s" % (bin, self.tmpdir, fastafile, params["background"], params["strand"],)
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    stdout += out.decode()
    stderr += err.decode()
    motifs = []
    if os.path.exists(outfile):
        motifs = read_motifs(outfile, fmt="xxmotif")
        # Prefix motif ids with the tool name to keep them unique.
        for m in motifs:
            m.id = "{0}_{1}".format(self.name, m.id)
    else:
        # Surface the missing-output condition in both streams.
        stdout += "\nMotif file {0} not found!\n".format(outfile)
        stderr += "\nMotif file {0} not found!\n".format(outfile)
    return motifs, stdout, stderr
def uncomment(comment):
    """Converts the comment node received to a non-commented element, in
    place, and will return the new node.

    This may fail, primarily due to special characters within the comment
    that the xml parser is unable to handle. If it fails, this method will
    log an error and return None.

    :param comment: a ``minidom`` Comment node whose data is escaped XML.
    :return: the new element node, or None on parse failure.
    """
    parent = comment.parentNode
    # html.unescape() replaces HTMLParser().unescape(), which was
    # deprecated in Python 3.4 and removed in Python 3.9.
    data = html.unescape(comment.data)
    try:
        node = minidom.parseString(data).firstChild
    except xml.parsers.expat.ExpatError:
        # Could not parse!
        log.error('Could not uncomment node due to parsing error!')
        return None
    else:
        parent.replaceChild(node, comment)
        return node
def easeOutElastic(n, amplitude=1, period=0.3):
    """An elastic tween function that overshoots the destination and then
    "rubber bands" into the destination.

    Args:
        n (float): The time progress, starting at 0.0 and ending at 1.0.

    Returns:
        (float) The line progress, starting at 0.0 and ending at 1.0.
        Suitable for passing to getPointOnLine().
    """
    _checkRange(n)
    if amplitude < 1:
        # Sub-unit amplitudes are clamped to 1 (asin(1/amplitude) would be
        # undefined for them) and use the quarter-period phase shift.
        amplitude = 1
        phase_shift = period / 4
    else:
        phase_shift = period / (2 * math.pi) * math.asin(1 / amplitude)
    decay = amplitude * 2 ** (-10 * n)
    return decay * math.sin((n - phase_shift) * (2 * math.pi / period)) + 1
def package_install(name, **kwargs):
    '''Install a "package" on the ssh server'''
    # Build the remote command, appending the version when one was given.
    cmd = 'pkg_install ' + name
    version = kwargs.get('version', False)
    if version:
        cmd += ' ' + kwargs['version']
    # Send the command to execute, then "scrape" the output into a dict.
    out, err = DETAILS['server'].sendline(cmd)
    return parse(out)
def page(self, course, msg="", error=False):
    """Get all data and display the page.

    Builds per-aggregation tried/done submission counts for every
    aggregation of the course, splits them into aggregations tutored by the
    current user versus the others, and renders the template (or a CSV
    export when a "csv" query parameter is present).
    """
    aggregations = OrderedDict()
    taskids = list(course.get_tasks().keys())
    for aggregation in self.user_manager.get_course_aggregations(course):
        # Seed each aggregation entry with counters and its submissions URL.
        aggregations[aggregation['_id']] = dict(list(aggregation.items()) + [("tried", 0), ("done", 0), ("url", self.submission_url_generator(aggregation['_id']))])
        # Per-task tried/done counts for this aggregation's students.
        data = list(self.database.submissions.aggregate([{"$match": {"courseid": course.get_id(), "taskid": {"$in": taskids}, "username": {"$in": aggregation["students"]}}}, {"$group": {"_id": "$taskid", "tried": {"$sum": 1}, "done": {"$sum": {"$cond": [{"$eq": ["$result", "success"]}, 1, 0]}}}},]))
        for c in data:
            aggregations[aggregation['_id']]["tried"] += 1 if c["tried"] else 0
            aggregations[aggregation['_id']]["done"] += 1 if c["done"] else 0
    my_aggregations, other_aggregations = [], []
    for aggregation in aggregations.values():
        if self.user_manager.session_username() in aggregation["tutors"]:
            my_aggregations.append(aggregation)
        else:
            other_aggregations.append(aggregation)
    if "csv" in web.input():
        # NOTE(review): ``data`` here holds only the *last* loop iteration's
        # results - confirm the CSV export is meant to cover just that
        # aggregation.
        return make_csv(data)
    return self.template_helper.get_renderer().course_admin.aggregation_list(course, [my_aggregations, other_aggregations], msg, error)
def classify(self, url_path):
    """Classify an url."""
    # User-defined rules win: the first regex matching the path (sans the
    # leading slash) returns its literal form.
    for rule in self.user_defined_rules:
        if rule['re'].match(url_path[1:]):
            return rule['str']
    # Otherwise fall back to the generic numeric-segment simplification.
    return self.RE_SIMPLIFY_URL.sub(r'(\\d+)/', url_path)
def fit(self, **skip_gram_params):
    """Creates the embeddings using gensim's Word2Vec.

    :param skip_gram_params: Parameters for gensim.models.Word2Vec - do not
        supply 'size', it is taken from the Node2Vec 'dimensions' parameter
    :type skip_gram_params: dict
    :return: A gensim word2vec model
    """
    # Fall back to this instance's settings when the caller did not
    # override them explicitly.
    skip_gram_params.setdefault('workers', self.workers)
    skip_gram_params.setdefault('size', self.dimensions)
    return gensim.models.Word2Vec(self.walks, **skip_gram_params)
import re

def discard_lowercase(input_string):
    """Remove all lowercase ASCII characters from a string using a regex.

    Examples:
        discard_lowercase('KDeoALOklOOHserfLoAJSIskdsf') -> 'KDALOOOHLAJSI'
        discard_lowercase('ProducTnamEstreAmIngMediAplAYer') -> 'PTEAIMAAY'
        discard_lowercase('maNufacTuredbYSheZenTechNolOGIes') -> 'NTYSZTNOGI'

    :param input_string: A string that may contain both lower- and
        uppercase characters.
    :returns: The string with all lowercase [a-z] characters removed.
    """
    # The original wrapped re.sub in a single-use lambda; calling it
    # directly is equivalent and clearer.
    return re.sub('[a-z]', '', input_string)
def _are_nearby_parallel_boxes(self, b1, b2):
    "Are two boxes nearby, parallel, and similar in width?"
    if not self._are_aligned_angles(b1.angle, b2.angle):
        return False
    # Pick the smaller angle and measure the separation of the centers
    # along the "up" direction with respect to that angle.
    angle = min(b1.angle, b2.angle)
    up_direction = [-np.sin(angle), np.cos(angle)]
    separation = abs(np.dot(b1.center - b2.center, up_direction))
    if separation >= self.lineskip_tol * (b1.height + b2.height):
        return False
    # Both widths must be positive and within a factor of two of each other.
    if b1.width <= 0 or b2.width <= 0:
        return False
    return 0.5 < b1.width / b2.width < 2.0
def get_datatype(object_type, propid, vendor_id=0):
    """Return the datatype for the property of an object, or None when the
    object type or the property is unknown."""
    if _debug:
        get_datatype._debug("get_datatype %r %r vendor_id=%r", object_type, propid, vendor_id)
    # Resolve the object class for this (type, vendor) pair.
    cls = get_object_class(object_type, vendor_id)
    # A missing class or a missing property both yield None.
    prop = cls._properties.get(propid) if cls else None
    return prop.datatype if prop else None
def create_tokenizer(self, name, config=dict()):
    """Create a pipeline component from a factory.

    name (unicode): Factory name to look up in `Language.factories`.
    config (dict): Configuration parameters to initialise component.
    RETURNS (callable): Pipeline component.
    """
    # Unknown factory names surface as a KeyError with a helpful message.
    if name not in self.factories:
        raise KeyError(Errors.E002.format(name=name))
    return self.factories[name](self, **config)
def set_object(self, object, logmsg=None):  # @ReservedAssignment
    """Set the object we point to, possibly dereference our symbolic reference first.
    If the reference does not exist, it will be created.

    :param object: a refspec, a SymbolicReference or an Object instance. SymbolicReferences
        will be dereferenced beforehand to obtain the object they point to
    :param logmsg: If not None, the message will be used in the reflog entry to be
        written. Otherwise the reflog is not altered
    :note: plain SymbolicReferences may not actually point to objects by convention
    :return: self
    """
    if isinstance(object, SymbolicReference):
        object = object.object  # @ReservedAssignment
    # END resolve references
    # Assume detached unless proven otherwise; is_detached raises
    # ValueError for references that do not exist yet, in which case we
    # fall through and create the reference directly.
    is_detached = True
    try:
        is_detached = self.is_detached
    except ValueError:
        pass
    # END handle non-existing ones
    if is_detached:
        return self.set_reference(object, logmsg)
    # set the commit on our reference
    return self._get_reference().set_object(object, logmsg)
def define_mask_borders(image2d, sought_value, nadditional=0):
    """Generate mask avoiding undesired values at the borders.

    Set to True image borders with values equal to 'sought_value', plus
    'nadditional' extra pixels at each border.

    Parameters
    ----------
    image2d : numpy array
        Initial 2D image.
    sought_value : int, float, bool
        Pixel value that indicates missing data in the spectrum.
    nadditional : int
        Number of additional pixels to be masked at each border.

    Returns
    -------
    mask2d : numpy array
        2D mask.
    borders : list of tuples
        List of tuples (jmin, jmax) with the border limits (in array
        coordinates) found by find_pix_borders.
    """
    nrows, ncols = image2d.shape
    mask2d = np.zeros((nrows, ncols), dtype=bool)
    borders = []
    for irow in range(nrows):
        jmin, jmax = find_pix_borders(image2d[irow, :], sought_value=sought_value)
        borders.append((jmin, jmax))
        # (-1, ncols) means the whole row is valid data: nothing to mask.
        if (jmin, jmax) == (-1, ncols):
            continue
        if jmin != -1:
            # Mask the left border plus the extra margin.
            mask2d[irow, 0:jmin + nadditional + 1] = True
        if jmax != ncols:
            # Mask the right border plus the extra margin.
            mask2d[irow, jmax - nadditional:ncols] = True
    return mask2d, borders
def update_layers_esri_imageserver(service):
    """Update layers for an ESRI REST ImageServer.

    Sample endpoint: https://gis.ngdc.noaa.gov/arcgis/rest/services/bag_bathymetry/ImageServer/?f=json

    On any failure a failing ``Check`` record is stored for the service
    instead of letting the exception propagate.
    """
    try:
        esri_service = ArcImageService(service.url)
        # set srs
        # both mapserver and imageserver exposes just one srs at the service level
        # not sure if other ones are supported, for now we just store this one
        obj = json.loads(esri_service._contents)
        srs_code = obj['spatialReference']['wkid']
        srs, created = SpatialReferenceSystem.objects.get_or_create(code=srs_code)
        service.srs.add(srs)
        service.update_validity()
        layer, created = Layer.objects.get_or_create(name=obj['name'], service=service, catalog=service.catalog)
        if layer.active:
            layer.type = 'ESRI:ArcGIS:ImageServer'
            links = [[layer.type, service.url], ['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]]
            layer.title = obj['name']
            layer.abstract = esri_service.serviceDescription
            layer.url = service.url
            # Bounding box comes straight from the service extent.
            layer.bbox_x0 = str(obj['extent']['xmin'])
            layer.bbox_y0 = str(obj['extent']['ymin'])
            layer.bbox_x1 = str(obj['extent']['xmax'])
            layer.bbox_y1 = str(obj['extent']['ymax'])
            layer.page_url = layer.get_absolute_url
            links.append(['WWW:LINK', settings.SITE_URL.rstrip('/') + layer.page_url])
            layer.wkt_geometry = bbox2wktpolygon([layer.bbox_x0, layer.bbox_y0, layer.bbox_x1, layer.bbox_y1])
            # Build the CSW metadata record and the searchable text blob.
            layer.xml = create_metadata_record(identifier=str(layer.uuid), source=service.url, links=links, format='ESRI:ArcGIS:ImageServer', type=layer.csw_type, relation=service.id_string, title=layer.title, alternative=layer.title, abstract=layer.abstract, wkt_geometry=layer.wkt_geometry)
            layer.anytext = gen_anytext(layer.title, layer.abstract)
            layer.save()
            # dates
            add_mined_dates(layer)
    except Exception as err:
        message = "update_layers_esri_imageserver: {0}".format(err)
        check = Check(content_object=service, success=False, response_time=0, message=message)
        check.save()
def get_project(self, project_id, include_capabilities=None, include_history=None):
    """GetProject.

    Get project with the specified id or name, optionally including capabilities.
    :param str project_id:
    :param bool include_capabilities: Include capabilities (such as source control) in the team project result (default: false).
    :param bool include_history: Search within renamed projects (that had such name in the past).
    :rtype: :class:`<TeamProject> <azure.devops.v5_0.core.models.TeamProject>`
    """
    route_values = {}
    if project_id is not None:
        route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
    # Serialize only the boolean query parameters that were actually given.
    query_parameters = {}
    for param_name, wire_name, value in (
        ('include_capabilities', 'includeCapabilities', include_capabilities),
        ('include_history', 'includeHistory', include_history),
    ):
        if value is not None:
            query_parameters[wire_name] = self._serialize.query(param_name, value, 'bool')
    response = self._send(
        http_method='GET',
        location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1',
        version='5.0',
        route_values=route_values,
        query_parameters=query_parameters,
    )
    return self._deserialize('TeamProject', response)
def print_file(self, file_format='ctfile', f=sys.stdout):
    """Print representation of :class:`~ctfile.ctfile.CTfile`.

    :param str file_format: Format to use: ``ctfile`` or ``json``.
    :param f: Print to file or stdout.
    :type f: File-like
    :return: None.
    :rtype: :py:obj:`None`.
    """
    rendered = self.writestr(file_format=file_format)
    print(rendered, file=f)
def _find_unprocessed(config):
    """Yield finished dump directories that have not yet been reported."""
    reported = _read_reported(config["msg_db"])
    unreported = (d for d in _get_directories(config) if d not in reported)
    for dname in unreported:
        if os.path.isdir(dname) and _is_finished_dumping(dname):
            yield dname
def merge(self, across=False):
    """Merge the range cells into one region in the worksheet.

    :param bool across: Optional. Set True to merge cells in each row of the
        specified range as separate merged cells.
    """
    endpoint = self._endpoints.get('merge_range')
    response = self.session.post(self.build_url(endpoint), data={'across': across})
    return bool(response)
def bling(self, target, sender):
    "will print yo"
    # Channel targets (prefixed with '#') get a directed reply mentioning
    # the sender; private messages are answered back to the sender.
    is_channel = target.startswith("#")
    recipient = target if is_channel else sender
    text = "%s: yo" % sender if is_channel else "yo"
    self.message(recipient, text)
def check_valid_package(package, cyg_arch='x86_64', mirrors=None):
    '''Check if the package is valid on the given mirrors.

    Args:
        package: The name of the package
        cyg_arch: The cygwin architecture
        mirrors: any mirrors to check

    Returns (bool): True if Valid, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' cyg.check_valid_package <package name>
    '''
    if mirrors is None:
        mirrors = [{DEFAULT_MIRROR: DEFAULT_MIRROR_KEY}]
    LOG.debug('Checking Valid Mirrors: %s', mirrors)
    # Each mirror dict maps URL -> key; only the URLs are needed here, so
    # iterate keys directly instead of unpacking unused .items() values.
    return any(
        package in _get_all_packages(mirror_url, cyg_arch)
        for mirror in mirrors
        for mirror_url in mirror
    )
def on_change(self, callable_):
    """Add a change observer to this entity.

    :param callable_: callback registered for 'change' deltas on this
        entity (call signature determined by Model.add_observer).
    """
    self.model.add_observer(callable_, self.entity_type, 'change', self.entity_id)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.