signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
async def get_instances(self, **kwargs) -> List[ApiResource]:
    """Return a list of resource instances.

    Fetches the raw resource payload from the hub and wraps each raw
    entry in an ``ApiResource`` via the resource factory.

    :raises PvApiError: when a hub problem occurs.
    """
    raw_resources = await self.get_resources(**kwargs)
    return [
        self._resource_factory(raw)
        for raw in self._loop_raw(raw_resources)
    ]
def obj_deref(ref):
    """Returns the object identified by `ref`"""
    from indico_livesync.models.queue import EntryType
    # Map each entry type to (model class, id key in the ref dict).
    dispatch = {
        EntryType.category: (Category, 'category_id'),
        EntryType.event: (Event, 'event_id'),
        EntryType.session: (Session, 'session_id'),
        EntryType.contribution: (Contribution, 'contrib_id'),
        EntryType.subcontribution: (SubContribution, 'subcontrib_id'),
    }
    try:
        model, id_key = dispatch[ref['type']]
    except KeyError:
        raise ValueError('Unexpected object type: {}'.format(ref['type']))
    return model.get_one(ref[id_key])
def setDraw(self, p0=(0, 0), angle=0, mode='plain'):
    """set element visualization drawing

    :param p0: start drawing position, (x, y)
    :param angle: rotation angle [deg] of drawing central point,
        angle is rotating from x-axis to be '+' or '-',
        '+': anticlockwise, '-': clockwise
    :param mode: artist mode, 'plain' or 'fancy', 'plain' by default
    """
    sconf = self.getConfig(type='simu')
    self._style['w'] = float(sconf['l'])  # element width
    _width = self._style['w']
    _height = self._style['h']
    _fc = self._style['fc']   # face color
    _ec = self._style['ec']   # edge color
    _alpha = self._style['alpha']
    _kval = float(sconf['k1'])  # focusing strength; sign picks drawing side
    _lw = self._style['lw']   # line width
    if mode == 'plain':
        # Rectangle above or below the axis depending on the sign of k1:
        # _kval >= 0:          p1---p2
        #              ---p0---       p3---
        # _kval < 0:   ---p0---       p3---
        #                      p1---p2
        x0, y0 = p0
        if _kval >= 0:
            x1, y1 = x0, y0 + _height
        else:
            x1, y1 = x0, y0 - _height
        x2, y2 = x0 + _width, y1
        x3, y3 = x2, y0
        vs = [(x0, y0), (x1, y1), (x2, y2), (x3, y3), (x0, y0)]
        cs = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
              Path.CLOSEPOLY]
        pth = Path(vs, cs)
        ptch = patches.PathPatch(pth, fc=_fc, ec=_ec, alpha=_alpha, lw=_lw)
        self._patches = []
        self._patches.append(ptch)
        # Next element starts at the rectangle's lower-right corner.
        self.next_p0 = x3, y3
        self.next_inc_angle = 0
    else:
        # fancy mode: a closed Bezier "lens" shape
        #      p1
        # --p0 pc p2 (nextp0)---
        #      p3
        x0, y0 = p0
        x1, y1 = x0 + _width * 0.5, y0 + _height * 0.5
        x2, y2 = x1 + _width * 0.5, y0
        x3, y3 = x1, y0 - _height * 0.5
        pc = x0 + _width * 0.5, y0  # central point of the shape
        vs0 = [(x0, y0), (x1, y1), (x2, y2), (x3, y3), (x0, y0)]
        # Rotate the whole outline about p0 by `angle` degrees.
        vs = MagBlock.rot(vs0, theta=angle, pc=p0)
        cs = [Path.MOVETO, Path.CURVE3, Path.CURVE3, Path.CURVE3,
              Path.CURVE3]
        pth = Path(vs, cs)
        ptch = patches.PathPatch(pth, fc=_fc, ec=_ec, alpha=_alpha, lw=_lw)
        self._patches = []
        self._patches.append(ptch)
        pout = x0 + _width, y0  # the right most point in x-axis
        # Rotated exit point becomes the start point of the next element.
        self.next_p0 = tuple(MagBlock.rot(pout, theta=angle, pc=p0).tolist())
        self.next_inc_angle = 0
        pc = x0 + 0.5 * _width, y0  # re-anchor annotation at the (unrotated) center
        # NOTE(review): annotation info appears to be set only in fancy
        # mode (plain mode never defines `pc`) — confirm against callers.
        self._anote = {'xypos': pc, 'textpos': pc,
                       'name': self.name.upper(), 'type': self.typename}
def get_interface_detail_output_interface_line_protocol_state ( self , ** kwargs ) : """Auto Generated Code"""
config = ET . Element ( "config" ) get_interface_detail = ET . Element ( "get_interface_detail" ) config = get_interface_detail output = ET . SubElement ( get_interface_detail , "output" ) interface = ET . SubElement ( output , "interface" ) interface_type_key = ET . SubElement ( interface , "interface-type" ) interface_type_key . text = kwargs . pop ( 'interface_type' ) interface_name_key = ET . SubElement ( interface , "interface-name" ) interface_name_key . text = kwargs . pop ( 'interface_name' ) line_protocol_state = ET . SubElement ( interface , "line-protocol-state" ) line_protocol_state . text = kwargs . pop ( 'line_protocol_state' ) callback = kwargs . pop ( 'callback' , self . _callback ) return callback ( config )
def filter_table(table, *column_filters):
    """Apply one or more column slice filters to a `Table`.

    Parameters
    ----------
    table : `~astropy.table.Table`
        the table to filter
    column_filters : `str` or `tuple`
        one or more filters, either ``'snr > 10'`` strings or
        ``(<column>, <operator>, <operand>)`` tuples; all are applied
        concurrently (logical AND).

    Returns
    -------
    table : `~astropy.table.Table`
        a view of the input table with only those rows matching the filters
    """
    mask = numpy.ones(len(table), dtype=bool)
    for column, op_func, operand in parse_column_filters(*column_filters):
        values = table[column].view(numpy.ndarray)
        mask &= op_func(values, operand)
    return table[mask]
def colRowIsOnSciencePixel(self, col, row, padding=DEFAULT_PADDING):
    """Is (col, row) on a science pixel?

    Ranges taken from Fig 25 of the Instrument Handbook (p50).
    Distortion means computed positions can be slightly off, so
    ``padding > 0`` accepts objects that land just off silicon;
    set a negative padding to be conservative.
    """
    col_ok = (12. - padding) <= col <= (1111 + padding)
    row_ok = (20 - padding) <= row <= (1043 + padding)
    return col_ok and row_ok
def create_entry_tag(sender, instance, created, **kwargs):
    """Creates EntryTag for Entry corresponding to specified ItemBase instance.

    :param sender: the sending ItemBase class.
    :param instance: the ItemBase instance.
    :param created: whether the instance was newly created (unused here;
        part of the signal signature).
    """
    from ..models import (Entry, EntryTag)
    entry = Entry.objects.get_for_model(instance.content_object)[0]
    # get_or_create is atomic-ish and avoids the check-then-act race of
    # the previous filter(...).exists() / create() pair.
    EntryTag.objects.get_or_create(tag=instance.tag, entry=entry)
def from_dict(cls, val):
    """Creates dict2 object from dict object

    Args:
        val (:obj:`dict`): Value to create from

    Returns:
        Equivalent dict2 object.
    """
    # Already converted — return as-is.
    if isinstance(val, dict2):
        return val
    if isinstance(val, dict):
        converted = cls()
        for key, value in val.items():
            converted[key] = cls.from_dict(value)
        return converted
    if isinstance(val, list):
        return [cls.from_dict(item) for item in val]
    # Scalars (and anything else) pass through untouched.
    return val
def queue_push(self, key, value, create=False, **kwargs):
    """Add an item to the end of a queue.

    :param key: The document ID of the queue
    :param value: The item to add to the queue
    :param create: Whether the queue should be created if it does not exist
    :param kwargs: Arguments to pass to :meth:`mutate_in`
    :return: :class:`OperationResult`
    :raise: :cb_exc:`NotFoundError` if the queue does not exist and
        `create` was not specified.

    example::

        # Ensure it's removed first
        cb.remove('a_queue')
        cb.queue_push('a_queue', 'job9999', create=True)
        cb.queue_pop('a_queue').value  # => job9999
    """
    # Forward ``create`` explicitly: the previous implementation dropped
    # it, so queues could never be auto-created as documented.
    return self.list_prepend(key, value, create=create, **kwargs)
def RtlGetVersion(os_version_info_struct):
    """Wraps the lowlevel RtlGetVersion routine.

    Args:
      os_version_info_struct: instance of either a RTL_OSVERSIONINFOW
        structure or a RTL_OSVERSIONINFOEXW structure, ctypes.Structure-wrapped,
        with the dwOSVersionInfoSize field preset to ctypes.sizeof(self).

    Raises:
      OSError: if the underlaying routine fails.

    See: https://msdn.microsoft.com/en-us/library/windows/hardware/ff561910(v=vs.85).aspx.
    """
    # RtlGetVersion returns an NTSTATUS; 0 (STATUS_SUCCESS) means success,
    # anything else is treated as failure.
    rc = ctypes.windll.Ntdll.RtlGetVersion(
        ctypes.byref(os_version_info_struct))
    if rc != 0:
        raise OSError("Getting Windows version failed.")
def get_secondary_strain_data(self, strain_data=None):
    '''Calculate the following and add to data dictionary:
    1) 2nd invarient of strain
    2) Dilatation rate
    3) e1h and e2h
    4) err

    :param dict strain_data:
        Strain data dictionary (as described) - will overwrite current
        data if input
    '''
    if strain_data:
        self.data = strain_data
    if not isinstance(self.data, dict):
        raise ValueError('Strain data not input or incorrectly formatted')
    # Check to ensure essential attributes are in data dictionary
    for essential_key in DATA_VARIABLES:
        if essential_key not in self.data:
            print(self.data)
            raise ValueError(
                'Essential strain information %s missing!' % essential_key)
    self.data_variables = deepcopy(DATA_VARIABLES)
    # Second Invarient: sqrt(exx^2 + eyy^2 + 2*exy^2)
    self.data['2nd_inv'] = np.sqrt(
        (self.data['exx'] ** 2.) +
        (self.data['eyy'] ** 2.) +
        2.0 * (self.data['exy'] ** 2.))
    # Dilatation: trace of the horizontal strain tensor
    self.data['dilatation'] = self.data['exx'] + self.data['eyy']
    # err: vertical rate balancing the horizontal dilatation
    self.data['err'] = -1. * self.data['dilatation']
    # Mohr's-circle construction for the principal horizontal rates
    center_normal_rate = (self.data['exx'] + self.data['eyy']) / 2.
    radius_rate = np.sqrt(
        (self.data['exx'] - center_normal_rate) ** 2. +
        (self.data['exy'] ** 2.))
    # e1h and e2h: principal horizontal strain rates (center -/+ radius)
    self.data['e1h'] = center_normal_rate - radius_rate
    self.data['e2h'] = center_normal_rate + radius_rate
    # Placeholder cell areas; one entry per observation
    self.data['area'] = np.zeros(self.get_number_observations())
    self.data_variables.extend(
        ['2nd_inv', 'dilatation', 'err', 'e1h', 'e2h'])
def exposures_for_layer(layer_geometry_key):
    """Get the exposures compatible with a layer geometry.

    :param layer_geometry_key: The geometry key
    :type layer_geometry_key: str

    :returns: List of exposure definitions, sorted by their 'key'.
    :rtype: list
    """
    matching = [
        exposure for exposure in exposure_all
        if layer_geometry_key in exposure.get('allowed_geometries')
    ]
    return sorted(matching, key=lambda exposure: exposure['key'])
def convertfields_old(key_comm, obj, inblock=None):
    """convert the float and integer fields"""
    converter = ConvInIDD()
    conv_by_type = dict(integer=converter.integer, real=converter.real)
    # Pick a converter per field from the IDD 'type' metadata; fall back
    # to the pass-through converter for untyped fields.
    field_types = [comm.get('type', [None])[0] for comm in key_comm]
    converters = [conv_by_type.get(field_type, converter.no_type)
                  for field_type in field_types]
    try:
        inblock = list(inblock)
    except TypeError:
        # No field-name block available — use placeholder names.
        inblock = ['does not start with N'] * len(obj)
    for index, (value, convert, field_name) in enumerate(
            zip(obj, converters, inblock)):
        if index == 0:
            # obj[0] is the object key; never converted.
            continue
        obj[index] = convert(value, field_name)
    return obj
def publishMap(self, maps_info, fsInfo=None, itInfo=None):
    """Publishes a list of maps.

    Args:
        maps_info (list): A list of JSON configuration maps to publish.
        fsInfo (list): optional feature-service publish results used to
            resolve 'Layer' replacement tags.
        itInfo (list): optional item publish results used to resolve
            'Global' replacement tags.

    Returns:
        list: A list of results from
            :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.
    """
    if self.securityhandler is None:
        print("Security handler required")
        return
    itemInfo = None
    itemId = None
    map_results = None
    replaceInfo = None
    replaceItem = None
    map_info = None
    admin = None
    try:
        admin = arcrest.manageorg.Administration(
            securityHandler=self._securityHandler)
        map_results = []
        for map_info in maps_info:
            itemInfo = {}
            if 'ReplaceInfo' in map_info:
                replaceInfo = map_info['ReplaceInfo']
            else:
                replaceInfo = None
            if replaceInfo != None:
                for replaceItem in replaceInfo:
                    if replaceItem['ReplaceType'] == 'Layer':
                        # Resolve the replacement tag against published
                        # feature services.
                        if fsInfo is not None:
                            for fs in fsInfo:
                                if fs is not None and replaceItem['ReplaceString'] == fs['ReplaceTag']:
                                    replaceItem['ReplaceString'] = fs['FSInfo']['url']
                                    replaceItem['ItemID'] = fs['FSInfo']['itemId']
                                    replaceItem['ItemFolder'] = fs['FSInfo']['folderId']
                                    if 'convertCase' in fs['FSInfo']:
                                        replaceItem['convertCase'] = fs['FSInfo']['convertCase']
                                elif 'ItemID' in replaceItem:
                                    # NOTE(review): this chained comparison
                                    # evaluates as ('ItemFolder' in replaceItem)
                                    # and (replaceItem == False), which is
                                    # always False, so the folder lookup below
                                    # never runs — likely meant
                                    # ('ItemFolder' in replaceItem) == False.
                                    if 'ItemFolder' in replaceItem == False:
                                        itemId = replaceItem['ItemID']
                                        itemInfo = admin.content.getItem(itemId=itemId)
                                        if itemInfo.owner:
                                            if itemInfo.owner == self._securityHandler.username and itemInfo.ownerFolder:
                                                replaceItem['ItemFolder'] = itemInfo.ownerFolder
                                            else:
                                                replaceItem['ItemFolder'] = None
                    elif replaceItem['ReplaceType'] == 'Global':
                        # Resolve the replacement tag against published items.
                        if itInfo is not None:
                            for itm in itInfo:
                                if itm is not None:
                                    if replaceItem['ReplaceString'] == itm['ReplaceTag']:
                                        if 'ItemInfo' in itm:
                                            if 'url' in itm['ItemInfo']:
                                                replaceItem['ReplaceString'] = itm['ItemInfo']['url']
            if 'ReplaceTag' in map_info:
                itemInfo = {"ReplaceTag": map_info['ReplaceTag']}
            else:
                itemInfo = {"ReplaceTag": "{WebMap}"}
            itemInfo['MapInfo'] = self._publishMap(
                config=map_info, replaceInfo=replaceInfo)
            map_results.append(itemInfo)
            print("%s webmap created" % itemInfo['MapInfo']['Name'])
        return map_results
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        # Wrap any unexpected failure with location info from trace().
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishMap",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Explicit cleanup pattern used throughout this module.
        itemInfo = None
        itemId = None
        replaceInfo = None
        replaceItem = None
        map_info = None
        admin = None
        del itemInfo
        del itemId
        del replaceInfo
        del replaceItem
        del map_info
        del admin
        gc.collect()
def get_accounts(self, provider='aws'):
    """Get Accounts added to Spinnaker.

    Args:
        provider (str): What provider to find accounts for.

    Returns:
        list: list of dicts of Spinnaker credentials matching _provider_.

    Raises:
        AssertionError: Failure getting accounts from Spinnaker.
    """
    url = '{gate}/credentials'.format(gate=API_URL)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    # AssertionError on failure is part of this method's documented contract.
    assert response.ok, 'Failed to get accounts: {0}'.format(response.text)
    all_accounts = response.json()
    self.log.debug('Accounts in Spinnaker:\n%s', all_accounts)
    filtered_accounts = [
        account for account in all_accounts if account['type'] == provider
    ]
    if not filtered_accounts:
        raise ForemastError('No Accounts matching {0}.'.format(provider))
    return filtered_accounts
def ls(path):
    """List files on HDFS.

    :param path: A string (potentially with wildcards).
    :rtype: A list of strings representing HDFS paths.
    :raises: IOError: An error occurred listing the directory
        (e.g., not available).
    """
    # NOTE(review): `path` is interpolated into a shell command string;
    # confirm _checked_hadoop_fs_command quotes/escapes it safely.
    rcode, stdout, stderr = _checked_hadoop_fs_command(
        'hadoop fs -ls %s' % path)
    summary = re.compile('Found [0-9]+ items$')
    paths = []
    for line in stdout.split('\n'):
        # Skip blank lines and the "Found N items" summary header.
        if not line or summary.search(line):
            continue
        paths.append(line.split(' ')[-1])
    return paths
def _get_cert_url(self):
    """Get the signing certificate URL.

    Only accept urls that match the domains set in the
    AWS_SNS_BOUNCE_CERT_TRUSTED_DOMAINS setting. Sub-domains are allowed,
    i.e. if amazonaws.com is trusted then sns.us-east-1.amazonaws.com
    matches.
    """
    cert_url = self._data.get('SigningCertURL')
    if not cert_url:
        logger.warning(u'No signing certificate URL: "%s"', cert_url)
        return None
    if cert_url.startswith('https://'):
        netloc_labels = urlparse(cert_url).netloc.split('.')
        for trusted_domain in settings.BOUNCE_CERT_DOMAINS:
            trusted_labels = trusted_domain.split('.')
            # Suffix match on dot-separated labels allows sub-domains.
            if netloc_labels[-len(trusted_labels):] == trusted_labels:
                return cert_url
    logger.warning(u'Untrusted certificate URL: "%s"', cert_url)
    return None
def set_branch_ids(self):
    """Performs generation and setting of ids of branches for all MV and
    underlying LV grids.

    See Also
    --------
    ding0.core.network.grids.MVGridDing0.set_branch_ids
    """
    # Delegate per district: each MV grid numbers its own branches.
    for district in self.mv_grid_districts():
        district.mv_grid.set_branch_ids()
    logger.info('=====> Branch IDs set')
def code128(self, data, **kwargs):
    """Renders given ``data`` as **Code 128** barcode symbology.

    :param str codeset: Optional. Keyword argument for the subtype
        (code set) to render. Defaults to :attr:`escpos.barcode.CODE128_A`.

    .. warning::

        You should draw up your data according to the subtype (code set).
        The default is **Code 128 A** and there is no way (yet) to mix
        code sets in a single barcode rendering (at least not uniformly).
        Implementations may simply ignore the code set.
    """
    # Code 128 only encodes printable ASCII (32..127).
    printable_ascii = r'^[\x20-\x7F]+$'
    if re.match(printable_ascii, data) is None:
        raise ValueError(
                'Invalid Code 128 symbology. Code 128 can encode '
                'any ASCII character ranging from 32 (20h) to 127 (7Fh); '
                'got {!r}'.format(data))
    codeset = kwargs.pop('codeset', barcode.CODE128_A)
    barcode.validate_barcode_args(**kwargs)
    return self._code128_impl(data, codeset=codeset, **kwargs)
def json_schema(self, **add_keys):
    """Convert our compact schema representation to the standard, but more
    verbose, JSON Schema standard.

    Example JSON schema: http://json-schema.org/examples.html
    Core standard: http://json-schema.org/latest/json-schema-core.html

    :param add_keys: Key, default value pairs to add in,
        e.g. ``description=""``
    """
    self._json_schema_keys = add_keys
    # Serve the cached conversion when one exists.
    if self._json_schema is not None:
        return self._json_schema
    self._json_schema = self._build_schema(self._schema)
    return self._json_schema
def file_root_name(name):
    """Returns the root name of a file from a full file path.

    It will not raise an error if the result is empty, but a warning
    will be issued.
    """
    root = os.path.splitext(os.path.basename(name))[0]
    if not root:
        warning = 'file_root_name returned an empty root name from \"{0}\"'
        log.warning(warning.format(name))
    return root
def _replace_with_new_dims(  # type: ignore
    self: T,
    variables: 'OrderedDict[Any, Variable]' = None,
    coord_names: set = None,
    attrs: 'Optional[OrderedDict]' = __default,
    indexes: 'Optional[OrderedDict[Any, pd.Index]]' = __default,
    inplace: bool = False,
) -> T:
    """Replace variables with recalculated dimensions.

    Recomputes the dimension-size mapping from ``variables`` and then
    delegates to ``_replace``; the ``__default`` sentinel distinguishes
    "keep existing" from an explicit ``None``.
    """
    dims = dict(calculate_dimensions(variables))
    return self._replace(
        variables, coord_names, dims, attrs, indexes, inplace=inplace)
def _augment ( graph , capacity , flow , source , target ) : """find a shortest augmenting path"""
n = len ( graph ) A = [ 0 ] * n # A [ v ] = min residual cap . on path source - > v augm_path = [ None ] * n # None = node was not visited yet Q = deque ( ) # BFS Q . append ( source ) augm_path [ source ] = source A [ source ] = float ( 'inf' ) while Q : u = Q . popleft ( ) for v in graph [ u ] : cuv = capacity [ u ] [ v ] residual = cuv - flow [ u ] [ v ] if residual > 0 and augm_path [ v ] is None : augm_path [ v ] = u # store predecessor A [ v ] = min ( A [ u ] , residual ) if v == target : break else : Q . append ( v ) return ( augm_path , A [ target ] )
def _request(self, method, url, params, headers, content, form_content):
    # type: (str, str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> ClientRequest
    """Create ClientRequest object.

    :param str method: HTTP method for the request.
    :param str url: URL for the request.
    :param dict params: URL query parameters.
    :param dict headers: Headers
    :param content: Request body content (skipped when None).
    :param dict form_content: Form content
    """
    request = ClientRequest(method, self.format_url(url))
    if params:
        request.format_parameters(params)
    if headers:
        request.headers.update(headers)
    # All requests should contain a Accept.
    # This should be done by Autorest, but wasn't in old Autorest
    # Force it for now, but might deprecate it later.
    if "Accept" not in request.headers:
        _LOGGER.debug("Accept header absent and forced to application/json")
        request.headers['Accept'] = 'application/json'
    if content is not None:
        request.add_content(content)
    if form_content:
        request.add_formdata(form_content)
    return request
def p_expression_eq(self, p):
    'expression : expression EQ expression'
    # NOTE: the docstring above is the PLY grammar production for this
    # rule — it must not be edited or reworded.
    # Build an equality AST node from the two operand expressions and
    # propagate the line number of the left operand.
    p[0] = Eq(p[1], p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
def _metric_names_for_training_job ( self ) : """Helper method to discover the metrics defined for a training job ."""
training_description = self . _sage_client . describe_training_job ( TrainingJobName = self . _training_job_name ) metric_definitions = training_description [ 'AlgorithmSpecification' ] [ 'MetricDefinitions' ] metric_names = [ md [ 'Name' ] for md in metric_definitions ] return metric_names
def download_content(self, dir_path='', filename=None, force_download=False):
    """Download the content for this document to a file.

    :type dir_path: String
    :param dir_path: the path to which to write the data
    :type filename: String
    :param filename: filename to write to (if None, defaults to the
        document's name, as specified by its metadata)
    :type force_download: Boolean
    :param force_download: True to download from the server regardless
        of the cache's contents
    :rtype: String
    :returns: the path to the downloaded file
    :raises: APIError if the API request is not successful
    """
    if filename is None:
        filename = self.get_filename()
    target = os.path.join(dir_path, filename)
    payload = self.client.get_document(self.url(), force_download)
    with open(target, 'wb') as stream:
        stream.write(payload)
    return target
def addLengthMeters(stream_network):
    """Adds length field in meters to network
    (The added field name will be 'LENGTH_M').

    .. note:: This may be needed for generating the kfac file
              depending on the units of your raster. See: :doc:`gis_tools`.

    Parameters
    ----------
    stream_network : str
        Path to stream network file.

    Example::

        import os
        from RAPIDpy.gis.taudem import TauDEM

        output_directory = '/path/to/output/files'
        TauDEM.addLengthMeters(
            os.path.join(output_directory, "stream_reach_file.shp"))
    """
    # Open in update mode (second arg 1) so the new field can be written.
    network_shapefile = ogr.Open(stream_network, 1)
    network_layer = network_shapefile.GetLayer()
    network_layer_defn = network_layer.GetLayerDefn()
    # make sure projection EPSG:4326 — geodesic distances below require
    # geographic (lon/lat) coordinates.
    network_layer_proj = network_layer.GetSpatialRef()
    geographic_proj = osr.SpatialReference()
    geographic_proj.ImportFromEPSG(4326)
    proj_transform = None
    if network_layer_proj != geographic_proj:
        proj_transform = osr.CoordinateTransformation(
            network_layer_proj, geographic_proj)
    # check for an existing LENGTH_M field before creating it
    # NOTE(review): `xrange` implies Python 2 (or an imported shim) —
    # confirm the module provides it on Python 3.
    create_field = True
    for i in xrange(network_layer_defn.GetFieldCount()):
        field_name = network_layer_defn.GetFieldDefn(i).GetName()
        if field_name == 'LENGTH_M':
            create_field = False
            break
    if create_field:
        network_layer.CreateField(ogr.FieldDefn('LENGTH_M', ogr.OFTReal))
    # WGS84 ellipsoid for geodesic length computation
    geo_manager = Geod(ellps="WGS84")
    for network_feature in network_layer:
        feat_geom = network_feature.GetGeometryRef()
        # make sure coordinates are geographic
        if proj_transform:
            feat_geom.Transform(proj_transform)
        line = shapely_loads(feat_geom.ExportToWkb())
        lon_list, lat_list = line.xy
        # Geod.inv returns (az12, az21, distances); sum the per-segment
        # distances between consecutive vertices.
        dist = geo_manager.inv(lon_list[:-1], lat_list[:-1],
                               lon_list[1:], lat_list[1:])[2]
        network_feature.SetField('LENGTH_M', sum(dist))
        network_layer.SetFeature(network_feature)
def _pluck_provider_state(raw_provider_state: Dict) -> ProviderState:
    """Build a ProviderState from its raw dict form.

    >>> _pluck_provider_state({'name': 'there is an egg'})
    ProviderState(descriptor='there is an egg', params=None)
    >>> _pluck_provider_state({'name': 'there is an egg called', 'params': {'name': 'humpty'}})
    ProviderState(descriptor='there is an egg called', params={'name': 'humpty'})
    """
    descriptor = raw_provider_state['name']
    params = raw_provider_state.get('params')
    return ProviderState(descriptor=descriptor, params=params)
def _create_cluster_connection(self, node):
    """Create a connection to a Redis server.

    :param node: The node to connect to
    :type node: tredis.cluster.ClusterNode
    """
    LOGGER.debug('Creating a cluster connection to %s:%s',
                 node.ip, node.port)
    # Replicas (flagged 'slave') are opened read-only.
    connection = _Connection(
        node.ip, node.port, 0,
        self._read, self._on_closed, self.io_loop,
        cluster_node=True,
        read_only='slave' in node.flags,
        slots=node.slots)
    self.io_loop.add_future(connection.connect(), self._on_connected)
def related_to(self):
    """returns a list of all parameters that are either constrained by or
    constrain this parameter
    """
    params = []
    # Copy so that appending is_constraint below cannot mutate
    # self.in_constraints in place (the previous code aliased the list
    # and grew it on every call).
    constraints = list(self.in_constraints)
    if self.is_constraint is not None:
        constraints.append(self.is_constraint)
    for constraint in constraints:
        for var in constraint._vars:
            param = var.get_parameter()
            # De-duplicate and exclude this parameter itself.
            if param not in params and param.uniqueid != self.uniqueid:
                params.append(param)
    return params
def getele(fele, up=None, verbose=False):
    """Reads t.1.ele, returns ``(elem, els, regions)``.

    ``elem`` is the element type string ('2_3' for linear triangles,
    '3_4' for linear tetrahedra), ``els`` is a list of node-index tuples,
    and ``regions`` maps a region number to the element ids inside it.

    Example:

    >>> elements, regions = self.getele("t.1.ele", MyBar("elements:"))

    :param fele: path to the tetgen ``.ele`` file
    :param up: optional progress bar with ``init``/``update`` methods
    :param verbose: when True (and ``up`` given), report progress
    :raises Exception: for quadratic (non-linear) elements
    :raises ValueError: for elements outside any physical entity

    .. note:: modernized from Python 2 (``file()``, ``print`` statement,
       ``has_key``, string exceptions) to Python 3.
    """
    with open(fele) as f:
        ntetra, nnod, nattrib = [int(x) for x in f.readline().split()]
        # we have either linear or quadratic triangles / tetrahedra:
        elem = None
        linear = False
        if nnod in [4, 10]:
            elem = '3_4'
            linear = (nnod == 4)
        if nnod in [3, 7]:
            elem = '2_3'
            linear = (nnod == 3)
        if elem is None or not linear:
            raise Exception(
                "Only linear triangle and tetrahedra reader is implemented")
        if verbose and up is not None:
            up.init(ntetra)
        els = []
        regions = {}
        for line in f:
            if line[0] == "#":
                continue  # skip comment lines
            l = [int(x) for x in line.split()]
            if elem == '2_3':
                assert_((len(l) - 1 - nattrib) == 3)
                els.append((l[1], l[2], l[3]))
            if elem == '3_4':
                assert_((len(l) - 1 - nattrib) == 4)
                els.append((l[1], l[2], l[3], l[4]))
            # With exactly one attribute it is the region id; otherwise
            # fall back to a single default region.
            regionnum = l[-1] if nattrib == 1 else 1
            if regionnum == 0:
                print("see %s, element # %d" % (fele, l[0]))
                # string exceptions are invalid in Python 3; raise a real one
                raise ValueError(
                    "there are elements not belonging to any physical entity")
            regions.setdefault(regionnum, []).append(l[0])
            assert_(l[0] == len(els))
            if verbose and up is not None:
                up.update(l[0])
    return elem, els, regions
def image(self, tag, image, step=None):
    """Saves RGB image summary from onp.ndarray [H,W], [H,W,1], or [H,W,3].

    Args:
      tag: str: label for this data
      image: ndarray: [H,W], [H,W,1], [H,W,3] save image in greyscale or
        colors
      step: int: training step
    """
    pixels = onp.array(image)
    if step is not None:
        self._step = step
    else:
        step = self._step
    # Normalize to [H, W, 3]: add a channel axis, then broadcast greyscale.
    if len(onp.shape(pixels)) == 2:
        pixels = pixels[:, :, onp.newaxis]
    if onp.shape(pixels)[-1] == 1:
        pixels = onp.repeat(pixels, 3, axis=-1)
    encoded = io.BytesIO()
    plt.imsave(encoded, pixels, format='png')
    image_summary = Summary.Image(
        encoded_image_string=encoded.getvalue(),
        colorspace=3,
        height=pixels.shape[0],
        width=pixels.shape[1])
    summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
    self.add_summary(summary, step)
def moving_statistic(values, statistic, size, start=0, stop=None, step=None,
                     **kwargs):
    """Calculate a statistic in a moving window over `values`.

    Parameters
    ----------
    values : array_like
        The data to summarise.
    statistic : function
        The statistic to compute within each window.
    size : int
        The window size (number of values).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    kwargs
        Additional keyword arguments are passed through to the
        `statistic` function.

    Returns
    -------
    out : ndarray, shape (n_windows,)

    Examples
    --------
    >>> import allel
    >>> values = [2, 5, 8, 16]
    >>> allel.moving_statistic(values, np.sum, size=2)
    array([ 7, 24])
    >>> allel.moving_statistic(values, np.sum, size=2, step=1)
    array([ 7, 13, 24])
    """
    windows = index_windows(values, size, start, stop, step)
    # One statistic per [begin, end) window.
    return np.array([statistic(values[begin:end], **kwargs)
                     for begin, end in windows])
def WriteEventBody(self, event):
    """Writes the body of an event object to the output.

    Args:
      event (EventObject): event.
    """
    # Events without a timestamp are silently skipped.
    if not hasattr(event, 'timestamp'):
        return
    row = self._GetSanitizedEventValues(event)
    try:
        self._cursor.execute(self._INSERT_QUERY, row)
    except MySQLdb.Error as exception:
        # Best-effort insert: log and keep going rather than abort output.
        logger.warning(
            'Unable to insert into database with error: {0!s}.'.format(
                exception))
    self._count += 1
    # TODO: Experiment if committing the current transaction
    # every 10000 inserts is the optimal approach.
    if self._count % 10000 == 0:
        self._connection.commit()
    if self._set_status:
        self._set_status('Inserting event: {0:d}'.format(self._count))
def read(self, size):
    """Read wrapper.

    Parameters
    ----------
    size : int
        Number of bytes to read.
    """
    try:
        data = self.handle.read(size)
        if data == '':
            # An empty read means the device went away.
            print()
            print("Piksi disconnected")
            print()
            raise IOError
        return data
    except OSError:
        print()
        print("Piksi disconnected")
        print()
        raise IOError
def _create_record_internal(self, rtype, name, content, identifier=None):
    """Create a new DNS entry in the domain zone if it does not already exist.

    Args:
        rtype (str): The DNS type (e.g. A, TXT, MX, etc) of the new entry.
        name (str): The name of the new DNS entry, e.g the domain for which
            a MX entry shall be valid.
        content (str): The content of the new DNS entry, e.g. the mail
            server hostname for a MX entry.
        identifier (str): The easyname id of a DNS entry. Use to overwrite
            an existing entry.

    Returns:
        bool: True if the record was created successfully, False otherwise.
    """
    if name is not None:
        name = self._relative_name(name)
    LOGGER.debug('Creating record with name %s', name)
    if self._is_duplicate_record(rtype, name, content):
        # Already present — treat as success.
        return True
    data = self._get_post_data_to_create_dns_entry(
        rtype, name, content, identifier)
    LOGGER.debug('Create DNS data: %s', data)
    create_response = self.session.post(
        self.URLS['dns_create_entry'].format(self.domain_id), data=data)
    self._invalidate_records_cache()
    self._log('Create DNS entry', create_response)
    # Verify by re-querying for the record we just posted.
    was_success = bool(self._list_records(rtype, name, content))
    msg = ('Successfully added record %s' if was_success
           else 'Failed to add record %s')
    LOGGER.info(msg, name)
    return was_success
def regon_checksum(digits):
    """Return the REGON control digit for the given list of digits.

    Computed per the REGON standard: a weighted sum of the first eight
    digits modulo 11, with a remainder of 10 mapped to 0.
    """
    weights = (8, 9, 2, 3, 4, 5, 6, 7)
    remainder = sum(weights[i] * digits[i] for i in range(8)) % 11
    # The standard defines check digit 0 for the remainder 10 case.
    return 0 if remainder == 10 else remainder
def bootstrap ( ** kwargs ) : """Bootstrap an EC2 instance that has been booted into an AMI from http : / / www . daemonology . net / freebsd - on - ec2/ Note : deprecated , current AMI images are basically pre - bootstrapped , they just need to be configured ."""
# the user for the image is ` ec2 - user ` , there is no sudo , but we can su to root w / o password original_host = env . host_string env . host_string = 'ec2-user@%s' % env . instance . uid bootstrap_files = env . instance . config . get ( 'bootstrap-files' , 'bootstrap-files' ) put ( '%s/authorized_keys' % bootstrap_files , '/tmp/authorized_keys' ) put ( join ( bsdploy_path , 'enable_root_login_on_daemonology.sh' ) , '/tmp/' , mode = '0775' ) run ( """su root -c '/tmp/enable_root_login_on_daemonology.sh'""" ) # revert back to root env . host_string = original_host # give sshd a chance to restart sleep ( 2 ) run ( 'rm /tmp/enable_root_login_on_daemonology.sh' ) # allow overwrites from the commandline env . instance . config . update ( kwargs ) bu = BootstrapUtils ( ) bu . ssh_keys = None bu . upload_authorized_keys = False bu . bootstrap_files_yaml = 'daemonology-files.yml' bu . print_bootstrap_files ( ) bu . create_bootstrap_directories ( ) bu . upload_bootstrap_files ( { } ) # we need to install python here , because there is no way to install it via # ansible playbooks bu . install_pkg ( '/' , chroot = False , packages = [ 'python27' ] )
def _build_lv_grid_dict ( network ) : """Creates dict of LV grids LV grid ids are used as keys , LV grid references as values . Parameters network : : class : ` ~ . grid . network . Network ` The eDisGo container object Returns : obj : ` dict ` Format : { : obj : ` int ` : : class : ` ~ . grid . grids . LVGrid ` }"""
lv_grid_dict = { } for lv_grid in network . mv_grid . lv_grids : lv_grid_dict [ lv_grid . id ] = lv_grid return lv_grid_dict
def _get_mpr_table(self, connection, partition):
    """Return the name of the postgres relation that stores mpr data.

    Args:
        connection: connection to the postgres db that stores mpr data.
        partition (orm.Partition):

    Returns:
        str: relation name (materialized view if present, else foreign table).

    Raises:
        MissingTableError: if no relation exists for the partition.
    """
    # TODO: This is the first candidate for optimization. Add field to partition
    # with table name and update it while table creation.
    # A materialized view (suffix "_v") takes precedence over the foreign table.
    logger.debug(
        'Looking for materialized view of the partition.\n partition: {}'.format(partition.name))
    foreign_table = partition.vid
    view_table = '{}_v'.format(foreign_table)
    if self._relation_exists(connection, view_table):
        logger.debug(
            'Materialized view of the partition found.\n partition: {}, view: {}'.format(
                partition.name, view_table))
        return view_table
    # Fall back to the fdw/virtual (foreign) table.
    logger.debug(
        'Looking for foreign table of the partition.\n partition: {}'.format(partition.name))
    if self._relation_exists(connection, foreign_table):
        logger.debug(
            'Foreign table of the partition found.\n partition: {}, foreign table: {}'.format(
                partition.name, foreign_table))
        return foreign_table
    raise MissingTableError(
        'postgres database does not have table for {} partition.'.format(partition.vid))
def update_ipsecpolicy(self, ipsecpolicy, body=None):
    """Updates an IPsecPolicy."""
    # Interpolate the policy id into the resource path and issue the PUT.
    target = self.ipsecpolicy_path % ipsecpolicy
    return self.put(target, body=body)
def create_pymol_selection_from_PDB_residue_ids(residue_list):
    '''Build a PyMOL selection expression from PDB residue identifiers.

    Elements of residue_list should be strings extracted from PDB lines from
    position 21-26 inclusive (zero-indexing) i.e. the chain letter concatenated
    by the 5-character (including insertion code) residue ID.

    :param residue_list: iterable of strings (chain letter + padded residue ID).
    :return: selection string such as '(chain A and resi 9+10) or (chain B and resi 12)'.
    '''
    # Fixed: the original used dict.iteritems(), map(string.strip, ...) and the
    # py2-only `string` module helpers, which fail on Python 3; it also
    # shadowed the builtin `str`.
    # Group residue IDs (with padding/insertion codes intact) by chain letter.
    residues_by_chain = {}
    for residue_id in residue_list:
        chain_id = residue_id[0]
        residues_by_chain.setdefault(chain_id, []).append(residue_id[1:])
    # One clause per chain; chains and residues are sorted for deterministic
    # output (residues sort on the raw padded IDs, as before).
    clauses = []
    for chain_id, chain_residues in sorted(residues_by_chain.items()):
        resi = '+'.join(r.strip() for r in sorted(chain_residues))
        clauses.append('(chain %s and resi %s)' % (chain_id, resi))
    return ' or '.join(clauses)
def elapsed(self, label=None, total=True):
    """Get elapsed time since timer start.

    Parameters
    ----------
    label : string, optional (default None)
        Label of the timer for which the elapsed time is required. If
        ``None``, the default timer (label ``dfltlbl`` from ``__init__``)
        is selected.
    total : bool, optional (default True)
        If ``True`` return the total elapsed time since the first call of
        :meth:`start` for the selected timer, otherwise return the elapsed
        time since the most recent :meth:`start` without a matching
        :meth:`stop`.

    Returns
    -------
    dlt : float
        Elapsed time

    Raises
    ------
    KeyError
        If an explicit `label` does not correspond to an existing timer.
    """
    # Get current time
    t = timer()
    if label is None:
        label = self.dfltlbl
        # The *default* timer is implicitly 0.0 if it was never started.
        # Fixed: this early return previously applied to every label, which
        # made the KeyError below unreachable and silently hid typos in
        # explicitly requested timer labels.
        if label not in self.t0:
            return 0.0
    # Raise exception if timer with specified label does not exist
    if label not in self.t0:
        raise KeyError('Unrecognized timer key %s' % label)
    # If total flag is True return sum of accumulated time from
    # previous start/stop calls and current start call, otherwise
    # return just the time since the current start call
    te = 0.0
    if self.t0[label] is not None:
        te = t - self.t0[label]
    if total:
        te += self.td[label]
    return te
def getProjectionMatrix(self, eEye, fNearZ, fFarZ):
    """The projection matrix for the specified eye"""
    # Thin wrapper: forward straight to the underlying function table entry.
    return self.function_table.getProjectionMatrix(eEye, fNearZ, fFarZ)
def make_unicode(string):
    """Python 2 and 3 compatibility function that converts a string to Unicode.
    In case of Unicode, the string is returned unchanged.

    :param string: input string
    :return: Unicode string
    """
    # Only Python 2 byte strings need decoding; everything else (including
    # all Python 3 values) passes through untouched.
    needs_decoding = sys.version < '3' and isinstance(string, str)
    return unicode(string.decode('utf-8')) if needs_decoding else string
def shap_values(self, X):
    """Estimate the SHAP values for a set of samples.

    Parameters
    ----------
    X : numpy.array or pandas.DataFrame
        A matrix of samples (# samples x # features) on which to explain
        the model's output.

    Returns
    -------
    For a single-output model, a matrix of SHAP values
    (# samples x # features); each row sums to the difference between the
    model output for that sample and the explainer's expected_value.
    """
    # Accept pandas inputs by unwrapping them to plain numpy arrays.
    type_name = str(type(X))
    if type_name.endswith("pandas.core.series.Series'>"):
        X = X.values
    elif type_name.endswith("'pandas.core.frame.DataFrame'>"):
        X = X.values

    assert len(X.shape) in (1, 2), "Instance must have 1 or 2 dimensions!"

    if self.feature_dependence == "correlation":
        # Project into the transformed space, subtract the transformed mean,
        # then project back and scatter into the full feature dimension.
        phi = np.matmul(np.matmul(X[:, self.valid_inds], self.avg_proj.T),
                        self.x_transform.T) - self.mean_transformed
        phi = np.matmul(phi, self.avg_proj)
        full_phi = np.zeros((phi.shape[0], self.M))
        full_phi[:, self.valid_inds] = phi
        return full_phi
    elif self.feature_dependence == "independent":
        # Linear model: phi_ij = (x_ij - mean_j) * coef_j, per output.
        if len(self.coef.shape) == 1:
            return np.array(X - self.mean) * self.coef
        return [np.array(X - self.mean) * self.coef[i]
                for i in range(self.coef.shape[0])]
def _parse_tables(cls, parsed_content):
    """Parse the information tables found in a world's information page.

    Parameters
    ----------
    parsed_content : :class:`bs4.BeautifulSoup`
        A :class:`BeautifulSoup` object containing all the content.

    Returns
    -------
    :class:`OrderedDict`[:class:`str`, :class:`list`[:class:`bs4.Tag`]]
        All table rows, keyed by the table headers.
    """
    output = OrderedDict()
    for container in parsed_content.find_all('div', attrs={'class': 'TableContainer'}):
        # Header text may carry a bracketed suffix (e.g. a count); drop it.
        caption = container.find("div", attrs={'class': 'Text'}).text
        caption = caption.split("[")[0].strip()
        inner = container.find("div", attrs={'class': 'InnerTableContainer'})
        output[caption] = inner.find_all("tr")
    return output
def compute_quality_score(self, quality):
    """Compute the score related to the quality of that dataset."""
    UNIT = 2
    score = 0
    if 'frequency' in quality:
        # TODO: should be related to frequency.
        score += UNIT if quality['update_in'] < 0 else -UNIT
    if 'tags_count' in quality and quality['tags_count'] > 3:
        score += UNIT
    if 'description_length' in quality and quality['description_length'] > 100:
        score += UNIT
    if 'has_resources' in quality:
        # Penalize closed/absent formats and unavailable resources.
        score += -UNIT if quality['has_only_closed_or_no_formats'] else UNIT
        score += -UNIT if quality['has_unavailable_resources'] else UNIT
    if 'discussions' in quality:
        score += -UNIT if quality['has_untreated_discussions'] else UNIT
    # The score is floored at zero.
    return max(score, 0)
def mouse_up ( self , window , button ) : """Send a mouse release ( aka mouse up ) for a given button at the current mouse location . : param window : The window you want to send the event to or CURRENTWINDOW : param button : The mouse button . Generally , 1 is left , 2 is middle , 3 is right , 4 is wheel up , 5 is wheel down ."""
_libxdo . xdo_mouse_up ( self . _xdo , ctypes . c_ulong ( window ) , ctypes . c_int ( button ) )
def listPhysicsGroups(self, physics_group_name=""):
    """Returns all physics groups if physics group names are not passed.

    :param physics_group_name: optional name filter; must be a string.
    :return: result rows from the physics-group list query.
    """
    if not isinstance(physics_group_name, basestring):
        dbsExceptionHandler('dbsException-invalid-input',
                            'physics group name given is not valid : %s' % physics_group_name)
    else:
        try:
            physics_group_name = str(physics_group_name)
        # Fixed: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow it to Exception.
        except Exception:
            dbsExceptionHandler('dbsException-invalid-input',
                                'physics group name given is not valid : %s' % physics_group_name)
    conn = self.dbi.connection()
    try:
        result = self.pglist.execute(conn, physics_group_name)
        return result
    finally:
        # Always release the connection, even if the query raised.
        if conn:
            conn.close()
def list_all_zones_by_id(region=None, key=None, keyid=None, profile=None):
    '''List, by their IDs, all hosted zones in the bound account.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_route53.list_all_zones_by_id
    '''
    zones = describe_hosted_zones(region=region, key=key, keyid=keyid,
                                  profile=profile)
    # Strip the '/hostedzone/' prefix so only the bare zone IDs remain.
    return [zone['Id'].replace('/hostedzone/', '') for zone in zones]
def send_spyder_msg(self, spyder_msg_type, content=None, data=None):
    """Publish custom messages to the Spyder frontend.

    Parameters
    ----------
    spyder_msg_type : str
        The spyder message type.
    content : dict
        The (JSONable) content of the message.
    data : any
        Any object serializable by cloudpickle (most things). Arrives as
        cloudpickled bytes in ``.buffers[0]``.
    """
    import cloudpickle
    content = {} if content is None else content
    content['spyder_msg_type'] = spyder_msg_type
    # The payload travels out-of-band as a single cloudpickled buffer.
    pickled = cloudpickle.dumps(data, protocol=PICKLE_PROTOCOL)
    msg = self.session.send(
        self.iopub_socket,
        'spyder_msg',
        content=content,
        buffers=[pickled],
        parent=self._parent_header,
    )
    self.log.debug(msg)
def generator(self, Xgen, Xexc, Xgov, Vgen):
    """Generator model.

    Based on Generator.m from MatDyn by Stijn Cole, developed at Katholieke
    Universiteit Leuven. See U{http://www.esat.kuleuven.be/electa/teaching/
    matdyn/} for more information.

    :returns: matrix F of state derivatives, one row per generator.
    """
    generators = self.dyn_generators
    # Synchronous speed in rad/s.
    omegas = 2 * pi * self.freq
    F = zeros(Xgen.shape)
    typ1 = [g._i for g in generators if g.model == CLASSICAL]
    typ2 = [g._i for g in generators if g.model == FOURTH_ORDER]
    # Generator type 1: classical model
    omega = Xgen[typ1, 1]
    Pm0 = Xgov[typ1, 0]
    H = array([g.h for g in generators])[typ1]
    D = array([g.d for g in generators])[typ1]
    Pe = Vgen[typ1, 2]
    # Fixed: this previously read "ddelta = omega = omegas", a chained
    # assignment that rebound omega to the synchronous speed (zeroing the
    # damping term below) instead of computing the slip speed, as the
    # 4th-order branch does.
    ddelta = omega - omegas
    domega = pi * self.freq / H * (-D * (omega - omegas) + Pm0 - Pe)
    dEq = zeros(len(typ1))
    F[typ1, :] = c_[ddelta, domega, dEq]
    # Generator type 2: 4th order model
    omega = Xgen[typ2, 1]
    Eq_tr = Xgen[typ2, 2]
    Ed_tr = Xgen[typ2, 3]
    # NOTE(review): unlike the classical branch, these parameter arrays are
    # not sliced with [typ2]; this only lines up if every generator uses the
    # 4th-order model -- confirm against the MatDyn source before changing.
    H = array([g.h for g in generators])
    D = array([g.d for g in generators])
    xd = array([g.xd for g in generators])
    xq = array([g.xq for g in generators])
    xd_tr = array([g.xd_tr for g in generators])
    xq_tr = array([g.xq_tr for g in generators])
    Td0_tr = array([g.td for g in generators])
    Tq0_tr = array([g.tq for g in generators])
    Id = Vgen[typ2, 0]
    Iq = Vgen[typ2, 1]
    Pe = Vgen[typ2, 2]
    Efd = Xexc[typ2, 0]
    Pm = Xgov[typ2, 0]
    ddelta = omega - omegas
    domega = pi * self.freq / H * (-D * (omega - omegas) + Pm - Pe)
    dEq = 1 / Td0_tr * (Efd - Eq_tr + (xd - xd_tr) * Id)
    dEd = 1 / Tq0_tr * (-Ed_tr - (xq - xq_tr) * Iq)
    F[typ2, :] = c_[ddelta, domega, dEq, dEd]
    # Generator type 3:
    # Generator type 4:
    return F
def get_surveys(self, url=_SURVEYS_URL):
    """Return the list of surveys for the account.

    :param url: the account surveys page to scrape.
    :return: list of Survey objects.
    :raises ValueError: if any expected field cannot be extracted from the page.
    """
    res = self.client._get(url=url, expected_status_code=200)
    soup = BeautifulSoup(res.text, _DEFAULT_BEAUTIFULSOUP_PARSER)
    surveys_soup = soup.select(_SURVEYS_SELECTOR)
    survey_list = []
    # Fixed: the four bare `except:` clauses below were narrowed to
    # `except Exception` so SystemExit/KeyboardInterrupt are not swallowed;
    # the local `id` was also renamed to stop shadowing the builtin.
    for survey_soup in surveys_soup:
        survey_name = _css_select(survey_soup, _SURVEY_NAME_SELECTOR)
        try:
            url = survey_soup.select(_SURVEY_URL_SELECTOR)[0]["href"]
        except Exception:
            raise ValueError("Cannot get URL for the survey \
with css selector {}".format(_SURVEY_URL_SELECTOR))
        try:
            survey_id = int(url.split("survey_id=")[1].split("&")[0])
        except Exception:
            raise ValueError("Cannot extract id from URL {}".format(url))
        survey_location = _css_select(survey_soup, _SURVEY_LOCATION_SELECTOR)
        try:
            survey_epoch = int(survey_soup.select(_SURVEY_DATE_SELECTOR)[0]["epoch"])
            survey_date_obj = datetime.fromtimestamp(survey_epoch)
            survey_date = _datetime_object_to_rfc_date_str(survey_date_obj)
        except Exception:
            raise ValueError("Cannot get date for the survey \
with css selector {}".format(_SURVEY_DATE_SELECTOR))
        # The image count and the size share one selector (two elements).
        survey_img_nb_and_size = survey_soup.select(_SURVEY_IMG_NB_AND_SIZE_SELECTOR)
        try:
            survey_img_nb = survey_img_nb_and_size[0].text
            survey_img_nb = int(survey_img_nb.split(" ")[0])
        except Exception:
            raise ValueError("Cannot get or convert image number, \
survey_img_nb_and_size = {}".format(survey_img_nb_and_size))
        try:
            survey_size = survey_img_nb_and_size[1].text
        except Exception:
            raise ValueError("Cannot get survey size, \
survey_img_nb_and_size = {}".format(survey_img_nb_and_size))
        sensor = _css_select(survey_soup, _SURVEY_SENSOR_SELECTOR)
        survey = Survey(id=survey_id,
                        name=survey_name,
                        url=url,
                        date=survey_date,
                        location=survey_location,
                        image_nb=survey_img_nb,
                        size=survey_size,
                        sensor=sensor)
        survey_list.append(survey)
    return survey_list
def catch_exceptions(orig_func):
    """Catch uncaught exceptions and turn them into http errors"""
    def as_error(exc, code):
        # Uniform (body, status) pair for the simple string-message cases.
        return {"msg": str(exc), "status_code": code}, code

    @functools.wraps(orig_func)
    def wrapper(self, *args, **kwargs):
        try:
            return orig_func(self, *args, **kwargs)
        except arvados.errors.ApiError as e:
            logging.exception("Failure")
            return {"msg": e._get_reason(),
                    "status_code": e.resp.status}, int(e.resp.status)
        except subprocess.CalledProcessError as e:
            return as_error(e, 500)
        except MissingAuthorization:
            return {"msg": "'Authorization' header is missing or empty, expecting Arvados API token",
                    "status_code": 401}, 401
        except ValueError as e:
            return as_error(e, 400)
        except Exception as e:
            # Last-resort handler: everything else becomes a 500.
            return as_error(e, 500)
    return wrapper
def _create_tag_table(self):
    """Create (or reflect) the table that stores blog post tags.

    :return:
    """
    with self._engine.begin() as conn:
        tag_table_name = self._table_name("tag")
        if conn.dialect.has_table(conn, tag_table_name):
            # The table already exists: pick it up from the metadata.
            self._tag_table = self._metadata.tables[tag_table_name]
            self._logger.debug("Reflecting to table with table name %s" % tag_table_name)
        else:
            # First run: define the tag table from scratch.
            self._tag_table = sqla.Table(
                tag_table_name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                sqla.Column("text", sqla.String(128), unique=True, index=True),
                info=self._info)
            self._logger.debug("Created table with table name %s" % tag_table_name)
def findPatternsInFile(codes, patternFinder):
    """Find patterns of exceptions in a file.

    @param codes: code of the file to check
    @param patternFinder: a visitor for pattern checking and save results
    """
    # Parse the source text into an AST and hand the tree to the visitor,
    # which records any matches it finds.
    patternFinder.visit(ast.parse(codes))
def read_pipe(pipe_out):
    """Read data on a pipe.

    Used to capture stdout data produced by libiperf.

    :param pipe_out: The os pipe_out
    :rtype: unicode string
    """
    # Drain the pipe in 1 KiB chunks until no more data is pending, then
    # decode the whole byte stream at once.
    buf = bytearray()
    while more_data(pipe_out):
        buf += os.read(pipe_out, 1024)
    return bytes(buf).decode('utf-8')
def check_request_parameters(self, parameters: dict = dict):
    """Check parameters passed to avoid errors and help debug.

    :param dict parameters: search request parameters (the docstring
        previously named this ``response``, which did not match the
        signature).
    :raises ValueError: when a unique filter is repeated, a filter value is
        outside its allowed set, or geographic parameters are inconsistent.
    """
    # NOTE(review): the default ``= dict`` is the dict *type*, not an empty
    # dict -- presumably a typo for ``= {}``; confirm before relying on it.
    # -- SEMANTIC QUERY -----
    li_args = parameters.get("q").split()
    logging.debug(li_args)
    # Unicity: some filters may appear at most once in the query.
    li_filters = [i.split(":")[0] for i in li_args]
    filters_count = Counter(li_filters)
    li_filters_must_be_unique = ("coordinate-system", "format", "owner", "type")
    for i in filters_count:
        if i in li_filters_must_be_unique and filters_count.get(i) > 1:
            raise ValueError(
                "This query filter must be unique: {}"
                " and it occured {} times.".format(i, filters_count.get(i))
            )
    # dict: bucket every query token into its filter family; anything that
    # matches no known prefix is treated as free text.
    dico_query = FILTER_KEYS.copy()
    for i in li_args:
        if i.startswith("action"):
            dico_query["action"].append(i.split(":")[1:][0])
            continue
        elif i.startswith("catalog"):
            dico_query["catalog"].append(i.split(":")[1:][0])
            continue
        elif i.startswith("contact") and i.split(":")[1] == "group":
            dico_query["contact:group"].append(i.split(":")[1:][1])
            continue
        elif i.startswith("contact"):
            dico_query["contact:isogeo"].append(i.split(":", 1)[1])
            continue
        elif i.startswith("coordinate-system"):
            dico_query["coordinate-system"].append(i.split(":")[1:][0])
            continue
        elif i.startswith("data-source"):
            dico_query["data-source"].append(i.split(":")[1:][0])
            continue
        elif i.startswith("format"):
            dico_query["format"].append(i.split(":")[1:][0])
            continue
        elif i.startswith("has-no"):
            dico_query["has-no"].append(i.split(":")[1:][0])
            continue
        elif i.startswith("keyword:isogeo"):
            dico_query["keyword:isogeo"].append(i.split(":")[1:][1])
            continue
        elif i.startswith("keyword:inspire-theme"):
            dico_query["keyword:inspire-theme"].append(i.split(":")[1:][1])
            continue
        elif i.startswith("license:isogeo"):
            dico_query["license:isogeo"].append(i.split(":")[1:][1:])
            continue
        elif i.startswith("license"):
            dico_query["license:group"].append(i.split(":", 1)[1:][0:])
            continue
        elif i.startswith("owner"):
            dico_query["owner"].append(i.split(":")[1:][0])
            continue
        elif i.startswith("provider"):
            dico_query["provider"].append(i.split(":")[1:][0])
            continue
        elif i.startswith("share"):
            dico_query["share"].append(i.split(":")[1:][0])
            continue
        elif i.startswith("type"):
            dico_query["type"].append(i.split(":")[1:][0])
            continue
        else:
            # logging.debug(i.split(":")[1], i.split(":")[1].isdigit())
            dico_query["text"].append(i)
            continue
    # Values: validate closed-vocabulary filters against their allowed sets,
    # falling back to a valid default when the filter is absent.
    dico_filters = {i.split(":")[0]: i.split(":")[1:] for i in li_args}
    if dico_filters.get("type", ("dataset",))[0].lower() not in FILTER_TYPES:
        raise ValueError("type value must be one of: {}".format(" | ".join(FILTER_TYPES)))
    elif dico_filters.get("action", ("download",))[0].lower() not in FILTER_ACTIONS:
        raise ValueError("action value must be one of: {}".format(" | ".join(FILTER_ACTIONS)))
    elif (dico_filters.get("provider", ("manual",))[0].lower() not in FILTER_PROVIDERS):
        raise ValueError("provider value must be one of: {}".format(" | ".join(FILTER_PROVIDERS)))
    else:
        logging.debug(dico_filters)
    # -- GEOGRAPHIC -----
    in_box = parameters.get("box")
    in_geo = parameters.get("geo")
    # geometric relation: only meaningful alongside a box or a geometry.
    in_rel = parameters.get("rel")
    if in_rel and in_box is None and in_geo is None:
        raise ValueError("'rel' should'nt be used without box or geo.")
    elif in_rel not in GEORELATIONS and in_rel is not None:
        raise ValueError("{} is not a correct value for 'georel'."
                         " Must be one of: {}.".format(in_rel, " | ".join(GEORELATIONS)))
def imm_transient(imm):
    '''imm_transient(imm) yields a duplicate of the given immutable imm that is transient.'''
    if not is_imm(imm):
        raise ValueError('Non-immutable given to imm_transient')
    # Shallow-copy the immutable, then flip the copy into the transient state.
    duplicate = copy.copy(imm)
    if _imm_is_init(imm):
        # Source is still in its initial state.
        _imm_init_to_trans(duplicate)
    elif _imm_is_persist(imm):
        # Source is persistent: mark the new copy transient directly.
        object.__getattribute__(duplicate, '__dict__')['_pimms_immutable_is_trans'] = True
    return duplicate
def calculate_overlap(haystack, needle, allowpartial=True):
    """Calculate the overlap between two sequences.

    Yields (overlap, placement) tuples (multiple because there may be
    multiple overlaps!). The former is the part of the sequence that
    overlaps; the latter is -1 if the overlap is on the left side, 0 if it
    is a subset, 1 if it overlaps on the right side, 2 if it is an
    identical match.
    """
    needle = tuple(needle)
    haystack = tuple(haystack)
    # Identical sequences are a special case (placement code 2).
    if needle == haystack:
        return [(needle, 2)]
    matches = []
    shortest = 1 if allowpartial else len(needle)
    for size in range(shortest, min(len(needle), len(haystack)) + 1):
        # Does the needle's tail line up with the haystack's head?
        if needle[-size:] == haystack[:size]:
            matches.append((needle[-size:], -1))
        # Does the needle's head line up with the haystack's tail?
        if needle[:size] == haystack[-size:]:
            matches.append((needle[:size], 1))
    # Check whether the needle occurs strictly inside the haystack.
    if len(needle) <= len(haystack):
        windows = list(iter(Windower(haystack, len(needle),
                                     beginmarker=None, endmarker=None)))
        for window in windows[1:-1]:
            if window == needle:
                matches.append((needle, 0))
    return matches
def delete_cached(task_id, broker=None):
    """Delete a task from the cache backend"""
    # Fall back to the default broker when none was supplied.
    broker = broker or get_broker()
    cache_key = '{}:{}'.format(broker.list_key, task_id)
    return broker.cache.delete(cache_key)
async def handle_frame(self, frame):
    """Handle incoming API frame, return True if this was the expected frame."""
    # Only the enable-confirmation frame is of interest here.
    if isinstance(frame, FrameHouseStatusMonitorEnableConfirmation):
        self.success = True
        return True
    return False
def find_trigger_value(psd_var, idx, start, sample_rate):
    """Find the PSD variation value at a particular time.

    Parameters
    ----------
    psd_var : TimeSeries
        Time series of the variability in the PSD estimation.
    idx : numpy.ndarray
        Time indices of the triggers.
    start : float
        GPS start time.
    sample_rate : float
        Sample rate defined in ini file.

    Returns
    -------
    vals : Array
        PSD variation value at a particular time.
    """
    # Convert trigger indices into GPS times.
    trigger_times = start + idx / sample_rate
    # Locate each trigger inside the PSD-variation time series
    # (digitize returns the right-bin index, hence the -1 shift).
    positions = numpy.digitize(trigger_times, psd_var.sample_times) - 1
    return psd_var[positions]
def init_app(self, app):
    """Initialize an application for use with this prometheus reporter.

    Usually used with a flask "app factory" configuration, see
    http://flask.pocoo.org/docs/1.0/patterns/appfactories/

    Note that you need to use ``PrometheusMetrics(app=None, ...)`` for this
    mode, otherwise it is called automatically.

    :param app: the Flask application
    """
    # Expose the metrics endpoint on this particular app instance.
    if self.path:
        self.register_endpoint(self.path, app)
    # Export the default metrics if they were requested at construction time.
    if self._export_defaults:
        self.export_defaults(
            self.buckets, self.group_by, self._defaults_prefix, app)
def unique(seq, key=None):
    """Create a unique list or tuple from a provided list or tuple while
    preserving element order.

    :param seq: The list or tuple to preserve unique items from.
    :type seq: list, tuple
    :param key: If provided, it is called on each item to produce the value
        used for the uniqueness comparison.
    :type key: function, None
    """
    if type(seq) not in (list, tuple):
        raise TypeError("unique argument 1 must be list or tuple, not {0}".format(type(seq).__name__))
    marker_of = key if key is not None else (lambda item: item)
    # A list (not a set) is used so unhashable markers keep working.
    seen_markers = []
    kept = []
    for element in seq:
        token = marker_of(element)
        if token not in seen_markers:
            seen_markers.append(token)
            kept.append(element)
    # Return the same container type that was passed in.
    return type(seq)(kept)
def cmd(send, msg, _):
    """Generates a meaning for the specified acronym.

    Syntax: {command} <acronym>
    """
    if not msg:
        send("What acronym?")
        return
    words = get_list()
    # Keep only ASCII letters from the acronym; everything else is skipped.
    letters = [ch for ch in msg.lower() if ch in string.ascii_lowercase]
    expansion = " ".join(choice(words[ch]) for ch in letters)
    if expansion:
        send('%s: %s' % (msg, expansion.title()))
    else:
        send("No acronym found for %s" % msg)
def patch_lazy(import_path, rvalue=UNDEFINED, side_effect=UNDEFINED, ignore=UNDEFINED, callback=UNDEFINED, ctxt=UNDEFINED):
    """Patches lazy-loaded methods of classes.

    Patching at the class definition overrides the __getattr__ method for the
    class with a new version that patches any callables returned by
    __getattr__ with a key matching the last element of the dot path given.

    :param str import_path: The absolute path to the lazy-loaded method to
        patch. It can be either abstract, or defined by calling __getattr__.
    :param mixed rvalue: The value that should be immediately returned
        without executing the target.
    :param mixed side_effect: The side effect to execute. Either a callable
        with the same parameters as the target, or an exception.
    :param caliendo.Ignore ignore: The parameters that should be ignored when
        determining cachekeys. These are typically dynamic values such as
        datetime.datetime.now() or a setting from an environment-specific
        file.
    :param function callback: A pickleable callback to execute when the
        patched method is called and the cache is hit (has to have been
        cached the first time).
    :param caliendo.hooks.Context ctxt: The context this patch should be
        executed under. Generally reserved for internal use.

    :returns: The decorator that wraps the calling method.
    """
    # NOTE(review): the ctxt parameter is accepted but never referenced in
    # the body below -- the context always comes from get_context(); confirm
    # whether that is intentional.
    def patch_method(unpatched_method):
        # Resolve the target class and attribute name from the dot path.
        context = get_context(unpatched_method)
        getter, attribute = _get_target(import_path)
        klass = getter()
        # Build the dot path to the class' __getattr__ (drop the attr name).
        getattr_path = ".".join(import_path.split('.')[0:-1] + ['__getattr__'])
        def wrapper(wrapped_method, instance, attr):
            # Let unrelated attributes pass through untouched; only the
            # attribute named by import_path gets the replacement method.
            lazy_loaded = wrapped_method.original(instance, attr)
            if attr != attribute:
                return lazy_loaded
            return get_replacement_method(lazy_loaded,
                                          side_effect=side_effect,
                                          rvalue=rvalue,
                                          ignore=ignore,
                                          callback=callback,
                                          context=context)
        # Patch __getattr__ itself so the interception happens lazily.
        @patch(getattr_path, side_effect=WrappedMethod(klass.__getattr__, wrapper), ctxt=context)
        def patched_method(*args, **kwargs):
            try:
                return unpatched_method(*args, **kwargs)
            finally:
                # Always unwind the patch context, even on exceptions.
                context.exit()
        return patched_method
    return patch_method
def _process_merge_request_change(self, payload, event, codebase=None):
    """Consume the merge_request JSON object and turn it into a buildbot change.

    :arguments:
        payload
            Python object that represents the JSON sent by the GitLab
            Service Hook.
        event
            The GitLab event name; stored as the change category.
        codebase
            Optional codebase name attached to the resulting change.

    :returns: a list with one change dict, or an empty list when the MR
        state/action is not interesting (closed/merged/approved, or an
        update that added no code).
    """
    attrs = payload['object_attributes']
    commit = attrs['last_commit']
    when_timestamp = dateparse(commit['timestamp'])
    # @todo provide and document a way to choose between http and ssh url
    repo_url = attrs['target']['git_http_url']
    # project name from http headers is empty for me, so get it from object_attributes/target/name
    project = attrs['target']['name']
    # Filter out uninteresting events
    state = attrs['state']
    if re.match('^(closed|merged|approved)$', state):
        log.msg("GitLab MR#{}: Ignoring because state is {}".format(attrs['iid'], state))
        return []
    action = attrs['action']
    # Only newly opened/reopened MRs, or updates that pushed new commits
    # (signalled by "oldrev"), produce a change.
    if not re.match('^(open|reopen)$', action) and not (action == "update" and "oldrev" in attrs):
        log.msg("GitLab MR#{}: Ignoring because action {} was not open or "
                "reopen or an update that added code".format(attrs['iid'], action))
        return []
    changes = [{
        'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']),
        'files': [],  # @todo use rest API
        'comments': "MR#{}: {}\n\n{}".format(attrs['iid'], attrs['title'], attrs['description']),
        'revision': commit['id'],
        'when_timestamp': when_timestamp,
        'branch': attrs['target_branch'],
        'repository': repo_url,
        'project': project,
        'category': event,
        'revlink': attrs['url'],
        # Source/target coordinates are exposed as properties so schedulers
        # and builders can distinguish fork MRs from same-repo MRs.
        'properties': {
            'source_branch': attrs['source_branch'],
            'source_project_id': attrs['source_project_id'],
            'source_repository': attrs['source']['git_http_url'],
            'source_git_ssh_url': attrs['source']['git_ssh_url'],
            'target_branch': attrs['target_branch'],
            'target_project_id': attrs['target_project_id'],
            'target_repository': attrs['target']['git_http_url'],
            'target_git_ssh_url': attrs['target']['git_ssh_url'],
            'event': event,
        },
    }]
    if codebase is not None:
        changes[0]['codebase'] = codebase
    return changes
def start_notebook_on_demand(self, name, context):
    """Start a notebook if one is not yet running with these settings.

    :return: (context dict with port info, created flag)
    """
    if self.is_running(name):
        previous = self.get_context(name)
        logger.info("Notebook context change detected for %s", name)
        if self.is_same_context(context, previous):
            # Same settings -- reuse the running notebook.
            return previous, False
        self.stop_notebook(name)
        # Make sure we don't race the old process over the context.json file.
        time.sleep(2.0)
    err_log = os.path.join(self.get_work_folder(name), "notebook.stderr.log")
    logger.info("Launching new Notebook named %s, context is %s", name, context)
    logger.info("Notebook log is %s", err_log)
    self.start_notebook(name, context)
    time.sleep(1)
    context = self.get_context(name)
    if "notebook_name" not in context:
        # The process never published its context: startup failed.
        raise RuntimeError("Failed to launch IPython Notebook, see {}".format(err_log))
    return context, True
def WriteUserNotification(self, notification):
    """Writes a notification for a given user."""
    if notification.username not in self.users:
        raise db.UnknownGRRUserError(notification.username)

    # Work on a copy so the caller's object is never mutated.
    stored = notification.Copy()
    if not stored.timestamp:
        stored.timestamp = rdfvalue.RDFDatetime.Now()

    user_queue = self.notifications_by_username.setdefault(stored.username, [])
    user_queue.append(stored)
def parse_valu(text, off=0):
    '''Special syntax for the right side of equals in a macro'''
    _, off = nom(text, off, whites)

    # Parenthesized -> list literal.
    if nextchar(text, off, '('):
        return parse_list(text, off)

    # Quoted -> string literal.
    if isquote(text, off):
        return parse_string(text, off)

    # Unquoted token: bounded by whitespace and storm syntax chars.
    token, off = off_and_token = meh(text, off, valmeh)
    token, off = off_and_token

    # Backward compatibility: bare values that parse as an int (any base
    # accepted by int(x, 0)) are returned as ints.
    try:
        return int(token, 0), off
    except ValueError:
        return token, off
def _generate_service(service_config):
    """Instantiate a pyFilesystem service from a configuration dict.

    :param service_config: dict with keys ``service`` (fs class name, e.g.
        ``'OSFS'``), optional ``args`` (list) and ``kwargs`` (dict), which
        are forwarded to the service constructor.
    :return: the initialized fs service object
    """
    # Discover the filesystem modules shipped with the installed fs package.
    known = [modname
             for _, modname, _ in pkgutil.iter_modules(fs.__path__)
             if modname.endswith('fs')]

    service_mod_name = service_config['service'].lower()
    assert_msg = 'Filesystem "{}" not found in pyFilesystem {}'.format(
        service_mod_name, fs.__version__)
    assert service_mod_name in known, assert_msg

    svc_module = importlib.import_module('fs.{}'.format(service_mod_name))
    svc_class = svc_module.__dict__[service_config['service']]
    return svc_class(*service_config.get('args', []),
                     **service_config.get('kwargs', {}))
def expect_column_value_lengths_to_be_between ( self , column , min_value = None , max_value = None , mostly = None , result_format = None , include_config = False , catch_exceptions = None , meta = None ) : """Expect column entries to be strings with length between a minimum value and a maximum value ( inclusive ) . This expectation only works for string - type values . Invoking it on ints or floats will raise a TypeError . expect _ column _ value _ lengths _ to _ be _ between is a : func : ` column _ map _ expectation < great _ expectations . data _ asset . dataset . Dataset . column _ map _ expectation > ` . Args : column ( str ) : The column name . Keyword Args : min _ value ( int or None ) : The minimum value for a column entry length . max _ value ( int or None ) : The maximum value for a column entry length . mostly ( None or a float between 0 and 1 ) : Return ` " success " : True ` if at least mostly percent of values match the expectation . For more detail , see : ref : ` mostly ` . Other Parameters : result _ format ( str or None ) : Which output mode to use : ` BOOLEAN _ ONLY ` , ` BASIC ` , ` COMPLETE ` , or ` SUMMARY ` . For more detail , see : ref : ` result _ format < result _ format > ` . include _ config ( boolean ) : If True , then include the expectation config as part of the result object . For more detail , see : ref : ` include _ config ` . catch _ exceptions ( boolean or None ) : If True , then catch exceptions and include them as part of the result object . For more detail , see : ref : ` catch _ exceptions ` . meta ( dict or None ) : A JSON - serializable dictionary ( nesting allowed ) that will be included in the output without modification . For more detail , see : ref : ` meta ` . Returns : A JSON - serializable expectation result object . Exact fields vary depending on the values passed to : ref : ` result _ format < result _ format > ` and : ref : ` include _ config ` , : ref : ` catch _ exceptions ` , and : ref : ` meta ` . 
Notes : * min _ value and max _ value are both inclusive . * If min _ value is None , then max _ value is treated as an upper bound , and the number of acceptable rows has no minimum . * If max _ value is None , then min _ value is treated as a lower bound , and the number of acceptable rows has no maximum . See Also : expect _ column _ value _ lengths _ to _ equal"""
raise NotImplementedError
def get_input_list(self):
    """Return an ordered list of all available inputs.

    Each element is a ``{"key": ..., "name": ...}`` dict, placed at the
    index given by that input's ``order`` field in ``self.command['input']``.
    """
    spec = self.command['input']
    ordered = [' '] * len(spec)
    for key, meta in spec.items():
        ordered[meta['order']] = {"key": key, "name": meta['name']}
    return ordered
def _validate_func_args(func, kwargs):
    """Validate decorator args when used to decorate a function.

    :param func: the decorated function (first positional arg is assumed
        to be ``self`` and is excluded from the comparison)
    :param kwargs: keyword arguments passed to the decorator
    :raises TypeError: if the decorator kwargs do not exactly match the
        function's own keyword parameter names
    """
    # inspect.getargspec() was removed in Python 3.11; getfullargspec()
    # is the drop-in replacement for positional-arg introspection.
    argspec = inspect.getfullargspec(func)
    if set(kwargs) != set(argspec.args[1:]):  # chop off self
        raise TypeError("decorator kwargs do not match %s()'s kwargs"
                        % func.__name__)
def get_version():
    """Returns shorter version (digit parts only) as string."""
    # First three components are numeric; an optional fourth is a
    # pre-release tag appended verbatim (no separating dot).
    short = ".".join(str(part) for part in VERSION[:3])
    if len(VERSION) > 3:
        short += VERSION[3]
    return short
def readInfo(stream):
    """Read previously-written information about diffs.

    Each line of ``stream`` holds three whitespace-separated fields:
    ``toUUID fromUUID size``. Parsed sizes are stored into
    ``Diff.theKnownSizes[toUUID][fromUUID]``. Reading is best-effort:
    a malformed size skips the line, and any other failure is logged
    without raising.

    :param stream: iterable of text lines (e.g. an open file)
    """
    try:
        for line in stream:
            (toUUID, fromUUID, size) = line.split()
            try:
                size = int(size)
            except ValueError:
                # Narrowed from bare Exception: int() on a str token can
                # only raise ValueError here.
                logger.warning("Bad size: %s", size)
                continue
            logger.debug("diff info: %s %s %d", toUUID, fromUUID, size)
            Diff.theKnownSizes[toUUID][fromUUID] = size
    except Exception as error:
        # Deliberate best-effort catch-all: an unreadable info file is
        # logged, not fatal.  logger.warn() is deprecated -> warning().
        logger.warning("Can't read .bs info file (%s)", error)
def GetNeighbors ( ID , model = None , neighbors = None , mag_range = None , cdpp_range = None , aperture_name = None , cadence = 'lc' , ** kwargs ) : '''Return ` neighbors ` random bright stars on the same module as ` EPIC ` . : param int ID : The target ID number : param str model : The : py : obj : ` everest ` model name . Only used when imposing CDPP bounds . Default : py : obj : ` None ` : param int neighbors : Number of neighbors to return . Default None : param str aperture _ name : The name of the aperture to use . Select ` custom ` to call : py : func : ` GetCustomAperture ` . Default : py : obj : ` None ` : param str cadence : The light curve cadence . Default ` lc ` : param tuple mag _ range : ( ` low ` , ` high ` ) values for the Kepler magnitude . Default : py : obj : ` None ` : param tuple cdpp _ range : ( ` low ` , ` high ` ) values for the de - trended CDPP . Default : py : obj : ` None `'''
raise NotImplementedError ( 'This mission is not yet supported.' )
def hotmaps_permutation ( obs_stat , context_counts , context_to_mut , seq_context , gene_seq , window , num_permutations = 10000 , stop_criteria = 100 , max_batch = 25000 , null_save_path = None ) : """Performs null - permutations for position - based mutation statistics in a single gene . Parameters obs _ stat : dict dictionary mapping codons to the sum of mutations in a window context _ counts : pd . Series number of mutations for each context context _ to _ mut : dict dictionary mapping nucleotide context to a list of observed somatic base changes . seq _ context : SequenceContext Sequence context for the entire gene sequence ( regardless of where mutations occur ) . The nucleotide contexts are identified at positions along the gene . gene _ seq : GeneSequence Sequence of gene of interest window : int Number of codons to the left / right of a mutated position to consider in the window num _ permutations : int , default : 10000 number of permutations to create for null stop _ criteria : int stop after stop _ criteria iterations are more significant then the observed statistic . max _ batch : int maximum number of whole gene simulations to do at once . For large number of simulations holding a matrix of M x N , where M is the number of mutations and N is the number of simulations , can get quite large . null _ save _ path : str or None File path to save null distribution . If None , don ' t save it . Returns pvals : dict Maps mutated codon position to the calculated p - value"""
# get contexts and somatic base mycontexts = context_counts . index . tolist ( ) somatic_base = [ base for one_context in mycontexts for base in context_to_mut [ one_context ] ] # calculate the # of batches for simulations max_batch = min ( num_permutations , max_batch ) num_batches = num_permutations // max_batch remainder = num_permutations % max_batch batch_sizes = [ max_batch ] * num_batches if remainder : batch_sizes += [ remainder ] # figure out which position has highest value max_key = { w : max ( obs_stat [ w ] , key = ( lambda key : obs_stat [ w ] [ key ] ) ) for w in window } # setup null dist counts null_cts = { w : { k : 0 for k in obs_stat [ w ] } for w in window } # empirical null distribution ( saved if file path provided ) empirical_null = { w : { } for w in window } num_sim = 0 # number of simulations for j , batch_size in enumerate ( batch_sizes ) : # stop iterations if reached sufficient precision # stop iterations if reached sufficient precision stop_flag = [ ( null_cts [ w ] [ max_key [ w ] ] >= stop_criteria ) for w in window ] if all ( stop_flag ) : break # if null _ cts [ max _ key ] > = stop _ criteria : # break # get random positions determined by sequence context tmp_contxt_pos = seq_context . random_pos ( context_counts . iteritems ( ) , batch_size ) tmp_mut_pos = np . hstack ( pos_array for base , pos_array in tmp_contxt_pos ) # calculate position - based statistics as a result of random positions for i , row in enumerate ( tmp_mut_pos ) : # get info about mutations tmp_mut_info = mc . get_aa_mut_info ( row , somatic_base , gene_seq ) # calculate position info tmp_pos , tmp_sim = utils . 
calc_windowed_sum ( tmp_mut_info [ 'Codon Pos' ] , tmp_mut_info [ 'Reference AA' ] , tmp_mut_info [ 'Somatic AA' ] , window ) # update the counts when the empirical null passes the observed for tmp_w in tmp_sim : for tmp_key in tmp_sim [ tmp_w ] : # get mutation count for simulation val = tmp_sim [ tmp_w ] [ tmp_key ] # add to empirical null distribution empirical_null [ tmp_w ] . setdefault ( val , 0 ) empirical_null [ tmp_w ] [ val ] += 1 # update counts used for p - value for key in null_cts [ tmp_w ] : if val >= obs_stat [ tmp_w ] [ key ] : null_cts [ tmp_w ] [ key ] += 1 # update the number of simulations num_sim += len ( tmp_pos ) # stop iterations if reached sufficient precision stop_flag = [ ( null_cts [ w ] [ max_key [ w ] ] >= stop_criteria ) for w in window ] if all ( stop_flag ) : break # calculate p - value from empirical null - distribution pvals = { w : { k : float ( null_cts [ w ] [ k ] ) / ( num_sim ) for k in obs_stat [ w ] } for w in window } # save empirical distribution if null_save_path : for w in window : # create null distribution output = [ [ 'mutation_count' , 'p-value' ] ] sorted_cts = sorted ( empirical_null [ w ] . keys ( ) ) tmp_sum = 0 for i in range ( len ( sorted_cts ) ) : tmp_sum += empirical_null [ w ] [ sorted_cts [ - ( i + 1 ) ] ] tmp_pval = tmp_sum / float ( num_sim ) output . append ( [ sorted_cts [ - ( i + 1 ) ] , tmp_pval ] ) # save output with open ( null_save_path . format ( w ) , 'w' ) as handle : mywriter = csv . writer ( handle , delimiter = '\t' , lineterminator = '\n' ) mywriter . writerows ( output ) return pvals
def subscribe_condition_fulfilled(self, agreement_id, timeout, callback,
                                  args, timeout_callback=None, wait=False):
    """Subscribe to the condition fulfilled event.

    :param agreement_id: id of the agreement, hex str
    :param timeout: subscription timeout
    :param callback: callable invoked when the event fires
    :param args: extra arguments forwarded to the callback
    :param timeout_callback: callable invoked when the timeout elapses
    :param wait: if true block the listener until get the event, bool
    :return: result of the underlying event subscription
    """
    logger.info(f'Subscribing {self.FULFILLED_EVENT} event with agreement id {agreement_id}.')
    # Filter events down to this agreement only.
    event_filter = {
        '_agreementId': Web3Provider.get_web3().toBytes(hexstr=agreement_id),
    }
    return self.subscribe_to_event(
        self.FULFILLED_EVENT,
        timeout,
        event_filter,
        callback=callback,
        timeout_callback=timeout_callback,
        args=args,
        wait=wait,
    )
def genslices_ndim(ndim, shape):
    """Generate all possible slice tuples for 'shape'.

    Yields the cartesian product of per-axis slice generators for the
    first ``ndim`` axes of ``shape``.
    """
    per_axis = (genslices(shape[axis]) for axis in range(ndim))
    yield from product(*per_axis)
def formatters(*chained_formatters):
    """Chain formatter functions into a single callable.

    :param chained_formatters: string-to-string callables, applied in order
    :return: a function that feeds its argument through every formatter
    """
    def formatters_chain(input_string):  # pylint: disable=missing-docstring
        result = input_string
        for step in chained_formatters:
            result = step(result)
        return result

    return formatters_chain
def _repack_options(options):
    '''Repack the options data into a {text-key: normalized-value} dict'''
    repacked = salt.utils.data.repack_dictlist(options)
    return {six.text_type(key): _normalize(value)
            for key, value in six.iteritems(repacked)}
def setWorkingPlayAreaSize(self, sizeX, sizeZ):
    """Sets the Play Area in the working copy.

    :param sizeX: play-area extent along the X axis
    :param sizeZ: play-area extent along the Z axis
    """
    # Delegate straight to the underlying OpenVR function table.
    self.function_table.setWorkingPlayAreaSize(sizeX, sizeZ)
def deploy(self, id_networkv4):
    """Deploy network in equipments and set column 'active = 1' in tables redeipv4.

    :param id_networkv4: ID for NetworkIPv4
    :return: Equipments configuration output
    """
    uri = 'api/networkv4/%s/equipments/' % id_networkv4
    # POST with an empty payload triggers the deploy on the equipments.
    return super(ApiNetworkIPv4, self).post(uri, data=dict())
def fit(self, X):
    """Fit a t-SNE embedding for a given data set.

    Runs the early exaggeration phase followed by the regular optimization
    phase, exactly as in standard t-SNE.

    Parameters
    ----------
    X : np.ndarray
        The data matrix to be embedded.

    Returns
    -------
    TSNEEmbedding
        A fully optimized t-SNE embedding.
    """
    embedding = self.prepare_initial(X)

    def _phase(n_iter, exaggeration, momentum):
        # One optimization phase, mutating the embedding in place.
        embedding.optimize(
            n_iter=n_iter,
            exaggeration=exaggeration,
            momentum=momentum,
            inplace=True,
            propagate_exception=True,
        )

    try:
        # Early exaggeration with lower momentum so points can move around
        # freely and find their neighborhoods.
        _phase(self.early_exaggeration_iter, self.early_exaggeration,
               self.initial_momentum)
        # Restore actual affinity probabilities and increase momentum to get
        # the final, optimized embedding.
        _phase(self.n_iter, self.exaggeration, self.final_momentum)
    except OptimizationInterrupt as ex:
        log.info("Optimization was interrupted with callback.")
        embedding = ex.final_embedding

    return embedding
def get_length(topics, yaml_info):
    '''Find the length (# of rows) in the created dataframe.

    Sums the message count of the first matching bag-info entry for each
    requested topic; topics absent from the bag info contribute zero.
    '''
    info = yaml_info['topics']
    return sum(
        next((entry['messages'] for entry in info if entry['topic'] == topic), 0)
        for topic in topics
    )
def gen_dist(network, techs=None, snapshot=1, n_cols=3, gen_size=0.2,
             filename=None):
    """Generation distribution.

    Parameters
    ----------
    network : PyPSA network container
        Holds topology of grid including results from powerflow analysis
    techs : dict
        type of technologies which shall be plotted
    snapshot : int
        snapshot
    n_cols : int
        number of columns of the plot
    gen_size : num
        size of generation bubbles at the buses
    filename : str
        Specify filename
        If not given, figure will be shown directly
    """
    if techs is None:
        techs = network.generators.carrier.unique()

    n_graphs = len(techs)
    # ceil(n_graphs / n_cols) rows
    if n_graphs % n_cols == 0:
        n_rows = n_graphs // n_cols
    else:
        n_rows = n_graphs // n_cols + 1

    # squeeze=False guarantees a 2-D axes array even for a single row or
    # column, so axes[i_row, i_col] below always works (without it,
    # plt.subplots squeezes 1-D layouts and the indexing crashed whenever
    # n_rows == 1).
    fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, squeeze=False)
    size = 4
    fig.set_size_inches(size * n_cols, size * n_rows)

    for i, tech in enumerate(techs):
        i_row = i // n_cols
        i_col = i % n_cols
        ax = axes[i_row, i_col]

        gens = network.generators[network.generators.carrier == tech]
        # Snapshot-weighted dispatch of this carrier, aggregated per bus
        # (buses without such generators are filled with 0).
        gen_distribution = network.generators_t.p.mul(
            network.snapshot_weightings, axis=0
        )[gens.index].loc[network.snapshots[snapshot]].groupby(
            network.generators.bus
        ).sum().reindex(network.buses.index, fill_value=0.)

        network.plot(ax=ax, bus_sizes=gen_size * gen_distribution,
                     line_widths=0.1)
        ax.set_title(tech)

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)
        plt.close()
def get_list_url(cls, world, town, house_type: HouseType = HouseType.HOUSE):
    """Gets the URL to the house list on Tibia.com with the specified parameters.

    Parameters
    ----------
    world : :class:`str`
        The name of the world.
    town : :class:`str`
        The name of the town.
    house_type : :class:`HouseType`
        Whether to search for houses or guildhalls.

    Returns
    -------
    :class:`str`
        The URL to the list matching the parameters.
    """
    # The site expects the plural form ("houses" / "guildhalls").
    plural = "%ss" % house_type.value
    return HOUSE_LIST_URL % (urllib.parse.quote(world),
                             urllib.parse.quote(town),
                             plural)
def _invert(self):
    """Invert coverage data from {test_context: {file: lines}}
    to {file: {test_context: lines}}."""
    inverted = defaultdict(dict)
    for test_context, src_context in six.iteritems(self.data):
        for src_file, lines in six.iteritems(src_context):
            inverted[src_file][test_context] = lines
    return inverted
def process_train_set(hdf5_file, train_archive, patch_archive, n_train,
                      wnid_map, shuffle_seed=None):
    """Process the ILSVRC2010 training set.

    Parameters
    ----------
    hdf5_file : :class:`h5py.File` instance
        HDF5 file handle to which to write. Assumes `features`, `targets`
        and `filenames` already exist and have first dimension larger than
        `n_train`.
    train_archive : str or file-like object
        Filename or file handle for the TAR archive of training images.
    patch_archive : str or file-like object
        Filename or file handle for the TAR archive of patch images.
    n_train : int
        The number of items in the training set.
    wnid_map : dict
        A dictionary mapping WordNet IDs to class indices.
    shuffle_seed : int or sequence, optional
        Seed for a NumPy random number generator that permutes the training
        set on disk. If `None`, no permutation is performed (the default).
    """
    # Producer streams images out of the TAR archives; consumer writes them
    # into the pre-allocated HDF5 datasets.
    produce = partial(train_set_producer,
                      train_archive=train_archive,
                      patch_archive=patch_archive,
                      wnid_map=wnid_map)
    consume = partial(image_consumer,
                      hdf5_file=hdf5_file,
                      num_expected=n_train,
                      shuffle_seed=shuffle_seed)
    producer_consumer(produce, consume)
def SetClipboardData(type, content):
    """
    Place `content` of the given clipboard format `type` on the Windows
    clipboard.

    Modeled after
    http://msdn.microsoft.com/en-us/library/ms649016%28VS.85%29.aspx#_win32_Copying_Information_to_the_Clipboard

    NOTE(review): presumably the clipboard has already been opened by the
    caller, as Win32 requires OpenClipboard before SetClipboardData --
    confirm against call sites.

    :raises NotImplementedError: for formats other than CF_TEXT,
        CF_UNICODETEXT and CF_HTML
    :raises WindowsError: if the Win32 SetClipboardData call fails
    """
    # Map clipboard format -> ctypes buffer allocator for that format.
    allocators = {
        clipboard.CF_TEXT: ctypes.create_string_buffer,
        clipboard.CF_UNICODETEXT: ctypes.create_unicode_buffer,
        clipboard.CF_HTML: ctypes.create_string_buffer,
    }
    if type not in allocators:
        raise NotImplementedError("Only text and HTML types are supported at this time")
    # allocate the memory for the data
    content = allocators[type](content)
    # GMEM_MOVEABLE is required for memory handed to the clipboard.
    flags = memory.GMEM_MOVEABLE
    size = ctypes.sizeof(content)
    handle_to_copy = windll.kernel32.GlobalAlloc(flags, size)
    # Copy the buffer into the global allocation while it is locked;
    # ownership of the handle passes to the clipboard on success.
    with LockedMemory(handle_to_copy) as lm:
        ctypes.memmove(lm.data_ptr, content, size)
    result = clipboard.SetClipboardData(type, handle_to_copy)
    if result is None:
        raise WindowsError()
def Tracing_recordClockSyncMarker(self, syncId):
    """Record a clock sync marker in the trace.

    Function path: Tracing.recordClockSyncMarker
    Domain: Tracing
    Method name: recordClockSyncMarker

    :param syncId: (str, required) The ID of this clock sync marker.
    :return: result of dispatching the 'Tracing.recordClockSyncMarker'
        command (the protocol method itself has no return value)
    """
    assert isinstance(syncId, (str,)), "Argument 'syncId' must be of type '['str']'. Received type: '%s'" % type(syncId)
    return self.synchronous_command('Tracing.recordClockSyncMarker',
                                    syncId=syncId)
def get_slug(self, language_code, lang_name):
    """Return a 'slug' string for cms.api.create_page().

    Notes:
        - slug must be unique!
        - slug is used to check if page already exists!

    :raises ValueError: if the title, or the slug derived from it, is empty
    """
    title = self.get_title(language_code, lang_name)
    # Validate with explicit raises instead of `assert`: asserts are
    # silently stripped under `python -O`, disabling these checks.
    if title == "":
        raise ValueError("get_title() returned an empty title!")
    title = str(title)  # e.g.: evaluate a lazy translation
    slug = slugify(title)
    if slug == "":
        raise ValueError("Title %r results in empty slug!" % title)
    return slug
def configure(self, options, conf):
    """Get filetype option to specify additional filetypes to watch."""
    Plugin.configure(self, options, conf)
    if not options.filetype:
        return
    self.filetypes += options.filetype
def __add_tokens(self, token_tier):
    """Adds all tokens to the document graph.

    Exmaralda considers tokens to be annotations as well, which is why only
    the token node IDs could be extracted from the timeline (via
    ``__add_tokenization()``) but not the token strings themselves; this
    method fills in the text.

    Parameters
    ----------
    token_tier : etree._Element
        an etree element representing the <tier> which contains the tokens
    """
    for event in token_tier.iter('event'):
        start = event.attrib['start']
        end = event.attrib['end']
        covered = self.gen_token_range(start, end)
        assert len(covered) == 1, "Events in the token tier must not span more than one token."
        self.node[start][self.ns + ':token'] = event.text
def decode_datavalue(self,
                     datatype: str,
                     datavalue: Mapping[str, object]) -> object:
    """Decode the given ``datavalue`` using the configured
    :attr:`datavalue_decoder`.

    .. versionadded:: 0.3.0
    """
    decoder = cast(
        Callable[[Client, str, Mapping[str, object]], object],
        self.datavalue_decoder,
    )
    return decoder(self, datatype, datavalue)
def get_rows(runSetResults):
    """Create list of rows with all data. Each row consists of several RunResults."""
    # zip(*...) groups the i-th result of every run set into one task tuple.
    grouped_by_task = zip(*(runset.results for runset in runSetResults))
    return [Row(task_results) for task_results in grouped_by_task]