signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def _shorten_url ( self , text ) : '''Shorten a URL and make sure to not cut of html entities .'''
if len ( text ) > self . _max_url_length and self . _max_url_length != - 1 : text = text [ 0 : self . _max_url_length - 3 ] amp = text . rfind ( '&' ) close = text . rfind ( ';' ) if amp != - 1 and ( close == - 1 or close < amp ) : text = text [ 0 : amp ] return text + '...' else : return text
def getStreamNetworkAsWkt(self, session, withNodes=True):
    """Retrieve the stream network geometry in Well Known Text format.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
            object bound to a PostGIS enabled database.
        withNodes (bool, optional): Include the nodes of each link.
            Defaults to True.

    Returns:
        str: Well Known Text string.
    """
    fragments = []
    for link in self.streamLinks:
        link_wkt = link.getAsWkt(session)
        if link_wkt:
            fragments.append(link_wkt)
        if withNodes:
            # Empty node geometries are skipped, same as empty links.
            for node in link.nodes:
                node_wkt = node.getAsWkt(session)
                if node_wkt:
                    fragments.append(node_wkt)
    return 'GEOMCOLLECTION ({0})'.format(', '.join(fragments))
def tostring(self, inject):
    """Render this element as a single string.

    The ``inject`` callable receives this element and the newline-joined
    content of its divisions, allowing callers to place content before
    any element.
    """
    rendered = [format(division.tostring(inject)) for division in self.divisions]
    return inject(self, '\n'.join(rendered))
def reload(self, client=None):
    """Update this notification from the server configuration.

    See:
    https://cloud.google.com/storage/docs/json_api/v1/notifications/get

    If :attr:`user_project` is set on the bucket, bills the API request
    to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType``
    :param client: Optional. The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :raises ValueError: if the notification has no ID.
    """
    # BUGFIX: corrected "intialized" typo in the error message; also
    # removed the docstring's false ":rtype: bool / returns True" claim --
    # this method returns None and raises on failure.
    if self.notification_id is None:
        raise ValueError("Notification not initialized by server")

    client = self._require_client(client)

    query_params = {}
    if self.bucket.user_project is not None:
        # Bill the request to the bucket's designated user project.
        query_params["userProject"] = self.bucket.user_project

    response = client._connection.api_request(
        method="GET", path=self.path, query_params=query_params
    )
    self._set_properties(response)
def com_google_fonts_check_name_rfn(ttFont):
    """Name table strings must not contain the string 'Reserved Font Name'."""
    found_rfn = False
    for entry in ttFont["name"].names:
        decoded = entry.toUnicode()
        # Case-insensitive match; OFL reserves this phrase.
        if "reserved font name" in decoded.lower():
            found_rfn = True
            yield WARN, ("Name table entry (\"{}\")"
                         " contains \"Reserved Font Name\"."
                         " This is an error except in a few specific"
                         " rare cases.").format(decoded)
    if not found_rfn:
        yield PASS, ("None of the name table strings"
                     " contain \"Reserved Font Name\".")
def get_lint_config(config_path=None):
    """Tries loading the config from the given path.

    If no path is specified, the default config path is tried, and if
    that does not exist, the built-in default config is returned.
    """
    # An explicitly provided path always takes precedence.
    if config_path:
        config = LintConfig.load_from_file(config_path)
        click.echo("Using config from {0}".format(config_path))
        return config

    # Fall back to the default config file, if one exists.
    if os.path.exists(DEFAULT_CONFIG_FILE):
        config = LintConfig.load_from_file(DEFAULT_CONFIG_FILE)
        click.echo("Using config from {0}".format(DEFAULT_CONFIG_FILE))
        return config

    # No config file at all: use built-in defaults.
    return LintConfig()
def load_fam(self, pheno_covar=None):
    """Load contents from the .fam file, updating the pheno_covar with
    family ids found.

    :param pheno_covar: Phenotype/covariate object (may be None)
    :return: None
    """
    logging.info("Loading file: %s" % (self.fam_file))

    # Column indices depend on which optional columns are configured.
    pheno_col = 5
    if not DataParser.has_sex:
        pheno_col -= 1
    if not DataParser.has_parents:
        pheno_col -= 2
    if not DataParser.has_fid:
        pheno_col -= 1
    sex_col = pheno_col - 1

    mask_components = []  # 0 -> individual kept, 1 -> masked out
    for line in open(self.fam_file):
        words = line.strip().split()
        if len(words) > 1:
            indid = ":".join(words[0:2])
            if DataParser.valid_indid(indid):
                mask_components.append(0)

                sex = None
                pheno = None
                if DataParser.has_sex:
                    sex = int(words[sex_col])
                if DataParser.has_pheno:
                    pheno = float(words[pheno_col])
                if pheno_covar is not None:
                    pheno_covar.add_subject(indid, sex, pheno)
                # NOTE: this check is redundant inside len(words) > 1,
                # kept for behavioral parity with the original.
                if len(words) > 0:
                    self.families.append(words)
            else:
                mask_components.append(1)

    # BUGFIX: the original assigned numpy.zeros(..., dtype=int8) to
    # self.ind_mask and immediately overwrote it with the mask array;
    # that dead allocation has been removed.
    self.ind_mask = numpy.array(mask_components)
    self.ind_count = self.ind_mask.shape[0]

    if pheno_covar is not None:
        pheno_covar.freeze_subjects()
def is_valid_pid_for_create(did):
    """Assert that ``did`` can be used as a PID for creating a new object
    with MNStorage.create() or MNStorage.update().
    """
    if d1_gmn.app.did.is_valid_pid_for_create(did):
        return
    # The identifier already designates something else; report what.
    raise d1_common.types.exceptions.IdentifierNotUnique(
        0,
        'Identifier is already in use as {}. did="{}"'.format(
            d1_gmn.app.did.classify_identifier(did), did
        ),
        identifier=did,
    )
def get_log_config(component, handlers, level='DEBUG', path='/var/log/vfine/'):
    """Return a log config for a django project.

    ``component`` names the log files, ``handlers`` lists the handler
    names attached to each logger, ``level`` controls the console and
    root logger, and ``path`` is the log directory.
    """
    def _rotating(suffix, lvl, formatter, max_bytes):
        # Rotating file handler writing to <path><component>.<suffix>.log
        return {
            'level': lvl,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': path + component + '.' + suffix + '.log',
            'maxBytes': max_bytes,
            'backupCount': 5,
            'formatter': formatter,
        }

    standard_fmt = ('%(asctime)s [%(levelname)s][%(threadName)s]'
                    '[%(name)s.%(funcName)s():%(lineno)d] %(message)s')
    color_fmt = ('%(asctime)s '
                 '%(log_color)s%(bold)s[%(levelname)s]%(reset)s'
                 '[%(threadName)s][%(name)s.%(funcName)s():%(lineno)d] '
                 '%(blue)s%(message)s')
    one_gib = 1024 * 1024 * 1024

    return {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {'format': standard_fmt},
            'color': {
                '()': 'shaw.log.SplitColoredFormatter',
                'format': color_fmt,
            },
        },
        'handlers': {
            'debug': _rotating('debug', 'DEBUG', 'standard', one_gib),
            'color': _rotating('color', 'DEBUG', 'color', one_gib),
            'info': _rotating('info', 'INFO', 'standard', one_gib),
            'error': _rotating('error', 'ERROR', 'standard', 1024 * 1024 * 100),
            'console': {
                'level': level,
                'class': 'logging.StreamHandler',
                'formatter': 'standard',
            },
        },
        'loggers': {
            'django': {'handlers': handlers, 'level': 'INFO',
                       'propagate': False},
            'django.request': {'handlers': handlers, 'level': 'INFO',
                               'propagate': False},
            '': {'handlers': handlers, 'level': level, 'propagate': False},
        },
    }
def apply_patches(self, arch, build_dir=None):
    '''Apply any patches for the Recipe.

    .. versionchanged:: 0.6.0
        Add ability to apply patches from any dir via kwarg `build_dir`
    '''
    if not self.patches:
        return
    info_main('Applying patches for {}[{}]'.format(self.name, arch.arch))
    if self.is_patched(arch):
        info_main('{} already patched, skipping'.format(self.name))
        return
    build_dir = build_dir if build_dir else self.get_build_dir(arch.arch)
    for patch in self.patches:
        if isinstance(patch, (tuple, list)):
            # (patch, check) pairs are applied only when the check passes.
            patch, patch_check = patch
            if not patch_check(arch=arch, recipe=self):
                continue
        self.apply_patch(
            patch.format(version=self.version, arch=arch.arch),
            arch.arch, build_dir=build_dir)
    # Marker file so is_patched() short-circuits on the next run.
    shprint(sh.touch, join(build_dir, '.patched'))
def update_vrf_table(self, route_dist, prefix=None, next_hop=None,
                     route_family=None, route_type=None, tunnel_type=None,
                     is_withdraw=False, redundancy_mode=None,
                     pmsi_tunnel_type=None, **kwargs):
    """Update a BGP route in the VRF table identified by `route_dist`
    with the given `next_hop`.

    If `is_withdraw` is False, which is the default, add a BGP route
    to the VRF table identified by `route_dist` with the given
    `next_hop`.
    If `is_withdraw` is True, remove a BGP route from the VRF table
    and the given `next_hop` is ignored.

    If `route_family` is VRF_RF_L2_EVPN, `route_type` and `kwargs`
    are required to construct EVPN NLRI and `prefix` is ignored.

    ``redundancy_mode`` specifies a redundancy mode type.

    ``pmsi_tunnel_type`` specifies the type of the PMSI tunnel attribute
    used to encode the multicast tunnel identifier. This field is
    advertised only if route_type is EVPN_MULTICAST_ETAG_ROUTE.

    Returns assigned VPN label.
    """
    # Imported locally, presumably to avoid a circular import -- confirm.
    from ryu.services.protocols.bgp.core import BgpCoreError

    assert route_dist

    if is_withdraw:
        # Withdrawals need no label and ignore any provided next hop.
        gen_lbl = False
        next_hop = None
    else:
        gen_lbl = True
        if not (is_valid_ipv4(next_hop) or is_valid_ipv6(next_hop)):
            raise BgpCoreError(
                desc='Invalid IPv4/IPv6 nexthop: %s' % next_hop)

    # VRF tables are keyed by (route distinguisher, route family).
    vrf_table = self._tables.get((route_dist, route_family))
    if vrf_table is None:
        raise BgpCoreError(
            desc='VRF table does not exist: route_dist=%s, '
            'route_family=%s' % (route_dist, route_family))

    vni = kwargs.get('vni', None)

    if route_family == VRF_RF_IPV4:
        if not is_valid_ipv4_prefix(prefix):
            raise BgpCoreError(desc='Invalid IPv4 prefix: %s' % prefix)
        ip, masklen = prefix.split('/')
        prefix = IPAddrPrefix(int(masklen), ip)
    elif route_family == VRF_RF_IPV6:
        if not is_valid_ipv6_prefix(prefix):
            raise BgpCoreError(desc='Invalid IPv6 prefix: %s' % prefix)
        ip6, masklen = prefix.split('/')
        prefix = IP6AddrPrefix(int(masklen), ip6)
    elif route_family == VRF_RF_L2_EVPN:
        assert route_type
        if route_type == EvpnMacIPAdvertisementNLRI.ROUTE_TYPE_NAME:
            # MPLS labels will be assigned automatically
            kwargs['mpls_labels'] = []
        if route_type == EvpnInclusiveMulticastEthernetTagNLRI.ROUTE_TYPE_NAME:
            # Inclusive Multicast Ethernet Tag Route does not have "vni",
            # omit "vni" from "kwargs" here.
            vni = kwargs.pop('vni', None)
        subclass = EvpnNLRI._lookup_type_name(route_type)
        kwargs['route_dist'] = route_dist
        esi = kwargs.get('esi', None)
        if esi is not None:
            if isinstance(esi, dict):
                esi_type = esi.get('type', 0)
                esi_class = EvpnEsi._lookup_type(esi_type)
                kwargs['esi'] = esi_class.from_jsondict(esi)
            else:  # isinstance(esi, numbers.Integral)
                kwargs['esi'] = EvpnArbitraryEsi(
                    type_desc.Int9.from_user(esi))
        if vni is not None:
            # Disable to generate MPLS labels,
            # because encapsulation type is not MPLS.
            from ryu.services.protocols.bgp.api.prefix import (
                TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE)
            assert tunnel_type in [
                None, TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]
            gen_lbl = False
        prefix = subclass(**kwargs)
    else:
        raise BgpCoreError(
            desc='Unsupported route family %s' % route_family)

    # We do not check if we have a path to given prefix, we issue
    # withdrawal. Hence multiple withdrawals have not side effect.
    return vrf_table.insert_vrf_path(
        nlri=prefix, next_hop=next_hop, gen_lbl=gen_lbl,
        is_withdraw=is_withdraw, redundancy_mode=redundancy_mode,
        vni=vni, tunnel_type=tunnel_type,
        pmsi_tunnel_type=pmsi_tunnel_type)
def pipe_fetchpage(context=None, _INPUT=None, conf=None, **kwargs):
    """A source that fetches the content of a given web site as a string.
    Loopable.

    NOTE: this is Python 2 code (print statements, ``unicode``).

    context : pipe2py.Context object
    _INPUT : pipeforever asyncPipe or an iterable of items or fields
    conf : dict
        URL -- url object containing the URL to download
        from -- string from where to start the input
        to -- string to limit the input
        token -- if present, split the input on this token to generate items

    Description: http://pipes.yahoo.com/pipes/docs?doc=sources#FetchPage

    TODOS:
     - don't retrieve pages larger than 200k
     - don't retrieve if page is not indexable.
     - item delimiter removes the closing tag if using a HTML tag
       (not documented but happens)
     - items should be cleaned, i.e. stripped of HTML tags

    Yields (_OUTPUT): items
    """
    conf = DotDict(conf)
    split_token = conf.get('token', **kwargs)
    urls = utils.listize(conf['URL'])

    for item in _INPUT:
        for item_url in urls:
            # Resolve the URL value against the current item, then
            # normalize it to an absolute path.
            url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
            url = utils.get_abspath(url)

            if not url:
                continue

            f = urlopen(url)

            # TODO: it seems that Yahoo! converts relative links to
            # absolute. This needs to be done on the content but seems to
            # be a non-trival task python?
            content = unicode(f.read(), 'utf-8')

            if context and context.verbose:
                print '............Content .................'
                print content
                print '...............EOF...................'

            parsed = _parse_content(content, conf, **kwargs)
            # Either split the page into items on the token, or emit the
            # whole parsed page as a single item.
            items = parsed.split(split_token) if split_token else [parsed]

            if context and context.verbose:
                print "FetchPage: found count items:", len(items)

            for i in items:
                if context and context.verbose:
                    print "--------------item data --------------------"
                    print i
                    print "--------------EOF item data ----------------"
                yield {"content": i}

        if item.get('forever'):
            # _INPUT is pipeforever and not a loop,
            # so we just yield our item once
            break
def from_string(cls, input_string, cls_=None):
    """Initialize a `Structure` object from a string with data in XSF format.

    Args:
        input_string: String with the structure in XSF format. See
            http://www.xcrysden.org/doc/XSF.html
        cls_: Structure class to be created. default: pymatgen structure
    """
    # Relevant XSF layout:
    #   PRIMVEC            <- followed by three lattice-vector rows (Angstrom)
    #   PRIMCOORD
    #   <num_sites> 1      <- site count on the next line
    #   <Z> <x> <y> <z>    <- atomic number plus cartesian coordinates
    lattice, coords, species = [], [], []
    lines = input_string.splitlines()

    for index in range(len(lines)):
        if "PRIMVEC" in lines[index]:
            for row in range(index + 1, index + 4):
                lattice.append([float(value) for value in lines[row].split()])

        if "PRIMCOORD" in lines[index]:
            num_sites = int(lines[index + 1].split()[0])
            for row in range(index + 2, index + 2 + num_sites):
                tokens = lines[row].split()
                species.append(int(tokens[0]))
                coords.append([float(value) for value in tokens[1:4]])
            break
    else:
        # No PRIMCOORD section was found.
        raise ValueError("Invalid XSF data")

    if cls_ is None:
        from pymatgen.core.structure import Structure
        cls_ = Structure
    structure = cls_(lattice, species, coords, coords_are_cartesian=True)
    return XSF(structure)
def select_transformers(grid, s_max=None):
    """Selects LV transformer according to peak load of LV grid district.

    The transformers are chosen according to max. of load case and
    feedin-case considering load factors and power factor. The MV-LV
    transformer with the next higher available nominal apparent power is
    chosen. Therefore, a max. allowed transformer loading of 100% is
    implicitly assumed. If the peak load exceeds the max. power of a
    single available transformer, multiple transformers are built.

    By default `peak_load` and `peak_generation` are taken from `grid`
    instance. The behavior can be overridden providing `s_max` as
    explained in ``Arguments``.

    Parameters
    ----------
    grid : LVGridDing0
        LV grid data

    Arguments
    ---------
    s_max : dict
        dict containing maximum apparent power of load or generation case
        and str describing the case. For example

        .. code-block:: python

            {'s_max': 480, 'case': 'load'}

        or

        .. code-block:: python

            {'s_max': 120, 'case': 'gen'}

        s_max passed overrides `grid.grid_district.peak_load` respectively
        `grid.station().peak_generation`.

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Parameters of chosen transformer
    :obj:`int`
        Count of transformers
    """
    load_factor_lv_trans_lc_normal = cfg_ding0.get(
        'assumptions', 'load_factor_lv_trans_lc_normal')
    load_factor_lv_trans_fc_normal = cfg_ding0.get(
        'assumptions', 'load_factor_lv_trans_fc_normal')
    cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
    cos_phi_gen = cfg_ding0.get('assumptions', 'cos_phi_gen')

    # get equipment parameters of LV transformers
    trafo_parameters = grid.network.static_data['LV_trafos']

    # determine s_max from grid object if not provided via arguments
    if s_max is None:
        # get maximum from peak load and peak generation
        s_max_load = grid.grid_district.peak_load / cos_phi_load
        s_max_gen = grid.station().peak_generation / cos_phi_gen

        # check if load or generation is greater respecting corresponding
        # load factor
        if s_max_load > s_max_gen:
            # use peak load and load factor from load case
            load_factor_lv_trans = load_factor_lv_trans_lc_normal
            s_max = s_max_load
        else:
            # use peak generation and load factor for feedin case
            load_factor_lv_trans = load_factor_lv_trans_fc_normal
            s_max = s_max_gen
    else:
        if s_max['case'] == 'load':
            load_factor_lv_trans = load_factor_lv_trans_lc_normal
        elif s_max['case'] == 'gen':
            load_factor_lv_trans = load_factor_lv_trans_fc_normal
        else:
            logger.error('No proper \'case\' provided for argument s_max')
            raise ValueError('Please provide proper \'case\' for argument '
                             '`s_max`.')
        s_max = s_max['s_max']

    # get max. trafo
    transformer_max = trafo_parameters.iloc[
        trafo_parameters['S_nom'].idxmax()]

    # peak load is smaller than max. available trafo
    if s_max < (transformer_max['S_nom'] * load_factor_lv_trans):
        # choose the smallest trafo whose derated rating still covers s_max
        transformer = trafo_parameters.iloc[
            trafo_parameters[
                trafo_parameters['S_nom'] * load_factor_lv_trans > s_max][
                'S_nom'].idxmin()]
        transformer_cnt = 1
    # peak load is greater than max. available trafo -> use multiple trafos
    else:
        transformer_cnt = 2
        # increase no. of trafos until peak load can be supplied
        while not any(trafo_parameters['S_nom'] * load_factor_lv_trans >
                      (s_max / transformer_cnt)):
            transformer_cnt += 1
        transformer = trafo_parameters.iloc[
            trafo_parameters[
                trafo_parameters['S_nom'] * load_factor_lv_trans >
                (s_max / transformer_cnt)]['S_nom'].idxmin()]

    return transformer, transformer_cnt
def read_levels(text):
    """Read text and collect its reffs, one list per citation level.

    :param text: Collection (Readable)
    :return: list of reff lists
    """
    citation = NAUTILUSRESOLVER.getMetadata(text).citation
    return [NAUTILUSRESOLVER.getReffs(text, level=depth)
            for depth in range(len(citation))]
def load_ext(self):
    """Read time series data like method |IOSequence.load_ext| of class
    |IOSequence|, but with special handling of missing data.

    The method's "special handling" is to convert errors to warnings.
    We explain the reasons in the documentation on method |Obs.load_ext|
    of class |Obs|, from which we borrow the following examples.  The
    only differences are that method |Sim.load_ext| of class |Sim| does
    not disable property |IOSequence.memoryflag| and uses option
    |Options.warnmissingsimfile| instead of |Options.warnmissingobsfile|:

    >>> from hydpy.core.examples import prepare_full_example_1
    >>> prepare_full_example_1()
    >>> from hydpy import HydPy, pub, TestIO
    >>> hp = HydPy('LahnH')
    >>> pub.timegrids = '1996-01-01', '1996-01-06', '1d'
    >>> with TestIO():
    ...     hp.prepare_network()
    ...     hp.init_models()
    ...     hp.prepare_simseries()
    >>> sim = hp.nodes.dill.sequences.sim
    >>> with TestIO():
    ...     sim.load_ext()    # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    UserWarning: While trying to load the external data of sequence \
`sim` of node `dill`, the following error occurred: [Errno 2] No such \
file or directory: '...dill_sim_q.asc'
    >>> sim.series
    InfoArray([ nan,  nan,  nan,  nan,  nan])
    >>> sim.series = 1.0
    >>> with TestIO():
    ...     sim.save_ext()
    >>> sim.series = 0.0
    >>> with TestIO():
    ...     sim.load_ext()
    >>> sim.series
    InfoArray([ 1.,  1.,  1.,  1.,  1.])
    >>> import numpy
    >>> sim.series[2] = numpy.nan
    >>> with TestIO():
    ...     pub.sequencemanager.nodeoverwrite = True
    ...     sim.save_ext()
    >>> with TestIO():
    ...     sim.load_ext()
    Traceback (most recent call last):
    ...
    UserWarning: While trying to load the external data of sequence \
`sim` of node `dill`, the following error occurred: The series array of \
sequence `sim` of node `dill` contains 1 nan value.
    >>> sim.series
    InfoArray([  1.,   1.,  nan,   1.,   1.])
    >>> sim.series = 0.0
    >>> with TestIO():
    ...     with pub.options.warnmissingsimfile(False):
    ...         sim.load_ext()
    >>> sim.series
    InfoArray([  1.,   1.,  nan,   1.,   1.])
    """
    try:
        super().load_ext()
    except BaseException:
        # Deliberately broad: any loading problem is downgraded to a
        # warning (and silently ignored when the option is disabled).
        if hydpy.pub.options.warnmissingsimfile:
            warnings.warn(str(sys.exc_info()[1]))
def getJournalDeals(self, start=None):
    """Return all journal events from start."""
    # Deal event types: 1 - purchase act (deal) created, 2 - post-purchase
    # form (payment card) created, 3 - post-purchase form (payment card)
    # cancelled, 4 - transaction completed (paid) via PzA.
    if start is not None:
        self.last_event_id = start
    events = []
    # Page through the journal until no more events are reported.
    while self.getJournalDealsInfo(self.last_event_id) > 0:
        rc = self.__ask__('doGetSiteJournalDeals',
                          journalStart=self.last_event_id)
        for entry in rc:
            events.append({
                'allegro_did': entry['dealId'],
                'deal_status': entry['dealEventType'],
                'transaction_id': entry['dealTransactionId'],
                'time': entry['dealEventTime'],
                'event_id': entry['dealEventId'],
                'allegro_aid': entry['dealItemId'],
                'allegro_uid': entry['dealBuyerId'],
                # 'seller_id': entry['dealSellerId'],
                'quantity': entry['dealQuantity'],
            })
        # Resume from the last event of this page.
        self.last_event_id = rc[-1]['dealEventId']
    return events
def show_backends(socket=DEFAULT_SOCKET_URL):
    '''
    Show HaProxy Backends

    socket
        haproxy stats socket, default ``/var/run/haproxy.sock``

    CLI Example:

    .. code-block:: bash

        salt '*' haproxy.show_backends
    '''
    connection = _get_conn(socket)
    return connection.sendCmd(haproxy.cmds.showBackends())
def _get_style_of_faulting_term ( self , C , rake ) : """Returns the style of faulting term . Cauzzi et al . determind SOF from the plunge of the B - , T - and P - axes . For consistency with existing GMPEs the Wells & Coppersmith model is preferred"""
if rake > - 150.0 and rake <= - 30.0 : return C [ 'fN' ] elif rake > 30.0 and rake <= 150.0 : return C [ 'fR' ] else : return C [ 'fSS' ]
def error_leader(self, infile=None, lineno=None):
    "Emit a C-compiler-like, Emacs-friendly error-message leader."
    # Fall back to the lexer's current file and line when not supplied.
    if infile is None:
        infile = self.infile
    if lineno is None:
        lineno = self.lineno
    return '"%s", line %d: ' % (infile, lineno)
def put(value):
    """Store an object in the object store.

    Args:
        value: The Python object to be stored.

    Returns:
        The object ID assigned to this value.
    """
    worker = global_worker
    worker.check_connected()
    with profiling.profile("ray.put"):
        if worker.mode == LOCAL_MODE:
            # Local mode has no object store; ray.put is the identity.
            return value
        # Derive a deterministic ID from the current task and put index.
        new_object_id = ray._raylet.compute_put_id(
            worker.current_task_id, worker.task_context.put_index)
        worker.put_object(new_object_id, value)
        worker.task_context.put_index += 1
        return new_object_id
def info(self, cloud=None, api_key=None, version=None, **kwargs):
    """Return the current state of the model associated with a given
    collection."""
    params = {
        "batch": False,
        "api_key": api_key,
        "version": version,
        "method": "info",
    }
    return self._api_handler(None, cloud=cloud, api="custom",
                             url_params=params, **kwargs)
def _parse_ssl_options(options):
    """Parse ssl options.

    Returns a ``(ssl_context_or_None, match_hostname)`` pair derived
    from the ``ssl*`` keys of ``options``.
    """
    use_ssl = options.get('ssl')
    if use_ssl is not None:
        validate_boolean('ssl', use_ssl)

    certfile = options.get('ssl_certfile')
    keyfile = options.get('ssl_keyfile')
    passphrase = options.get('ssl_pem_passphrase')
    ca_certs = options.get('ssl_ca_certs')
    cert_reqs = options.get('ssl_cert_reqs')
    match_hostname = options.get('ssl_match_hostname', True)
    crlfile = options.get('ssl_crlfile')

    # Every truthy ssl_* option besides 'ssl' itself.
    ssl_kwarg_keys = [k for k in options
                      if k.startswith('ssl_') and options[k]]
    if use_ssl == False and ssl_kwarg_keys:
        raise ConfigurationError("ssl has not been enabled but the "
                                 "following ssl parameters have been set: "
                                 "%s. Please set `ssl=True` or remove."
                                 % ', '.join(ssl_kwarg_keys))

    if ssl_kwarg_keys and use_ssl is None:
        # ssl options imply ssl = True
        use_ssl = True

    if use_ssl is True:
        ctx = get_ssl_context(certfile, keyfile, passphrase, ca_certs,
                              cert_reqs, crlfile, match_hostname)
        return ctx, match_hostname
    return None, match_hostname
def source_lookup(request):
    """JSON endpoint that returns a list of potential sources. Used for
    upload template autocomplete."""
    raw_source = request.GET['source']
    slug = slugify(raw_source.strip())
    # Prefix match on the slugified source name.
    candidates = Dataset.objects.values('source').filter(
        source_slug__startswith=slug)
    payload = json.dumps([row['source'] for row in candidates])
    return HttpResponse(payload, content_type='application/json')
def setup_link(self, interface, cidr):
    """Setup a link.

    Equivalent to:
        ip addr add <cidr> broadcast <bcast> dev <interface>
        ip link set dev <interface> up
    """
    # Clear any stale addresses on the interface first.
    agent_utils.execute(['ip', 'addr', 'flush', 'dev', interface],
                        root=True)

    network = IPNetwork(cidr)
    code, output = agent_utils.execute(
        ['ip', 'addr', 'add', cidr, 'broadcast', str(network.broadcast),
         'dev', interface],
        root=True)
    if code == 0:
        code, output = agent_utils.execute(
            ['ip', 'link', 'set', 'dev', interface, 'up'], root=True)
        if code == 0:
            return agent_utils.make_response(code=code)

    # One of the commands failed; report its first output line.
    message = output.pop(0)
    return agent_utils.make_response(code=code, message=message)
def _log_message(self, level, freerun_entry, msg):
    """method performs logging into log file and the freerun_entry"""
    self.logger.log(level, msg)

    assert isinstance(freerun_entry, FreerunProcessEntry)
    event_log = freerun_entry.event_log
    # Keep the event log bounded: drop the oldest (last) entry before
    # prepending the newest message.
    if len(event_log) > MAX_NUMBER_OF_EVENTS:
        event_log.pop()
    event_log.insert(0, msg)
    self.freerun_process_dao.update(freerun_entry)
def _install_requirement(python_bin: str, package: str, version: str = None,
                         index_url: str = None, clean: bool = True) -> None:
    """Install requirements specified using suggested pip binary.

    NOTE(review): this is a generator (it yields once), presumably wrapped
    by ``contextlib.contextmanager`` at the definition site -- confirm; the
    ``-> None`` annotation then describes the yielded value.
    """
    # Remember what is currently installed so it can be restored later.
    previous_version = _pipdeptree(python_bin, package)

    try:
        cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(
            python_bin, quote(package)
        )
        if version:
            cmd += "=={}".format(quote(version))
        if index_url:
            cmd += ' --index-url "{}" '.format(quote(index_url))
            # Supply trusted host by default so we do not get errors - it safe to
            # do it here as package indexes are managed by Thoth.
            trusted_host = urlparse(index_url).netloc
            cmd += " --trusted-host {}".format(trusted_host)

        _LOGGER.debug("Installing requirement %r in version %r", package, version)
        run_command(cmd)
        yield
    finally:
        if clean:
            # Best-effort cleanup: uninstall the package, then reinstall
            # whatever version was present before.
            _LOGGER.debug("Removing installed package %r", package)
            cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
            result = run_command(cmd, raise_on_error=False)

            if result.return_code != 0:
                _LOGGER.warning(
                    "Failed to restore previous environment by removing package %r (installed version %r), "
                    "the error is not fatal but can affect future actions: %s",
                    package,
                    version,
                    result.stderr,
                )

            _LOGGER.debug(
                "Restoring previous environment setup after installation of %r (%s)",
                package,
                previous_version,
            )
            if previous_version:
                cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
                    python_bin,
                    quote(package),
                    quote(previous_version["package"]["installed_version"]),
                )
                result = run_command(cmd, raise_on_error=False)
                if result.return_code != 0:
                    _LOGGER.warning(
                        "Failed to restore previous environment for package %r (installed version %r), "
                        ", the error is not fatal but can affect future actions (previous version: %r): %s",
                        package,
                        version,
                        previous_version,
                        result.stderr,
                    )
def _wrap_measure(individual_state_measure_process, state_measure, loaded_processes):
    """Creates a function on a state_collection, which creates
    analysis_collections for each state in the collection.

    Optionally sorts the collection if the state_measure has a sort_by
    parameter (see funtool.lib.general.sort_states for details).
    """
    def wrapped_measure(state_collection, overriding_parameters=None, loggers=None):
        if loggers is None:
            loggers = funtool.logger.set_default_loggers()
        if loaded_processes is not None:
            if state_measure.grouping_selectors is not None:
                for grouping_selector_name in state_measure.grouping_selectors:
                    state_collection = funtool.state_collection.add_grouping(
                        state_collection, grouping_selector_name, loaded_processes)

        states = state_collection.states
        measure_parameters = get_measure_parameters(state_measure,
                                                    overriding_parameters)
        if 'sort_by' in measure_parameters.keys():
            states = funtool.lib.general.sort_states(
                states, measure_parameters['sort_by'])

        # BUGFIX: the original computed len(states) // 20 as the progress
        # step, which is 0 for fewer than 20 states and made the modulo
        # below raise ZeroDivisionError. Clamp to at least 1.
        step_size = max(1, len(states) // 20)
        for state_index, state in enumerate(states):
            # Log progress roughly every 5%.
            if state_index % step_size == 0:
                loggers.status_logger.warn("{}: {} %".format(
                    datetime.datetime.now(),
                    round((state_index / len(states) * 100), 1)))
            analysis_collection = funtool.analysis.AnalysisCollection(
                state, None, {}, {})
            if state_measure.analysis_selectors is not None:
                for analysis_selector in state_measure.analysis_selectors:
                    analysis_collection = loaded_processes[
                        "analysis_selector"][analysis_selector].process_function(
                        analysis_collection, state_collection)
            if analysis_collection is not None:
                individual_state_measure_process(
                    analysis_collection, state_collection,
                    overriding_parameters)
        return state_collection
    return wrapped_measure
def dumpToGLIF(self, glyphFormatVersion=2):
    """Return the glyph's contents as a string in `GLIF format
    <http://unifiedfontobject.org/versions/ufo3/glyphs/glif/>`_.

        >>> xml = glyph.writeGlyphToString()

    ``glyphFormatVersion`` must be a :ref:`type-int` that defines the
    preferred GLIF format version.
    """
    # Validate/normalize the requested version before delegating.
    normalized = normalizers.normalizeGlyphFormatVersion(glyphFormatVersion)
    return self._dumpToGLIF(normalized)
def load(self, name=None, *args, **kwargs):
    "Load the instance of the object from the stash."
    inst = self.stash.load(name)
    if inst is None:
        # Cache miss: create a fresh instance instead.
        inst = self.instance(name, *args, **kwargs)
    logger.debug(f'loaded (conf mng) instance: {inst}')
    return inst
def mine_block(chain: MiningChain, **kwargs: Any) -> MiningChain:
    """Mine a new block on the chain.

    Header parameters for the new block can be overridden using keyword
    arguments. Returns the same chain for fluent chaining.
    """
    # Guard clause: only MiningChain instances know how to mine.
    if isinstance(chain, MiningChain):
        chain.mine_block(**kwargs)
        return chain
    raise ValidationError('`mine_block` may only be used on MiningChain instances')
def set_bytes_at_offset(self, offset, data):
    """Overwrite the bytes at the given file offset with the given bytes.

    Returns True on success, False when the offset lies outside the
    file's boundaries. Raises TypeError when *data* is not bytes.
    """
    if not isinstance(data, bytes):
        raise TypeError('data should be of type: bytes')
    if not (0 <= offset < len(self.__data__)):
        return False
    # Splice the replacement bytes over the existing buffer.
    head = self.__data__[:offset]
    tail = self.__data__[offset + len(data):]
    self.__data__ = head + data + tail
    return True
def commit_check(self, commands="", req_format="text"):
    """Execute a commit check operation.

    Purpose: take a string, filepath, or list of multiple commands and
    perform a 'commit check' on the device to ensure the commands are
    syntactically correct. The response can be formatted as text or xml.

    @param commands: A string, filepath, or list of multiple commands
                   | that the device will compare with.
    @type commands: str or list
    @param req_format: The desired format of the response, defaults to
                     | 'text', but also accepts 'xml'
    @type req_format: str

    @returns: The reply from the device.
    @rtype: str
    """
    if not commands:
        raise InvalidCommandError('No commands specified')
    clean_cmds = list(clean_lines(commands))
    self.lock()
    try:
        self._session.load_configuration(action='set', config=clean_cmds)
        # conn.validate() DOES NOT return a parse-able xml tree, so we
        # convert it to an ElementTree xml tree.
        results = ET.fromstring(self._session.validate(source='candidate').tostring)
    finally:
        # BUG FIX: the original only unlocked on success, leaving the
        # candidate configuration locked if load/validate raised.
        self.unlock()
    if req_format == "xml":
        return ET.tostring(results)
    out = ""
    # We have to walk the elementTree object and extract the text.
    for i in results.iter():
        if i.tag == 'commit-check-success':
            # The success message is a bare tag, so handle it specifically.
            out += 'configuration check succeeds\n'
        elif i.text is not None:
            # Normal output: element with inner text.
            if i.text.strip() + '\n' != '\n':
                out += i.text.strip() + '\n'
        else:
            # Elements without inner text: report the tag itself.
            if i.tag + '\n' != '\n':
                out += i.tag + '\n'
    return out
def read_float(self, little_endian=True):
    """Read 4 bytes as a float value from the stream.

    Args:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        float:
    """
    endian = "<" if little_endian else ">"
    return self.unpack("%sf" % endian, 4)
def bgp_time_conversion(bgp_uptime):
    """Convert a BGP uptime string to seconds.

    Handles 'never' (-1), HH:MM:SS clock strings, and letter-coded
    spans such as '1d17h', '8w5d', and '1y28w'.
    """
    bgp_uptime = bgp_uptime.strip()
    if 'never' in bgp_uptime:
        return -1
    if ':' in bgp_uptime:
        hours, minutes, seconds = (int(part) for part in bgp_uptime.split(":"))
        return hours * 3600 + minutes * 60 + seconds
    # Letter-coded spans: each pattern pairs a regex with the seconds
    # multipliers of its two captured fields.
    if {'w', 'h', 'd'} & set(bgp_uptime):
        patterns = (
            (r'(\d+)d(\d+)h', DAY_SECONDS, 3600),          # e.g. 1d17h
            (r'(\d+)w(\d+)d', WEEK_SECONDS, DAY_SECONDS),  # e.g. 8w5d
            (r'(\d+)y(\d+)w', YEAR_SECONDS, WEEK_SECONDS), # e.g. 1y28w
        )
        for pattern, big_unit, small_unit in patterns:
            match = re.search(pattern, bgp_uptime)
            if match:
                return (int(match.group(1)) * big_unit
                        + int(match.group(2)) * small_unit)
    raise ValueError("Unexpected value for BGP uptime string: {}".format(bgp_uptime))
def import_submodules(package_name: str) -> None:
    """Recursively import every submodule under ``package_name``.

    Primarily useful so that people using AllenNLP as a library can
    specify their own custom packages and have their custom classes
    get loaded and registered.
    """
    importlib.invalidate_caches()
    # `--include-package` usage expects the CWD on sys.path; appending it
    # again is harmless if already present.
    sys.path.append('.')
    module = importlib.import_module(package_name)
    path = getattr(module, '__path__', [])
    path_string = path[0] if path else ''
    # walk_packages only finds immediate children, so recurse per child.
    for module_finder, name, _ in pkgutil.walk_packages(path):
        # walk_packages can also surface unrelated third-party packages
        # that happen to be on the path; skip anything outside our root.
        if path_string and module_finder.path != path_string:
            continue
        import_submodules(f"{package_name}.{name}")
def stop_server(self):
    """Stop serving. Also stops the thread.

    Safe to call when the server was never started (``rpc_server`` None).
    """
    if self.rpc_server is not None:
        try:
            self.rpc_server.socket.shutdown(socket.SHUT_RDWR)
        except OSError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. socket.shutdown raises OSError
            # when the socket is already closed or not connected.
            log.warning("Failed to shut down server socket")
        self.rpc_server.shutdown()
def register(self, pattern, view=None):
    '''Allow decorator-style construction of URL pattern lists.'''
    # Called with only a pattern: behave as a decorator factory that will
    # receive the view on the second call.
    if view is None:
        return partial(self.register, pattern)
    url = self._make_url((pattern, view))
    self.patterns.append(url)
    return view
def list(self, from_=values.unset, to=values.unset, date_created_on_or_before=values.unset,
         date_created_after=values.unset, limit=None, page_size=None):
    """Lists FaxInstance records from the API as a list.

    Unlike stream(), this operation is eager and will load `limit`
    records into memory before returning.

    :param unicode from_: Retrieve only those faxes sent from this phone number
    :param unicode to: Retrieve only those faxes sent to this phone number
    :param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
    :param datetime date_created_after: Retrieve only faxes created after this date
    :param int limit: Upper limit for the number of records to return
    :param int page_size: Number of records to fetch per request

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.fax.v1.fax.FaxInstance]
    """
    # Delegate to stream() and eagerly materialize the generator.
    record_stream = self.stream(
        from_=from_,
        to=to,
        date_created_on_or_before=date_created_on_or_before,
        date_created_after=date_created_after,
        limit=limit,
        page_size=page_size,
    )
    return list(record_stream)
def validate(self, profile):
    """Check whether any install.json args are missing from *profile*.

    Helpful for verifying that newly added App args are included in
    existing profiles.

    .. Note:: This method does not work with layout.json Apps.

    Args:
        profile (dict): The current profile to validate.
    """
    ij = self.load_install_json(profile.get('install_json'))
    print('{}{}Profile: "{}".'.format(
        c.Style.BRIGHT, c.Fore.BLUE, profile.get('profile_name')))
    # Flatten the nested lookup once instead of per-arg.
    app_args = profile.get('args', {}).get('app', {})
    for arg in self.profile_settings_args_install_json(ij, None):
        if app_args.get(arg) is None:
            print('{}{}Input "{}" not found.'.format(
                c.Style.BRIGHT, c.Fore.YELLOW, arg))
def citations(self):
    """Return the ``ayjid`` of each cited reference, or ``[]`` when absent."""
    if not hasattr(self, 'citedReferences'):
        return []
    # Skip null entries in the reference list.
    return [cr.ayjid for cr in self.citedReferences if cr is not None]
def layer_mapping(self, mapping):
    """Return the aesthetic mappings that are active in this layer.

    Parameters
    ----------
    mapping : aes
        mappings in the ggplot call

    Notes
    -----
    Once computed the layer mappings are also stored in
    self._active_mapping
    """
    # Some geoms ignore the plot-level defaults and only use the
    # aesthetics set directly on the layer.
    if self.inherit_aes:
        aesthetics = defaults(self.mapping, mapping)
    else:
        aesthetics = self.mapping
    # Drop aesthetic parameters and calculated aesthetics.
    calculated = set(get_calculated_aes(aesthetics))
    kept = {ae: v for ae, v in aesthetics.items()
            if ae not in self.geom.aes_params and ae not in calculated}
    self._active_mapping = aes(**kept)
    return self._active_mapping
def to_datetime(arg):
    """Try to convert any type of argument to a datetime.

    Args:
        arg: datetime, date, str, or number. "?" is converted to
            1970-1-1; 0 or "now" is converted to datetime.datetime.now();
            other numbers are treated as timestamps.
    """
    if isinstance(arg, datetime.datetime):
        return arg
    if arg == 0:
        return datetime.datetime.now()
    if isinstance(arg, str):
        if arg == "now":
            return datetime.datetime.now()
        if arg == "?":
            return datetime.datetime(1970, 1, 1)
        return str2dt(arg)
    if isinstance(arg, datetime.date):
        return date2datetime(arg)
    if isinstance(arg, (int, float)):
        # Suppose it is a timestamp.
        return ts2dt(arg)
    raise TypeError("Wrong type for argument 'arg': {}".format(arg.__class__.__name__))
def by_geopoint(self, lat, long):
    """Perform a Yelp Neighborhood API search based on a geopoint.

    Args:
        lat - geopoint latitude
        long - geopoint longitude
    """
    # The header part of the HTTP response is not needed here.
    _header, content = self._http_request(self.BASE_URL, lat=lat, long=long)
    return json.loads(content)
def time_str_to_minutes(time_str):
    """Return the minute-of-day for a clock-time string.

    :param time_str: e.g. '11:10:00' (seconds are ignored)
    :return: int minutes since midnight
    """
    parts = time_str.split(":")
    return int(parts[0]) * 60 + int(parts[1])
def get_welcome_response():
    """Build the welcome response for a new session.

    If we wanted to initialize the session to have some attributes we
    could add those to ``session_attributes`` here.
    """
    session_attributes = {}
    card_title = "Welcome"
    speech_output = ("Welcome to the Alexa Skills Kit sample. "
                     "Please tell me your favorite color by saying, "
                     "my favorite color is red")
    # Re-prompt used when the user does not reply to the welcome message
    # or says something that is not understood.
    reprompt_text = ("Please tell me your favorite color by saying, "
                     "my favorite color is red.")
    should_end_session = False
    speechlet = build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session)
    return build_response(session_attributes, speechlet)
def get_csv_reader(csvfile, dialect=csv.excel, encoding="utf-8", **kwds):
    """Return a csv reader for *csvfile*.

    On Python 2 (where the ``unicode`` builtin exists) an encoding-aware
    ``UnicodeReader`` is used; on Python 3 the stdlib ``csv.reader``
    handles text natively.
    """
    try:
        # Probing the name raises NameError on Python 3.
        # pylint: disable=pointless-statement
        unicode
        return UnicodeReader(csvfile, dialect=dialect, encoding=encoding, **kwds)
    except NameError:
        return csv.reader(csvfile, dialect=dialect, **kwds)
def _client_builder(self):
    """Build the Elasticsearch client from the application config."""
    # NOTE: intentionally no copy — setdefault-style mutation of the
    # config dict matches the original behavior.
    client_config = self.app.config.get('SEARCH_CLIENT_CONFIG') or {}
    if 'hosts' not in client_config:
        client_config['hosts'] = self.app.config.get('SEARCH_ELASTIC_HOSTS')
    if 'connection_class' not in client_config:
        client_config['connection_class'] = RequestsHttpConnection
    return Elasticsearch(**client_config)
def setAccessPolicyResponse(self, pid, accessPolicy, serialVersion, vendorSpecific=None):
    """CNAuthorization.setAccessPolicy(session, pid, accessPolicy, serialVersion) → boolean.

    https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setAccessPolicy

    Args:
        pid: identifier of the object whose policy is set.
        accessPolicy: policy object serialized to XML for upload.
        serialVersion: object serial version, sent as a string.
        vendorSpecific: optional extra headers.

    Returns:
        The PUT response.
    """
    fields = {
        'serialVersion': str(serialVersion),
        # Upload the policy as an XML multipart field.
        'accessPolicy': ('accessPolicy.xml', accessPolicy.toxml('utf-8')),
    }
    return self.PUT(['accessRules', pid], fields=fields, headers=vendorSpecific)
def plot(self, file_type):
    """Dispatch to the plotting function registered for *file_type*."""
    spec = file_types[file_type]
    return spec['plot_func'](
        self.mod_data[file_type],
        file_type,
        plot_title=spec['title'],
        plot_params=spec['plot_params'],
    )
def is_ip_filter(ip, options=None):
    '''Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address.'''
    # Short-circuit: only consult the IPv6 check when IPv4 fails.
    valid_v4 = is_ipv4_filter(ip, options=options)
    return valid_v4 or is_ipv6_filter(ip, options=options)
def call(self, op_name, query=None, **kwargs):
    """Make a request to a method in this client.

    The response data is returned as native Python data structures.
    Differs from calling the client method directly in that it
    automatically handles pagination, and an optional jmespath *query*
    is applied to the returned data to tailor it to exactly what you
    want.

    :type op_name: str
    :param op_name: The name of the request you wish to make.

    :type query: str
    :param query: A jmespath query applied to the operation's data
        before returning it to the caller.

    :type kwargs: keyword arguments
    :param kwargs: Additional keyword arguments passed through to the
        underlying client method.
    """
    LOG.debug(kwargs)
    if query:
        # Pre-compile the jmespath expression; applied after the call.
        query = jmespath.compile(query)
    if self._client.can_paginate(op_name):
        # Paginated operation: let the paginator accumulate all pages.
        paginator = self._client.get_paginator(op_name)
        results = paginator.paginate(**kwargs)
        data = results.build_full_result()
    else:
        op = getattr(self._client, op_name)
        done = False
        data = {}
        # Retry loop: back off on throttling, give up (returning {})
        # on access/tag errors.
        while not done:
            try:
                data = op(**kwargs)
                done = True
            except ClientError as e:
                LOG.debug(e, kwargs)
                if 'Throttling' in str(e):
                    # Throttled: wait a second and retry the same call.
                    time.sleep(1)
                elif 'AccessDenied' in str(e):
                    done = True
                elif 'NoSuchTagSet' in str(e):
                    done = True
            except Exception:
                # NOTE(review): this silently swallows any unexpected
                # error and returns {} — presumably deliberate
                # best-effort behavior; confirm before tightening.
                done = True
    if query:
        data = query.search(data)
    return data
def _find_mocker ( symbol : str , context : 'torment.contexts.TestContext' ) -> Callable [ [ ] , bool ] : '''Find method within the context that mocks symbol . Given a symbol ( i . e . ` ` tornado . httpclient . AsyncHTTPClient . fetch ` ` ) , find the shortest ` ` mock _ ` ` method that resembles the symbol . Resembles means the lowercased and periods replaced with underscores . If no match is found , a dummy function ( only returns False ) is returned . * * Parameters * * : ` ` symbol ` ` : the symbol to be located : ` ` context ` ` : the search context * * Return Value ( s ) * * The method used to mock the symbol . * * Examples * * Assuming the symbol is ` ` tornado . httpclient . AsyncHTTPClient . fetch ` ` , the first of the following methods would be returned : * ` ` mock _ tornado ` ` * ` ` mock _ tornado _ httpclient ` ` * ` ` mock _ tornado _ httpclient _ asynchttpclient ` ` * ` ` mock _ tornado _ httpclient _ asynchttpclient _ fetch ` `'''
components = [ ] method = None for component in symbol . split ( '.' ) : components . append ( component . lower ( ) ) name = '_' . join ( [ 'mock' ] + components ) if hasattr ( context , name ) : method = getattr ( context , name ) break if method is None : logger . warn ( 'no mocker for %s' , symbol ) def noop ( * args , ** kwargs ) : return False method = noop return method
def from_csv(cls, path: PathOrStr, csv_name: str, cols: IntsOrStrs = 0, delimiter: str = None,
             header: str = 'infer', processor: PreProcessors = None, **kwargs) -> 'ItemList':
    "Create an `ItemList` in `path` from the inputs in the `cols` of `path/csv_name`"
    # Read the csv relative to `path`, then hand off to the DataFrame
    # constructor.
    csv_path = Path(path) / csv_name
    frame = pd.read_csv(csv_path, delimiter=delimiter, header=header)
    return cls.from_df(frame, path=path, cols=cols, processor=processor, **kwargs)
def resolve(self, other: Type) -> Optional[Type]:
    """See ``PlaceholderType.resolve``.

    Resolves this arg-extreme placeholder type against *other*, which
    must itself be an ``NltkComplexType`` matching the expected
    signature; returns ``None`` whenever any step of the resolution
    fails.
    """
    if not isinstance(other, NltkComplexType):
        return None
    # Expected shape of `other.second`: <n,<#1,<<#2,#1>,#1>>> with the
    # placeholders generalized to ANY_TYPE.
    expected_second = ComplexType(NUMBER_TYPE,
                                  ComplexType(ANY_TYPE,
                                              ComplexType(ComplexType(ANY_TYPE, ANY_TYPE),
                                                          ANY_TYPE)))
    resolved_second = other.second.resolve(expected_second)
    if resolved_second is None:
        return None
    # The lambda function that we use inside the argmax must take either
    # a number or a date as an argument.
    lambda_arg_type = other.second.second.second.first.first
    if lambda_arg_type.resolve(NUMBER_TYPE) is None and lambda_arg_type.resolve(DATE_TYPE) is None:
        return None
    try:
        # This is the first #1 in the type signature above.
        selector_function_type = resolved_second.second.first
        # This is the second #1 in the type signature above.
        quant_function_argument_type = resolved_second.second.second.first.second
        # This is the third #1 in the type signature above.
        return_type = resolved_second.second.second.second

        # All three placeholder (ph) types above should resolve against
        # each other; any failure surfaces as None (or AttributeError,
        # caught below).
        resolved_first_ph = selector_function_type.resolve(quant_function_argument_type)
        resolved_first_ph.resolve(return_type)

        resolved_second_ph = quant_function_argument_type.resolve(resolved_first_ph)
        resolved_second_ph.resolve(return_type)

        resolved_third_ph = return_type.resolve(resolved_first_ph)
        resolved_third_ph = return_type.resolve(resolved_second_ph)

        if not resolved_first_ph or not resolved_second_ph or not resolved_third_ph:
            return None

        return ArgExtremeType(resolved_first_ph, lambda_arg_type)
    except AttributeError:
        # Some intermediate resolve() returned None and was dereferenced.
        return None
def monthly_clear_sky_conditions(self):
    """A list of 12 monthly clear sky conditions that are used on the design days.

    Falls back to the original (non-tau) clear sky conditions when the
    monthly tau values are unavailable.
    """
    # BUG FIX: the original tested `is []`, which is always False because
    # a fresh list literal is a distinct object; test emptiness instead.
    if not self._monthly_tau_diffuse or not self._monthly_tau_beam:
        return [OriginalClearSkyCondition(i, 21) for i in xrange(1, 13)]
    return [RevisedClearSkyCondition(i, 21, x, y) for i, x, y in
            zip(xrange(1, 13), self._monthly_tau_beam, self._monthly_tau_diffuse)]
def get_control(self):
    """Return the text widget (or similar) to give focus to."""
    # The pager widget takes focus while it is being displayed.
    pager = self.shellwidget._page_control
    if pager and pager.isVisible():
        return pager
    return self.shellwidget._control
def spielman_wr(self, norm=True):
    """Return site-specific omega values calculated from the `ExpCM`.

    Args:
        `norm` (bool)
            If `True`, normalize the `omega_r` values by the ExpCM
            gene-wide `omega`.

    Returns:
        `wr` (list)
            list of `omega_r` values of length `nsites`

    Following `Spielman and Wilke, MBE, 32:1097-1108
    <https://doi.org/10.1093/molbev/msv003>`_, the `dN/dS` value for
    each site `r` is predicted from the `ExpCM` as the ratio of the
    stationary-state-weighted nonsynonymous substitution rates
    (`Prxy`) to the corresponding mutation rates (`Qxy`), summed over
    single-nucleotide nonsynonymous codon changes. When `norm` is
    `True`, each value is divided by the gene-wide `omega`.
    """
    # FIX: scipy.where / scipy.intersect1d are removed top-level NumPy
    # aliases in modern SciPy; use numpy directly instead.
    import numpy as np
    # For each codon x, the indices of codons reachable by a single
    # nucleotide change that is also nonsynonymous. This is independent
    # of the site r, so compute it once instead of inside the site loop.
    # Assumes CODON_SINGLEMUT / CODON_NONSYN are boolean arrays, matching
    # the `== True` tests in the original — TODO confirm.
    nonsyn_neighbors = [np.where(CODON_SINGLEMUT[i] & CODON_NONSYN[i])[0]
                        for i in range(N_CODON)]
    wr = []
    for r in range(self.nsites):
        num = 0
        den = 0
        for i in range(N_CODON):
            j = nonsyn_neighbors[i]
            p_i = self.stationarystate[r][i]
            P_xy = self.Prxy[r][i][j].sum()
            if norm:
                P_xy = P_xy / self.omega
            Q_xy = self.Qxy[i][j].sum()
            num += p_i * P_xy
            den += p_i * Q_xy
        wr.append(num / den)
    return wr
def raise_error(e):
    """Take a bravado-core Error model and raise it as an exception."""
    # Unknown error codes fall back to InternalServerError.
    exc_class = code_to_class.get(e.error, InternalServerError)
    raise exc_class(e.error_description)
def extract_now_state(self):
    '''Extract the current map state.

    Returns:
        `np.ndarray` with a leading singleton axis; a single 1 marks
        the agent position on an otherwise zero map-shaped grid.
    '''
    row, col = self.__agent_pos
    state_arr = np.zeros(self.__map_arr.shape)
    state_arr[row, col] = 1
    # Add a batch/channel axis in front.
    return np.expand_dims(state_arr, axis=0)
def shutdown(self, reason=None):
    """Shutdown the socket server.

    The socket server will stop accepting incoming connections. All
    connections will be dropped.

    :param reason: exception describing why the server is shutting down;
        defaults to a fresh ``ConnectionClosed`` (a plain shutdown).
    :raises ShutdownError: if the server has already been shut down.
    """
    if self._shutdown:
        raise ShutdownError()
    # FIX: avoid a shared default-argument instance created at import
    # time; build the default reason per call instead.
    if reason is None:
        reason = ConnectionClosed()
    self.stop()
    self._closing = True
    for connection in self.connections:
        connection.close()
    self.connections = set()
    self._shutdown = True
    if isinstance(reason, ConnectionClosed):
        logger.info("server shutdown")
    else:
        # FIX: Logger.warn is deprecated; warning() is the supported form.
        logger.warning("server shutdown, reason %s" % str(reason))
def to_hostnames_list(ref, tab):  # pragma: no cover, to be deprecated?
    """Convert a Host list into a list of host_name values.

    :param ref: Not used
    :type ref:
    :param tab: Host list
    :type tab: list[alignak.objects.host.Host]
    :return: host_name list
    :rtype: list
    """
    # Hosts lacking a host_name attribute are silently skipped.
    return [host.host_name for host in tab if hasattr(host, 'host_name')]
def execute(self, style, xpoints, ypoints, zpoints, mask=None,
            backend='vectorized', specified_drift_arrays=None):
    """Calculate a kriged grid and the associated variance.

    Measurements (z values) are treated as 'exact': the variogram
    evaluated at a data point and the kriging-matrix diagonal are both
    forced to zero.

    Parameters
    ----------
    style : str
        'grid' treats xpoints/ypoints/zpoints as axes of a rectangular
        grid; 'points' treats them as coordinates of discrete points
        (equal lengths required); 'masked' is 'grid' plus a boolean
        `mask` selecting which grid cells to evaluate.
    xpoints, ypoints, zpoints : array_like
        Grid axes or point coordinates, per `style`.
    mask : boolean array, shape (L, M, N), optional
        Required for style 'masked'. True marks cells to skip.
    backend : str, optional
        'vectorized' (fast, memory-hungry) or 'loop' (slower, lean).
    specified_drift_arrays : list of array-like, optional
        Drift values at the evaluation points; required when
        'specified' drift was configured at instantiation.

    Returns
    -------
    kvalues, sigmasq : ndarray
        Interpolated values and variances, shaped (L, M, N) for
        grid/masked styles or (N,) for points; masked arrays for
        style 'masked'.
    """
    if self.verbose:
        print("Executing Ordinary Kriging...\n")

    if style != 'grid' and style != 'masked' and style != 'points':
        raise ValueError("style argument must be 'grid', 'points', "
                         "or 'masked'")

    # Flatten the inputs to 1-D coordinate arrays (copies, so the
    # caller's arrays are never modified).
    xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
    ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
    zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True)))
    n = self.X_ADJUSTED.shape[0]
    # The kriging system grows by one row/column per drift term.
    n_withdrifts = n
    if self.regional_linear_drift:
        n_withdrifts += 3
    if self.specified_drift:
        n_withdrifts += len(self.specified_drift_data_arrays)
    if self.functional_drift:
        n_withdrifts += len(self.functional_drift_terms)
    nx = xpts.size
    ny = ypts.size
    nz = zpts.size
    a = self._get_kriging_matrix(n, n_withdrifts)

    if style in ['grid', 'masked']:
        if style == 'masked':
            if mask is None:
                raise IOError("Must specify boolean masking array "
                              "when style is 'masked'.")
            if mask.ndim != 3:
                raise ValueError("Mask is not three-dimensional.")
            if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx:
                # Accept a transposed (x, y, z) mask and fix the axis order.
                if mask.shape[0] == nx and mask.shape[2] == nz and mask.shape[1] == ny:
                    mask = mask.swapaxes(0, 2)
                else:
                    raise ValueError("Mask dimensions do not match "
                                     "specified grid dimensions.")
            mask = mask.flatten()
        npt = nz * ny * nx
        # Expand the three axes into flattened coordinates of every cell.
        grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing='ij')
        xpts = grid_x.flatten()
        ypts = grid_y.flatten()
        zpts = grid_z.flatten()
    elif style == 'points':
        # NOTE(review): `and` here only rejects inputs when BOTH size
        # pairs differ — looks like it should be `or`; confirm upstream.
        if xpts.size != ypts.size and ypts.size != zpts.size:
            raise ValueError("xpoints and ypoints must have same "
                             "dimensions when treated as listing "
                             "discrete points.")
        npt = nx
    else:
        raise ValueError("style argument must be 'grid', 'points', "
                         "or 'masked'")

    if specified_drift_arrays is None:
        specified_drift_arrays = []
    spec_drift_grids = []
    if self.specified_drift:
        # Validate and normalize the caller-supplied drift arrays so
        # they match the evaluation grid/points.
        if len(specified_drift_arrays) == 0:
            raise ValueError("Must provide drift values for kriging "
                             "points when using 'specified' drift "
                             "capability.")
        if type(specified_drift_arrays) is not list:
            raise TypeError("Arrays for specified drift terms must "
                            "be encapsulated in a list.")
        for spec in specified_drift_arrays:
            if style in ['grid', 'masked']:
                if spec.ndim < 3:
                    raise ValueError("Dimensions of drift values array do "
                                     "not match specified grid dimensions.")
                elif spec.shape[0] != nz or spec.shape[1] != ny or spec.shape[2] != nx:
                    # Accept a transposed (x, y, z) drift array, as above.
                    if spec.shape[0] == nx and spec.shape[2] == nz and spec.shape[1] == ny:
                        spec_drift_grids.append(np.squeeze(spec.swapaxes(0, 2)))
                    else:
                        raise ValueError("Dimensions of drift values array "
                                         "do not match specified grid "
                                         "dimensions.")
                else:
                    spec_drift_grids.append(np.squeeze(spec))
            elif style == 'points':
                if spec.ndim != 1:
                    raise ValueError("Dimensions of drift values array do "
                                     "not match specified grid dimensions.")
                elif spec.shape[0] != xpts.size:
                    raise ValueError("Number of supplied drift values in "
                                     "array do not match specified number "
                                     "of kriging points.")
                else:
                    spec_drift_grids.append(np.squeeze(spec))
        if len(spec_drift_grids) != len(self.specified_drift_data_arrays):
            raise ValueError("Inconsistent number of specified "
                             "drift terms supplied.")
    else:
        if len(specified_drift_arrays) != 0:
            warnings.warn("Provided specified drift values, but "
                          "'specified' drift was not initialized during "
                          "instantiation of UniversalKriging3D class.",
                          RuntimeWarning)

    # Rotate/scale the evaluation coordinates into the same anisotropy-
    # adjusted frame as the data.
    xpts, ypts, zpts = _adjust_for_anisotropy(np.vstack((xpts, ypts, zpts)).T,
                                              [self.XCENTER, self.YCENTER, self.ZCENTER],
                                              [self.anisotropy_scaling_y, self.anisotropy_scaling_z],
                                              [self.anisotropy_angle_x, self.anisotropy_angle_y,
                                               self.anisotropy_angle_z]).T

    if style != 'masked':
        # Unmasked styles evaluate every point.
        mask = np.zeros(npt, dtype='bool')

    xyz_points = np.concatenate((zpts[:, np.newaxis], ypts[:, np.newaxis],
                                 xpts[:, np.newaxis]), axis=1)
    xyz_data = np.concatenate((self.Z_ADJUSTED[:, np.newaxis],
                               self.Y_ADJUSTED[:, np.newaxis],
                               self.X_ADJUSTED[:, np.newaxis]), axis=1)
    # Pairwise distances between evaluation points and data points.
    bd = cdist(xyz_points, xyz_data, 'euclidean')
    if backend == 'vectorized':
        kvalues, sigmasq = self._exec_vector(a, bd, xyz_points, mask,
                                             n_withdrifts, spec_drift_grids)
    elif backend == 'loop':
        kvalues, sigmasq = self._exec_loop(a, bd, xyz_points, mask,
                                           n_withdrifts, spec_drift_grids)
    else:
        raise ValueError('Specified backend {} is not supported for '
                         '3D ordinary kriging.'.format(backend))

    if style == 'masked':
        kvalues = np.ma.array(kvalues, mask=mask)
        sigmasq = np.ma.array(sigmasq, mask=mask)

    if style in ['masked', 'grid']:
        kvalues = kvalues.reshape((nz, ny, nx))
        sigmasq = sigmasq.reshape((nz, ny, nx))

    return kvalues, sigmasq
def get_pull_requests(self):
    """Fetch all pull requests for the configured repository.

    See https://developer.github.com/v3/pulls/#list-pull-requests

    When ``self.args.ignore_closed`` is set, only open pull requests and
    closed-but-merged pull requests are kept; closed-unmerged ones are
    dropped.

    Returns:
        dict: pull request number (as ``str``) -> pull request payload.
    """
    g = self.github
    query = {'state': 'all'}
    if self.args.github_token:
        query['access_token'] = g['token']

    def f(pull):
        if self.args.ignore_closed:
            # BUG FIX: the GitHub API reports open pull requests with
            # state "open" (not "opened"); the old comparison silently
            # discarded every open pull request when ignore_closed was set.
            return (pull['state'] == 'open' or
                    (pull['state'] == 'closed' and pull['merged_at']))
        else:
            return True

    pulls = filter(f, self.get(g['url'] + "/repos/" + g['repo'] + "/pulls",
                               query, self.args.cache))
    return dict([(str(pull['number']), pull) for pull in pulls])
def reactivate(domain_name):
    '''
    Try to reactivate the expired domain name.

    Returns a dict describing whether the reactivation succeeded, the
    amount charged, the order ID and the transaction ID; an empty dict
    is returned when the API request itself fails.

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains.reactivate my-domain-name
    '''
    request_opts = salt.utils.namecheap.get_opts('namecheap.domains.reactivate')
    request_opts['DomainName'] = domain_name

    response = salt.utils.namecheap.post_request(request_opts)
    if response is None:
        # Request failed; nothing to report.
        return {}

    result_node = response.getElementsByTagName('DomainReactivateResult')[0]
    return salt.utils.namecheap.xml_to_dict(result_node)
def field_value(self, value):
    """Validate/coerce *value* against this field's NodeType.

    Array fields convert list/tuple/set inputs element-wise into a
    list; any other input (array field or not) is coerced as a single
    scalar via ``self.field_type``.
    """
    convert = self.field_type
    if self.is_array and isinstance(value, (list, tuple, set)):
        return [convert(element) for element in value]
    return convert(value)
def get_learning_objectives(self):
    """Gets any ``Objectives`` corresponding to this item.

    return: (osid.learning.ObjectiveList) - the learning objectives
    raise:  IllegalState - this item records no learning objective ids
    raise:  OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.learning.Activity.get_assets_template
    # Guard: nothing to look up when no objective ids are recorded.
    if not self._my_map['learningObjectiveIds']:
        raise errors.IllegalState('no learningObjectiveIds')
    provider = self._get_provider_manager('LEARNING')
    if not provider.supports_objective_lookup():
        raise errors.OperationFailed('Learning does not support Objective lookup')
    # What about the Proxy? Forward self._proxy when present, mirroring
    # the other osid templates.
    session = provider.get_objective_lookup_session(
        proxy=getattr(self, "_proxy", None))
    session.use_federated_objective_bank_view()
    return session.get_objectives_by_ids(self.get_learning_objective_ids())
def Matches(self, file_entry):
    """Compares the file entry against the filter.

    Scans the file's content with the configured pysigscan signature
    scanner and reports whether any signature matched.

    Args:
      file_entry (dfvfs.FileEntry): file entry to compare.

    Returns:
      bool: True if the file entry matches the filter, False if not or
          None if the filter does not apply (no scanner configured or
          the entry is not a regular file).
    """
    # No signatures configured, or not a regular file: filter does not apply.
    if not self._file_scanner or not file_entry.IsFile():
        return None
    file_object = file_entry.GetFileObject()
    if not file_object:
        # Content unavailable; treat as a non-match.
        return False
    try:
        scan_state = pysigscan.scan_state()
        self._file_scanner.scan_file_object(scan_state, file_object)
    except IOError as exception:
        # TODO: replace location by display name.
        location = getattr(file_entry.path_spec, 'location', '')
        logging.error((
            '[skipping] unable to scan file: {0:s} for signatures '
            'with error: {1!s}').format(location, exception))
        return False
    finally:
        # Always release the file object, even when scanning failed.
        file_object.close()
    return scan_state.number_of_scan_results > 0
def execute_mgmt(self, database, query, properties=None):
    """Execute a management command.

    :param str database: Database against which the query will be executed.
    :param str query: Query to be executed.
    :param azure.kusto.data.request.ClientRequestProperties properties:
        Optional additional properties.
    :return: Kusto response data set.
    :rtype: azure.kusto.data._response.KustoResponseDataSet
    """
    # Management commands go to the dedicated endpoint with the
    # class-level default timeout.
    timeout = KustoClient._mgmt_default_timeout
    return self._execute(self._mgmt_endpoint, database, query, timeout, properties)
def stats(self) -> pd.DataFrame:
    """Per-flight record counts, most populous flight first.

    Flights are keyed by ``flight_id`` when available, otherwise by the
    (icao24, callsign) pair. Useful for a meaningful representation.
    """
    if self.flight_ids is None:
        group_key = ["icao24", "callsign"]
    else:
        group_key = "flight_id"
    counts = self.data.groupby(group_key)[["timestamp"]].count()
    counts = counts.sort_values("timestamp", ascending=False)
    return counts.rename(columns={"timestamp": "count"})
def add_load(self, lv_load):
    """Add a LV load to ``_loads`` and the grid graph if not already there.

    The load is appended only when it is an ``LVLoadDing0`` instance
    that has not been registered yet; anything else is silently ignored.

    Parameters
    ----------
    lv_load : LVLoadDing0
        The LV load to register.
    """
    already_registered = lv_load in self._loads
    if already_registered or not isinstance(lv_load, LVLoadDing0):
        return
    self._loads.append(lv_load)
    self.graph_add_node(lv_load)
def _loop_payload ( params ) : '''Pass in a dictionary of parameters , loop through them and build a payload containing , parameters who ' s values are not None .'''
# construct the payload payload = { } # set the payload for param , value in six . iteritems ( params ) : if value is not None : payload [ param ] = value return payload
def emit_save_figure(self):
    """Emit ``sig_save_figure`` when the save-figure toolbutton is clicked.

    Forwards the canvas' current figure and format so a listener can
    actually write the file.
    """
    figure = self.canvas.fig
    figure_format = self.canvas.fmt
    self.sig_save_figure.emit(figure, figure_format)
def step(self, disable_interrupts=True, start=0, end=0):
    """Perform an instruction level step.

    This function preserves the previous interrupt mask state.

    When ``start != end`` this behaves as a 'range step': single steps
    are repeated while the program counter stays inside [start, end)
    and no other stop reason (watchpoint trap / breakpoint) is latched.

    :param disable_interrupts: mask interrupts (C_MASKINTS) while
        stepping so an interrupt handler is not stepped into; the
        previous mask state is restored afterwards.
    :param start: inclusive lower bound of the range-step window.
    :param end: exclusive upper bound; ``start == end`` degenerates to a
        single step.
    """
    # Was 'if self.get_state() != TARGET_HALTED:' but now value of dhcsr
    # is saved (so the current C_MASKINTS setting can be preserved too).
    dhcsr = self.read_memory(CortexM.DHCSR)
    if not (dhcsr & (CortexM.C_STEP | CortexM.C_HALT)):
        logging.error('cannot step: target not halted')
        return

    self.notify(Notification(event=Target.EVENT_PRE_RUN, source=self, data=Target.RUN_TYPE_STEP))

    self.clear_debug_cause_bits()

    # Save previous interrupt mask state.
    interrupts_masked = (CortexM.C_MASKINTS & dhcsr) != 0

    # Mask interrupts - C_HALT must be set when changing to C_MASKINTS
    if not interrupts_masked and disable_interrupts:
        self.write_memory(CortexM.DHCSR,
                          CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT | CortexM.C_MASKINTS)

    # Single step using current C_MASKINTS setting
    while True:
        if disable_interrupts or interrupts_masked:
            self.write_memory(CortexM.DHCSR,
                              CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_MASKINTS | CortexM.C_STEP)
        else:
            self.write_memory(CortexM.DHCSR,
                              CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_STEP)

        # Wait for halt to auto set (This should be done before the first read)
        while not self.read_memory(CortexM.DHCSR) & CortexM.C_HALT:
            pass

        # Range is empty, 'range step' will degenerate to 'step'
        if start == end:
            break

        # Read program counter and compare to [start, end)
        program_counter = self.read_core_register(CORE_REGISTER['pc'])
        if program_counter < start or end <= program_counter:
            break

        # Check other stop reasons (watchpoint trap or breakpoint hit).
        if self.read_memory(CortexM.DFSR) & (CortexM.DFSR_DWTTRAP | CortexM.DFSR_BKPT):
            break

    # Restore interrupt mask state
    if not interrupts_masked and disable_interrupts:
        # Unmask interrupts - C_HALT must be set when changing to C_MASKINTS
        self.write_memory(CortexM.DHCSR,
                          CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT)

    self.flush()

    self._run_token += 1

    self.notify(Notification(event=Target.EVENT_POST_RUN, source=self, data=Target.RUN_TYPE_STEP))
def event_detach(self, eventtype):
    """Unregister an event notification.

    @param eventtype: the event type notification to be removed.
    """
    if not isinstance(eventtype, EventType):
        raise VLCException("%s required: %r" % ('EventType', eventtype))

    key = eventtype.value
    if key not in self._callbacks:
        return  # nothing registered for this event type
    # Drop our handler first, regardless of the libvlc return value.
    del self._callbacks[key]
    libvlc_event_detach(self, key, self._callback_handler, key)
def get_movielens_data():
    """Return (train_interactions, test_interactions)."""
    train_data, test_data = _get_raw_movielens_data()

    # Scan both splits once to determine the matrix dimensions.
    user_ids = set()
    item_ids = set()
    for user_id, item_id, _rating, _timestamp in itertools.chain(
            _parse(train_data), _parse(test_data)):
        user_ids.add(user_id)
        item_ids.add(item_id)

    num_rows = max(user_ids) + 1
    num_cols = max(item_ids) + 1

    # Re-parse each split to build its interaction matrix.
    train = _build_interaction_matrix(num_rows, num_cols, _parse(train_data))
    test = _build_interaction_matrix(num_rows, num_cols, _parse(test_data))
    return train, test
def register_pubkey(self):
    """Record the server's ECDH public key on the TLS session.

    XXX Support compressed point format.
    XXX Check that the pubkey received is on the curve.
    """
    # Resolve the named curve announced by the server.
    curve_cls = ec._CURVE_TYPES[_tls_named_curves[self.named_curve]]
    curve = curve_cls()

    # Decode the (uncompressed) point into a public-numbers object.
    pubnum = ec.EllipticCurvePublicNumbers.from_encoded_point(curve, self.point)

    session = self.tls_session
    session.server_kx_pubkey = pubnum.public_key(default_backend())
    # Remember the curve for the client side, unless already set.
    if not session.client_kx_ecdh_params:
        session.client_kx_ecdh_params = curve
def export_diagram_plane_elements(root, diagram_attributes, plane_attributes):
    """Create 'BPMNDiagram' and 'BPMNPlane' elements for an exported BPMN XML file.

    Returns a tuple (diagram, plane).

    :param root: object of Element class, representing the BPMN XML root
        element ('definitions'),
    :param diagram_attributes: dictionary of attribute values for the
        imported 'BPMNDiagram' element,
    :param plane_attributes: dictionary of attribute values for the
        imported 'BPMNPlane' element.
    """
    ns = BpmnDiagramGraphExport.bpmndi_namespace

    diagram = eTree.SubElement(root, ns + "BPMNDiagram")
    diagram.set(consts.Consts.id, diagram_attributes[consts.Consts.id])
    diagram.set(consts.Consts.name, diagram_attributes[consts.Consts.name])

    plane = eTree.SubElement(diagram, ns + "BPMNPlane")
    plane.set(consts.Consts.id, plane_attributes[consts.Consts.id])
    plane.set(consts.Consts.bpmn_element,
              plane_attributes[consts.Consts.bpmn_element])

    return diagram, plane
def bulk_get(cls, exports, api=None):
    """Retrieve exports in bulk.

    :param exports: Exports to be retrieved.
    :param api: Api instance; falls back to the class-level API.
    :return: list of ExportBulkRecord objects.
    """
    if not api:
        api = cls._API
    payload = {
        'export_ids': [Transform.to_export(item) for item in exports],
    }
    response = api.post(url=cls._URL['bulk_get'], data=payload)
    return ExportBulkRecord.parse_records(response=response, api=api)
def _init_zeo():
    """Start the asyncore processing thread (at most once).

    Sets the module-level ``_ASYNCORE_RUNNING`` flag so subsequent calls
    are no-ops.
    """
    # BUG FIX: the ``global`` declaration must precede any use of the
    # name in this scope -- declaring it *after* reading
    # _ASYNCORE_RUNNING is a SyntaxError on Python 3 (and only a
    # SyntaxWarning on Python 2).
    global _ASYNCORE_RUNNING
    if not _ASYNCORE_RUNNING:
        def _run_asyncore_loop():
            asyncore.loop()

        # NOTE(review): there is a small window between the check above
        # and setting the flag below; acceptable for the original
        # single-threaded initialization path -- confirm callers.
        thread.start_new_thread(_run_asyncore_loop, ())
        _ASYNCORE_RUNNING = True
def nanskew(values, axis=None, skipna=True, mask=None):
    """Compute the sample skewness.

    The statistic computed here is the adjusted Fisher-Pearson
    standardized moment coefficient G1. The algorithm computes this
    coefficient directly from the second and third central moment.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float64
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> import pandas.core.nanops as nanops
    >>> s = pd.Series([1, np.nan, 1, 2])
    >>> nanops.nanskew(s)
    1.7320508075688787
    """
    values = com.values_from_object(values)
    if mask is None:
        mask = isna(values)
    # Non-float input is promoted to float64; counts are computed per
    # axis excluding masked (NaN) entries.
    if not is_float_dtype(values.dtype):
        values = values.astype('f8')
        count = _get_counts(mask, axis)
    else:
        count = _get_counts(mask, axis, dtype=values.dtype)

    if skipna:
        # Zero out NaNs so they contribute nothing to the sums below.
        values = values.copy()
        np.putmask(values, mask, 0)

    mean = values.sum(axis, dtype=np.float64) / count
    if axis is not None:
        mean = np.expand_dims(mean, axis)

    # Central moments: m2 (variance numerator) and m3.
    adjusted = values - mean
    if skipna:
        # Masked positions picked up -mean above; zero them again.
        np.putmask(adjusted, mask, 0)
    adjusted2 = adjusted ** 2
    adjusted3 = adjusted2 * adjusted
    m2 = adjusted2.sum(axis, dtype=np.float64)
    m3 = adjusted3.sum(axis, dtype=np.float64)

    # floating point error
    #
    # #18044 in _libs/windows.pyx calc_skew follow this behavior
    # to fix the fperr to treat m2 <1e-14 as zero
    m2 = _zero_out_fperr(m2)
    m3 = _zero_out_fperr(m3)

    with np.errstate(invalid='ignore', divide='ignore'):
        result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)

    dtype = values.dtype
    if is_float_dtype(dtype):
        result = result.astype(dtype)

    # Zero variance means zero skew; fewer than 3 observations means
    # the statistic is undefined (NaN).
    if isinstance(result, np.ndarray):
        result = np.where(m2 == 0, 0, result)
        result[count < 3] = np.nan
        return result
    else:
        result = 0 if m2 == 0 else result
        if count < 3:
            return np.nan
        return result
def load_from_module(self, module):
    '''Load all benchmarks from a given module.

    A benchmark is any strict subclass of ``Benchmark`` found among the
    module's attributes; the ``Benchmark`` base class itself is skipped.
    '''
    candidates = (getattr(module, attr_name) for attr_name in dir(module))
    return [obj for obj in candidates
            if inspect.isclass(obj)
            and issubclass(obj, Benchmark)
            and obj != Benchmark]
def match_content_type(entry, content_type, regex=True):
    """Match the content type of a request via the mimeType metadata.

    :param entry: ``dict`` of a single entry from a HarPage
    :param content_type: ``str`` regex (or literal) to match against the
        response mime type
    :param regex: ``bool``; when True a case-insensitive regex search is
        tried first, falling back to an exact comparison either way.
    """
    mime_type = entry['response']['content']['mimeType']
    if regex and re.search(content_type, mime_type, flags=re.IGNORECASE):
        return True
    return content_type == mime_type
def join_states(*states: State) -> State:
    """Join state vectors into a single larger qubit state.

    The states' vectors are combined by repeated outer products, in the
    order given.
    """
    combined = reduce(outer_product, (ket.vec for ket in states))
    return State(combined.tensor, combined.qubits)
def _get_pull_requests ( self ) : """Gets all pull requests from the repo since we can ' t do a filtered date merged search"""
for pull in self . repo . pull_requests ( state = "closed" , base = self . github_info [ "master_branch" ] , direction = "asc" ) : if self . _include_pull_request ( pull ) : yield pull
def init(self, attrs=None, *args, **kwargs):
    """Default initialization of the Mambu object from a Mambu response.

    Stores the ``attrs`` dictionary (the response from Mambu) on the
    object, then runs, in order:

    1. a customizable :meth:`preprocess` hook,
    2. :meth:`convertDict2Attrs`, converting string elements to
       adequate python objects (numbers, datetimes, ...),
    3. a customizable :meth:`postprocess` hook.

    It then executes each method named in the ``methods`` kwarg given at
    instantiation time, and sets each entry of the ``properties`` kwarg
    as an attribute on the object.

    Why not on __init__? Because __init__ only optionally connects to
    Mambu; this method (re)initializes the object every time connect()
    actually retrieves data, which also makes repeated POSTs and
    refreshes of the same object possible. Iterable Mambu objects
    (lists) also initialize each of their elements through this method
    rather than through __init__.
    """
    # BUG FIX: the old signature used a mutable default (attrs={}).
    # Since self.attrs is bound to that very dict, any caller mutating
    # self.attrs would silently mutate the shared default for every
    # future call. Use a None sentinel instead.
    if attrs is None:
        attrs = {}
    self.attrs = attrs
    self.preprocess()
    self.convertDict2Attrs(*args, **kwargs)
    self.postprocess()
    # Best-effort execution of the optional 'methods' kwarg: the outer
    # except absorbs a missing/non-iterable kwarg, the inner one absorbs
    # failures of the individual methods.
    try:
        for meth in kwargs['methods']:
            try:
                getattr(self, meth)()
            except Exception:
                pass
    except Exception:
        pass
    # Best-effort assignment of the optional 'properties' kwarg.
    try:
        for propname, propval in kwargs['properties'].items():
            setattr(self, propname, propval)
    except Exception:
        pass
def generic_visit(self, node: AST, dfltChaining: bool = True) -> str:
    """Default handler, called if no explicit visitor function exists
    for a node.

    Visits every child AST node of *node* in field order; list-valued
    fields are walked element by element. Non-AST values are ignored.
    """
    for _name, field_value in ast.iter_fields(node):
        if isinstance(field_value, AST):
            self.visit(field_value)
        elif isinstance(field_value, list):
            for child in field_value:
                if isinstance(child, AST):
                    self.visit(child)
def clean(self, py_value):
    """Clean the value before storing it.

    Strips HTML tags via ``webhelpers`` when it is installed; otherwise
    emits a warning and returns the value untouched.

    :param py_value: <str>
    :return: <str>
    """
    try:
        from webhelpers.text import strip_tags
    except ImportError:
        warnings.warn(
            'Unable to clean string column without webhelpers installed.')
        return py_value
    return strip_tags(py_value)
def get_event_noblock(self):
    '''Get the raw event without blocking or any other niceties.

    Returns a ``{'data': ..., 'tag': ...}`` dict, or None when no event
    is pending or the publisher connection cannot be established.
    '''
    assert self._run_io_loop_sync
    # Lazily connect to the publisher on first use.
    if not self.cpub and not self.connect_pub():
        return None
    raw = self.subscriber.read_sync(timeout=0)
    if raw is None:
        return None
    tag, payload = self.unpack(raw, self.serial)
    return {'data': payload, 'tag': tag}
def getraw(self, msgid, stream=sys.stdout):
    """Print the whole raw message identified by *msgid* to *stream*.

    :param msgid: "<folder><SEPERATOR><key>" message identifier.
    :param stream: output stream; defaults to ``sys.stdout``.
    """
    foldername, msgkey = msgid.split(SEPERATOR)
    folder = self.folder if foldername == "INBOX" else self._getfolder(foldername)
    msg = folder[msgkey]
    # BUG FIX: honour the *stream* argument instead of always writing
    # to stdout.
    print(msg.content, file=stream)
def firstAttr(self, *attrs):
    """Return the value of the first attribute in *attrs* that is not
    None, or None when every one of them is unset or None."""
    values = (self.__dict__.get(name) for name in attrs)
    return next((v for v in values if v is not None), None)
def smart_search_prefix(self, auth, query_str, search_options=None, extra_query=None):
    """Perform a smart search on the prefix list.

    * `auth` [BaseAuth]
        AAA options.
    * `query_str` [string]
        Search string.
    * `search_options` [options_dict]
        Search options. See :func:`search_prefix`.
    * `extra_query` [dict_to_sql]
        Extra search terms, AND:ed together with what is extracted from
        the query string.

    Returns a dict with :attr:`interpretation` (how the query string was
    interpreted), :attr:`search_options`, :attr:`result` and an
    :attr:`error` flag. The query string is parsed into a `query` dict
    which is handed to :func:`search_prefix`; if parsing fails, an error
    response with an empty result is returned instead.
    """
    if search_options is None:
        search_options = {}

    self._logger.debug("smart_search_prefix query string: %s" % query_str)

    success, query = self._parse_prefix_query(query_str)
    if not success:
        # Interpretation failed: report it without hitting the backend.
        return {
            'interpretation': query,
            'search_options': search_options,
            'result': [],
            'error': True,
            'error_message': 'query interpretation failed',
        }

    if extra_query is not None:
        # AND the caller-supplied extra terms onto the parsed query.
        query = {
            'operator': 'and',
            'val1': query,
            'val2': extra_query,
        }

    self._logger.debug("smart_search_prefix: query expanded to: %s" % unicode(query))

    search_result = self.search_prefix(auth, query, search_options)
    search_result['interpretation'] = query
    search_result['error'] = False
    return search_result
def find_common_root(elements):
    """Find root which is common for all `elements`.

    Args:
        elements (list): List of double-linked HTMLElement objects.

    Returns:
        list: Vector of HTMLElement containing path to common root.

    Raises:
        UserWarning: If `elements` is empty, or the elements share no
            common root.
    """
    if not elements:
        raise UserWarning("Can't find common root - no elements suplied.")

    # BUG FIX: the old code seeded the fold with elements.pop(), which
    # mutated the caller's list. Seed with the last element instead and
    # fold the rest, preserving the original pairing order.
    root_path = el_to_path_vector(elements[-1])

    for el in elements[:-1]:
        el_path = el_to_path_vector(el)
        root_path = common_vector_root(root_path, el_path)
        if not root_path:
            raise UserWarning(
                "Vectors without common root:\n%s" % str(el_path)
            )

    return root_path
def _update_page ( self , uri , path ) : """Update page content ."""
if uri in self . _pages : self . _pages [ uri ] . update ( ) else : self . _pages [ uri ] = Page ( uri = uri , path = path )
def fb_github_project_workdir(self, project_and_path, github_org='facebook'):
    'This helper lets Facebook-internal CI special-cases FB projects'
    # Split "<project>/<path...>" on the first slash only.
    project, rel_path = project_and_path.split('/', 1)
    qualified = '{}/{}'.format(github_org, project)
    return self.github_project_workdir(qualified, rel_path)
def trips(self, val):
    """Store *val* and keep ``self._trips_i`` (the trip_id-indexed view)
    in sync whenever ``self.trips`` changes."""
    self._trips = val
    has_rows = val is not None and not val.empty
    self._trips_i = self._trips.set_index("trip_id") if has_rows else None
def add_datepart(df, fldname, drop=True, time=False, errors="raise"):
    """add_datepart converts a column of df from a datetime64 to many
    columns containing the information from the date. This applies the
    changes inplace.

    Parameters:
    -----------
    df: A pandas data frame. df gains several new columns.
    fldname: A string that is the name of the date column you wish to
        expand. If it is not a datetime64 series, it will be converted
        to one with pd.to_datetime.
    drop: If true then the original date column will be removed.
    time: If true time features: Hour, Minute, Second will be added.
    errors: Passed through to pd.to_datetime when conversion is needed.

    Examples:
    ---------
    >>> df = pd.DataFrame({'A': pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'])})
    >>> add_datepart(df, 'A')   # adds AYear, AMonth, AWeek, ... AElapsed; drops 'A'
    """
    fld = df[fldname]
    fld_dtype = fld.dtype
    if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
        fld_dtype = np.datetime64

    if not np.issubdtype(fld_dtype, np.datetime64):
        # NOTE: infer_datetime_format was deprecated and then removed in
        # pandas 2.x; plain to_datetime infers formats itself.
        df[fldname] = fld = pd.to_datetime(fld, errors=errors)

    # Column prefix: strip a trailing 'date'/'Date' from the field name.
    targ_pre = re.sub('[Dd]ate$', '', fldname)
    attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
            'Is_month_end', 'Is_month_start', 'Is_quarter_end',
            'Is_quarter_start', 'Is_year_end', 'Is_year_start']
    if time:
        attr = attr + ['Hour', 'Minute', 'Second']
    for n in attr:
        if n == 'Week' and not hasattr(fld.dt, 'week'):
            # BUG FIX: Series.dt.week was removed in pandas 2.0; fall
            # back to the ISO calendar week, cast to int64 so the
            # column dtype matches the historical behavior.
            df[targ_pre + n] = fld.dt.isocalendar().week.astype(np.int64)
        else:
            df[targ_pre + n] = getattr(fld.dt, n.lower())
    # Seconds since the Unix epoch.
    df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
    if drop:
        df.drop(fldname, axis=1, inplace=True)
def _parse_redistribution ( self , config ) : """Parses config file for the OSPF router ID Args : config ( str ) : Running configuration Returns : list : dict : keys : protocol ( str ) route - map ( optional ) ( str )"""
redistributions = list ( ) regexp = r'redistribute .*' matches = re . findall ( regexp , config ) for line in matches : ospf_redist = line . split ( ) if len ( ospf_redist ) == 2 : # simple redist : eg ' redistribute bgp ' protocol = ospf_redist [ 1 ] redistributions . append ( dict ( protocol = protocol ) ) if len ( ospf_redist ) == 4 : # complex redist eg ' redistribute bgp route - map NYSE - RP - MAP ' protocol = ospf_redist [ 1 ] route_map_name = ospf_redist [ 3 ] redistributions . append ( dict ( protocol = protocol , route_map = route_map_name ) ) return dict ( redistributions = redistributions )
def getUTMzone(geom):
    """Determine the UTM zone (e.g. '10N') for the input geometry.

    Works from the centroid's lon/lat, normalises longitude to
    [-180, 180), and applies the Norway and Svalbard special cases.
    """
    # If geom has srs properly defined, can do this
    # geom.TransformTo(wgs_srs)
    # Centroid lon/lat.
    lon, lat = geom.Centroid().GetPoint_2D()

    # Normalise longitude into [-180, 180).
    lon180 = (lon + 180) - np.floor((lon + 180) / 360) * 360 - 180

    zonenum = int(np.floor((lon180 + 180) / 6) + 1)
    zonehem = 'N' if lat >= 0 else 'S'

    # Norway exception.
    if 56.0 <= lat < 64.0 and 3.0 <= lon180 < 12.0:
        zonenum = 32
    # Svalbard exceptions.
    if 72.0 <= lat < 84.0:
        if 0.0 <= lon180 < 9.0:
            zonenum = 31
        elif 9.0 <= lon180 < 21.0:
            zonenum = 33
        elif 21.0 <= lon180 < 33.0:
            zonenum = 35
        elif 33.0 <= lon180 < 42.0:
            zonenum = 37

    return str(zonenum) + zonehem
def list_containers(self):
    """List all available nspawn containers.

    Shells out to ``machinectl list`` and builds one container object
    per reported machine name.

    :return: collection of instances of
        :class:`conu.backend.nspawn.container.NspawnContainer`
    """
    listing = run_cmd(
        ["machinectl", "list", "--no-legend", "--no-pager"],
        return_output=True,
    )
    containers = []
    for raw_line in listing.split("\n"):
        line = raw_line.strip()
        if not line:
            continue
        # First whitespace-separated column is the machine name.
        machine_name = re.split(r"\s+", line)[0]
        containers.append(self.ContainerClass(None, None, name=machine_name))
    return containers