signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def extract_model_meta(base_meta: dict, extra_meta: dict, model_url: str) -> dict:
    """Merge the metadata from the backend and the extra metadata into a dict
    which is suitable for ``index.json``.

    :param base_meta: tree["meta"] :class:`dict` containing data from the backend.
    :param extra_meta: dict containing data from the user, similar to
        ``template_meta.json``.
    :param model_url: public URL of the model.
    :return: converted dict.
    """
    merged = {
        "default": {
            "default": base_meta["uuid"],
            "description": base_meta["description"],
            "code": extra_meta["code"],
        }
    }
    # "model" and "uuid" are represented elsewhere in the result, so they are
    # dropped before the backend dict becomes the "model" section.
    del base_meta["model"]
    del base_meta["uuid"]
    merged["model"] = base_meta
    for key in ("code", "datasets", "references", "tags", "extra"):
        merged["model"][key] = extra_meta[key]
    # Probe the published artifact to record a human-readable file size.
    response = requests.get(model_url, stream=True)
    merged["model"]["size"] = humanize.naturalsize(
        int(response.headers["content-length"]))
    merged["model"]["url"] = model_url
    merged["model"]["created_at"] = format_datetime(merged["model"]["created_at"])
    return merged
def ArcCos(input_vertex: vertex_constructor_param_types, label: Optional[str] = None) -> Vertex:
    """Takes the inverse cosine of a vertex, Arccos(vertex)

    :param input_vertex: the vertex
    """
    # Coerce plain Python values into a double vertex before handing it to
    # the JVM-side ArcCosVertex constructor.
    vertex = cast_to_double_vertex(input_vertex)
    return Double(context.jvm_view().ArcCosVertex, label, vertex)
def _get_color(color):
    """Returns a QColor built from a Pygments color string."""
    # Pygments colors look like "#rrggbb"; strip the hash and split the
    # hex digits into the three channels.
    hex_digits = str(color).replace("#", "")
    red, green, blue = (int(hex_digits[i:i + 2], base=16) for i in (0, 2, 4))
    qcolor = QtGui.QColor()
    qcolor.setRgb(red, green, blue)
    return qcolor
def create(self, object_type, under=None, attributes=None, **kwattrs):
    """Create a new automation object.

    Arguments:
    object_type -- Type of object to create.
    under       -- Handle of the parent of the new object.
    attributes  -- Dictionary of attributes (name-value pairs).
    kwattrs     -- Optional keyword attributes (name=value pairs).

    Return:
    Handle of newly created object.
    """
    # Delegate to the extended create call and surface only the handle.
    response = self.createx(object_type, under, attributes, **kwattrs)
    return response['handle']
def precision(links_true, links_pred=None):
    """precision(links_true, links_pred)

    Compute the precision. The precision is given by TP / (TP + FP).

    Parameters
    ----------
    links_true : pandas.MultiIndex, pandas.DataFrame, pandas.Series
        The true (or actual) collection of links.
    links_pred : pandas.MultiIndex, pandas.DataFrame, pandas.Series
        The predicted collection of links.

    Returns
    -------
    float
        The precision
    """
    if _isconfusionmatrix(links_true):
        # First argument is already a 2x2 confusion matrix:
        # [0, 0] holds the true positives, [1, 0] the false positives.
        matrix = links_true
        true_pos = matrix[0, 0]
        result = true_pos / (true_pos + matrix[1, 0])
    else:
        true_pos = true_positives(links_true, links_pred)
        false_pos = false_positives(links_true, links_pred)
        result = true_pos / (true_pos + false_pos)
    return float(result)
def get_membership_document(membership_type: str, current_block: dict,
                            identity: Identity, salt: str,
                            password: str) -> Membership:
    """Get a Membership document

    :param membership_type: "IN" to ask for membership or "OUT" to cancel membership
    :param current_block: Current block data
    :param identity: Identity document
    :param salt: Passphrase of the account
    :param password: Password of the account

    :rtype: Membership
    """
    # Anchor the document to the current blockchain head.
    timestamp = BlockUID(current_block['number'], current_block['hash'])
    # Derive the signing keypair from the account credentials.
    key = SigningKey.from_credentials(salt, password)
    document = Membership(
        version=10,
        currency=current_block['currency'],
        issuer=key.pubkey,
        membership_ts=timestamp,
        membership_type=membership_type,
        uid=identity.uid,
        identity_ts=identity.timestamp,
        signature=None,
    )
    # Sign in place; the signature field is filled by sign().
    document.sign([key])
    return document
def close(self) -> None:
    """To act as a file."""
    if self.underlying_stream:
        # Restore whichever standard stream we had redirected.
        if self.using_stdout:
            sys.stdout = self.underlying_stream
        else:
            sys.stderr = self.underlying_stream
        self.underlying_stream = None
    if self.file:
        # Do NOT close the file; we don't own it.
        self.file = None
    log.debug("Finished copying {} to {}",
              self.output_description, self.filename)
def tag_evidence_subtype(evidence):
    """Returns the type and subtype of an evidence object as a string,
    typically the extraction rule or database from which the statement
    was generated.

    For biopax, this is just the database name.

    Parameters
    ----------
    statement : indra.statements.Evidence
        The statement which we wish to subtype

    Returns
    -------
    types : tuple
        A tuple with (type, subtype), both strings.
        Returns (type, None) if the type of statement is not yet handled
        in this function.
    """
    source_api = evidence.source_api
    annotations = evidence.annotations
    if source_api == 'biopax':
        # For databases, the subtype is the source database identifier.
        subtype = annotations.get('source_sub_id')
    elif source_api in ('reach', 'eidos'):
        if 'found_by' in annotations:
            if source_api == 'reach':
                # Import deferred into the reach-only branch: the eidos path
                # does not need it, so it no longer pays the import cost.
                from indra.sources.reach.processor import \
                    determine_reach_subtype
                subtype = determine_reach_subtype(annotations['found_by'])
            else:
                # eidos: the rule name itself is the subtype.
                subtype = annotations['found_by']
        else:
            # Fixed typo in log message: "annoations" -> "annotations".
            logger.debug('Could not find found_by attribute in reach '
                         'statement annotations')
            subtype = None
    elif source_api == 'geneways':
        subtype = annotations['actiontype']
    else:
        subtype = None
    return (source_api, subtype)
def perform_job(self, job):
    """Wraps a job.perform() call with timeout logic and exception handlers.

    This is the first call happening inside the greenlet.
    """
    if self.config["trace_memory"]:
        job.trace_memory_start()
    # Make the job visible as "current" for the duration of perform().
    set_current_job(job)
    try:
        job.perform()
    # Handler ordering matters: the specific interrupt types must come
    # before the generic JobInterrupt / Exception catch-alls.
    except MaxConcurrencyInterrupt:
        self.log.error("Max concurrency reached")
        job._save_status("maxconcurrency", exception=True)
    except RetryInterrupt:
        self.log.error("Caught retry")
        # sys.exc_info()[1] is the in-flight RetryInterrupt instance.
        job.save_retry(sys.exc_info()[1])
    except MaxRetriesInterrupt:
        self.log.error("Max retries reached")
        job._save_status("maxretries", exception=True)
    except AbortInterrupt:
        self.log.error("Caught abort")
        job.save_abort()
    except TimeoutInterrupt:
        self.log.error("Job timeouted after %s seconds" % job.timeout)
        job._save_status("timeout", exception=True)
    except JobInterrupt:
        self.log.error("Job interrupted")
        job._save_status("interrupt", exception=True)
    except Exception:
        self.log.error("Job failed")
        job._save_status("failed", exception=True)
    finally:
        # Always clear the current-job marker and count the job as done,
        # whatever the outcome above.
        set_current_job(None)
        self.done_jobs += 1
        if self.config["trace_memory"]:
            job.trace_memory_stop()
def serialize(self):
    """Return the string representation of the receiver."""
    # Declare every namespace on the top-level grammar element first, so
    # the prefixes are available to everything serialized below.
    for uri in self.namespaces:
        self.top_grammar.attr["xmlns:" + self.namespaces[uri]] = uri
    parts = ['<?xml version="1.0" encoding="UTF-8"?>',
             self.top_grammar.start_tag()]
    parts.extend(child.serialize() for child in self.top_grammar.children)
    parts.append(self.tree.serialize())
    parts.extend(self.global_defs[name].serialize()
                 for name in self.global_defs)
    parts.extend(self.identities[name].serialize()
                 for name in self.identities)
    parts.append(self.top_grammar.end_tag())
    return "".join(parts)
def write_measurement(measurement, root_file=None, xml_path=None,
                      output_path=None, output_suffix=None,
                      write_workspaces=False, apply_xml_patches=True,
                      silence=False):
    """Write a measurement and RooWorkspaces for all contained channels
    into a ROOT file and write the XML files into a directory.

    Parameters
    ----------
    measurement : HistFactory::Measurement
        An asrootpy'd ``HistFactory::Measurement`` object
    root_file : ROOT TFile or string, optional (default=None)
        A ROOT file or string file name. The measurement and workspaces
        will be written to this file. If ``root_file is None`` then a new
        file will be created with the same name as the measurement and
        with the prefix ``ws_``.
    xml_path : string, optional (default=None)
        A directory path to write the XML into. If None, a new directory
        with the same name as the measurement and with the prefix ``xml_``
        will be created.
    output_path : string, optional (default=None)
        If ``root_file is None``, create the ROOT file under this path.
        If ``xml_path is None``, create the XML directory under this path.
    output_suffix : string, optional (default=None)
        Appended to the generated file name (before the .root extension)
        and to the generated XML directory name, when those are generated.
    write_workspaces : bool, optional (default=False)
        If True then also write a RooWorkspace for each channel and for
        all channels combined.
    apply_xml_patches : bool, optional (default=True)
        Apply fixes on the output of ``Measurement::PrintXML()`` to avoid
        known HistFactory bugs. Some of the patches assume that the ROOT
        file containing the histograms will exist one directory level up
        from the XML and that hist2workspace, or any tool that later reads
        the XML, will run from that same directory containing the ROOT file.
    silence : bool, optional (default=False)
        If True then capture and silence all stdout/stderr output from
        HistFactory.
    """
    # Optionally swallow HistFactory's stdout/stderr chatter.
    context = silence_sout_serr if silence else do_nothing
    output_name = measurement.name
    if output_suffix is not None:
        output_name += '_{0}'.format(output_suffix)
    output_name = output_name.replace(' ', '_')
    if xml_path is None:
        xml_path = 'xml_{0}'.format(output_name)
        if output_path is not None:
            xml_path = os.path.join(output_path, xml_path)
    if not os.path.exists(xml_path):
        mkdir_p(xml_path)
    if root_file is None:
        root_file = 'ws_{0}.root'.format(output_name)
        if output_path is not None:
            root_file = os.path.join(output_path, root_file)
    # Track whether we opened the file ourselves, so we only close what
    # we own.
    own_file = False
    if isinstance(root_file, string_types):
        root_file = root_open(root_file, 'recreate')
        own_file = True
    with preserve_current_directory():
        root_file.cd()
        log.info("writing histograms and measurement in {0} ...".format(
            root_file.GetName()))
        with context():
            measurement.writeToFile(root_file)
        # get modified measurement
        out_m = root_file.Get(measurement.name)
        log.info("writing XML in {0} ...".format(xml_path))
        with context():
            out_m.PrintXML(xml_path)
        if write_workspaces:
            log.info("writing combined model in {0} ...".format(
                root_file.GetName()))
            workspace = make_workspace(measurement, silence=silence)
            workspace.Write()
            for channel in measurement.channels:
                log.info("writing model for channel `{0}` in {1} ...".format(
                    channel.name, root_file.GetName()))
                workspace = make_workspace(
                    measurement, channel=channel, silence=silence)
                workspace.Write()
    if apply_xml_patches:
        # patch the output XML to avoid HistFactory bugs
        patch_xml(glob(os.path.join(xml_path, '*.xml')),
                  root_file=os.path.basename(root_file.GetName()))
    if own_file:
        root_file.Close()
def create_user(self, uid, name, password, channel=None, callback=False,
                link_auth=True, ipmi_msg=True, privilege_level='user'):
    """create/ensure a user is created with provided settings (helper)

    :param privilege_level: User Privilege Limit. (Determines the maximum
        privilege level that the user is allowed to switch to on the
        specified channel.)

        * callback
        * user
        * operator
        * administrator
        * proprietary
        * no_access
    """
    # The current user might be trying to update itself, so do not disable:
    # set_user_password(uid, password, mode='disable')
    channel = self.get_network_channel() if channel is None else channel
    self.set_user_name(uid, name)
    # Set the password first, then enable the account with it.
    self.set_user_password(uid, password=password)
    self.set_user_password(uid, mode='enable', password=password)
    self.set_user_access(uid, channel, callback=callback,
                         link_auth=link_auth, ipmi_msg=ipmi_msg,
                         privilege_level=privilege_level)
    return True
def msg_curse(self, args=None, max_width=None):
    """Return the string to display in the curse interface."""
    ret = []
    # NOTE(review): accessing args.client means callers must pass args;
    # the args=None default would raise AttributeError — confirm callers.
    if args.client:
        # Client mode: show the connection state first.
        status = args.cs_status.lower()
        if status == "connected":
            ret.append(self.curse_add_line('Connected to ', 'OK'))
        elif status == "snmp":
            ret.append(self.curse_add_line('SNMP from ', 'OK'))
        elif status == "disconnected":
            ret.append(self.curse_add_line('Disconnected from ', 'CRITICAL'))
    # Hostname is mandatory
    ret.append(self.curse_add_line(self.stats['hostname'], "TITLE"))
    # System info: prefer the distribution name on Linux.
    if self.stats['os_name'] == "Linux" and self.stats['linux_distro']:
        sysinfo = ' ({} {} / {} {})'.format(self.stats['linux_distro'],
                                            self.stats['platform'],
                                            self.stats['os_name'],
                                            self.stats['os_version'])
    else:
        try:
            sysinfo = ' ({} {} {})'.format(self.stats['os_name'],
                                           self.stats['os_version'],
                                           self.stats['platform'])
        except Exception:
            # Fall back to the OS name alone if any field is missing.
            sysinfo = ' ({})'.format(self.stats['os_name'])
    ret.append(self.curse_add_line(sysinfo, optional=True))
    # Return the message with decoration
    return ret
def access_with_service(self, service, use_xarray=None):
    """Access the dataset using a particular service.

    Return an Python object capable of communicating with the server using
    the particular service. For instance, for 'HTTPServer' this is a
    file-like object capable of HTTP communication; for OPENDAP this is a
    netCDF4 dataset.

    Parameters
    ----------
    service : str
        The name of the service for accessing the dataset

    Returns
    -------
    An instance appropriate for communicating using ``service``.
    """
    service = CaseInsensitiveStr(service)
    if service == 'CdmRemote':
        if use_xarray:
            from .cdmr.xarray_support import CDMRemoteStore
            try:
                import xarray as xr
                provider = lambda url: xr.open_dataset(CDMRemoteStore(url))  # noqa: E731
            except ImportError:
                raise ImportError('CdmRemote access needs xarray to be installed.')
        else:
            from .cdmr import Dataset as CDMRDataset
            provider = CDMRDataset
    elif service == 'OPENDAP':
        if use_xarray:
            try:
                import xarray as xr
                provider = xr.open_dataset
            except ImportError:
                raise ImportError('xarray to be installed if `use_xarray` is True.')
        else:
            try:
                from netCDF4 import Dataset as NC4Dataset
                provider = NC4Dataset
            except ImportError:
                raise ImportError('OPENDAP access needs netCDF4-python to be installed.')
    elif service in self.ncssServiceNames:
        from .ncss import NCSS
        provider = NCSS
    elif service == 'HTTPServer':
        provider = session_manager.urlopen
    else:
        raise ValueError(service + ' is not an access method supported by Siphon')
    # A dataset may advertise a service without providing a URL for it.
    try:
        return provider(self.access_urls[service])
    except KeyError:
        raise ValueError(service + ' is not available for this dataset')
def requires_role(role_s, logical_operator=all):
    """Requires that the calling Subject be authorized to the extent that is
    required to satisfy the role_s specified and the logical operation upon
    them.

    :param role_s: a collection of the role(s) required, specified by
                   identifiers (such as a role name)
    :type role_s: a List of Strings

    :param logical_operator: indicates whether all or at least one permission
                             is true (and, any)
    :type: and OR all (from python standard library)

    :raises AuthorizationException: if the user does not have sufficient
                                    role membership

    Elaborate Example:
        requires_role(role_s=['sysadmin', 'developer'], logical_operator=any)

    Basic Example:
        requires_role('physician')
    """
    def outer_wrap(fn):
        @functools.wraps(fn)
        def inner_wrap(*args, **kwargs):
            # Authorization is checked at call time against the subject
            # currently bound to the execution context.
            current_subject = Yosai.get_current_subject()
            current_subject.check_role(role_s, logical_operator)
            return fn(*args, **kwargs)
        return inner_wrap
    return outer_wrap
def setLabels(self, name):
    """Sets plot labels, according to predefined options

    :param name: The type of plot to create labels for. Options:
        calibration, tuning, anything else labels for spike counts
    :type name: str
    """
    if name == "calibration":
        title = "Calibration Curve"
        self.setWindowTitle(title)
        self.setTitle(title)
        self.setLabel('bottom', "Frequency", units='Hz')
        self.setLabel('left', 'Recorded Intensity (dB SPL)')
    elif name == "tuning":
        title = "Tuning Curve"
        self.setWindowTitle(title)
        self.setTitle(title)
        self.setLabel('bottom', "Frequency", units="Hz")
        self.setLabel('left', "Spike Count (mean)")
    else:
        # Default: spike-count labels.
        title = "Spike Counts"
        self.setWindowTitle(title)
        self.setTitle(title)
        self.setLabel('bottom', "Test Number", units='')
        self.setLabel('left', "Spike Count (mean)", units='')
def estimate_bg(self, fit_offset="mean", fit_profile="tilt", border_px=0,
                from_mask=None, ret_mask=False):
    """Estimate image background

    Parameters
    ----------
    fit_profile: str
        The type of background profile to fit:

        - "offset": offset only
        - "poly2o": 2D 2nd order polynomial with mixed terms
        - "tilt": 2D linear tilt with offset (default)
    fit_offset: str
        The method for computing the profile offset

        - "fit": offset as fitting parameter
        - "gauss": center of a gaussian fit
        - "mean": simple average
        - "mode": mode (see `qpimage.bg_estimate.mode`)
    border_px: float
        Assume that a frame of `border_px` pixels around the image
        is background.
    from_mask: boolean np.ndarray or None
        Use a boolean array to define the background area. The mask
        image must have the same shape as the input data. `True`
        elements are used for background estimation.
    ret_mask: bool
        Return the mask image used to compute the background.

    Notes
    -----
    If both `border_px` and `from_mask` are given, the intersection
    of the two resulting mask images is used.

    The arguments passed to this method are stored in the hdf5 file
    `self.h5` and are used for optional integrity checking using
    `qpimage.integrity_check.check`.

    See Also
    --------
    qpimage.bg_estimate.estimate
    """
    # Remove any existing background before accessing imdat.image.
    self.set_bg(bg=None, key="fit")
    # Always request the mask so it can optionally be returned.
    bg_image, bg_mask = bg_estimate.estimate(data=self.image,
                                             fit_offset=fit_offset,
                                             fit_profile=fit_profile,
                                             border_px=border_px,
                                             from_mask=from_mask,
                                             ret_mask=True)
    fit_attrs = {"fit_offset": fit_offset,
                 "fit_profile": fit_profile,
                 "border_px": border_px}
    self.set_bg(bg=bg_image, key="fit", attrs=fit_attrs)
    # Save `from_mask` separately (arrays vs. h5 attributes);
    # if `from_mask` is `None`, this removes the array.
    self["estimate_bg_from_mask"] = from_mask
    if ret_mask:
        return bg_mask
def bookmarks():
    """Bookmarks.

    Flask view handling three methods on the bookmarks collection:
    GET lists (and optionally tag-filters and paginates) all bookmarks,
    POST creates one from the submitted form, DELETE clears the database.
    Requests under ``/api/`` get JSON responses; others get HTML.
    """
    res = None
    # Reuse a request-scoped DB handle when available.
    bukudb = getattr(flask.g, 'bukudb', get_bukudb())
    page = request.args.get(get_page_parameter(), type=int, default=1)
    per_page = request.args.get(
        get_per_page_parameter(),
        type=int,
        default=int(
            current_app.config.get('BUKUSERVER_PER_PAGE',
                                   views.DEFAULT_PER_PAGE)))
    url_render_mode = current_app.config['BUKUSERVER_URL_RENDER_MODE']
    create_bookmarks_form = forms.BookmarkForm()
    if request.method == 'GET':
        all_bookmarks = bukudb.get_rec_all()
        result = {'bookmarks': []}
        for bookmark in all_bookmarks:
            # bookmark is a DB row tuple: (id, url, title, tags, description).
            result_bookmark = {
                'url': bookmark[1],
                'title': bookmark[2],
                'tags': list([_f for _f in bookmark[3].split(',') if _f]),
                'description': bookmark[4]
            }
            if not request.path.startswith('/api/'):
                # The HTML view needs the record id for links/actions.
                result_bookmark['id'] = bookmark[0]
            result['bookmarks'].append(result_bookmark)
        if request.path.startswith('/api/'):
            res = jsonify(result)
        else:
            if request.args.getlist('tag'):
                # Keep only bookmarks carrying ALL requested tags.
                tags = request.args.getlist('tag')
                result['bookmarks'] = [
                    x for x in result['bookmarks']
                    if set(tags).issubset(set(x['tags']))
                ]
            current_app.logger.debug(
                'total bookmarks:{}'.format(len(result['bookmarks'])))
            current_app.logger.debug('per page:{}'.format(per_page))
            pagination_total = len(result['bookmarks'])
            # Slice the flat list into pages and pick the requested one.
            bms = list(views.chunks(result['bookmarks'], per_page))
            try:
                result['bookmarks'] = bms[page - 1]
            except IndexError as err:
                # Out-of-range page: log and fall through with the full list.
                current_app.logger.debug(
                    '{}:{}, result bookmarks:{}, page:{}'.format(
                        type(err), err, len(result['bookmarks']), page))
            pagination = Pagination(
                page=page, total=pagination_total, per_page=per_page,
                search=False, record_name='bookmarks', bs_version=3)
            res = render_template(
                'bukuserver/bookmarks.html',
                result=result, pagination=pagination,
                search_bookmarks_form=forms.SearchBookmarksForm(),
                create_bookmarks_form=create_bookmarks_form,
                url_render_mode=url_render_mode,
            )
    elif request.method == 'POST':
        url_data = create_bookmarks_form.url.data
        # add_rec returns -1 on failure.
        result_flag = bukudb.add_rec(
            url_data,
            create_bookmarks_form.title.data,
            create_bookmarks_form.tags.data,
            create_bookmarks_form.description.data)
        if request.path.startswith('/api/'):
            res = [jsonify(response.response_template['success']),
                   status.HTTP_200_OK,
                   {'ContentType': 'application/json'}] \
                if result_flag != -1 else \
                [jsonify(response.response_template['failure']),
                 status.HTTP_400_BAD_REQUEST,
                 {'ContentType': 'application/json'}]
        else:
            bm_text = '[<a href="{0}">{0}</a>]'.format(url_data)
            if result_flag != -1:
                flash(Markup('Success creating bookmark {}.'.format(
                    bm_text)), 'success')
            else:
                flash(Markup('Failed creating bookmark {}.'.format(
                    bm_text)), 'danger')
            return redirect(url_for('bookmarks-html'))
    elif request.method == 'DELETE':
        result_flag = bukudb.cleardb()
        res = [jsonify(response.response_template['success']),
               status.HTTP_200_OK,
               {'ContentType': 'application/json'}] \
            if result_flag else \
            [jsonify(response.response_template['failure']),
             status.HTTP_400_BAD_REQUEST,
             {'ContentType': 'application/json'}]
    return res
def spiceErrorCheck(f):
    """Decorator for spiceypy hooking into spice error system.
    If an error is detected, an output similar to outmsg

    :type f: builtins.function
    :return: the wrapped function
    :rtype: builtins.function
    """
    @functools.wraps(f)
    def with_errcheck(*args, **kwargs):
        # The original wrapped these lines in ``try: ... except: raise``,
        # a bare except that only re-raises — a no-op removed here.
        # Run the wrapped CSPICE call, then poll the SPICE error
        # subsystem; checkForSpiceError raises if an error is flagged.
        res = f(*args, **kwargs)
        checkForSpiceError(f)
        return res
    return with_errcheck
def parse(self, text):
    """Parse text to obtain list of Segments

    Yields Segment objects: plain-text spans carry the parameters of the
    tokens currently open on the stack; matched single tokens yield their
    own segment. While a token with ``skip`` is open, inner tokens are not
    interpreted until its matching end token is found.
    """
    text = self.preprocess(text)
    token_stack = []
    last_pos = 0
    # Iterate through all matched tokens
    for match in self.regex.finditer(text):
        # Find which token has been matched by regex
        token, match_type, group = self.get_matched_token(match)
        # Get params from stack of tokens
        params = self.get_params(token_stack)
        # Should we skip interpreting tokens?
        skip = token_stack[-1].skip if token_stack else False
        # Check for end token first
        if match_type == MatchType.end:
            # In skip mode only the matching end token may close the
            # region; otherwise any end token is tried against the stack.
            if not skip or token_stack[-1] == token:
                removed = self.remove_token(token_stack, token)
                if removed:
                    skip = False
                else:
                    # Unmatched end token: treat it as literal text below.
                    skip = True
        if not skip:
            # Append text preceding matched token
            start_pos = match.start(group)
            if start_pos > last_pos:
                yield Segment(self.postprocess(text[last_pos:start_pos]),
                              **params)
            # Actions specific for start token or single token
            if match_type == MatchType.start:
                token_stack.append(token)
            elif match_type == MatchType.single:
                single_params = params.copy()
                single_params.update(token.params)
                single_text = token.text if token.text is not None \
                    else match.group(group)
                yield Segment(single_text, token=token, match=match,
                              **single_params)
            # Move last position pointer to the end of matched token
            last_pos = match.end(group)
    # Append anything that's left
    if last_pos < len(text):
        params = self.get_params(token_stack)
        yield Segment(self.postprocess(text[last_pos:]), **params)
def hook(*hook_patterns):
    """Register the decorated function to run when the current hook matches
    any of the ``hook_patterns``.

    This decorator is generally deprecated and should only be used when
    absolutely necessary.

    The hook patterns can use the ``{interface:...}`` and ``{A,B,...}``
    syntax supported by :func:`~charms.reactive.bus.any_hook`.

    Note that hook decorators **cannot** be combined with :func:`when` or
    :func:`when_not` decorators.
    """
    def _register(action):
        def arg_gen():
            # Use a generator to defer calling of hookenv.relation_type,
            # for tests.
            relation = endpoint_from_name(hookenv.relation_type())
            if relation:
                yield relation

        handler = Handler.get(action)
        handler.add_predicate(partial(_hook, hook_patterns))
        handler.add_args(arg_gen())
        return action

    return _register
def page(self, course):
    """Get all data and display the page"""
    # WebDAV must be configured for this endpoint to exist at all.
    if not self.webdav_host:
        raise web.notfound()
    webdav_url = self.webdav_host + "/" + course.get_id()
    user = self.user_manager.session_username()
    api_key = self.user_manager.session_api_key()
    renderer = self.template_helper.get_renderer()
    return renderer.course_admin.webdav(course, webdav_url, user, api_key)
def connected_outports(self):
    '''The list of all output ports belonging to this component that are
    connected to one or more other ports.
    '''
    result = []
    for port in self.ports:
        # Class-name comparison (rather than isinstance) mirrors how the
        # port types are distinguished elsewhere in this module.
        if port.__class__.__name__ == 'DataOutPort' and port.is_connected:
            result.append(port)
    return result
def stop(self):
    """Stop the running task.

    Signals the worker thread (if any) to finish by setting the done event.
    """
    # ``Thread.isAlive()`` was removed in Python 3.9; ``is_alive()`` has
    # existed since Python 2.6, so this is backward-compatible.
    if self._thread is not None and self._thread.is_alive():
        self._done.set()
def run(program, *args, **kwargs):
    """Run 'program' with 'args'

    Recognized keyword arguments (all popped before the rest is forwarded
    to ``subprocess.Popen``):

    - ``logger``: callable used to log the invocation (default LOG.debug)
    - ``fatal``: passed through to abort() on failure (default True)
    - ``dryrun``: if true, only log and return the message (default is_dryrun())
    - ``include_error``: if true, append stderr to the returned output
    - ``stdout`` / ``stderr``: Popen redirections (default subprocess.PIPE)
    - ``path_env``: extra paths merged into the child's environment
    """
    args = flattened(args, split=SHELL)
    full_path = which(program)

    logger = kwargs.pop("logger", LOG.debug)
    fatal = kwargs.pop("fatal", True)
    dryrun = kwargs.pop("dryrun", is_dryrun())
    include_error = kwargs.pop("include_error", False)

    message = "Would run" if dryrun else "Running"
    message = "%s: %s %s" % (message, short(full_path or program),
                             represented_args(args))
    if logger:
        logger(message)

    if dryrun:
        # Dry-run mode: report what would have happened, execute nothing.
        return message

    if not full_path:
        return abort("%s is not installed", short(program), fatal=fatal)

    stdout = kwargs.pop("stdout", subprocess.PIPE)
    stderr = kwargs.pop("stderr", subprocess.PIPE)
    args = [full_path] + args
    try:
        path_env = kwargs.pop("path_env", None)
        if path_env:
            kwargs["env"] = added_env_paths(path_env, env=kwargs.get("env"))
        p = subprocess.Popen(args, stdout=stdout, stderr=stderr, **kwargs)  # nosec
        output, err = p.communicate()
        output = decode(output, strip=True)
        err = decode(err, strip=True)

        # fatal=None means "don't treat a non-zero exit code as an error".
        if p.returncode and fatal is not None:
            note = ": %s\n%s" % (err, output) if output or err else ""
            message = "%s exited with code %s%s" % (
                short(program), p.returncode, note.strip())
            return abort(message, fatal=fatal)

        if include_error and err:
            output = "%s\n%s" % (output, err)
        return output and output.strip()

    except Exception as e:
        return abort("%s failed: %s", short(program), e, exc_info=e, fatal=fatal)
def from_record(cls, record, crs):
    """Load vector from record.

    :param record: mapping that must carry a ``'type'`` key
        (GeoJSON-like geometry record).
    :param crs: coordinate reference system, passed through to the
        constructor unchanged.
    :raises TypeError: if ``record`` has no ``'type'`` key.
    """
    if 'type' in record:
        return cls(to_shape(record), crs)
    raise TypeError("The data isn't a valid record.")
def schema(self):
    """List[:class:`~google.cloud.bigquery.schema.SchemaField`]: The schema
    for the data.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).schema
    https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.schema
    """
    # Both levels may be absent from the resource representation.
    fields = self._properties.get("schema", {}).get("fields", [])
    return [SchemaField.from_api_repr(field) for field in fields]
def _gather_files(app, hidden, filepath_filter_regex=None):
    """Gets all files in static folders and returns in dict.

    :param app: Flask application (its static folder and any blueprint
        static folders are scanned).
    :param hidden: if true, include dot-files.
    :param filepath_filter_regex: optional regex; when given, only files
        whose relative path matches are kept.
    :return: dict mapping ``(static_folder, static_url)`` to file lists.
    """
    dirs = [(six.text_type(app.static_folder), app.static_url_path)]
    if hasattr(app, 'blueprints'):
        blueprints = app.blueprints.values()

        # Was a lambda assignment (flagged by E731); a def is equivalent
        # and properly named.
        def bp_details(blueprint):
            return (blueprint.static_folder, _bp_static_url(blueprint))

        dirs.extend([bp_details(x) for x in blueprints if x.static_folder])

    valid_files = defaultdict(list)
    for static_folder, static_url_loc in dirs:
        if not os.path.isdir(static_folder):
            logger.warning("WARNING - [%s does not exist]" % static_folder)
        else:
            logger.debug("Checking static folder: %s" % static_folder)
            for root, _, files in os.walk(static_folder):
                relative_folder = re.sub(r'^/', '',
                                         root.replace(static_folder, ''))

                files = [os.path.join(root, x) for x in files
                         if ((hidden or x[0] != '.') and
                             # Skip this file if the filter regex is
                             # defined, and this file's path is a
                             # negative match.
                             # Fixed "== None" to the idiomatic "is None".
                             (filepath_filter_regex is None or re.search(
                                 filepath_filter_regex,
                                 os.path.join(relative_folder, x))))]

                if files:
                    valid_files[(static_folder, static_url_loc)].extend(files)
    return valid_files
def parse(cls, args):
    """Parse command line arguments to construct a dictionary of command
    parameters that can be used to create a command

    Args:
        `args`: sequence of arguments

    Returns:
        Dictionary that can be used in create method

    Raises:
        ParseError: when the arguments are not correct
    """
    try:
        (options, args) = cls.optparser.parse_args(args)
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit as e:
        # --help/--version style exits are not errors.
        return None

    parsed = {
        'label': options.label,
        'can_notify': options.can_notify,
        'name': options.name,
        'tags': options.tags,
        "command_type": "HadoopCommand",
        'print_logs': options.print_logs,
        'print_logs_live': options.print_logs_live,
        'pool': options.pool,
    }

    if len(args) < 2:
        raise ParseError("Need at least two arguments", cls.usage)
    subcmd = args.pop(0)
    if subcmd not in cls.subcmdlist:
        raise ParseError("First argument must be one of <%s>" %
                         "|".join(cls.subcmdlist))
    parsed["sub_command"] = subcmd
    # Remaining positionals are re-quoted into a single shell-style string.
    parsed["sub_command_args"] = " ".join("'" + str(a) + "'" for a in args)
    return parsed
def iteritems_sorted(dict_):
    """Return an iterator over the ``(key, value)`` pairs of ``dict_``.

    Pairs come in the dict's own insertion order for :class:`OrderedDict`
    (assumed to be deliberately ordered) and in sorted key order otherwise.

    Note: the former ``six.iteritems`` shim is replaced by the direct
    ``dict.items`` equivalent; behavior on Python 3 is identical.
    """
    if isinstance(dict_, OrderedDict):
        return iter(dict_.items())
    return iter(sorted(dict_.items()))
def right_shift_blockwise(x, query_shape, name=None):
    """Right shifts once in every block.

    Args:
      x: a tensor of shape [batch, height, width, depth]
      query_shape: A 2d tuple of ints
      name: a string

    Returns:
      output: a tensor of the same shape as x
    """
    with tf.variable_scope(
        name, default_name="right_shift_blockwise", values=[x]):
        # Static shape (for set_shape below) and dynamic shape (for slicing).
        x_list_shape = x.get_shape().as_list()
        x_shape = common_layers.shape_list(x)
        # Add a dummy dimension for heads.
        x = tf.expand_dims(x, axis=1)
        x = pad_to_multiple_2d(x, query_shape)
        padded_x_shape = common_layers.shape_list(x)
        # Set up q blocks.
        x_indices = gather_indices_2d(x, query_shape, query_shape)
        x_new = get_shifted_center_blocks(x, x_indices)
        # Put representations back into original shapes.
        output = scatter_blocks_2d(x_new, x_indices, padded_x_shape)
        # Remove the dummy head dimension.
        output = tf.squeeze(output, axis=1)
        # Remove the padding if introduced.
        output = tf.slice(output, [0, 0, 0, 0],
                          [-1, x_shape[1], x_shape[2], -1])
        output.set_shape(x_list_shape)
        return output
def update_device ( self , device_id , ** kwargs ) :
    """Update an existing device in the catalog.

    .. code-block:: python

        existing_device = api.get_device(...)
        updated_device = api.update_device(
            existing_device.id,
            certificate_fingerprint="something new"
        )

    :param str device_id: The ID of the device to update (Required)
    :param obj custom_attributes: Up to 5 custom JSON attributes
    :param str description: The description of the device
    :param str name: The name of the device
    :param str alias: The alias of the device
    :param str device_type: The endpoint type of the device - e.g. if the device is a gateway
    :param str host_gateway: The endpoint_name of the host gateway, if appropriate
    :param str certificate_fingerprint: Fingerprint of the device certificate
    :param str certificate_issuer_id: ID of the issuer of the certificate
    :returns: the updated device object
    :rtype: Device
    """
    api = self . _get_api ( device_directory . DefaultApi )
    # Translate the keyword arguments into the wire-format field names.
    field_map = Device . _create_request_map ( kwargs )
    request_body = DeviceDataPostRequest ( ** field_map )
    return Device ( api . device_update ( device_id , request_body ) )
def play ( env , transpose = True , fps = 30 , nop_ = 0 ) :
    """Play the game using the keyboard as a human.

    Runs a blocking pygame event loop until the window is closed or the
    escape key is pressed. Side effects: opens a pygame window, steps and
    resets ``env`` repeatedly, and calls ``pygame.quit()`` on exit.

    Args:
        env (gym.Env): the environment to use for playing
        transpose (bool): whether to transpose frame before viewing them
        fps (int): number of steps of the environment to execute every second
        nop_ (any): the object to use as a null op action for the environment

    Returns:
        None
    """
    # ensure the observation space is a box of pixels
    assert isinstance ( env . observation_space , gym . spaces . box . Box )
    # ensure the observation space is either B&W pixels or RGB pixels
    obs_s = env . observation_space
    is_bw = len ( obs_s . shape ) == 2
    is_rgb = len ( obs_s . shape ) == 3 and obs_s . shape [ 2 ] in [ 1 , 3 ]
    assert is_bw or is_rgb
    # get the mapping of keyboard keys to actions in the environment
    if hasattr ( env , 'get_keys_to_action' ) :
        keys_to_action = env . get_keys_to_action ( )
    # get the mapping of keyboard keys to actions in the unwrapped environment
    elif hasattr ( env . unwrapped , 'get_keys_to_action' ) :
        keys_to_action = env . unwrapped . get_keys_to_action ( )
    else :
        raise ValueError ( 'env has no get_keys_to_action method' )
    # flatten the key tuples into the set of individually relevant keys
    relevant_keys = set ( sum ( map ( list , keys_to_action . keys ( ) ) , [ ] ) )
    # determine the size of the video in pixels (height, width of the frame)
    video_size = env . observation_space . shape [ 0 ] , env . observation_space . shape [ 1 ]
    if transpose :
        video_size = tuple ( reversed ( video_size ) )
    # variables tracking the running state of the game
    pressed_keys = [ ]
    running = True
    env_done = True
    # setup the screen using pygame
    flags = pygame . RESIZABLE | pygame . HWSURFACE | pygame . DOUBLEBUF
    screen = pygame . display . set_mode ( video_size , flags )
    pygame . event . set_blocked ( pygame . MOUSEMOTION )
    # set the caption for the pygame window. if the env has a spec use its id
    if env . spec is not None :
        pygame . display . set_caption ( env . spec . id )
    # otherwise just use the default nes-py caption
    else :
        pygame . display . set_caption ( 'nes-py' )
    # start a clock for limiting the frame rate to the given FPS
    clock = pygame . time . Clock ( )
    # start the main game loop
    while running :
        # reset if the environment is done
        if env_done :
            env_done = False
            obs = env . reset ( )
        # otherwise take a normal step
        else :
            # look up the action for the currently pressed key combination
            # (sorted tuple is the canonical key), defaulting to the no-op
            action = keys_to_action . get ( tuple ( sorted ( pressed_keys ) ) , nop_ )
            obs , rew , env_done , info = env . step ( action )
        # make sure the observation exists
        if obs is not None :
            # if the observation is just height and width (B&W)
            if len ( obs . shape ) == 2 :
                # add a dummy channel for pygame display
                obs = obs [ : , : , None ]
            # if the observation is single channel (B&W)
            if obs . shape [ 2 ] == 1 :
                # repeat the single channel 3 times for RGB encoding of B&W
                obs = obs . repeat ( 3 , axis = 2 )
            # display the observation on the pygame screen
            display_arr ( screen , obs , video_size , transpose )
        # process keyboard events
        for event in pygame . event . get ( ) :
            # handle a key being pressed
            if event . type == pygame . KEYDOWN :
                # make sure the key is in the relevant key list
                if event . key in relevant_keys :
                    # add the key to pressed keys
                    pressed_keys . append ( event . key )
                # ASCII code 27 is the "escape" key
                elif event . key == 27 :
                    running = False
                # 'e' backs up the emulator state, 'r' restores it
                elif event . key == ord ( 'e' ) :
                    env . unwrapped . _backup ( )
                elif event . key == ord ( 'r' ) :
                    env . unwrapped . _restore ( )
            # handle a key being released
            elif event . type == pygame . KEYUP :
                # make sure the key is in the relevant key list
                if event . key in relevant_keys :
                    # remove the key from the pressed keys
                    pressed_keys . remove ( event . key )
            # if the event is quit, set running to False
            elif event . type == pygame . QUIT :
                running = False
        # flip the pygame screen
        pygame . display . flip ( )
        # throttle to maintain the framerate
        clock . tick ( fps )
    # quit the pygame setup
    pygame . quit ( )
def _xfs_estimate_output ( out ) : '''Parse xfs _ estimate output .'''
spc = re . compile ( r"\s+" ) data = { } for line in [ l for l in out . split ( "\n" ) if l . strip ( ) ] [ 1 : ] : directory , bsize , blocks , megabytes , logsize = spc . sub ( " " , line ) . split ( " " ) data [ directory ] = { 'block _size' : bsize , 'blocks' : blocks , 'megabytes' : megabytes , 'logsize' : logsize , } return data
def is_linear ( self ) :
    """Tests whether all filters in the list are linear.

    CascadeFilter and ParallelFilter instances are also linear if all
    filters they group are linear (checked via their own ``is_linear``).
    """
    for filt in self . callables :
        if isinstance ( filt , LinearFilter ) :
            continue
        # grouping filters expose their own is_linear() predicate
        if hasattr ( filt , "is_linear" ) and filt . is_linear ( ) :
            continue
        return False
    return True
def replace ( self ) :
    """Replace the index with a freshly built one.

    Uses ``zero_downtime_index`` so the alias keeps serving the old
    index until the new one is fully populated (safe, with rollback).
    """
    config = self . index_config ( )
    with zero_downtime_index ( self . alias_name , config ) as fresh_index :
        self . index_all ( fresh_index )
def connect_to ( self , vertex , weight = 1 ) :
    """Connect this vertex to another one.

    If an outgoing edge to ``vertex`` already exists it is returned
    instead of creating a duplicate.

    Args:
        vertex (Vertex): vertex to connect to.
        weight (int): weight of the edge.

    Returns:
        Edge: the existing or newly created edge.
    """
    existing = next ( ( edge for edge in self . edges_out if vertex == edge . vertex_in ) , None )
    if existing is not None :
        return existing
    return Edge ( self , vertex , weight )
def _search ( self , base , fltr , attrs = None , scope = ldap . SCOPE_SUBTREE ) :
    """Perform an LDAP search.

    Returns the raw ``search_s`` result list, or ``False`` when the
    query fails (the failure is logged; callers test the return value).
    """
    try :
        return self . _conn . search_s ( base , scope , fltr , attrs )
    except Exception as e :
        # best-effort API: log the decoded LDAP error and signal failure
        log . exception ( self . _get_ldap_msg ( e ) )
        return False
def get_raw_counts ( self ) :
    """Determine counts for unique words, repetitions, etc from the raw text response.

    Adds the following measures to the ``self.measures`` dictionary:

    - COUNT_total_words: words with semantic content spoken by the subject
      (filled pauses, silences, coughs, breaths and examiner words excluded).
    - COUNT_permissible_words: words qualifying as valid responses under the
      clustering criteria.
    - COUNT_exact_repetitions: words repeating earlier words in the response
      (SEMANTIC responses are lemmatized beforehand, so 'dog'/'dogs' may match).
    - COUNT_stem_repetitions: words whose Porter stem matches an earlier word
      (e.g. 'sledding' after 'sled').
    - COUNT_examiner_words: words uttered by the examiner ("E_" prefix in
      .TextGrid files).
    - COUNT_filled_pauses: filled pauses ("FILLEDPAUSE_" prefix).
    - COUNT_word_fragments: word fragments (trailing "-").
    - COUNT_asides: subject words that do not meet the test criteria.
    - COUNT_unique_permissible_words: permissible words minus exact and stem
      repetitions.
    """
    # for making the (word, label) table printed at the end
    words = [ ]
    labels = [ ]
    words_said = set ( )
    # Words like "polar_bear" count as one semantically but two phonetically.
    # Uncategorizable words are counted as asides.
    for unit in self . parsed_response :
        word = unit . text
        test = False
        if self . type == "PHONETIC" :
            # a word is permissible when it starts with the target letter
            # and is a real English word, after weeding out annotations
            test = ( word . startswith ( self . letter ) and
                     "T_" not in word and "E_" not in word and
                     "!" not in word and  # Weed out tags
                     "FILLEDPAUSE_" not in word and  # Weed out filled pauses
                     not word . endswith ( '-' ) and  # Weed out false starts
                     word . lower ( ) in self . english_words )  # weed out non-words
        elif self . type == "SEMANTIC" :
            # automatically weed out all non-semantically-appropriate responses
            test = ( word in self . permissible_words )
        if test :
            self . measures [ 'COUNT_total_words' ] += 1
            self . measures [ 'COUNT_permissible_words' ] += 1
            # classify as exact repetition, stem repetition, or fresh word
            if any ( word == w for w in words_said ) :
                self . measures [ 'COUNT_exact_repetitions' ] += 1
                labels . append ( 'EXACT REPETITION' )
            elif any ( stemmer . stem ( word ) == stemmer . stem ( w ) for w in words_said ) :
                self . measures [ 'COUNT_stem_repetitions' ] += 1
                labels . append ( 'STEM REPETITION' )
            else :
                labels . append ( 'PERMISSIBLE WORD' )
            words_said . add ( word )
            words . append ( word )
        elif word . lower ( ) . startswith ( 'e_' ) :
            # examiner utterance
            self . measures [ 'COUNT_examiner_words' ] += 1
            words . append ( word )
            labels . append ( 'EXAMINER WORD' )
        elif word . endswith ( '-' ) :
            # word fragment / false start
            self . measures [ 'COUNT_word_fragments' ] += 1
            words . append ( word )
            labels . append ( 'WORD FRAGMENT' )
        elif word . lower ( ) . startswith ( 'filledpause' ) :
            self . measures [ 'COUNT_filled_pauses' ] += 1
            words . append ( word )
            labels . append ( 'FILLED PAUSE' )
        elif word . lower ( ) not in [ '!sil' , 't_noise' , 't_cough' , 't_lipsmack' , 't_breath' ] :
            # everything else that is not a silence/noise marker is an aside
            # (asides still count toward the total word count)
            self . measures [ 'COUNT_total_words' ] += 1
            self . measures [ 'COUNT_asides' ] += 1
            words . append ( word )
            labels . append ( 'ASIDE' )
    # NOTE: Python 2 print statements -- this module targets Python 2.
    if not self . quiet :
        print
        print "Labels:"
        print_table ( [ ( word , label ) for word , label in zip ( words , labels ) ] )
    # unique permissible words = permissible minus both kinds of repetition
    self . measures [ 'COUNT_unique_permissible_words' ] = self . measures [ 'COUNT_permissible_words' ] - self . measures [ 'COUNT_exact_repetitions' ] - self . measures [ 'COUNT_stem_repetitions' ]
    if not self . quiet :
        print
        print "Counts:"
    collection_measures = [ x for x in self . measures if x . startswith ( "COUNT_" ) ]
    collection_measures . sort ( )
    if not self . quiet :
        print_table ( [ ( k , str ( self . measures [ k ] ) ) for k in collection_measures ] )
def transform ( data , keysToSplit = None ) :
    """Transform one SPARQL JSON result binding.

    1) flattens each entry to ``{key: value}``, dropping datatype info
    2) keys listed in ``keysToSplit`` have their value split into a list
       on the module-level ``SEPARATOR``

    :param dict data: one SPARQL binding, ``{key: {'value': ..., ...}}``
    :param keysToSplit: iterable of keys whose values are SEPARATOR-joined
        lists; defaults to no splitting.
    :return: dict mapping each key to its plain (or split) value
    """
    # Default of None instead of a mutable [] avoids the shared
    # mutable-default-argument pitfall (behavior is unchanged).
    if keysToSplit is None :
        keysToSplit = [ ]
    transformed = { }
    for key in data :
        value = data [ key ] [ 'value' ]
        transformed [ key ] = value . split ( SEPARATOR ) if key in keysToSplit else value
    return transformed
def get_attachment_formset_kwargs ( self ) :
    """Build the keyword arguments for instantiating the attachment formset.

    On POST/PUT the submitted data and files are bound to the formset;
    otherwise the formset is seeded with the attachments of the current post.
    """
    kwargs = { 'prefix' : 'attachment' }
    if self . request . method in ( 'POST' , 'PUT' ) :
        kwargs [ 'data' ] = self . request . POST
        kwargs [ 'files' ] = self . request . FILES
    else :
        current_post = self . get_post ( )
        kwargs [ 'queryset' ] = Attachment . objects . filter ( post = current_post )
    return kwargs
def _check_and_uninstall_ruby ( ret , ruby , user = None ) :
    '''
    Verify that the given ruby is uninstalled, removing it if present.

    Updates and returns the salt state ``ret`` dict.
    '''
    ret = _ruby_installed ( ret , ruby , user = user )
    # guard clause: nothing to do when the ruby is not installed
    if not ret [ 'result' ] :
        ret [ 'result' ] = True
        ret [ 'comment' ] = 'Ruby {0} is already absent' . format ( ruby )
        return ret
    # if this ruby is the rbenv default, fall back to the system ruby first
    if ret [ 'default' ] :
        __salt__ [ 'rbenv.default' ] ( 'system' , runas = user )
    if __salt__ [ 'rbenv.uninstall_ruby' ] ( ruby , runas = user ) :
        ret [ 'result' ] = True
        ret [ 'changes' ] [ ruby ] = 'Uninstalled'
        ret [ 'comment' ] = 'Successfully removed ruby'
    else :
        ret [ 'result' ] = False
        ret [ 'comment' ] = 'Failed to uninstall ruby'
    return ret
def _supervised_evaluation_error_checking ( targets , predictions ) :
    """
    Perform basic error checking for the evaluation metrics: both inputs
    must be SArrays of the same length.
    """
    for sarray , arg_name in ( ( targets , "targets" ) , ( predictions , "predictions" ) ) :
        _raise_error_if_not_sarray ( sarray , arg_name )
    if len ( targets ) != len ( predictions ) :
        raise _ToolkitError ( "Input SArrays 'targets' and 'predictions' must be of the same length." )
def _handleSmsReceived ( self , notificationLine ) : """Handler for " new SMS " unsolicited notification line"""
self . log . debug ( 'SMS message received' ) cmtiMatch = self . CMTI_REGEX . match ( notificationLine ) if cmtiMatch : msgMemory = cmtiMatch . group ( 1 ) msgIndex = cmtiMatch . group ( 2 ) sms = self . readStoredSms ( msgIndex , msgMemory ) self . deleteStoredSms ( msgIndex ) self . smsReceivedCallback ( sms )
def fit ( self , matrix , epochs = 5 , no_threads = 2 , verbose = False ) :
    """Estimate the word embeddings.

    Parameters:
    - scipy.sparse.coo_matrix matrix: coocurrence matrix (must be square
      and in COO format)
    - int epochs: number of training epochs
    - int no_threads: number of training threads
    - bool verbose: print progress messages if True

    Side effects: (re)initializes ``word_vectors``, ``word_biases`` and
    their AdaGrad gradient accumulators on ``self``, then trains in place.
    """
    shape = matrix . shape
    if ( len ( shape ) != 2 or shape [ 0 ] != shape [ 1 ] ) :
        raise Exception ( 'Coocurrence matrix must be square' )
    if not sp . isspmatrix_coo ( matrix ) :
        raise Exception ( 'Coocurrence matrix must be in the COO format' )
    random_state = check_random_state ( self . random_state )
    # small random init centered on zero, scaled by the embedding width
    self . word_vectors = ( ( random_state . rand ( shape [ 0 ] , self . no_components ) - 0.5 ) / self . no_components )
    self . word_biases = np . zeros ( shape [ 0 ] , dtype = np . float64 )
    # AdaGrad accumulators start at one so the first update is well-defined
    self . vectors_sum_gradients = np . ones_like ( self . word_vectors )
    self . biases_sum_gradients = np . ones_like ( self . word_biases )
    shuffle_indices = np . arange ( matrix . nnz , dtype = np . int32 )
    if verbose :
        print ( 'Performing %s training epochs '
               'with %s threads' % ( epochs , no_threads ) )
    for epoch in range ( epochs ) :
        if verbose :
            print ( 'Epoch %s' % epoch )
        # Shuffle the coocurrence matrix (via the index permutation that
        # the Cython routine consumes; the matrix itself is untouched)
        random_state . shuffle ( shuffle_indices )
        # NOTE: fit_vectors is a compiled (Cython) routine; argument order
        # is positional and must not be changed.
        fit_vectors ( self . word_vectors ,
                      self . vectors_sum_gradients ,
                      self . word_biases ,
                      self . biases_sum_gradients ,
                      matrix . row ,
                      matrix . col ,
                      matrix . data ,
                      shuffle_indices ,
                      self . learning_rate ,
                      self . max_count ,
                      self . alpha ,
                      self . max_loss ,
                      int ( no_threads ) )
        # detect divergence early, after every epoch
        if not np . isfinite ( self . word_vectors ) . all ( ) :
            raise Exception ( 'Non-finite values in word vectors. '
                             'Try reducing the learning rate or the '
                             'max_loss parameter.' )
def extend ( self , iterable ) :
    """Append every element of *iterable* to the end of the list.

    The whole operation happens under ``self.lock`` so concurrent
    callers see the extension as a single atomic update.
    """
    with self . lock :
        for element in iterable :
            self . append ( element )
def list_settings ( self ) :
    """Get the list of all appropriate settings and their default values.

    The returned list is consumed by the ``setup()`` and ``get_setup()``
    methods to initialize the widget's internal settings.
    """
    defaults = [
        ( self . SETTING_FLAG_PLAIN , False ) ,
        ( self . SETTING_FLAG_ASCII , False ) ,
        ( self . SETTING_WIDTH , 0 ) ,
        ( self . SETTING_ALIGN , '<' ) ,
        ( self . SETTING_TEXT_FORMATING , { } ) ,
        ( self . SETTING_DATA_FORMATING , '{:s}' ) ,
        ( self . SETTING_DATA_TYPE , None ) ,
        ( self . SETTING_PADDING , None ) ,
        ( self . SETTING_PADDING_CHAR , ' ' ) ,
        ( self . SETTING_PADDING_LEFT , None ) ,
        ( self . SETTING_PADDING_RIGHT , None ) ,
        ( self . SETTING_MARGIN , None ) ,
        ( self . SETTING_MARGIN_CHAR , ' ' ) ,
        ( self . SETTING_MARGIN_LEFT , None ) ,
        ( self . SETTING_MARGIN_RIGHT , None ) ,
    ]
    return defaults
def delete_namespaced_custom_object ( self , group , version , namespace , plural , name , body , ** kwargs ) :
    """Deletes the specified namespace-scoped custom object.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass ``async_req=True``::

        thread = api.delete_namespaced_custom_object(group, version,
            namespace, plural, name, body, async_req=True)
        result = thread.get()

    :param async_req bool:
    :param str group: the custom resource's group (required)
    :param str version: the custom resource's version (required)
    :param str namespace: the custom resource's namespace (required)
    :param str plural: the custom resource's plural name (required)
    :param str name: the custom object's name (required)
    :param V1DeleteOptions body: (required)
    :param int grace_period_seconds: seconds before the object is deleted
    :param bool orphan_dependents: deprecated, use propagation_policy
    :param str propagation_policy: garbage-collection policy
    :return: object, or the request thread when async_req is set
    """
    # Callers of this wrapper always want just the payload, not the
    # full (data, status, headers) tuple.
    kwargs [ '_return_http_data_only' ] = True
    # Both the sync and async paths reduce to the same delegate call:
    # with async_req set, it returns the request thread; otherwise the data.
    return self . delete_namespaced_custom_object_with_http_info (
        group , version , namespace , plural , name , body , ** kwargs )
def _assign_par_snps ( self ) :
    """Assign PAR (pseudoautosomal region) SNPs to the X or Y chromosome
    using SNP position, looked up via the NCBI Variation Services RefSNP API.

    Also attempts to detect the genome build from the first successfully
    assigned SNP, if a build was not already detected.

    References
    ----------
    .. [1] National Center for Biotechnology Information, Variation Services,
       RefSNP, https://api.ncbi.nlm.nih.gov/variation/v0/
    .. [2] Yates et. al. (doi:10.1093/bioinformatics/btu613)
    .. [3] Zerbino et. al. (doi.org/10.1093/nar/gkx1098)
    .. [4] Sherry ST et al. dbSNP: the NCBI database of genetic variation.
       Nucleic Acids Res. 2001 Jan 1;29(1):308-11.
    .. [5] Database of Single Nucleotide Polymorphisms (dbSNP), accessions
       rs28736870, rs113313554, rs758419898 (dbSNP Build ID: 151).
    """
    rest_client = EnsemblRestClient ( server = "https://api.ncbi.nlm.nih.gov" )
    # only rows currently labeled "PAR" need reassignment
    for rsid in self . snps . loc [ self . snps [ "chrom" ] == "PAR" ] . index . values :
        if "rs" in rsid :
            try :
                # numeric part of the rsID, e.g. "rs123" -> "123"
                # NOTE(review): `id` shadows the builtin of the same name
                id = rsid . split ( "rs" ) [ 1 ]
                response = rest_client . perform_rest_action ( "/variation/v0/beta/refsnp/" + id )
                if response is not None :
                    for item in response [ "primary_snapshot_data" ] [ "placements_with_allele" ] :
                        # NC_000023 / NC_000024 are the RefSeq accessions
                        # for chromosomes X and Y respectively
                        if "NC_000023" in item [ "seq_id" ] :
                            assigned = self . _assign_snp ( rsid , item [ "alleles" ] , "X" )
                        elif "NC_000024" in item [ "seq_id" ] :
                            assigned = self . _assign_snp ( rsid , item [ "alleles" ] , "Y" )
                        else :
                            assigned = False
                        if assigned :
                            # use the first successful placement to detect
                            # the genome build, then move to the next item
                            if not self . build_detected :
                                self . build = self . _extract_build ( item )
                                self . build_detected = True
                            continue
            except Exception as err :
                # best-effort: a failed lookup for one SNP should not stop
                # processing of the remaining PAR SNPs
                print ( err )
def put ( self , name , values , request = None , process = None , wait = None , get = True ) :
    """Write a new value to one or more PVs.

    :param name: A single name string or list of name strings
    :param values: A single value, a list of values, a dict, or a `Value`.
    :param request: A :py:class:`p4p.Value` or string to qualify this
        request, or None to use a default.
    :param str process: Control remote processing. May be 'true', 'false',
        'passive', or None.
    :param bool wait: Wait for all server processing to complete.
    :param bool get: Whether to do a Get before the Put, so the value
        passed to a builder callable is initialized with recent PV values.

    When invoked with a single name, returns a single value; with a list
    of names, returns a list of values. If 'wait' or 'process' is
    specified, then 'request' must be omitted or None.

    ::

        with Context('pva') as ctxt:
            yield from ctxt.put('pv:name', 5.0)
            yield from ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
            yield from ctxt.put('pv:name', {'value': 5})

    The provided value(s) will be automatically coerced to the target
    type; if not possible an Exception is raised/returned. Unless the
    provided value is a dict, it is assumed to be a plain value and an
    attempt is made to store it in the '.value' field.
    """
    # request= conflicts with process=/wait=: the latter pair is encoded
    # into a pvRequest string below, which would clobber an explicit one
    if request and ( process or wait is not None ) :
        raise ValueError ( "request= is mutually exclusive to process= or wait=" )
    elif process or wait is not None :
        # encode wait/process into a pvRequest record option string
        request = 'field()record[block=%s,process=%s]' % ( 'true' if wait else 'false' , process or 'passive' )
    singlepv = isinstance ( name , ( bytes , str ) )
    if singlepv :
        # single-PV fast path: delegate and return the one result
        return ( yield from self . _put_one ( name , values , request = request , get = get ) )
    elif request is None :
        # fan-out case with no explicit request: one default per PV
        request = [ None ] * len ( name )
    assert len ( name ) == len ( request ) , ( name , request )
    assert len ( name ) == len ( values ) , ( name , values )
    # issue all puts concurrently and wait for every one to finish
    futs = [ self . _put_one ( N , V , request = R , get = get ) for N , V , R in zip ( name , values , request ) ]
    # NOTE(review): asyncio.gather's loop= argument was removed in
    # Python 3.10 -- confirm the supported runtime versions.
    yield from asyncio . gather ( * futs , loop = self . loop )
def _check_extension_shape ( self , Y ) : """Private method to check if new data matches ` self . data ` Parameters Y : array - like , shape = [ n _ samples _ y , n _ features _ y ] Input data Returns Y : array - like , shape = [ n _ samples _ y , n _ pca ] ( Potentially transformed ) input data Raises ValueError : if ` n _ features _ y ` is not either ` self . data . shape [ 1 ] ` or ` self . n _ pca ` ."""
if len ( Y . shape ) != 2 : raise ValueError ( "Expected a 2D matrix. Y has shape {}" . format ( Y . shape ) ) if not Y . shape [ 1 ] == self . data_nu . shape [ 1 ] : # try PCA transform if Y . shape [ 1 ] == self . data . shape [ 1 ] : Y = self . transform ( Y ) else : # wrong shape if self . data . shape [ 1 ] != self . data_nu . shape [ 1 ] : # PCA is possible msg = ( "Y must be of shape either " "(n, {}) or (n, {})" ) . format ( self . data . shape [ 1 ] , self . data_nu . shape [ 1 ] ) else : # no PCA , only one choice of shape msg = "Y must be of shape (n, {})" . format ( self . data . shape [ 1 ] ) raise ValueError ( msg ) return Y
def ensure_permissions ( path , user , group , permissions , maxdepth = - 1 ) :
    """Ensure ownership and permissions for ``path``.

    If path is a file, apply to the file and return. If path is a
    directory, apply recursively (if required) to the directory contents.

    :param user: user name
    :param group: group name
    :param permissions: octal permissions
    :param maxdepth: maximum recursion depth. A negative maxdepth allows
        infinite recursion and maxdepth=0 means no recursion.
    :returns: None
    """
    if not os . path . exists ( path ) :
        log ( "File '%s' does not exist - cannot set permissions" % ( path ) ,
             level = WARNING )
        return
    _user = pwd . getpwnam ( user )
    os . chown ( path , _user . pw_uid , grp . getgrnam ( group ) . gr_gid )
    os . chmod ( path , permissions )
    if maxdepth == 0 :
        log ( "Max recursion depth reached - skipping further recursion" ,
             level = DEBUG )
        return
    elif maxdepth > 0 :
        # one level consumed; children get the decremented budget
        maxdepth -= 1
    if os . path . isdir ( path ) :
        for child in glob . glob ( "%s/*" % ( path ) ) :
            ensure_permissions ( child , user = user , group = group ,
                                permissions = permissions , maxdepth = maxdepth )
def hexdigest ( self ) :
    """Terminate and return the digest in HEX form.

    Like digest() except the digest is returned as a string of length 32,
    containing only hexadecimal digits. This may be used to exchange the
    value safely in email or other non-binary environments.
    """
    # Iterating a bytearray yields ints on both Python 2 and 3, unlike
    # iterating bytes/str directly; the previous map(None, ...)/ord/
    # string.join pipeline was Python-2-only.
    return '' . join ( '%02x' % byte for byte in bytearray ( self . digest ( ) ) )
def _set_syslog_server ( self , v , load = False ) :
    """Setter method for syslog_server, mapped from YANG variable
    /logging/syslog_server (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_syslog_server is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_syslog_server() directly.

    NOTE(review): this method appears to be pyangbind-generated code --
    the literal YANG metadata below must be kept exactly as generated.
    """
    # containers generated by pyangbind expose _utype to coerce unions
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    try :
        # wrap the value in the generated YANG list type; TypeError or
        # ValueError here means the value is incompatible with the model
        t = YANGDynClass ( v , base = YANGListType ( "syslogip use_vrf" , syslog_server . syslog_server , yang_name = "syslog-server" , rest_name = "syslog-server" , parent = self , is_container = 'list' , user_ordered = False , path_helper = self . _path_helper , yang_keys = 'syslogip use-vrf' , extensions = { u'tailf-common' : { u'cli-compact-syntax' : None , u'cli-suppress-list-no' : None , u'callpoint' : u'RASSingleCallPoint' , u'info' : u'Configure upto 4 syslog-server address.' } } ) , is_container = 'list' , yang_name = "syslog-server" , rest_name = "syslog-server" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'cli-compact-syntax' : None , u'cli-suppress-list-no' : None , u'callpoint' : u'RASSingleCallPoint' , u'info' : u'Configure upto 4 syslog-server address.' } } , namespace = 'urn:brocade.com:mgmt:brocade-ras' , defining_module = 'brocade-ras' , yang_type = 'list' , is_config = True )
    except ( TypeError , ValueError ) :
        # re-raise with the generated diagnostic payload describing the
        # expected type so API consumers can report a precise error
        raise ValueError ( {
            'error-string' : """syslog_server must be of a type compatible with list""" ,
            'defined-type' : "list" ,
            'generated-type' : """YANGDynClass(base=YANGListType("syslogip use_vrf",syslog_server.syslog_server, yang_name="syslog-server", rest_name="syslog-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='syslogip use-vrf', extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-suppress-list-no': None, u'callpoint': u'RASSingleCallPoint', u'info': u'Configure upto 4 syslog-server address.'}}), is_container='list', yang_name="syslog-server", rest_name="syslog-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-suppress-list-no': None, u'callpoint': u'RASSingleCallPoint', u'info': u'Configure upto 4 syslog-server address.'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='list', is_config=True)""" ,
        } )
    self . __syslog_server = t
    # notify the parent object, if it participates in change propagation
    if hasattr ( self , '_set' ) :
        self . _set ( )
def meta ( r ) :
    """Convert the 60-character metadata string ``r`` from an image file.

    Returns a 5-element list (*chan*, *minx*, *miny*, *limx*, *limy*).
    As per http://plan9.bell-labs.com/magic/man2html/6/image the metadata
    comprises 5 words separated by blanks. As it happens each word starts
    at an index that is a multiple of 12, but this routine does not care
    about that.
    """
    fields = r . split ( )
    # :todo: raise FormatError instead of asserting
    assert len ( fields ) == 5
    # A list comprehension (rather than map()) keeps the result a list on
    # both Python 2 and 3; py3's lazy map() would break the `+` below.
    return [ fields [ 0 ] ] + [ int ( x ) for x in fields [ 1 : ] ]
async def issuer_create_schema ( issuer_did : str , name : str , version : str , attrs : str ) -> ( str , str ) :
    """Create a credential schema entity describing the credential
    attribute list, enabling credential interoperability.

    The schema is public and intended to be shared with all anoncreds
    workflow actors, usually by publishing a SCHEMA transaction to the
    Indy distributed ledger. IMPORTANT: POST the schema to the ledger and
    then GET it back with the correct seq_no before calling
    indy_issuer_create_and_store_credential_def.

    :param issuer_did: DID of the schema issuer
    :param name: a name for the schema
    :param version: a version of the schema
    :param attrs: a JSON list of schema attribute descriptions
        (at most 125 attributes)
    :return: (schema_id, schema_json)
    """
    logger = logging . getLogger ( __name__ )
    logger . debug ( "issuer_create_schema: >>> issuer_did: %r, name: %r, version: %r, attrs: %r" ,
                   issuer_did , name , version , attrs )

    # Lazily create the native callback once and cache it on the function
    # object so the ctypes trampoline outlives this call.
    if not hasattr ( issuer_create_schema , "cb" ) :
        logger . debug ( "issuer_create_schema: Creating callback" )
        issuer_create_schema . cb = create_cb ( CFUNCTYPE ( None , c_int32 , c_int32 , c_char_p , c_char_p ) )

    # Encode every argument to a C string for the native layer.
    encoded = [ c_char_p ( value . encode ( 'utf-8' ) )
                for value in ( issuer_did , name , version , attrs ) ]
    ( schema_id , schema_json ) = await do_call ( 'indy_issuer_create_schema' ,
                                                 * encoded ,
                                                 issuer_create_schema . cb )
    res = ( schema_id . decode ( ) , schema_json . decode ( ) )
    logger . debug ( "issuer_create_schema: <<< res: %r" , res )
    return res
def pub ( self , topic , message ) :
    '''Publish ``message`` to ``topic`` via the HTTP ``pub`` endpoint.'''
    query = { 'topic' : topic }
    return self . post ( 'pub' , params = query , data = message )
def message_length ( message ) :
    '''Return the visual (terminal-column) length of ``message``.

    East-asian Wide ('W') and Fullwidth ('F') characters occupy two
    columns; every other character counts as one. (The previous version
    counted only 'Na' characters as width 1, so e.g. fullwidth Latin
    ('F') and neutral ('N') characters were silently given width 0,
    contradicting the documented "ascii=1, non-ascii=2" behavior.)

    :param str message: random unicode mixed text
    :rtype: int
    '''
    return sum ( 2 if east_asian_width ( ch ) in ( 'W' , 'F' ) else 1
                 for ch in message )
def dvds_current_releases ( self , ** kwargs ) :
    """Gets the current DVD releases from the API.

    Args:
        page_limit (optional): number of movies to show per page, default=16
        page (optional): results page number, default=1
        country (optional): localized data for selected country, default="us"

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self . _get_path ( 'dvds_current_releases' )
    response = self . _GET ( endpoint , kwargs )
    # mirror the response fields onto this object for attribute access
    self . _set_attrs_to_values ( response )
    return response
def moving_average ( a , n ) :
    """Moving average over a one-dimensional array.

    Parameters
    ----------
    a : np.ndarray
        One-dimensional array.
    n : int
        Number of entries to average over; n=2 averages the current and
        the previous entry.

    Returns
    -------
    np.ndarray
        Array of length ``len(a) - n + 1`` holding the windowed means
        (a view into the intermediate cumulative-sum array).
    """
    csum = np . cumsum ( a , dtype = float )
    # csum[i] - csum[i-n] is the sum of the n-entry window ending at i
    csum [ n : ] = csum [ n : ] - csum [ : - n ]
    return csum [ n - 1 : ] / n
def get_data_object(data_id, use_data_config=True):
    """Normalize ``data_id`` and query the server; fall back to the raw ID.

    :param data_id: user-supplied data reference
    :param use_data_config: forwarded to the name normalizer
    :return: the data object, or a falsy value if neither lookup succeeds
    """
    normalized = normalize_data_name(data_id, use_data_config=use_data_config)
    client = DataClient()
    data_obj = client.get(normalized)
    if not data_obj and data_id != normalized:
        # the normalized reference was not found; retry with the raw ID
        data_obj = client.get(data_id)
    return data_obj
def markForRebuild(self, state=True):
    """Set the rebuild state for this item and show/refresh it when flagged.

    :param state: <bool> whether a rebuild is required
    """
    self._rebuildRequired = state
    if state:
        self.show()
        self.update()
def base_url(self, space_id, content_type_id, environment_id=None, **kwargs):
    """Build the editor-interface URI for a content type.

    :param space_id: ID of the space
    :param content_type_id: ID of the content type
    :param environment_id: optional environment ID; inserted into the path
        when given
    :return: str URI
    """
    if environment_id is None:
        env_segment = ''
    else:
        env_segment = '/environments/{0}'.format(environment_id)
    return "spaces/{0}{1}/content_types/{2}/editor_interface".format(
        space_id, env_segment, content_type_id)
def get_callable(key, dct):
    """Get the callable mapped by a key from a dictionary.

    This indirection is necessary for pickling (so we don't try to pickle
    an unbound method).

    Parameters
    ----------
    key : str
        The key for the ``dct`` dictionary.
    dct : dict
        The dictionary of callables.

    Raises
    ------
    ValueError
        If ``key`` is not a string or is not present in ``dct``.
    """
    # BUGFIX: previously imported six from sklearn.externals (removed in
    # modern scikit-learn) just to test for a string; a plain isinstance
    # check against str needs no import. The error message was also garbled
    # ("a string in one in %r").
    fun = dct.get(key, None)
    if not isinstance(key, str) or fun is None:
        # ah, that's no fun :(
        raise ValueError('key must be a string in one of %r, but got %r'
                         % (dct, key))
    return fun
def create(self, name, path, table=None, create=False):
    """Create a new migration file at the given path.

    :param name: The name of the migration
    :type name: str
    :param path: The path of the migrations
    :type path: str
    :param table: The table name
    :type table: str
    :param create: Whether it's a create migration or not
    :type create: bool

    :rtype: str
    """
    path = self._get_path(name, path)
    directory = os.path.dirname(path)
    if not os.path.exists(directory):
        mkdir_p(directory)
    # make the migrations directory an importable package
    init_file = os.path.join(directory, "__init__.py")
    if not os.path.exists(init_file):
        with open(init_file, "w"):
            pass
    stub = self._get_stub(table, create)
    with open(path, "w") as fh:
        fh.write(self._populate_stub(name, stub, table))
    return path
def _mean_prediction ( self , sigma2 , Y , scores , h , t_params ) : """Creates a h - step ahead mean prediction Parameters sigma2 : np . array The past predicted values Y : np . array The past data scores : np . array The past scores h : int How many steps ahead for the prediction t _ params : np . array A vector of ( transformed ) latent variables Returns h - length vector of mean predictions"""
# Create arrays to iteratre over sigma2_exp = sigma2 . copy ( ) scores_exp = scores . copy ( ) # Loop over h time periods for t in range ( 0 , h ) : new_value = t_params [ 0 ] # ARCH if self . q != 0 : for j in range ( 1 , self . q + 1 ) : new_value += t_params [ j ] * scores_exp [ - j ] # GARCH if self . p != 0 : for k in range ( 1 , self . p + 1 ) : new_value += t_params [ k + self . q ] * sigma2_exp [ - k ] sigma2_exp = np . append ( sigma2_exp , [ new_value ] ) # For indexing consistency scores_exp = np . append ( scores_exp , [ 0 ] ) # expectation of score is zero return sigma2_exp
def verify(self, obj):
    """Verify that the object conforms to one of this verifier's options.

    Args:
        obj (object): A python object to verify.

    Returns:
        The verified object as returned by the first matching option.

    Raises:
        ValidationError: If no options were configured, or if the object
            matches none of them; the ``reason`` key indicates why.
    """
    if not self._options:
        raise ValidationError("No options",
                              reason='no options given in options verifier, matching not possible',
                              object=obj)
    failures = {}
    for index, option in enumerate(self._options):
        try:
            # first option that validates wins
            return option.verify(obj)
        except ValidationError as exc:
            failures['option_%d' % (index + 1)] = exc.params['reason']
    raise ValidationError("Object did not match any of a set of options",
                          reason="object did not match any given option (first failure = '%s')" % failures['option_1'],
                          **failures)
def extend_parents(parents):
    """extend_parents(parents)

    Return a set containing the nearest conditionally stochastic
    (Stochastic, not Deterministic) ancestors.
    """
    extended = set()
    for parent in parents:
        if isinstance(parent, DeterministicBase):
            # replace the deterministic node by its own extended parents
            extended |= parent.extended_parents
        elif isinstance(parent, ContainerBase):
            # the container itself stays, plus its stochastic contents and
            # the extended parents of its deterministic contents
            extended.add(parent)
            extended.update(parent.stochastics)
            for contained in parent.deterministics:
                extended |= contained.extended_parents
        else:
            extended.add(parent)
    return extended
def initialize_valid_actions(grammar: Grammar,
                             keywords_to_uppercase: List[str] = None) -> Dict[str, List[str]]:
    """
    We initialize the valid actions with the global actions. These include the
    valid actions that result from the grammar and also those that result from
    the tables provided. The keys represent the nonterminals in the grammar
    and the values are lists of the valid actions of that nonterminal.

    :param grammar: a parsimonious ``Grammar`` mapping nonterminal names to
        their right-hand-side expressions
    :param keywords_to_uppercase: forwarded to ``format_action`` for each
        produced action string
    :return: dict mapping each nonterminal to a sorted list of action strings
    """
    valid_actions: Dict[str, Set[str]] = defaultdict(set)

    for key in grammar:
        rhs = grammar[key]

        # Sequence represents a series of expressions that match pieces of the text in order.
        # Eg. A -> B C
        if isinstance(rhs, Sequence):
            valid_actions[key].add(format_action(key, " ".join(rhs._unicode_members()),  # pylint: disable=protected-access
                                                 keywords_to_uppercase=keywords_to_uppercase))

        # OneOf represents a series of expressions, one of which matches the text.
        # Eg. A -> B / C
        elif isinstance(rhs, OneOf):
            for option in rhs._unicode_members():  # pylint: disable=protected-access
                valid_actions[key].add(format_action(key, option,
                                                     keywords_to_uppercase=keywords_to_uppercase))

        # A string literal, eg. "A"
        elif isinstance(rhs, Literal):
            if rhs.literal != "":
                valid_actions[key].add(format_action(key, repr(rhs.literal),
                                                     keywords_to_uppercase=keywords_to_uppercase))
            else:
                # empty literal yields no action, but the key still gets an entry
                valid_actions[key] = set()

    # sort each action set for deterministic output
    valid_action_strings = {key: sorted(value) for key, value in valid_actions.items()}
    return valid_action_strings
def cursor_blink_mode_changed(self, settings, key, user_data):
    """Apply a changed cursor-blink-mode setting to every open terminal.

    Called when the cursor blink mode setting has been changed.
    """
    for terminal in self.guake.notebook_manager.iter_terminals():
        terminal.set_property("cursor-blink-mode", settings.get_int(key))
def _run_external_commands(self):
    """Post external commands to the scheduler (sent from the arbiter).

    Thin wrapper around ``app.sched.run_external_commands``; the commands
    arrive as the JSON body of the current cherrypy request, expected to
    carry them under the ``'cmds'`` key.

    :return: None
    """
    # cherrypy has already deserialized the JSON request body
    commands = cherrypy.request.json
    # serialize access to the scheduler while the commands run
    with self.app.lock:
        self.app.sched.run_external_commands(commands['cmds'])
def run(self, src_project=None, path_to_zip_file=None):
    """Deploy the lambdas defined in our project.

    Steps:
    * Build the artefact, or read it from a file/S3 depending on
      ``config["deploy"]["deploy_method"]``
    * Reload the configuration with the deploy changes
    * Create or update each lambda

    :param src_project: str. Folder name or path of the project with our code
    :param path_to_zip_file: str. Pre-built zip artefact to deploy
    :return: bool
    """
    if path_to_zip_file:
        code = self.set_artefact_path(path_to_zip_file)
    elif self.config["deploy"].get("deploy_file", False):
        code = self.set_artefact_path(self.config["deploy"].get("deploy_file"))
    else:
        code = self.build_artefact(src_project)
    self.set_artefact(code=code)
    # Reload conf because each lambda conf needs to re-read the global conf
    self.config.reload_conf()
    self.deploy()
    return True
def _array_star(args):
    """Unpack the tuple ``args`` and call ``_array``.

    Needed to pass multiple arguments through a ``pool.map``-ed function,
    which only accepts a single argument.
    """
    # args = (fn, cls, genelist, kwargs)
    return _array(args[0], args[1], args[2], **args[3])
def purge_service(self, service_id):
    """Purge everything from a service.

    :param service_id: ID of the service to purge
    :return: status parsed from the API response
    """
    endpoint = "/service/%s/purge_all" % service_id
    content = self._fetch(endpoint, method="POST")
    return self._status(content)
def get_cgi_fieldstorage_from_wsgi_env(
        env: Dict[str, str],
        include_query_string: bool = True) -> cgi.FieldStorage:
    """Return a :class:`cgi.FieldStorage` built from the WSGI environment.

    :param env: the WSGI environ mapping (must contain ``wsgi.input``)
    :param include_query_string: when False, the query string is blanked so
        only the request body is parsed
    """
    # http://stackoverflow.com/questions/530526/accessing-post-data-from-wsgi
    post_env = env.copy()
    if not include_query_string:
        post_env['QUERY_STRING'] = ''
    return cgi.FieldStorage(fp=env['wsgi.input'],
                            environ=post_env,
                            keep_blank_values=True)
def render_partial(parser, token):
    """Insert the output of a view, by fully qualified view name or by the
    view name from urls.py.

      {% render_partial view_name arg [arg2] k=v [k2=v2...] %}

    IMPORTANT: the calling template must receive a context variable called
    'request' containing the original HttpRequest. This means you should be
    OK with permissions and other session state.

    (Note that every argument will be evaluated against context except for
    the names of any keyword arguments.)
    """
    args = []
    kwargs = {}
    # NOTE: loop variable renamed so it no longer shadows the `token` parameter
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError('%r tag requires one or more arguments'
                                  % token.contents.split()[0])
    bits.pop(0)  # tag name
    view_name = bits.pop(0)
    for bit in bits:
        eq_pos = bit.find('=')
        if eq_pos == -1:
            args.append(bit)
        else:
            kwargs[str(bit[:eq_pos])] = bit[eq_pos + 1:]
    return ViewNode(view_name, args, kwargs)
def parse_name(cls, name):
    """Parse a name into a dictionary of identified subsections.

    The accompanying information allows the pieces to be correctly
    identified and replaced if necessary.

    :param name: str, string to be parsed
    :return: dict, dictionary with relevant parsed information
    """
    parsed = dict.fromkeys(cls.PARSABLE, None)
    parsed['date'] = cls.get_date(name)
    parsed['version'] = cls.get_version(name)
    parsed['udim'] = cls.get_udim(name)
    parsed['side'] = cls.get_side(name)
    # strip everything recognized so far, then take what remains as the base
    remainder = cls._reduce_name(name, parsed)
    parsed['basename'] = cls.get_base_naive(remainder)
    return parsed
def resolve(self, authorization: http.Header):
    """Determine the user associated with a request, using HTTP Basic
    Authentication.

    :param authorization: value of the ``Authorization`` header, or None
    :return: the authenticated user, or None when the header is absent or
        not a Basic credential, or when authentication fails
    """
    if authorization is None:
        return None

    scheme, token = authorization.split()
    if scheme.lower() != 'basic':
        return None

    # BUGFIX: split on the FIRST colon only — RFC 7617 allows the password
    # itself to contain colons, which previously raised ValueError here.
    username, password = base64.b64decode(token).decode('utf-8').split(':', 1)
    user = authenticate(username=username, password=password)
    return user
def increase_reads_in_units(
        current_provisioning, units, max_provisioned_reads,
        consumed_read_units_percent, log_tag):
    """Increase the current provisioning by ``units`` units.

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we increase with
    :type max_provisioned_reads: int
    :param max_provisioned_reads: Configured max provisioned reads
    :type consumed_read_units_percent: float
    :param consumed_read_units_percent: Number of consumed read units
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    units = int(units)
    current_provisioning = float(current_provisioning)
    consumed_pct = float(consumed_read_units_percent)

    # base the increase on actual consumption when it exceeds what is provisioned
    consumption_based = int(math.ceil(current_provisioning * (consumed_pct / 100)))
    if consumption_based > current_provisioning:
        updated_provisioning = consumption_based + units
    else:
        updated_provisioning = int(current_provisioning) + units

    # clamp to the configured maximum (0 means "no maximum")
    if 0 < max_provisioned_reads < updated_provisioning:
        logger.info('{0} - Reached provisioned reads max limit: {1}'.format(
            log_tag, max_provisioned_reads))
        return max_provisioned_reads

    logger.debug('{0} - Read provisioning will be increased to {1:d} units'.format(
        log_tag, int(updated_provisioning)))
    return updated_provisioning
def _compute_value(self, pkt):
    # type: (packet.Packet) -> int
    """Computes the value of this field based on the provided packet and
    the length_of field and the adjust callback.

    @param packet.Packet pkt: the packet from which this field value is computed.  # noqa: E501
    @return int: the computed value for this field.
    @raise KeyError: the packet nor its payload contain an attribute
        with the length_of name.
    @raise AssertionError: if the adjusted length is negative.
    @raise KeyError: if _length_of is not one of pkt fields.
    """
    # resolve the target field object and its current value
    fld, fval = pkt.getfield_and_val(self._length_of)
    # i2len yields the wire-format length of that value
    val = fld.i2len(pkt, fval)
    # apply the user-provided adjust callback to the raw length
    ret = self._adjust(val)
    # a negative length can never be encoded
    assert (ret >= 0)
    return ret
def present(name, mediatype, **kwargs):
    '''
    Creates new mediatype.

    NOTE: This function accepts all standard mediatype properties: keyword
    argument names differ depending on your zabbix version, see:
    https://www.zabbix.com/documentation/3.0/manual/api/reference/host/object#host_inventory

    :param name: name of the mediatype
    :param mediatype: type of the mediatype (0: email, 1: script, 2: SMS,
        3: jabber, 100: Ez Texting — as handled below)
    :param _connection_user: Optional - zabbix user (can also be set in opts or
        pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in
        opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set
        in opts, pillar, see module's docstring)

    .. code-block:: yaml

        make_new_mediatype:
            zabbix_mediatype.present:
                - name: 'Email'
                - mediatype: 0
                - smtp_server: smtp.example.com
                - smtp_helo: zabbix.example.com
                - smtp_email: zabbix@example.com
    '''
    # forward only the explicit connection overrides to the execution module
    connection_args = {}
    if '_connection_user' in kwargs:
        connection_args['_connection_user'] = kwargs['_connection_user']
    if '_connection_password' in kwargs:
        connection_args['_connection_password'] = kwargs['_connection_password']
    if '_connection_url' in kwargs:
        connection_args['_connection_url'] = kwargs['_connection_url']

    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    # Comment and change messages
    comment_mediatype_created = 'Mediatype {0} created.'.format(name)
    comment_mediatype_updated = 'Mediatype {0} updated.'.format(name)
    comment_mediatype_notcreated = 'Unable to create mediatype: {0}. '.format(name)
    comment_mediatype_exists = 'Mediatype {0} already exists.'.format(name)
    changes_mediatype_created = {name: {'old': 'Mediatype {0} does not exist.'.format(name),
                                        'new': 'Mediatype {0} created.'.format(name),
                                        }
                                 }

    # Zabbix API expects script parameters as a string of arguments separated
    # by newline characters
    if 'exec_params' in kwargs:
        if isinstance(kwargs['exec_params'], list):
            kwargs['exec_params'] = '\n'.join(kwargs['exec_params']) + '\n'
        else:
            kwargs['exec_params'] = six.text_type(kwargs['exec_params']) + '\n'

    mediatype_exists = __salt__['zabbix.mediatype_get'](name, **connection_args)

    if mediatype_exists:
        mediatypeobj = mediatype_exists[0]
        mediatypeid = int(mediatypeobj['mediatypeid'])
        # one flag per updatable property group; compared against the
        # currently stored object below
        update_email = False
        update_email_port = False
        update_email_security = False
        update_email_verify_peer = False
        update_email_verify_host = False
        update_email_auth = False
        update_script = False
        update_script_params = False
        update_sms = False
        update_jabber = False
        update_eztext = False
        update_status = False

        if int(mediatype) == 0 and 'smtp_server' in kwargs and 'smtp_helo' in kwargs and 'smtp_email' in kwargs:
            if (int(mediatype) != int(mediatypeobj['type'])
                    or kwargs['smtp_server'] != mediatypeobj['smtp_server']
                    or kwargs['smtp_email'] != mediatypeobj['smtp_email']
                    or kwargs['smtp_helo'] != mediatypeobj['smtp_helo']):
                update_email = True

        if int(mediatype) == 0 and 'smtp_port' in kwargs:
            if int(kwargs['smtp_port']) != int(mediatypeobj['smtp_port']):
                update_email_port = True

        if int(mediatype) == 0 and 'smtp_security' in kwargs:
            if int(kwargs['smtp_security']) != int(mediatypeobj['smtp_security']):
                update_email_security = True

        if int(mediatype) == 0 and 'smtp_verify_peer' in kwargs:
            if int(kwargs['smtp_verify_peer']) != int(mediatypeobj['smtp_verify_peer']):
                update_email_verify_peer = True

        if int(mediatype) == 0 and 'smtp_verify_host' in kwargs:
            if int(kwargs['smtp_verify_host']) != int(mediatypeobj['smtp_verify_host']):
                update_email_verify_host = True

        if int(mediatype) == 0 and 'smtp_authentication' in kwargs and 'username' in kwargs and 'passwd' in kwargs:
            if (int(kwargs['smtp_authentication']) != int(mediatypeobj['smtp_authentication'])
                    or kwargs['username'] != mediatypeobj['username']
                    or kwargs['passwd'] != mediatypeobj['passwd']):
                update_email_auth = True

        if int(mediatype) == 1 and 'exec_path' in kwargs:
            if (int(mediatype) != int(mediatypeobj['type'])
                    or kwargs['exec_path'] != mediatypeobj['exec_path']):
                update_script = True

        if int(mediatype) == 1 and 'exec_params' in kwargs:
            if kwargs['exec_params'] != mediatypeobj['exec_params']:
                update_script_params = True

        if int(mediatype) == 2 and 'gsm_modem' in kwargs:
            if (int(mediatype) != int(mediatypeobj['type'])
                    or kwargs['gsm_modem'] != mediatypeobj['gsm_modem']):
                update_sms = True

        if int(mediatype) == 3 and 'username' in kwargs and 'passwd' in kwargs:
            if (int(mediatype) != int(mediatypeobj['type'])
                    or kwargs['username'] != mediatypeobj['username']
                    or kwargs['passwd'] != mediatypeobj['passwd']):
                update_jabber = True

        if int(mediatype) == 100 and 'username' in kwargs and 'passwd' in kwargs and 'exec_path' in kwargs:
            if (int(mediatype) != int(mediatypeobj['type'])
                    or kwargs['username'] != mediatypeobj['username']
                    or kwargs['passwd'] != mediatypeobj['passwd']
                    or kwargs['exec_path'] != mediatypeobj['exec_path']):
                update_eztext = True

        if 'status' in kwargs:
            if int(kwargs['status']) != int(mediatypeobj['status']):
                update_status = True

    # Dry run, test=true mode
    # NOTE(review): only update_status is consulted here, so a dry run
    # reports "already exists" even when other properties would change —
    # preserved as-is; confirm intended behavior before changing.
    if __opts__['test']:
        if mediatype_exists:
            if update_status:
                ret['result'] = None
                ret['comment'] = comment_mediatype_updated
            else:
                ret['result'] = True
                ret['comment'] = comment_mediatype_exists
        else:
            ret['result'] = None
            ret['comment'] = comment_mediatype_created
        return ret

    error = []

    if mediatype_exists:
        if (update_email or update_email_port or update_email_security
                or update_email_verify_peer or update_email_verify_host
                or update_email_auth or update_script or update_script_params
                or update_sms or update_jabber or update_eztext or update_status):
            ret['result'] = True
            ret['comment'] = comment_mediatype_updated

            if update_email:
                updated_email = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    type=mediatype,
                    smtp_server=kwargs['smtp_server'],
                    smtp_helo=kwargs['smtp_helo'],
                    smtp_email=kwargs['smtp_email'],
                    **connection_args)
                if 'error' in updated_email:
                    error.append(updated_email['error'])
                else:
                    ret['changes']['smtp_server'] = kwargs['smtp_server']
                    ret['changes']['smtp_helo'] = kwargs['smtp_helo']
                    ret['changes']['smtp_email'] = kwargs['smtp_email']

            if update_email_port:
                updated_email_port = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    smtp_port=kwargs['smtp_port'],
                    **connection_args)
                if 'error' in updated_email_port:
                    error.append(updated_email_port['error'])
                else:
                    ret['changes']['smtp_port'] = kwargs['smtp_port']

            if update_email_security:
                updated_email_security = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    smtp_security=kwargs['smtp_security'],
                    **connection_args)
                if 'error' in updated_email_security:
                    error.append(updated_email_security['error'])
                else:
                    ret['changes']['smtp_security'] = kwargs['smtp_security']

            if update_email_verify_peer:
                updated_email_verify_peer = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    smtp_verify_peer=kwargs['smtp_verify_peer'],
                    **connection_args)
                if 'error' in updated_email_verify_peer:
                    error.append(updated_email_verify_peer['error'])
                else:
                    ret['changes']['smtp_verify_peer'] = kwargs['smtp_verify_peer']

            if update_email_verify_host:
                updated_email_verify_host = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    smtp_verify_host=kwargs['smtp_verify_host'],
                    **connection_args)
                if 'error' in updated_email_verify_host:
                    error.append(updated_email_verify_host['error'])
                else:
                    ret['changes']['smtp_verify_host'] = kwargs['smtp_verify_host']

            if update_email_auth:
                updated_email_auth = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    username=kwargs['username'],
                    passwd=kwargs['passwd'],
                    smtp_authentication=kwargs['smtp_authentication'],
                    **connection_args)
                if 'error' in updated_email_auth:
                    error.append(updated_email_auth['error'])
                else:
                    ret['changes']['smtp_authentication'] = kwargs['smtp_authentication']
                    ret['changes']['username'] = kwargs['username']

            if update_script:
                updated_script = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    type=mediatype,
                    exec_path=kwargs['exec_path'],
                    **connection_args)
                if 'error' in updated_script:
                    error.append(updated_script['error'])
                else:
                    ret['changes']['exec_path'] = kwargs['exec_path']

            if update_script_params:
                updated_script_params = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    exec_params=kwargs['exec_params'],
                    **connection_args)
                if 'error' in updated_script_params:
                    # BUGFIX: previously appended updated_script['error'] here,
                    # which was the wrong result object (and a NameError when
                    # only the script params were updated).
                    error.append(updated_script_params['error'])
                else:
                    ret['changes']['exec_params'] = kwargs['exec_params']

            if update_sms:
                updated_sms = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    type=mediatype,
                    gsm_modem=kwargs['gsm_modem'],
                    **connection_args)
                if 'error' in updated_sms:
                    error.append(updated_sms['error'])
                else:
                    ret['changes']['gsm_modem'] = kwargs['gsm_modem']

            if update_jabber:
                updated_jabber = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    type=mediatype,
                    username=kwargs['username'],
                    passwd=kwargs['passwd'],
                    **connection_args)
                if 'error' in updated_jabber:
                    error.append(updated_jabber['error'])
                else:
                    ret['changes']['username'] = kwargs['username']

            if update_eztext:
                updated_eztext = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    type=mediatype,
                    username=kwargs['username'],
                    passwd=kwargs['passwd'],
                    exec_path=kwargs['exec_path'],
                    **connection_args)
                if 'error' in updated_eztext:
                    error.append(updated_eztext['error'])
                else:
                    ret['changes']['username'] = kwargs['username']
                    ret['changes']['exec_path'] = kwargs['exec_path']

            if update_status:
                updated_status = __salt__['zabbix.mediatype_update'](
                    mediatypeid,
                    status=kwargs['status'],
                    **connection_args)
                if 'error' in updated_status:
                    error.append(updated_status['error'])
                else:
                    ret['changes']['status'] = kwargs['status']
        else:
            ret['result'] = True
            ret['comment'] = comment_mediatype_exists
    else:
        mediatype_create = __salt__['zabbix.mediatype_create'](name, mediatype, **kwargs)
        if 'error' not in mediatype_create:
            ret['result'] = True
            ret['comment'] = comment_mediatype_created
            ret['changes'] = changes_mediatype_created
        else:
            ret['result'] = False
            ret['comment'] = comment_mediatype_notcreated + six.text_type(mediatype_create['error'])

    # error detected
    if error:
        ret['changes'] = {}
        ret['result'] = False
        ret['comment'] = six.text_type(error)

    return ret
def get_chunks(data, chunks=None):
    """Try to guess a reasonable chunk shape for block-wise algorithms
    operating over ``data``.

    :param data: array-like; bcolz carrays, h5py datasets and zarr arrays
        are recognized by their attributes
    :param chunks: explicit chunk shape; returned unchanged when given
    :return: tuple chunk shape
    """
    if chunks is not None:
        return chunks
    if hasattr(data, 'chunklen') and hasattr(data, 'shape'):
        # bcolz carray: chunk along the first dimension only
        return (data.chunklen,) + data.shape[1:]
    if (hasattr(data, 'chunks') and hasattr(data, 'shape')
            and len(data.chunks) == len(data.shape)):
        # h5py dataset or zarr array: reuse its native chunking
        return data.chunks
    # fall back to something simple: ~4Mb chunks of the first dimension
    row = np.asarray(data[0])
    chunklen = max(1, (2 ** 22) // row.nbytes)
    if row.shape:
        return (chunklen,) + row.shape
    return (chunklen,)
def merge(self, gdefs):
    """Merge overlapped guides.

    For example::

        from plotnine import *
        gg = ggplot(mtcars, aes(y='wt', x='mpg', colour='factor(cyl)'))
        gg = gg + stat_smooth(aes(fill='factor(cyl)'), method='lm')
        gg = gg + geom_point()
        gg

    This would create two guides with the same hash; they are reduced to a
    single guide via the guide's ``merge`` method.
    """
    # bucket the guide definitions by hash, preserving first-seen order
    buckets = {}
    for gdef in gdefs:
        buckets.setdefault(gdef.hash, []).append(gdef)

    merged = []
    for group in buckets.values():
        # fold each bucket into a single guide using guide.merge
        combined = group[0]
        for other in group[1:]:
            combined = combined.merge(other)
        merged.append(combined)
    return merged
def destroy_elb(app='', env='dev', region='us-east-1', **_):
    """Destroy ELB Resources.

    Args:
        app (str): Spinnaker Application name.
        env (str): Deployment environment.
        region (str): AWS region.

    Returns:
        True upon successful completion.
    """
    vpc = get_vpc_id(account=env, region=region)
    task_json = get_template(template_file='destroy/destroy_elb.json.j2',
                             app=app,
                             env=env,
                             region=region,
                             vpc=vpc)
    wait_for_task(task_json)
    return True
def insert_state_into_selected_state(state, as_template=False):
    """Adds a State to the selected state.

    :param state: the state which is inserted
    :param as_template: If a state is a library state it can be inserted as a template
    :return: boolean: success of the insertion
    """
    smm_m = rafcon.gui.singleton.state_machine_manager_model

    # only State instances can be inserted
    if not isinstance(state, State):
        logger.warning("A state is needed to be insert not {0}".format(state))
        return False
    # a state machine must be selected to determine the insertion target
    if not smm_m.selected_state_machine_id:
        logger.warning("Please select a container state within a state machine first")
        return False

    selection = smm_m.state_machines[smm_m.selected_state_machine_id].selection
    # exactly one target state must be selected
    if len(selection.states) > 1:
        logger.warning("Please select exactly one state for the insertion")
        return False
    if len(selection.states) == 0:
        logger.warning("Please select a state for the insertion")
        return False
    # targets inside a library state cannot be modified
    if is_selection_inside_of_library_state(selected_elements=[selection.get_selected_state()]):
        logger.warning("State is not insert because target state is inside of a library state.")
        return False

    gui_helper_state.insert_state_as(selection.get_selected_state(), state, as_template)
    return True
def high_frequency_cutoff_from_config(cp):
    """Gets the high frequency cutoff from the given config file.

    This looks for ``high-frequency-cutoff`` in the ``[model]`` section and
    casts it to float. If none is found, will just return ``None``.

    Parameters
    ----------
    cp : WorkflowConfigParser
        Config file parser to read.

    Returns
    -------
    float or None :
        The high frequency cutoff.
    """
    option = 'high-frequency-cutoff'
    if not cp.has_option('model', option):
        return None
    return float(cp.get('model', option))
def inspect(self, mrf=True):
    """Inspect a Packer template file (``packer inspect -machine-readable``).

    To return the output in a readable form, the ``-machine-readable`` flag
    is appended automatically, after which the output is parsed and returned
    as a dict of the form::

        "variables": [...], "provisioners": [...], "builders": [...]

    :param bool mrf: output in machine-readable form.
    """
    self.packer_cmd = self.packer.inspect

    if mrf:
        self._add_opt('-machine-readable')
    else:
        self._add_opt(None)
    self._add_opt(self.packerfile)

    result = self.packer_cmd()
    if mrf:
        result.parsed_output = self._parse_inspection_output(result.stdout.decode())
    else:
        result.parsed_output = None
    return result
def start_all_linking(self, link_type, group_id):
    """Start the all-linking process for this device.

    :param link_type: link type byte sent in the 0x64 command
    :param group_id: group byte sent in the 0x64 command
    """
    self.logger.info("Start_all_linking for device %s type %s group %s",
                     self.device_id, link_type, group_id)
    command = '64' + link_type + group_id
    self.hub.direct_command(self.device_id, '02', command)
def inspect_axes(ax):
    """Inspect an axes or subplot to get the initialization parameters.

    :param ax: a matplotlib axes (plain or subplot) instance
    :return: dict of parameters from which an equivalent axes can be
        re-created (figure number, background color, projection, visibility,
        spine properties, inversion flags and geometry/position)
    """
    ret = {'fig': ax.get_figure().number}
    if mpl.__version__ < '2.0':
        ret['axisbg'] = ax.get_axis_bgcolor()
    else:  # axisbg is deprecated in matplotlib >= 2.0
        ret['facecolor'] = ax.get_facecolor()
    proj = getattr(ax, 'projection', None)
    if proj is not None and not isinstance(proj, six.string_types):
        # store (module, class name) so the projection can be re-imported later
        proj = (proj.__class__.__module__, proj.__class__.__name__)
    ret['projection'] = proj
    ret['visible'] = ax.get_visible()
    ret['spines'] = {}
    ret['zorder'] = ax.get_zorder()
    ret['yaxis_inverted'] = ax.yaxis_inverted()
    ret['xaxis_inverted'] = ax.xaxis_inverted()
    # capture the drawing properties of every spine (e.g. left/right/top/bottom)
    for key, val in ax.spines.items():
        ret['spines'][key] = {}
        for prop in ['linestyle', 'edgecolor', 'linewidth', 'facecolor', 'visible']:
            ret['spines'][key][prop] = getattr(val, 'get_' + prop)()
    if isinstance(ax, mfig.SubplotBase):
        # subplot: record grid geometry and the position within the grid
        sp = ax.get_subplotspec().get_topmost_subplotspec()
        ret['grid_spec'] = sp.get_geometry()[:2]
        ret['subplotspec'] = [sp.num1, sp.num2]
        ret['is_subplot'] = True
    else:
        # plain axes: record the bounding box instead
        ret['args'] = [ax.get_position(True).bounds]
        ret['is_subplot'] = False
    return ret
def evaluate(self, env):
    """Evaluate the function call in the environment, returning a Unicode
    string.

    Unknown function names yield the original source text unchanged; an
    exception raised by the function is rendered as ``<message>`` to aid
    debugging.
    """
    if self.ident not in env.functions:
        return self.original
    arg_vals = [expr.evaluate(env) for expr in self.args]
    try:
        out = env.functions[self.ident](*arg_vals)
    except Exception as exc:
        # Function raised an exception! Maybe inlining the name of
        # the exception will help debug.
        return u'<%s>' % str(exc)
    return str(out)
def kill(self, id, signal=signal.SIGTERM):
    """Kill a job with the given id.

    :WARNING: beware of what u kill, if u killed redis for example core0 or
        coreX won't be reachable

    :param id: job id to kill
    :param signal: signal number to deliver (default SIGTERM)
    """
    args = {'id': id, 'signal': int(signal)}
    # validate the argument shape before dispatching
    self._kill_chk.check(args)
    return self._client.json('job.kill', args)
def get_next_want_file(self, byte_index, block):
    '''Returns the leftmost file in the user's list of wanted files
    (want_file_pos).

    If the first file it finds isn't in the list, it will keep searching
    until the length of 'block' is exceeded.

    :param byte_index: byte offset of `block` (presumably absolute within
        the torrent — confirm with callers)
    :param block: received data, sliced down as the search advances
    :return: ``(file_index, byte_index, block)`` for the first wanted file
        the block reaches, or ``None`` if the block only spans unwanted files
    '''
    while block:
        # index of the file containing byte_index
        rightmost = get_rightmost_index(byte_index=byte_index,
                                        file_starts=self.file_starts)
        if rightmost in self.want_file_pos:
            return rightmost, byte_index, block
        else:
            # skip past the remainder of this unwanted file
            file_start = (self.file_starts[rightmost])
            file_length = self.file_list[rightmost]['length']
            bytes_rem = file_start + file_length - byte_index
            if len(block) > bytes_rem:
                block = block[bytes_rem:]
                byte_index = byte_index + bytes_rem
            else:
                # block exhausted inside this file -> terminate the loop
                block = ''
    else:
        # while/else: loop ended without finding a wanted file
        return None
def decode_base64_and_inflate(value, ignore_zip=False):
    """
    base64 decodes and then inflates according to RFC1951

    :param value: a deflated and encoded string
    :type value: string
    :param ignore_zip: ignore zip errors
    :returns: the string after decoding and inflating
    :rtype: string
    """
    decoded = OneLogin_Saml2_Utils.b64decode(value)
    try:
        # wbits=-15 selects a raw DEFLATE stream with no zlib header
        # (RFC 1951), which is what SAML redirect bindings produce.
        return zlib.decompress(decoded, -15)
    except zlib.error:
        if ignore_zip:
            # Caller tolerates non-deflated payloads; return the
            # plain base64-decoded value instead.
            return decoded
        raise
def _diagnostic ( self , event ) : """This method is employed instead of _ tap ( ) if the PyKeyboardEvent is initialized with diagnostic = True . This makes some basic testing quickly and easily available . It will print out information regarding the event instead of passing information along to self . tap ( )"""
print ( '\n---Keyboard Event Diagnostic---' ) print ( 'MessageName:' , event . MessageName ) print ( 'Message:' , event . Message ) print ( 'Time:' , event . Time ) print ( 'Window:' , event . Window ) print ( 'WindowName:' , event . WindowName ) print ( 'Ascii:' , event . Ascii , ',' , chr ( event . Ascii ) ) print ( 'Key:' , event . Key ) print ( 'KeyID:' , event . KeyID ) print ( 'ScanCode:' , event . ScanCode ) print ( 'Extended:' , event . Extended ) print ( 'Injected:' , event . Injected ) print ( 'Alt' , event . Alt ) print ( 'Transition' , event . Transition ) print ( '---' )
def mjd2gmst(mjd):
    """Convert Modified Julian Date (JD = 2400000.5) to GMST

    Taken from P.T. Walace routines.
    """
    # Julian centuries of Universal Time since the reference epoch.
    tu = (mjd - MJD0) / (100 * DPY)
    # GMST polynomial (coefficients in seconds of time, converted to
    # radians via DS2R) plus the day fraction expressed as an angle.
    poly = 24110.54841 + (8640184.812866 + (0.093104 - 6.2e-6 * tu) * tu) * tu
    st = math.fmod(mjd, 1.0) * D2PI + poly * DS2R
    # Normalise the angle into [0, 2*pi).
    w = math.fmod(st, D2PI)
    return w if w >= 0.0 else w + D2PI
def hex_encode_abi_type(abi_type, value, force_size=None):
    """
    Encodes value into a hex string in format of abi_type
    """
    validate_abi_type(abi_type)
    validate_abi_value(abi_type, value)

    data_size = force_size or size_of_type(abi_type)

    if is_array_type(abi_type):
        # Encode each element at a fixed 256-bit width and concatenate.
        sub_type = sub_type_of_array_type(abi_type)
        return "".join(
            remove_0x_prefix(hex_encode_abi_type(sub_type, v, 256))
            for v in value
        )
    if is_bool_type(abi_type):
        return to_hex_with_size(value, data_size)
    if is_uint_type(abi_type):
        return to_hex_with_size(value, data_size)
    if is_int_type(abi_type):
        return to_hex_twos_compliment(value, data_size)
    if is_address_type(abi_type):
        return pad_hex(value, data_size)
    if is_bytes_type(abi_type):
        # Raw bytes are hex-encoded; hex strings pass through untouched.
        return encode_hex(value) if is_bytes(value) else value
    if is_string_type(abi_type):
        return to_hex(text=value)
    raise ValueError("Unsupported ABI type: {0}".format(abi_type))
def sample_tmatrix(C, nsample=1, nsteps=None, reversible=False, mu=None,
                   T0=None, return_statdist=False):
    r"""samples transition matrices from the posterior distribution

    Parameters
    ----------
    C : (M, M) ndarray or scipy.sparse matrix
        Count matrix
    nsample : int
        number of samples to be drawn
    nsteps : int, default=None
        number of full Gibbs sampling sweeps internally done for each
        sample returned. This option is meant to ensure approximately
        uncorrelated samples for every call to sample(). If None, the
        number of steps will be automatically determined based on the
        other options and the matrix size. nsteps > 1 will only be used
        for reversible sampling, because nonreversible sampling
        generates statistically independent transition matrices every
        step.
    reversible : bool
        If true sample from the ensemble of transition matrices
        restricted to those obeying a detailed balance condition, else
        draw from the whole ensemble of stochastic matrices.
    mu : array_like
        A fixed stationary distribution. Transition matrices with that
        stationary distribution will be sampled
    T0 : ndarray, shape=(n, n) or scipy.sparse matrix
        Starting point of the MC chain of the sampling algorithm.
        Has to obey the required constraints.
    return_statdist : bool, optional, default=False
        if true, will also return the stationary distribution.

    Returns
    -------
    P : ndarray(n, n) or array of ndarray(n, n)
        sampled transition matrix (or multiple matrices if nsample > 1)

    Notes
    -----
    The posterior distribution is a product of Dirichlet distributions

    .. math::
        \mathbb{P}(T|C) \propto \prod_{i=1}^{M}
            \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right)

    See also
    --------
    tmatrix_sampler
    """
    if issparse(C):
        # The sampler operates on dense arrays; warn and densify.
        _showSparseConversionWarning()
        C = C.toarray()
    sampler = tmatrix_sampler(C, reversible=reversible, mu=mu, T0=T0,
                              nsteps=nsteps)
    return sampler.sample(nsamples=nsample,
                          return_statdist=return_statdist)
def delete_archive(self, archive_id):
    """
    Deletes an OpenTok archive.

    You can only delete an archive which has a status of "available" or
    "uploaded". Deleting an archive removes its record from the list of
    archives. For an "available" archive, it also removes the archive
    file, making it unavailable for download.

    :param String archive_id: The archive ID of the archive to be
        deleted.
    """
    response = requests.delete(
        self.endpoints.archive_url(archive_id),
        headers=self.json_headers(),
        proxies=self.proxies,
        timeout=self.timeout,
    )
    if response.status_code < 300:
        # 2xx: archive deleted; nothing to return.
        return
    if response.status_code == 403:
        raise AuthError()
    if response.status_code == 404:
        raise NotFoundError("Archive not found")
    raise RequestError("An unexpected error occurred", response.status_code)
def log_metrics(metrics, summ_writer, log_prefix, step, history=None):
    """Log metrics to summary writer and history.

    Args:
      metrics: dict mapping metric name (str) to a scalar value.
      summ_writer: writer with a ``scalar(name, value, step)`` method,
        or None to skip writing summaries.
      log_prefix: string prepended to each logged line (e.g. "train").
      step: current step number, passed through to the log/writer.
      history: optional object with an
        ``append(prefix, name, step, value)`` method; skipped if falsy.
    """
    # Right-justify names to the longest one so log lines align.
    # ``or [0]`` guards against an empty metrics dict: max() of an
    # empty sequence raises ValueError.
    rjust_len = max([len(name) for name in metrics] or [0])
    for name, value in six.iteritems(metrics):
        step_log(step, "%s %s | % .8f" % (
            log_prefix.ljust(5), name.rjust(rjust_len), value))
        full_name = "metrics/" + name
        if history:
            history.append(log_prefix, full_name, step, value)
        if summ_writer:
            summ_writer.scalar(full_name, value, step)
def create_table(self, table_name, obj=None, **kwargs):
    """
    Dispatch to ImpalaClient.create_table. See that function's
    docstring for more
    """
    # Forward to the client, pinning the database to this object's name.
    return self.client.create_table(
        table_name, obj=obj, database=self.name, **kwargs
    )