signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def to_triples(self, short_pred=True, properties=True):
    """Encode the Dmrs as triples suitable for PENMAN serialization."""
    triples = []
    quantifiers = set(self.nodeids(quantifier=True))
    # One 'predicate' triple per node, plus optional lnk/carg/property triples.
    for node in nodes(self):
        if short_pred:
            pred = node.pred.short_form()
        else:
            pred = node.pred.string
        triples.append((node.nodeid, 'predicate', pred))
        if node.lnk is not None:
            triples.append((node.nodeid, 'lnk', '"{}"'.format(str(node.lnk))))
        if node.carg is not None:
            triples.append((node.nodeid, 'carg', '"{}"'.format(node.carg)))
        # Properties are skipped for quantifier nodes.
        if properties and node.nodeid not in quantifiers:
            for key, value in node.sortinfo.items():
                triples.append((node.nodeid, key.lower(), value))
    # Links either mark the top node or become RARG-POST relation triples.
    for link in links(self):
        if safe_int(link.start) == LTOP_NODEID:
            triples.append((link.start, 'top', link.end))
        else:
            relation = '{}-{}'.format(link.rargname.upper(), link.post)
            triples.append((link.start, relation, link.end))
    return triples
def ReadVarBytes(self, max=sys.maxsize):
    """Read a variable-length byte string from the stream.

    The NEO network protocol uses a length-prefixed encoding for space
    saving; see the NEO network protocol conventions.

    Args:
        max (int): (Optional) maximum number of bytes to read.

    Returns:
        bytes: the bytes read from the stream.
    """
    # The length prefix is itself variable-length encoded.
    return self.ReadBytes(self.ReadVarInt(max))
def ihs ( h , pos , map_pos = None , min_ehh = 0.05 , min_maf = 0.05 , include_edges = False , gap_scale = 20000 , max_gap = 200000 , is_accessible = None , use_threads = True ) : """Compute the unstandardized integrated haplotype score ( IHS ) for each variant , comparing integrated haplotype homozygosity between the reference ( 0 ) and alternate ( 1 ) alleles . Parameters h : array _ like , int , shape ( n _ variants , n _ haplotypes ) Haplotype array . pos : array _ like , int , shape ( n _ variants , ) Variant positions ( physical distance ) . map _ pos : array _ like , float , shape ( n _ variants , ) Variant positions ( genetic map distance ) . min _ ehh : float , optional Minimum EHH beyond which to truncate integrated haplotype homozygosity calculation . min _ maf : float , optional Do not compute integrated haplotype homozogysity for variants with minor allele frequency below this value . include _ edges : bool , optional If True , report scores even if EHH does not decay below ` min _ ehh ` before reaching the edge of the data . gap _ scale : int , optional Rescale distance between variants if gap is larger than this value . max _ gap : int , optional Do not report scores if EHH spans a gap larger than this number of base pairs . is _ accessible : array _ like , bool , optional Genome accessibility array . If provided , distance between variants will be computed as the number of accessible bases between them . use _ threads : bool , optional If True use multiple threads to compute . Returns score : ndarray , float , shape ( n _ variants , ) Unstandardized IHS scores . Notes This function will calculate IHS for all variants . To exclude variants below a given minor allele frequency , filter the input haplotype array before passing to this function . This function computes IHS comparing the reference and alternate alleles . These can be polarised by switching the sign for any variant where the reference allele is derived . 
This function returns NaN for any IHS calculations where haplotype homozygosity does not decay below ` min _ ehh ` before reaching the first or last variant . To disable this behaviour , set ` include _ edges ` to True . Note that the unstandardized score is returned . Usually these scores are then standardized in different allele frequency bins . See Also standardize _ by _ allele _ count"""
# check inputs h = asarray_ndim ( h , 2 ) check_integer_dtype ( h ) pos = asarray_ndim ( pos , 1 ) check_dim0_aligned ( h , pos ) h = memoryview_safe ( h ) pos = memoryview_safe ( pos ) # compute gaps between variants for integration gaps = compute_ihh_gaps ( pos , map_pos , gap_scale , max_gap , is_accessible ) # setup kwargs kwargs = dict ( min_ehh = min_ehh , min_maf = min_maf , include_edges = include_edges ) if use_threads and multiprocessing . cpu_count ( ) > 1 : # run with threads # create pool pool = ThreadPool ( 2 ) # scan forward result_fwd = pool . apply_async ( ihh01_scan , ( h , gaps ) , kwargs ) # scan backward result_rev = pool . apply_async ( ihh01_scan , ( h [ : : - 1 ] , gaps [ : : - 1 ] ) , kwargs ) # wait for both to finish pool . close ( ) pool . join ( ) # obtain results ihh0_fwd , ihh1_fwd = result_fwd . get ( ) ihh0_rev , ihh1_rev = result_rev . get ( ) # cleanup pool . terminate ( ) else : # run without threads # scan forward ihh0_fwd , ihh1_fwd = ihh01_scan ( h , gaps , ** kwargs ) # scan backward ihh0_rev , ihh1_rev = ihh01_scan ( h [ : : - 1 ] , gaps [ : : - 1 ] , ** kwargs ) # handle reverse scan ihh0_rev = ihh0_rev [ : : - 1 ] ihh1_rev = ihh1_rev [ : : - 1 ] # compute unstandardized score ihh0 = ihh0_fwd + ihh0_rev ihh1 = ihh1_fwd + ihh1_rev score = np . log ( ihh1 / ihh0 ) return score
def alterar(self, id_egrupo, nome):
    """Update an equipment group's data given its identifier.

    :param id_egrupo: Equipment group identifier.
    :param nome: Equipment group name.
    :return: None
    :raise InvalidParameterError: the identifier and/or name are null or invalid.
    :raise GrupoEquipamentoNaoExisteError: equipment group not registered.
    :raise NomeGrupoEquipamentoDuplicadoError: duplicated equipment group name.
    :raise DataBaseError: networkapi failed to access the database.
    :raise XMLError: networkapi failed to read the request XML or generate
        the response XML.
    """
    if not is_valid_int_param(id_egrupo):
        raise InvalidParameterError(
            u'O identificador do grupo de equipamento é inválido ou não foi informado.')
    url = 'egrupo/' + str(id_egrupo) + '/'
    egrupo_map = {'nome': nome}
    code, xml = self.submit({'grupo': egrupo_map}, 'PUT', url)
    return self.response(code, xml)
def parent_for_matching_rest_name(self, rest_names):
    """Return the nearest ancestor (including self) whose ``rest_name`` is
    in ``rest_names``, or None if no ancestor matches."""
    current = self
    # Walk up the parent chain until we run out of ancestors.
    while current:
        if current.rest_name in rest_names:
            return current
        current = current.parent_object
    return None
def install_from_pypi(context):
    """Attempt to install your package from PyPI (or a custom index).

    :param context: object carrying ``module_name``, optional ``pypi`` index
        URL, and ``dry_run`` flag.
    :raises Exception: if the pip invocation itself errors out.
    """
    tmp_dir = venv.create_venv()
    install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)
    package_index = 'pypi'
    if context.pypi:
        # BUGFIX: the original appended '-i %s' with no leading space, which
        # fused the flag onto the module name ("...<module>-i <url>") and
        # produced an invalid pip command.
        install_cmd += ' -i %s' % context.pypi
        package_index = context.pypi
    try:
        result = shell.dry_run(install_cmd, context.dry_run)
        if not context.dry_run and not result:
            log.error('Failed to install %s from %s',
                      context.module_name, package_index)
        else:
            log.info('Successfully installed %s from %s',
                     context.module_name, package_index)
    except Exception as e:
        error_msg = 'Error installing %s from %s' % (context.module_name,
                                                     package_index)
        log.exception(error_msg)
        raise Exception(error_msg, e)
def do_handle_log(self, workunit, level, *msg_elements):
    """Reporter callback: render and emit a log message for a workunit."""
    if not self.is_under_main_root(workunit):
        return
    # A (msg, detail) pair carries a detail there is no useful way to display
    # on the console, so keep only the message part.
    parts = []
    for element in msg_elements:
        if isinstance(element, six.string_types):
            parts.append(element)
        else:
            parts.append(element[0])
    msg = '\n' + ''.join(parts)
    if self.use_color_for_workunit(workunit, self.settings.color):
        colorize = self._COLOR_BY_LEVEL.get(level, lambda x: x)
        msg = colorize(msg)
    self.emit(self._prefix(workunit, msg))
    self.flush()
def get_overlapping_ranges(self, collection_link, partition_key_ranges):
    '''Given a collection and a list of partition key ranges, return the
    list of overlapping partition key ranges.

    :param str collection_link: The name of the collection.
    :param list partition_key_ranges: List of partition key ranges.
    :return: List of overlapping partition key ranges.
    :rtype: list
    '''
    cl = self._documentClient
    collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
    # Routing maps are cached per collection id; build one on first use.
    collection_routing_map = self._collection_routing_map_by_item.get(collection_id)
    if collection_routing_map is None:
        collection_pk_ranges = list(cl._ReadPartitionKeyRanges(collection_link))
        # for large collections, a split may complete between the read partition key ranges query page responses,
        # causing the partitionKeyRanges to have both the children ranges and their parents. Therefore, we need
        # to discard the parent ranges to have a valid routing map.
        collection_pk_ranges = _PartitionKeyRangeCache._discard_parent_ranges(collection_pk_ranges)
        collection_routing_map = _CollectionRoutingMap.CompleteRoutingMap(
            [(r, True) for r in collection_pk_ranges], collection_id)
        self._collection_routing_map_by_item[collection_id] = collection_routing_map
    return collection_routing_map.get_overlapping_ranges(partition_key_ranges)
def ok_schema_id(token: str) -> bool:
    """Return True if ``token`` looks like a valid schema identifier,
    i.e. ``<issuer-did>:2:<name>:<version>``.

    :param token: candidate string
    :return: whether the token looks like a valid schema identifier
    """
    # A DID is 21-22 base58 characters; '2' marks a schema identifier.
    pattern = '[{}]{{21,22}}:2:.+:[0-9.]+$'.format(B58)
    candidate = token if token else ''
    return re.match(pattern, candidate) is not None
def _handle_sdp_target_state_updated(sdp_state: SDPState):
    """Respond to an SDP target state change event by driving the current
    state toward the target state.

    TODO(BMo) This cant be done as a blocking function as it is here!
    """
    LOG.info('Handling SDP target state updated event...')
    LOG.info('SDP target state: %s', sdp_state.target_state)
    target = sdp_state.target_state
    # Map the SDP target state onto the services' target state.
    if target == 'off':
        _update_services_target_state('off')
    # TODO: verify that SDP has actually reached the target state before
    # committing it as the current state.
    sdp_state.update_current_state(target)
def pop(self, index=-1):
    """Remove and return the item at ``index`` (default last); see ``list.pop``."""
    if not isinstance(index, int):
        # Mirror CPython's error message, which differs between 2 and 3.
        message = ('an integer is required' if PY2
                   else "'str' object cannot be interpreted as an integer")
        raise TypeError(message)
    size = len(self)
    # Only notify observers when the pop will actually succeed; an
    # out-of-range index falls through and lets list.pop raise IndexError.
    if -size <= index < size:
        self._notify_remove_at(index)
    return super(ObservableList, self).pop(index)
def run_migration(name, major_version, minor_version, db, mod, conf=None):
    """Run a migration script inside a transaction and record the version.

    :param name: name of the migration
    :param major_version: major version number of the migration
    :param minor_version: minor version number of the migration
    :param db: database connection object
    :param mod: module exposing an ``up(db, conf)`` entry point
    :param conf: application configuration (if any)
    """
    # BUGFIX: the default was a shared mutable dict (``conf={}``); a fresh
    # dict per call avoids state leaking between migrations.
    if conf is None:
        conf = {}
    with db.transaction():
        mod.up(db, conf)
        set_version(db, name, major_version, minor_version)
def _read_para_ack ( self , code , cbit , clen , * , desc , length , version ) : """Read HIP ACK parameter . Structure of HIP ACK parameter [ RFC 7401 ] : 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | Type | Length | | peer Update ID 1 | / peer Update ID n | Octets Bits Name Description 0 0 ack . type Parameter Type 1 15 ack . critical Critical Bit 2 16 ack . length Length of Contents 4 32 ack . id Peer Update ID"""
if clen % 4 != 0 : raise ProtocolError ( f'HIPv{version}: [Parano {code}] invalid format' ) _upid = list ( ) for _ in range ( clen // 4 ) : _upid . append ( self . _read_unpack ( 4 ) ) ack = dict ( type = desc , critical = cbit , length = clen , id = tuple ( _upid ) , ) return ack
def report(self, logprob):
    """Report EM convergence progress to :data:`sys.stderr`.

    The output has three columns: iteration number, log probability of the
    data at the current iteration, and convergence rate (NaN on the first
    iteration, when it is unknown).

    Parameters
    ----------
    logprob : float
        The log probability of the data as computed by the EM algorithm in
        the current iteration.
    """
    if self.verbose:
        if self.history:
            delta = logprob - self.history[-1]
        else:
            # No previous iteration to compare against.
            delta = np.nan
        line = self._template.format(iter=self.iter + 1, logprob=logprob,
                                     delta=delta)
        print(line, file=sys.stderr)
    self.history.append(logprob)
    self.iter += 1
def _readintle ( self , length , start ) : """Read bits and interpret as a little - endian signed int ."""
ui = self . _readuintle ( length , start ) if not ui >> ( length - 1 ) : # Top bit not set , number is positive return ui # Top bit is set , so number is negative tmp = ( ~ ( ui - 1 ) ) & ( ( 1 << length ) - 1 ) return - tmp
def on_open(self, websocket):
    '''Handle a new websocket connection: wrap it in a :class:`ChatClient`
    and register it with the :attr:`pubsub` handler.'''
    client = ChatClient(websocket, self.channel)
    self.pubsub.add_client(client)
def confusion_matrix(y_true, y_pred, target_names=None, normalize=False,
                     cmap=None, ax=None):
    """Plot confusion matrix.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Correct target values (ground truth).
    y_pred : array-like, shape = [n_samples]
        Target predicted classes (estimator predictions).
    target_names : list
        Names of the target classes, in order, e.g.
        ``['Label for class 0', 'Label for class 1']``. If ``None``,
        generic labels are generated, e.g. ``['Class 0', 'Class 1']``.
    normalize : bool
        Normalize the confusion matrix (row-wise).
    cmap : matplotlib Colormap
        If ``None`` uses a modified version of matplotlib's OrRd colormap.
    ax : matplotlib Axes
        Axes object to draw the plot onto, otherwise uses current Axes.

    Returns
    -------
    ax : matplotlib Axes
        Axes containing the plot.
    """
    if any((val is None for val in (y_true, y_pred))):
        raise ValueError("y_true and y_pred are needed to plot confusion "
                         "matrix")
    # calculate how many names you expect
    values = set(y_true).union(set(y_pred))
    expected_len = len(values)
    if target_names and (expected_len != len(target_names)):
        raise ValueError(('Data cointains {} different values, but target'
                          ' names contains {} values.'
                          .format(expected_len, len(target_names))))
    # if the user didn't pass target_names, create generic ones
    if not target_names:
        values = list(values)
        values.sort()
        target_names = ['Class {}'.format(v) for v in values]
    cm = sk_confusion_matrix(y_true, y_pred)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        np.set_printoptions(precision=2)
    if ax is None:
        ax = plt.gca()
    # this (y, x) may sound counterintuitive. The reason is that
    # in a matrix cell (i, j) is in row=i and col=j, translating that
    # to an x, y plane (which matplotlib uses to plot), we need to use
    # i as the y coordinate (how many steps down) and j as the x coordinate
    # (how many steps to the right).
    for (y, x), v in np.ndenumerate(cm):
        try:
            label = '{:.2}'.format(v)
        except (TypeError, ValueError):
            # BUGFIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only formatting failures
            # should fall back to the raw value.
            label = v
        ax.text(x, y, label,
                horizontalalignment='center',
                verticalalignment='center')
    if cmap is None:
        cmap = default_heatmap()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.colorbar(im, ax=ax)
    tick_marks = np.arange(len(target_names))
    ax.set_xticks(tick_marks)
    ax.set_xticklabels(target_names)
    ax.set_yticks(tick_marks)
    ax.set_yticklabels(target_names)
    title = 'Confusion matrix'
    if normalize:
        title += ' (normalized)'
    ax.set_title(title)
    ax.set_ylabel('True label')
    ax.set_xlabel('Predicted label')
    return ax
def reset(self) -> None:
    """Restores the starting position."""
    # Reset move counters and the side to move.
    self.halfmove_clock = 0
    self.fullmove_number = 1
    self.turn = WHITE
    # Full castling rights; no en passant square available.
    self.castling_rights = BB_CORNERS
    self.ep_square = None
    # Put the pieces back on their home squares.
    self.reset_board()
def announcement_posted_hook(request, obj):
    """Run whenever a new announcement is created, or a request is approved
    and posted.

    obj: the Announcement object
    """
    logger.debug("Announcement posted")
    if not obj.notify_post:
        logger.debug("Announcement notify off")
        return
    logger.debug("Announcement notify on")
    announcement_posted_twitter(request, obj)
    # Equivalent to the old try/except AttributeError dance.
    notify_all = getattr(obj, 'notify_email_all', False)
    try:
        if notify_all:
            announcement_posted_email(request, obj, True)
        else:
            announcement_posted_email(request, obj)
    except Exception as e:
        logger.error("Exception when emailing announcement: {}".format(e))
        messages.error(request, "Exception when emailing announcement: {}".format(e))
        raise e
def acls(self):
    """The instance bound ACLs operations layer."""
    # Build the ACLs layer lazily on first access and cache it.
    cached = self._acls
    if cached is None:
        cached = InstanceAcls(instance=self)
        self._acls = cached
    return cached
def objects_list(self, bucket, prefix=None, delimiter=None, projection='noAcl',
                 versions=False, max_results=0, page_token=None):
    """Issue a request to retrieve information about objects in a bucket.

    Args:
        bucket: the name of the bucket.
        prefix: an optional key prefix.
        delimiter: an optional key delimiter.
        projection: the projection of the objects to retrieve.
        versions: whether to list each version of a file as a distinct object.
        max_results: an optional maximum number of objects to retrieve.
        page_token: an optional token to continue the retrieval.
    Returns:
        A parsed list of object information dictionaries.
    Raises:
        Exception if there is an error performing the operation.
    """
    if max_results == 0:
        max_results = Api._MAX_RESULTS
    args = {'maxResults': max_results}
    # Only forward optional parameters the caller actually supplied.
    for key, val in (('prefix', prefix),
                     ('delimiter', delimiter),
                     ('projection', projection)):
        if val is not None:
            args[key] = val
    if versions:
        args['versions'] = 'true'
    if page_token is not None:
        args['pageToken'] = page_token
    url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
    return google.datalab.utils.Http.request(url, args=args,
                                             credentials=self._credentials)
def wait_for_running(self):
    """Wait until the previously found servers are operational."""
    # find_running blocks up to the launch timeout and returns refreshed
    # server attributes, which replace the stale ones.
    attrs = self.server_attrs
    timeout = self.launch_timeout_s
    self.server_attrs = self.consul.find_running(attrs, timeout)
def get_forecast_summary(self, include_map=False):
    """Get a summary of the forecast uncertainty.

    Parameters
    ----------
    include_map : bool
        If True, add the prior and posterior expectations and report
        standard deviation instead of variance.

    Returns
    -------
    pandas.DataFrame
        Dataframe of prior and posterior variances and percent uncertainty
        reduction for each forecast.

    Note
    ----
    This is the primary method for accessing forecast uncertainty
    estimates - use this!
    """
    # Renamed from ``sum`` to avoid shadowing the builtin of the same name.
    summary = {"prior_var": [], "post_var": [], "percent_reduction": []}
    for forecast in self.prior_forecast.keys():
        pr = self.prior_forecast[forecast]
        pt = self.posterior_forecast[forecast]
        ur = 100.0 * (1.0 - (pt / pr))
        summary["prior_var"].append(pr)
        summary["post_var"].append(pt)
        summary["percent_reduction"].append(ur)
    df = pd.DataFrame(summary, index=self.prior_forecast.keys())
    if include_map:
        # Report standard deviations instead of variances, drop the percent
        # reduction, and add the MAP expectations.
        df.loc[:, "prior_stdev"] = df.pop("prior_var").apply(np.sqrt)
        df.loc[:, "post_stdev"] = df.pop("post_var").apply(np.sqrt)
        df.pop("percent_reduction")
        forecast_map = self.map_forecast_estimate
        df.loc[:, "prior_expt"] = forecast_map.prior_expt
        df.loc[:, "post_expt"] = forecast_map.post_expt
    # BUGFIX: the original rebuilt a second DataFrame from the raw dict on
    # the non-include_map path, discarding ``df``; return the one already
    # constructed instead.
    return df
def median(series):
    """Return the median of a numeric series, or NaN for non-numeric dtypes.

    Args:
        series (pandas.Series): column to summarize.
    """
    is_numeric = np.issubdtype(series.dtype, np.number)
    return series.median() if is_numeric else np.nan
async def handle_frame(self, frame):
    """Handle an incoming API frame; return True if it was the expected one."""
    if isinstance(frame, FrameSetUTCConfirmation):
        # Got the confirmation we were waiting for.
        self.success = True
        return True
    return False
def xpathNextFollowing(self, cur):
    """Traversal function for the "following" direction.

    The following axis contains all nodes in the same document as the
    context node that come after it in document order, excluding any
    descendants and excluding attribute and namespace nodes.
    """
    cur__o = None if cur is None else cur._o
    ret = libxml2mod.xmlXPathNextFollowing(self._o, cur__o)
    if ret is None:
        raise xpathError('xmlXPathNextFollowing() failed')
    return xmlNode(_obj=ret)
def configure_app(dispatcher, app: web.Application, path=DEFAULT_WEB_PATH,
                  route_name=DEFAULT_ROUTE_NAME):
    """Prepare an :class:`aiohttp.web.Application` for webhook handling.

    :param dispatcher: Dispatcher instance
    :param app: :class:`aiohttp.web.Application`
    :param path: path to your webhook
    :param route_name: name of the webhook handler route
    :return:
    """
    # Expose the dispatcher to the handler via the application mapping and
    # accept any HTTP method on the webhook path.
    app[BOT_DISPATCHER_KEY] = dispatcher
    app.router.add_route('*', path, WebhookRequestHandler, name=route_name)
def get_feeds_url(blog_page, root_page):
    """Get the feed URL for a blog page instance.

    A different URL is used depending on whether ``blog_page`` is the root
    page.
    """
    if root_page != blog_page:
        blog_path = strip_prefix_and_ending_slash(blog_page.specific.last_url_part)
        return reverse('blog_page_feed_slug', kwargs={'blog_path': blog_path})
    return reverse('blog_page_feed')
def read_files(*files):
    """Read files into setup.

    Each file's content is read via ``read`` and terminated with a newline;
    the results are concatenated in argument order.

    :param files: file names to read
    :return: the concatenated text
    """
    # Single join instead of repeated ``+`` concatenation, which is
    # quadratic in the number/size of files.
    return "".join(read(single_file) + "\n" for single_file in files)
def languages2marc(self, key, value):
    """Populate the ``041`` MARC field."""
    # Look up the language by its ISO 639-1 code and use its lowercase name.
    language = pycountry.languages.get(alpha_2=value)
    return {'a': language.name.lower()}
def classpath_by_targets(cls, targets, classpath_products, confs=('default',)):
    """Return classpath entries grouped by their targets for the given `targets`.

    :param targets: The targets to look up classpath products for.
    :param ClasspathProducts classpath_products: Product containing classpath elements.
    :param confs: The list of confs for use by this classpath.
    :returns: The ordered (target, classpath) mappings.
    :rtype: OrderedDict
    """
    classpath_target_tuples = classpath_products.get_product_target_mappings_for_targets(targets)
    filtered_items_iter = filter(cls._accept_conf_filter(confs, lambda x: x[0][0]),
                                 classpath_target_tuples)
    # Group (classpath_entry, target) tuples by target, preserving order.
    target_to_classpath = OrderedDict()
    for classpath_entry, target in filtered_items_iter:
        _, entry = classpath_entry
        # Replaces the non-idiomatic ``if not target in ...`` membership
        # test + manual list creation with setdefault.
        target_to_classpath.setdefault(target, []).append(entry)
    return target_to_classpath
def get_reward_function(self):
    """Return the reward functions as a list of dicts.

    Each dict has keys 'Var', 'Parent', 'Type' and 'Parameter'; 'Type' is
    'TBL' for table-type parameters, otherwise the declared parameter type
    (e.g. a decision diagram).
    """
    reward_function = []
    for variable in self.network.findall('RewardFunction'):
        for var in variable.findall('Func'):
            func = defaultdict(list)
            func['Var'] = var.find('Var').text
            func['Parent'] = var.find('Parent').text.split()
            # Missing/empty type attribute means a table parameter.
            declared_type = var.find('Parameter').get('type')
            func['Type'] = declared_type if declared_type else 'TBL'
            func['Parameter'] = self.get_parameter(var)
            reward_function.append(func)
    return reward_function
def undo(self, hard=False):
    """Makes last commit not exist"""
    if self.fake:
        # Dry-run mode: just print what would have been executed.
        flag = '--hard ' if hard else ''
        click.echo(crayons.red('Faked! >>> git reset {}{}'.format(flag, 'HEAD^')))
        return 0
    return self.repo.git.reset('HEAD^', working_tree=hard)
def query(self, query):
    '''Return an iterable of objects matching criteria expressed in `query`.

    Values are de-serialized on the way out through a lazy generator, so the
    cost of de-serialization is only paid for items actually consumed
    (subject to ordering constraints of the underlying generator).

    Args:
        query: Query object describing the objects to return.

    Returns:
        iterable cursor with all objects matching criteria
    '''
    # Delegate to the child datastore, then wrap its result iterable so
    # each item is de-serialized only when it is consumed.
    cursor = self.child_datastore.query(query)
    lazy_items = deserialized_gen(self.serializer, cursor._iterable)
    cursor._iterable = lazy_items
    return cursor
def slugify(text):
    """Return a file-safe slug of ``text``.

    Runs of non-word characters collapse into single hyphens and the result
    is lowercased; used for deciding where to write images to disk.

    Parameters
    ----------
    text : string
        The string to slugify

    Returns
    -------
    slug : string
        A normalized slug representation of the text
    """
    # Replace every non-word run with a space, then hyphen-join the words.
    cleaned = re.sub(r'[^\w]+', ' ', text)
    words = cleaned.lower().strip().split()
    return "-".join(words)
def save_dataframes(self, outdir, prefix='df_'):
    """Save all attributes that start with ``prefix`` into a specified
    directory as CSV files.

    Args:
        outdir (str): Path to output directory
        prefix (str): Prefix that dataframe attributes start with
    """
    # Collect attribute names matching the prefix.
    df_attrs = [attr for attr in dir(self) if attr.startswith(prefix)]
    saved = 0
    for attr in df_attrs:
        outpath = ssbio.utils.outfile_maker(inname=attr, outext='.csv',
                                            outdir=outdir)
        my_df = getattr(self, attr)
        if not isinstance(my_df, pd.DataFrame):
            raise TypeError('{}: object is not a Pandas DataFrame'.format(attr))
        if my_df.empty:
            log.debug('{}: empty dataframe, not saving'.format(attr))
        else:
            my_df.to_csv(outpath)
            log.debug('{}: saved dataframe'.format(outpath))
            saved += 1
    log.debug('Saved {} dataframes at {}'.format(saved, outdir))
def etf_swap_in(self, etf_name, amount, _async=False):
    """Swap into an ETF.

    :param etf_name: ETF fund name
    :param amount: quantity
    :param _async: whether to perform the request asynchronously
    :return: API response
    """
    path = '/etf/swap/in'
    params = {'etf_name': etf_name, 'amount': amount}
    return api_key_post(params, path, _async=_async)
def Runs(self):
    """Return all the run names in the `EventMultiplexer`.

    Returns:
    {runName: {scalarValues: [tagA, tagB, tagC], graph: true, meta_graph: true}}
    """
    # Snapshot the run->accumulator map under the lock; querying each
    # accumulator's tags afterwards avoids nested locking.
    with self._accumulators_mutex:
        snapshot = list(six.iteritems(self._accumulators))
    result = {}
    for run_name, accumulator in snapshot:
        result[run_name] = accumulator.Tags()
    return result
def by_external_id_and_provider(cls, external_id, provider_name, db_session=None):
    """Return the ExternalIdentity matching the given search params.

    :param external_id:
    :param provider_name:
    :param db_session:
    :return: ExternalIdentity
    """
    db_session = get_db_session(db_session)
    query = (db_session.query(cls.model)
             .filter(cls.model.external_id == external_id)
             .filter(cls.model.provider_name == provider_name))
    return query.first()
def AddEntry(self, thing, label=None, style=None):
    """Add an entry to the legend.

    If `label` is None, `thing.GetTitle()` will be used as the label. If
    `style` is None, `thing.legendstyle` is used if present, otherwise `P`.
    """
    # A HistStack is itself iterable over its histograms.
    things = thing if isinstance(thing, HistStack) else [thing]
    for item in things:
        if not getattr(item, 'inlegend', True):
            continue
        entry_label = item.GetTitle() if label is None else label
        entry_style = getattr(item, 'legendstyle', 'P') if style is None else style
        super(Legend, self).AddEntry(item, entry_label, entry_style)
        keepalive(self, item)
def get_ref(profile, ref):
    """Fetch a ref.

    Args:
        profile
            A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.
        ref
            The ref to fetch, e.g., ``heads/my-feature-branch``.

    Returns:
        A dict with data about the ref.
    """
    response = api.get_request(profile, "/refs/" + ref)
    return prepare(response)
def from_array(array):
    """Deserialize a new InlineKeyboardMarkup from a given dictionary.

    :return: new InlineKeyboardMarkup instance, or None for empty input.
    :rtype: InlineKeyboardMarkup
    """
    # None and empty containers both deserialize to None.
    if not array:
        return None
    assert_type_or_raise(array, dict, parameter_name="array")
    keyboard = InlineKeyboardButton.from_array_list(
        array.get('inline_keyboard'), list_level=2)
    instance = InlineKeyboardMarkup(inline_keyboard=keyboard)
    instance._raw = array
    return instance
def from_xml(content_types_xml):
    """Return a new |_ContentTypeMap| instance populated with the contents
    of *content_types_xml*."""
    types_elm = parse_xml(content_types_xml)
    ct_map = _ContentTypeMap()
    # Register explicit per-part overrides, then extension defaults.
    for override in types_elm.override_lst:
        ct_map._add_override(override.partName, override.contentType)
    for default in types_elm.default_lst:
        ct_map._add_default(default.extension, default.contentType)
    return ct_map
def add_logger(self, name, address, conn_type, log_dir_path=None, **kwargs):
    '''Add a new stream capturer to the manager.

    Add a new stream capturer to the manager with the provided configuration
    details. If an existing capturer is monitoring the same address the new
    handler will be added to it.

    Args:
        name: A string defining the new capturer's name.
        address: A tuple containing address data for the capturer. Check the
            :class:`SocketStreamCapturer` documentation for what is required.
        conn_type: A string defining the connection type. Check the
            :class:`SocketStreamCapturer` documentation for a list of valid
            options.
        log_dir_path: An optional path defining the directory where the
            capturer should write its files. If this isn't provided the root
            log directory from the manager configuration is used.
    '''
    capture_handler_conf = kwargs
    if not log_dir_path:
        log_dir_path = self._mngr_conf['root_log_directory']
    log_dir_path = os.path.normpath(os.path.expanduser(log_dir_path))
    capture_handler_conf['log_dir'] = log_dir_path
    capture_handler_conf['name'] = name
    if 'rotate_log' not in capture_handler_conf:
        capture_handler_conf['rotate_log'] = True
    # Resolve transform names/callables into callables.
    transforms = []
    if 'pre_write_transforms' in capture_handler_conf:
        for transform in capture_handler_conf['pre_write_transforms']:
            if isinstance(transform, str):
                # BUGFIX: ``globals().has_key(...)`` is Python-2-only
                # (dict.has_key was removed in Python 3); use ``in``.
                if transform in globals():
                    transforms.append(globals().get(transform))
                else:
                    msg = ('Unable to load data transformation '
                           '"{}" for handler "{}"').format(
                               transform, capture_handler_conf['name'])
                    log.warn(msg)
            elif callable(transform):
                transforms.append(transform)
            else:
                msg = ('Unable to determine how to load data transform "{}"'
                       ).format(transform)
                log.warn(msg)
    capture_handler_conf['pre_write_transforms'] = transforms
    address_key = str(address)
    if address_key in self._stream_capturers:
        # An existing capturer already monitors this address; just attach
        # the new handler to it.
        capturer = self._stream_capturers[address_key][0]
        capturer.add_handler(capture_handler_conf)
        return
    socket_logger = SocketStreamCapturer(capture_handler_conf, address, conn_type)
    greenlet = gevent.spawn(socket_logger.socket_monitor_loop)
    self._stream_capturers[address_key] = (socket_logger, greenlet)
    self._pool.add(greenlet)
def site(parser, token):
    """Template tag: return a full absolute URL based on the current site.

    Takes exactly the same parameters as the built-in ``url`` template tag;
    the parsed ``url`` node's pieces are forwarded to ``SiteUrlNode``.
    """
    node = url(parser, token)
    return SiteUrlNode(view_name=node.view_name, args=node.args,
                       kwargs=node.kwargs, asvar=node.asvar)
def file_is_present(self, file_path):
    """Check whether ``file_path`` exists as a regular file.

    :param file_path: str, path to the file
    :return: True if the file exists, False if nothing exists at the path
    :raises IOError: if the path exists but is not a regular file
    """
    resolved = self.p(file_path)
    if os.path.exists(resolved):
        if os.path.isfile(resolved):
            return True
        raise IOError("%s is not a file" % file_path)
    return False
def event_at(self, when, data_tuple):
    """Schedule an event to be emitted at an absolute time.

    :param when: an absolute timestamp
    :param data_tuple: a 2-tuple (flavor, data)
    :return: an event object, useful for cancelling
    """
    # Expand the (flavor, data) pair into the event payload, then delegate.
    payload = self.make_event_data(*data_tuple)
    return self._base.event_at(when, payload)
def parse_raw_list_data(data, proxy_type='http', proxy_userpwd=None):
    """Iterate over proxy servers found in the raw data.

    Yields ``Proxy`` objects parsed from non-empty, non-comment lines;
    malformed lines are logged and skipped.
    """
    if not isinstance(data, six.text_type):
        data = data.decode('utf-8')
    for raw_line in data.splitlines():
        line = raw_line.strip().replace(' ', '')
        # Skip blank lines and comments.
        if not line or line.startswith('#'):
            continue
        try:
            host, port, username, password = parse_proxy_line(line)
        except InvalidProxyLine as ex:
            logger.error(ex)
            continue
        # Fall back to the shared credentials when the line has none.
        if username is None and proxy_userpwd is not None:
            username, password = proxy_userpwd.split(':')
        yield Proxy(host, port, username, password, proxy_type)
def set(self, section, option, value=None):
    """Extend :meth:`~configparser.ConfigParser.set` by decoding byte
    strings into unicode strings before delegating to the parent class."""
    section, option, value = (
        item.decode('utf8') if isinstance(item, bytes) else item
        for item in (section, option, value)
    )
    return super(VSGConfigParser, self).set(section, option, value)
def canonicalize_edge(edge_data: EdgeData) -> Tuple[str, Optional[Tuple], Optional[Tuple]]:
    """Canonicalize the edge to a tuple based on the relation, subject
    modifications, and object modifications."""
    relation = edge_data[RELATION]
    subject_mods = _canonicalize_edge_modifications(edge_data.get(SUBJECT))
    object_mods = _canonicalize_edge_modifications(edge_data.get(OBJECT))
    return relation, subject_mods, object_mods
def cli(env, access_id, password):
    """Changes a password for a volume's access.

    ``access_id`` is the ``allowed_host_id`` from ``slcli block access-list``.
    """
    block_manager = SoftLayer.BlockStorageManager(env.client)
    result = block_manager.set_credential_password(access_id=access_id,
                                                   password=password)
    # Report the outcome to the user; the manager returns a truthy value
    # on success.
    if result:
        click.echo('Password updated for %s' % access_id)
    else:
        click.echo('FAILED updating password for %s' % access_id)
def addPlayer(settings):
    """Define a new PlayerRecord setting and save it to a disk file.

    The new player is also registered in the known-players cache so later
    lookups by name succeed.

    :raises: whatever _validate raises for invalid settings
    :return: the newly created PlayerRecord
    """
    _validate(settings)
    player = PlayerRecord(settings)
    player.save()
    # Register in the in-memory cache keyed by player name.
    getKnownPlayers()[player.name] = player
    return player
def sortedneighbourhood(self, *args, **kwargs):
    """Add a Sorted Neighbourhood Index.

    Shortcut of :class:`recordlinkage.index.SortedNeighbourhood`::

        from recordlinkage.index import SortedNeighbourhood

        indexer = recordlinkage.Index()
        indexer.add(SortedNeighbourhood())
    """
    # Construct the indexer and register it, then return self for chaining.
    self.add(SortedNeighbourhood(*args, **kwargs))
    return self
def create_downloadjob(entry, domain, config):
    """Create download jobs for all file formats from a summary file entry."""
    logging.info('Checking record %r', entry['assembly_accession'])
    full_output_dir = create_dir(entry, config.section, domain, config.output)

    # Optional human-readable mirror of the output tree (via symlinks).
    symlink_path = None
    if config.human_readable:
        symlink_path = create_readable_dir(entry, config.section, domain,
                                           config.output)

    checksums = grab_checksums_file(entry)

    # TODO: Only write this when the checksums file changed
    with open(os.path.join(full_output_dir, 'MD5SUMS'), 'w') as handle:
        handle.write(checksums)

    parsed_checksums = parse_checksums(checksums)

    download_jobs = []
    for fmt in config.file_format:
        try:
            if has_file_changed(full_output_dir, parsed_checksums, fmt):
                download_jobs.append(
                    download_file_job(entry, full_output_dir,
                                      parsed_checksums, fmt, symlink_path))
            elif need_to_create_symlink(full_output_dir, parsed_checksums,
                                        fmt, symlink_path):
                download_jobs.append(
                    create_symlink_job(full_output_dir, parsed_checksums,
                                       fmt, symlink_path))
        except ValueError as err:
            # Presumably a checksum entry missing for this format — log and
            # continue with the remaining formats (TODO confirm).
            logging.error(err)

    return download_jobs
def _parse_seq_header ( line ) : """Unique ID , head / tail lengths and taxonomy info from a sequence header . The description is the part of the FASTA / CMA sequence header starting after the first space ( i . e . excluding ID ) , to the end of the line . This function looks inside the first ' { . . . } ' pair to extract info . Ex : > consensus seq > gi | 15606894 | ref | NP _ 214275.1 | { | 2(244 ) | < Aquificae ( B ) > } DNA polymerase III gamma subunit [ Aquifex aeolicus VF5 ] > gi | 2984127 | gb | AAC07663.1 | DNA polymerase III gamma subunit [ Aquifex aeolicus VF5 ] > gi | 75 > gi | 3212262 | pdb | 1A2K | C { < Chordata ( M ) > } Chain C , Gdpran - Ntf2 Complex > gi | 3212263 | pdb | 1A2K | D Chain D , Gdpran - Ntf2 Complex > gi | 3212264 | pdb | 1A2K | E Chain E , Gdpran - Ntf2 Complex > gi | 5542273 | pdb | 1IBR | A C"""
# ENH : use the two functions in esbglib . parseutils # or , move one or both of those functions into here _parts = line [ 1 : ] . split ( None , 1 ) rec_id = _parts [ 0 ] descr = _parts [ 1 ] if _parts [ 1 : ] else '' # Database cross references dbxrefs = { } if '|' in rec_id : id_gen = iter ( rec_id . rstrip ( '|' ) . split ( '|' ) ) for key in id_gen : try : dbxrefs [ key ] = next ( id_gen ) except StopIteration : break # Head / tail lengths and taxonomy codes headlen = taillen = None phylum = taxchar = '' if descr . startswith ( '{' ) : _deets , description = descr [ 1 : ] . split ( '}' , 1 ) match = re . search ( r""" (?: \| (?P<headlen> \d+) \( (?P<taillen> \d+) \) \| )? (?: < (?P<phylum> .+?) \( (?P<taxchar> \w) \) > )? """ , _deets , re . VERBOSE ) if match : headlen , taillen , phylum , taxchar = match . groups ( ) if headlen is not None : headlen = int ( headlen ) if taillen is not None : taillen = int ( taillen ) if phylum is None : phylum = '' if taxchar is None : taxchar = '' else : logging . warn ( "Couldn't match head/tail: %s" , _deets ) else : description = descr # TODO - return a dictionary here , update it in _ parse _ sequences return rec_id , dbxrefs , headlen , taillen , phylum , taxchar , description
def generate(self, *args, **kwargs):
    """Implementation of the ``generate`` method from ReportBase.

    Renders this report as JUnit XML and writes it both to the requested
    file and to the "latest" report location.

    :param args: 1 argument, the output filename.
    :param kwargs: Not used.
    :return: Nothing
    """
    report_xml = str(self)
    # Write the same payload to the named report and the "latest" copy.
    for target in (args[0], self.get_latest_filename('junit.xml')):
        with open(target, 'w') as out_file:
            out_file.write(report_xml)
def set_state(self, entity_id, new_state, **kwargs):
    """Update or create the current state of an entity.

    :param entity_id: ID of the entity whose state is being set.
    :param new_state: The new state value.
    :param kwargs: Extra fields forwarded to the remote API.
    """
    # FIX: entity_id was accepted but never forwarded, so the remote call
    # had no way of knowing which entity to update.
    return remote.set_state(self.api, entity_id, new_state, **kwargs)
def kill(self):
    """Kill the running process (if there is one).

    :return: void
    """
    if not self.running():
        return
    if self.verbose:
        print('Killing {} with PID {}'.format(self.exe, self.process.pid))
    self.process.kill()
    # Threads *should* tidy up after themselves, but we do it explicitly
    self.join_threads()
def get_storage_hash ( storage ) : """Return a hex string hash for a storage object ( or string containing ' full . path . ClassName ' referring to a storage object ) ."""
# If storage is wrapped in a lazy object we need to get the real thing . if isinstance ( storage , LazyObject ) : if storage . _wrapped is None : storage . _setup ( ) storage = storage . _wrapped if not isinstance ( storage , six . string_types ) : storage_cls = storage . __class__ storage = '%s.%s' % ( storage_cls . __module__ , storage_cls . __name__ ) return hashlib . md5 ( storage . encode ( 'utf8' ) ) . hexdigest ( )
def as_smearing(cls, obj):
    """Constructs an instance of `Smearing` from obj.

    Accepts obj in the form:

        * Smearing instance
        * "name:tsmear" e.g. "gaussian:0.004" (Hartree units)
        * "name:tsmear units" e.g. "gaussian:0.1 eV"
        * None --> no smearing
    """
    if obj is None:
        return Smearing.nosmearing()

    if isinstance(obj, cls):
        return obj

    # obj is a string
    if obj == "nosmearing":
        return cls.nosmearing()

    obj, tsmear = obj.split(":")
    # FIX: str.strip() returns a new string; the result was previously
    # discarded, so mode names with surrounding whitespace failed the
    # _mode2occopt lookup.
    obj = obj.strip()

    occopt = cls._mode2occopt[obj]
    try:
        # Bare number: interpret as Hartree.
        tsmear = float(tsmear)
    except ValueError:
        # "value unit" form: convert explicitly to Hartree.
        tsmear, unit = tsmear.split()
        tsmear = units.Energy(float(tsmear), unit).to("Ha")

    return cls(occopt, tsmear)
def insert(self, document):
    """Insert a new document into the table.

    :param document: the document to insert
    :returns: the inserted document's ID
    """
    doc_id = self._get_doc_id(document)
    # Read-modify-write the whole table, storing a copy of the document.
    table = self._read()
    table[doc_id] = dict(document)
    self._write(table)
    return doc_id
def addfield(self, pkt, s, val):
    """Add an internal value to a string"""
    # Width comes from the adjusted length-of field: 2 -> unsigned short
    # ("H"), 8 -> unsigned long long ("Q"); any other width appends
    # nothing. self.fmt[0] carries the byte-order character.
    if self.adjust(pkt, self.length_of) == 2:
        return s + struct.pack(self.fmt[0] + "H", val)
    elif self.adjust(pkt, self.length_of) == 8:
        return s + struct.pack(self.fmt[0] + "Q", val)
    else:
        return s
def send_invite(self, **kwargs):
    """Invite new subaccount. Returns True if success.

    :Example:

        s = client.subaccounts.create(email="johndoe@yahoo.com", role="A")

    :param str email: Subaccount email. Required.
    :param str role: Subaccount role: `A` for administrator or `U` for
        regular user. Required.
    """
    response, _ = self.request("POST", self.uri, data=kwargs)
    # The API signals success with "204 No Content".
    return response.status == 204
def unlock(self, passphrase, encrypted_seed=None):
    """Unlock the Wallet by decrypting the primary_private_seed with the
    supplied passphrase.

    Once unlocked, the private seed is accessible in memory and calls to
    `account.pay` will succeed. This is a necessary step for creating
    transactions.

    Args:
        passphrase (str): The passphrase the User used to encrypt this
            wallet.
        encrypted_seed (dict): Optional dict of the form
            {'ciphertext': ..., 'iterations': ..., 'nonce': ..., 'salt': ...}
            representing a private seed encrypted with `passphrase`. If
            supplied, it overrides (locally only) the wallet's stored
            primary_private_seed. The key MUST match the pubkey this wallet
            was created with.

    Returns:
        self
    """
    wallet = self.resource
    if not encrypted_seed:
        encrypted_seed = wallet.primary_private_seed
    try:
        # Seeds carrying a nonce use the NaCl box; legacy seeds fall back
        # to the pbkdf2-based PassphraseBox.
        if encrypted_seed['nonce']:
            primary_seed = NaclPassphraseBox.decrypt(passphrase, encrypted_seed)
        else:
            primary_seed = PassphraseBox.decrypt(passphrase, encrypted_seed)
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow it so those still propagate.
        raise InvalidPassphraseError()

    self.multi_wallet = MultiWallet(
        private_seeds={'primary': primary_seed},
        public={'cosigner': wallet.cosigner_public_seed,
                'backup': wallet.backup_public_seed})
    return self
def transaction(self, compare, success=None, failure=None):
    """Perform a transaction.

    :param compare: A list of comparisons to make
    :param success: A list of operations to perform if all the comparisons
        are true
    :param failure: A list of operations to perform if any of the
        comparisons are false
    :return: A tuple of (operation status, responses)
    """
    compare = [c.build_message() for c in compare]

    success_ops = self._ops_to_requests(success)
    failure_ops = self._ops_to_requests(failure)

    transaction_request = etcdrpc.TxnRequest(compare=compare,
                                             success=success_ops,
                                             failure=failure_ops)
    txn_response = self.kvstub.Txn(transaction_request,
                                   self.timeout,
                                   credentials=self.call_credentials,
                                   metadata=self.metadata)

    responses = []
    for response in txn_response.responses:
        response_type = response.WhichOneof('response')
        if response_type in ['response_put', 'response_delete_range',
                             'response_txn']:
            # Pass these responses through untouched.
            responses.append(response)
        elif response_type == 'response_range':
            # Unpack range responses into (value, metadata) pairs.
            range_kvs = []
            for kv in response.response_range.kvs:
                range_kvs.append((kv.value, KVMetadata(kv, txn_response.header)))
            responses.append(range_kvs)

    return txn_response.succeeded, responses
def create_logger():
    """Initialize the global `logger` variable.

    Installs a midnight-rotating file handler writing to the module-level
    `log_file` at `log_level`.
    """
    global logger
    formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s')
    # Rotate once per day at midnight; rotated files get a date suffix.
    handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1)
    handler.setFormatter(formatter)
    handler.setLevel(log_level)
    handler.suffix = "%Y-%m-%d"
    logger = logging.getLogger("sacplus")
    logger.setLevel(log_level)
    logger.addHandler(handler)
def get_repo_content(path):
    """List files in a repo (directory path or zip archive).

    Parameters
    ----------
    path: string or pathlib.Path

    Returns
    -------
    A namedtuple with ``.iszip`` and ``.filelist``; the paths in
    ``filelist`` are plain strings.
    """
    path = Path(path)
    if zipfile.is_zipfile(str(path)):
        iszip = True
        with zipfile.ZipFile(str(path)) as archive:
            filelist = [entry.filename for entry in archive.infolist()]
    else:
        # Plain directory: recurse and keep regular files only.
        iszip = False
        filelist = [str(item) for item in path.glob('**/*') if item.is_file()]
    return namedtuple('repocontent', ['iszip', 'filelist'])(iszip, filelist)
def rewind(self, stop):
    """Used if you need to rewind stack to a particular frame.

    :param stop: Callable used to stop unwind, e.g.::

        def stop(frame):
            return True

    :return: A context object used to restore the stack.
    """
    # Walk frames newest-first until `stop` accepts one; `i` counts how
    # many frames will be popped.
    # NOTE(review): when stop() matches the newest frame, i == 0 and the
    # slice [-0:] covers the WHOLE stack, removing everything — confirm
    # this is the intended semantics for that edge case.
    for i, frame in enumerate(reversed(self.stack)):
        if stop(frame):
            frames = self.stack[-i:]
            break
    else:
        raise RewindDidNotStop()
    del self.stack[-i:]
    if self.src_path is not None:
        # Pop src_path entries for removed 'src' frames, stopping at the
        # first removed 'src_path' frame.
        # NOTE(review): this assumes a particular push ordering between
        # 'src' and 'src_path' frames — verify against the push side.
        for frame in frames:
            if 'src_path' in frame:
                break
            if 'src' in frame:
                self.src_path.pop()
    # The returned context restores the removed frames on close.
    return Close(functools.partial(self.restore, frames))
def add_note(path, filename="note.txt"):
    """Opens a txt file at the given path where user can add and save notes.

    Args:
        path (str): Directory where note will be saved.
        filename (str): Name of note. Defaults to "note.txt"
    """
    path = os.path.expanduser(path)
    assert os.path.isdir(path), "{} is not a valid directory.".format(path)

    filepath = os.path.join(path, filename)
    # Remember whether the file existed so we can report create vs. update.
    exists = os.path.isfile(filepath)

    try:
        # Blocks until the user's editor exits.
        subprocess.call([EDITOR, filepath])
    except Exception as exc:
        logger.error("Editing note failed!")
        raise exc

    if exists:
        print("Note updated at:", filepath)
    else:
        print("Note created at:", filepath)
def S2playground(extent):
    """Return a segmentlist identifying the S2 playground times within the
    interval defined by the segment extent.

    Example:

    >>> from pycbc_glue import segments
    >>> S2playground(segments.segment(874000000, 874010000))
    [segment(874000013, 874000613), segment(874006383, 874006983)]
    """
    # Playground convention: 600 s windows every 6370 s, anchored at
    # GPS time 729273613. Snap `lo` down to the nearest window start.
    lo = int(extent[0])
    lo -= (lo - 729273613) % 6370
    hi = int(extent[1]) + 1
    # Generate all windows covering [lo, hi), then clip to the extent.
    return segments.segmentlist(
        segments.segment(t, t + 600)
        for t in range(lo, hi, 6370)) & segments.segmentlist([extent])
def remove_tmp_prefix_from_filename(filename):
    """Remove tmp prefix from filename.

    :raises RuntimeError: if the filename does not start with the
        configured tmp prefix.
    :return: the filename with the tmp prefix stripped.
    """
    if not filename.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):
        raise RuntimeError(
            ERROR_MESSAGES['filename_hasnt_tmp_prefix'] % {'filename': filename})
    return filename[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):]
def watch_folder():
    """Main entry point.

    Expects one or two arguments (the watch folder + optional destination
    folder) plus optional flags defined on ``arg_parser``; polls the watch
    folder forever, recompiling on each refresh interval.
    """
    args = arg_parser.parse_args(sys.argv[1:])
    compiler_args = {}

    input_folder = os.path.realpath(args.input_dir)
    if not args.output_dir:
        output_folder = input_folder
    else:
        output_folder = os.path.realpath(args.output_dir)

    if args.verbose:
        Options.VERBOSE = True
        # FIX: print statement -> print function, so the module also
        # parses on Python 3.
        print("Watching {} at refresh interval {} seconds".format(
            input_folder, args.refresh))

    if args.extension:
        Options.OUTPUT_EXT = args.extension

    if getattr(args, 'tags', False):
        hamlpynodes.TagNode.self_closing.update(args.tags)

    if args.input_extension:
        hamlpy.VALID_EXTENSIONS += args.input_extension

    if args.attr_wrapper:
        compiler_args['attr_wrapper'] = args.attr_wrapper

    if args.jinja:
        # Strip Django-only tags and register the Jinja-specific ones.
        for k in ('ifchanged', 'ifequal', 'ifnotequal', 'autoescape',
                  'blocktrans', 'spaceless', 'comment', 'cache',
                  'localize', 'compress'):
            del hamlpynodes.TagNode.self_closing[k]
            hamlpynodes.TagNode.may_contain.pop(k, None)

        hamlpynodes.TagNode.self_closing.update({
            'macro': 'endmacro',
            'call': 'endcall',
            'raw': 'endraw',
        })

        hamlpynodes.TagNode.may_contain['for'] = 'else'

    while True:
        try:
            _watch_folder(input_folder, output_folder, compiler_args)
            time.sleep(args.refresh)
        except KeyboardInterrupt:
            # allow graceful exit (no stacktrace output)
            sys.exit(0)
def _init_training(self):  # pylint: disable=redefined-variable-type
    """Instantiate the classes needed during training."""
    # Checked backprop validates gradients numerically — slower, used only
    # when self.check is set.
    if self.check:
        self.backprop = CheckedBackprop(self.network, self.problem.cost)
    else:
        self.backprop = BatchBackprop(self.network, self.problem.cost)
    self.momentum = Momentum()
    self.decent = GradientDecent()
    self.decay = WeightDecay()
    self.tying = WeightTying(*self.problem.weight_tying)
    # NOTE(review): tying is applied once up front — presumably to make
    # tied weights start out identical; confirm against WeightTying.
    self.weights = self.tying(self.weights)
def formatLij(Lij0, Ne):
    """Build an Ne x Ne matrix of laser connections.

    Transforms a list of laser connections of the form [i, j, [l1, l2, ...]]
    (lasers l1, l2, ... coupling states i and j, 1-indexed) into an Ne x Ne
    matrix whose element [i][j] is the list of lasers connecting those
    states (an empty list when the pair is uncoupled).
    """
    # Keep the module-level Lij in sync with the returned matrix.
    global Lij
    Lij = []
    for i in range(Ne):
        row = []
        for j in range(Ne):
            lasers = []
            for tri in Lij0:
                # Connections are symmetric: accept (i, j) or (j, i);
                # the first matching entry wins.
                if tri[:2] in ([i + 1, j + 1], [j + 1, i + 1]):
                    lasers = tri[2]
                    break
            row.append(lasers)
        Lij.append(row)
    return Lij
def dictdict_to_listdict(dictgraph):
    """Transform a dict-dict graph representation into an adjacency
    dictionary representation (list-dict).

    :param dictgraph: dictionary mapping vertices to dictionaries such that
        dictgraph[u][v] is the weight of arc (u, v)
    :complexity: linear
    :returns: tuple with graph (listdict), name_to_node (dict),
        node_to_name (list)
    """
    # Bijection indices <-> names; sorted to make the output readable.
    node_to_name = sorted(dictgraph)
    name_to_node = {name: idx for idx, name in enumerate(node_to_name)}
    # Build the sparse adjacency representation.
    sparse = [{} for _ in range(len(dictgraph))]
    for u, neighbours in dictgraph.items():
        u_idx = name_to_node[u]
        for v, weight in neighbours.items():
            sparse[u_idx][name_to_node[v]] = weight
    return sparse, name_to_node, node_to_name
def process_edge_dijkstra(self, current, neighbor, pred, q, component):
    '''API: process_edge_dijkstra(self, current, neighbor, pred, q, component)
    Description:
        Used by search() method if the algo argument is 'Dijkstra'.
        Processes edges along Dijkstra's algorithm. User does not need to
        call this method directly.
    Input:
        current: Name of the current node (None when seeding the source).
        neighbor: Name of the neighbor node.
        pred: Predecessor tree.
        q: Priority queue of nodes to be processed.
        component: Component number.
    Post:
        'color' and 'label' attributes of nodes may change.
    '''
    if current is None:
        # Source node: tentative distance 0, seed the queue.
        self.get_node(neighbor).set_attr('color', 'red')
        self.get_node(neighbor).set_attr('label', 0)
        q.push(neighbor, 0)
        self.display()
        self.get_node(neighbor).set_attr('color', 'black')
        return
    new_estimate = (q.get_priority(current) +
                    self.get_edge_attr(current, neighbor, 'cost'))
    # Relax the edge when it yields a shorter tentative distance.
    if neighbor not in pred or new_estimate < q.get_priority(neighbor):
        pred[neighbor] = current
        self.get_node(neighbor).set_attr('color', 'red')
        self.get_node(neighbor).set_attr('label', new_estimate)
        q.push(neighbor, new_estimate)
        self.display()
        self.get_node(neighbor).set_attr('color', 'black')
def prepare_duplicate_order_object(manager, origin_volume, iops, tier,
                                   duplicate_size, duplicate_snapshot_size,
                                   volume_type, hourly_billing_flag=False):
    """Prepare the duplicate order to submit to SoftLayer_Product::placeOrder()

    :param manager: The File or Block manager calling this function
    :param origin_volume: The origin volume which is being duplicated
    :param iops: The IOPS for the duplicate volume (performance)
    :param tier: The tier level for the duplicate volume (endurance)
    :param duplicate_size: The requested size for the duplicate volume
    :param duplicate_snapshot_size: The size for the duplicate snapshot space
    :param volume_type: The type of the origin volume ('file' or 'block')
    :param hourly_billing_flag: Billing type, monthly (False) or hourly (True)
    :return: Returns the order object to be passed to the placeOrder()
        method of the Product_Order service
    """
    # Verify that the origin volume has not been cancelled
    if 'billingItem' not in origin_volume:
        raise exceptions.SoftLayerError(
            "The origin volume has been cancelled; "
            "unable to order duplicate volume")

    # Verify that the origin volume has snapshot space (needed for duplication)
    if isinstance(utils.lookup(origin_volume, 'snapshotCapacityGb'), str):
        origin_snapshot_size = int(origin_volume['snapshotCapacityGb'])
    else:
        raise exceptions.SoftLayerError(
            "Snapshot space not found for the origin volume. "
            "Origin snapshot space is needed for duplication.")

    # Obtain the datacenter location ID for the duplicate
    if isinstance(utils.lookup(origin_volume, 'billingItem',
                               'location', 'id'), int):
        location_id = origin_volume['billingItem']['location']['id']
    else:
        raise exceptions.SoftLayerError("Cannot find origin volume's location")

    # Ensure the origin volume is STaaS v2 or higher
    # and supports Encryption at Rest
    if not _staas_version_is_v2_or_above(origin_volume):
        raise exceptions.SoftLayerError(
            "This volume cannot be duplicated since it "
            "does not support Encryption at Rest.")

    # If no specific snapshot space was requested for the duplicate,
    # use the origin snapshot space size
    if duplicate_snapshot_size is None:
        duplicate_snapshot_size = origin_snapshot_size

    # Use the origin volume size if no size was specified for the duplicate
    if duplicate_size is None:
        duplicate_size = origin_volume['capacityGb']

    # Get the appropriate package for the order
    # ('storage_as_a_service' is currently used for duplicate volumes)
    package = get_package(manager, 'storage_as_a_service')

    # Determine the IOPS or tier level for the duplicate volume, along with
    # the type and prices for the order
    origin_storage_type = origin_volume['storageType']['keyName']
    if 'PERFORMANCE' in origin_storage_type:
        volume_is_performance = True
        if iops is None:
            # Default to the origin volume's provisioned IOPS.
            iops = int(origin_volume.get('provisionedIops', 0))
            if iops <= 0:
                raise exceptions.SoftLayerError(
                    "Cannot find origin volume's provisioned IOPS")
        # Set up the price array for the order
        prices = [
            find_price_by_category(package, 'storage_as_a_service'),
            find_price_by_category(package, 'storage_' + volume_type),
            find_saas_perform_space_price(package, duplicate_size),
            find_saas_perform_iops_price(package, duplicate_size, iops),
        ]
        # Add the price code for snapshot space as well, unless 0 GB was given
        if duplicate_snapshot_size > 0:
            prices.append(find_saas_snapshot_space_price(
                package, duplicate_snapshot_size, iops=iops))
    elif 'ENDURANCE' in origin_storage_type:
        volume_is_performance = False
        if tier is None:
            # Default to the origin volume's endurance tier.
            tier = find_endurance_tier_iops_per_gb(origin_volume)
        # Set up the price array for the order
        prices = [
            find_price_by_category(package, 'storage_as_a_service'),
            find_price_by_category(package, 'storage_' + volume_type),
            find_saas_endurance_space_price(package, duplicate_size, tier),
            find_saas_endurance_tier_price(package, tier),
        ]
        # Add the price code for snapshot space as well, unless 0 GB was given
        if duplicate_snapshot_size > 0:
            prices.append(find_saas_snapshot_space_price(
                package, duplicate_snapshot_size, tier=tier))
    else:
        raise exceptions.SoftLayerError(
            "Origin volume does not have a valid storage type "
            "(with an appropriate keyName to indicate the "
            "volume is a PERFORMANCE or an ENDURANCE volume)")

    duplicate_order = {
        'complexType': 'SoftLayer_Container_Product_Order_'
                       'Network_Storage_AsAService',
        'packageId': package['id'],
        'prices': prices,
        'volumeSize': duplicate_size,
        'quantity': 1,
        'location': location_id,
        'duplicateOriginVolumeId': origin_volume['id'],
        'useHourlyPricing': hourly_billing_flag
    }

    # IOPS is only meaningful (and only sent) for performance volumes.
    if volume_is_performance:
        duplicate_order['iops'] = iops

    return duplicate_order
def RelaxNGValidateCtxt(self, ctxt, options):
    """Use RelaxNG schema context to validate the document as it is
    processed. Activation is only possible before the first Read().
    If @ctxt is None, then RelaxNG schema validation is deactivated."""
    # Unwrap the Python-level context to its underlying C object (or None
    # to deactivate validation).
    ctxt__o = None if ctxt is None else ctxt._o
    return libxml2mod.xmlTextReaderRelaxNGValidateCtxt(self._o, ctxt__o, options)
def _read_config ( argv , config , current_name , queue ) : """Read the Sphinx config via multiprocessing for isolation . : param tuple argv : Arguments to pass to Sphinx . : param sphinxcontrib . versioning . lib . Config config : Runtime configuration . : param str current _ name : The ref name of the current version being built . : param multiprocessing . queues . Queue queue : Communication channel to parent process ."""
# Patch . EventHandlers . ABORT_AFTER_READ = queue # Run . _build ( argv , config , Versions ( list ( ) ) , current_name , False )
def iscomplex(polynomial):
    """Returns whether the polynomial has complex coefficients

    :param polynomial: Polynomial of noncommutative variables.
    :type polynomial: :class:`sympy.core.expr.Expr`.

    :returns: bool -- whether there is a complex coefficient.
    """
    # Plain Python numbers short-circuit the sympy expression walk.
    if isinstance(polynomial, (int, float)):
        return False
    if isinstance(polynomial, complex):
        return True
    polynomial = polynomial.expand()
    # Look for the imaginary unit I (or a native complex) in any monomial
    # factor of the expanded polynomial.
    for monomial in polynomial.as_coefficients_dict():
        for variable in monomial.as_coeff_mul()[1]:
            if isinstance(variable, complex) or variable == I:
                return True
    return False
def reset_failed(self, pk):
    """reset failed counter

    :param pk: primary key of the service name whose counters are reset
    :return:
    """
    # Reset both counters on every TriggerService where this name appears
    # as the consumer, and again where it appears as the provider.
    # NOTE(review): both updates clear BOTH counters — presumably
    # intentional (full reset per matching service); confirm.
    TriggerService.objects.filter(consumer__name__id=pk).update(
        consumer_failed=0, provider_failed=0)
    TriggerService.objects.filter(provider__name__id=pk).update(
        consumer_failed=0, provider_failed=0)
def _netstat_route_sunos():
    '''
    Return netstat routing information for SunOS
    '''
    ret = []
    # IMPROVEMENT: the IPv4 and IPv6 passes were verbatim copy-paste except
    # for the address family; fold them into one loop.
    # `tail +5` skips netstat's header lines.
    for family in ('inet', 'inet6'):
        cmd = 'netstat -f {0} -rn | tail +5'.format(family)
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            ret.append({
                'addr_family': family,
                'destination': comps[0],
                'gateway': comps[1],
                'netmask': '',
                'flags': comps[2],
                # The interface column is absent for some route types.
                'interface': comps[5] if len(comps) >= 6 else ''})
    return ret
def profile_form_factory():
    """Create a profile form.

    Returns an EmailProfileForm (with email + confirmation fields) when
    email editing is enabled in the app config, otherwise a plain
    ProfileForm bound to the current user profile.
    """
    if current_app.config['USERPROFILES_EMAIL_ENABLED']:
        return EmailProfileForm(
            formdata=None,
            username=current_userprofile.username,
            full_name=current_userprofile.full_name,
            email=current_user.email,
            email_repeat=current_user.email,
            prefix='profile', )
    else:
        return ProfileForm(
            formdata=None,
            obj=current_userprofile,
            prefix='profile', )
def getKerningMutator(self, pairs=None):
    """Return a kerning mutator, collect the sources, build mathGlyphs.

    If no pairs are given: calculate the whole table.
    If pairs are given then query the sources for a value and make a
    mutator only with those values.
    """
    # Reuse the cached mutator when it was built for the same pair set.
    if self._kerningMutator and pairs == self._kerningMutatorPairs:
        return self._kerningMutator
    kerningItems = []
    if pairs is None:
        # Whole-table mode: one MathKerning item per unmuted foreground source.
        for sourceDescriptor in self.sources:
            if sourceDescriptor.layerName is not None:
                continue
            if not sourceDescriptor.muteKerning:
                loc = Location(sourceDescriptor.location)
                sourceFont = self.fonts[sourceDescriptor.name]
                if sourceFont is None:
                    continue
                # this makes assumptions about the groups of all sources
                # being the same.
                kerningItems.append(
                    (loc, self.mathKerningClass(sourceFont.kerning,
                                                sourceFont.groups)))
    else:
        # Sparse mode: remember the pair set for cache validation.
        self._kerningMutatorPairs = pairs
        for sourceDescriptor in self.sources:
            # XXX check sourceDescriptor layerName, only foreground should
            # contribute
            if sourceDescriptor.layerName is not None:
                continue
            if not os.path.exists(sourceDescriptor.path):
                continue
            if not sourceDescriptor.muteKerning:
                sourceFont = self.fonts[sourceDescriptor.name]
                if sourceFont is None:
                    continue
                loc = Location(sourceDescriptor.location)
                # XXX can we get the kern value from the fontparts kerning
                # object?
                kerningItem = self.mathKerningClass(sourceFont.kerning,
                                                    sourceFont.groups)
                # Keep only the requested pairs (sparse kerning table).
                sparseKerning = {}
                for pair in pairs:
                    v = kerningItem.get(pair)
                    if v is not None:
                        sparseKerning[pair] = v
                kerningItems.append((loc, self.mathKerningClass(sparseKerning)))
    bias, self._kerningMutator = self.getVariationModel(
        kerningItems, axes=self.serializedAxes, bias=self.newDefaultLocation())
    return self._kerningMutator
def run(self):
    """Overrides parent method to implement thread's functionality.

    Repeatedly calls self._update under the lock, exiting once self._done
    is set; sleeps one second between iterations.
    """
    while True:
        # make sure to run at least once before exiting
        with self._lock:
            # Both the update and the done-check happen under the lock.
            self._update(self._data)
            if self._done:
                break
        time.sleep(1)
def xAxisIsMinor(self):
    '''Returns True if the minor axis is parallel to the X axis, boolean.'''
    # The X radius is the minor one exactly when it does not exceed the
    # Y radius.
    return self.radius.x <= self.radius.y
def parse(self, fo):
    """Convert MDmodule output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MDmodule output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    nucs = {"A": 0, "C": 1, "G": 2, "T": 3}
    # Matrix rows: position index followed by four percentages (A, C, G, T).
    p = re.compile(r'(\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)')
    # Aligned-site lines: FASTA-like header, strand/site tag, then the
    # site sequence.
    pf = re.compile(r'>.+\s+[bf]\d+\s+(\w+)')
    pwm = []
    pfm = []
    align = []
    m_id = ""
    for line in fo.readlines():
        if line.startswith("Motif"):
            # A new "Motif" header closes the previous motif, if any.
            if m_id:
                motifs.append(Motif())
                motifs[-1].id = m_id
                motifs[-1].pwm = pwm
                motifs[-1].pfm = pfm
                motifs[-1].align = align
                pwm = []
                pfm = []
                align = []
            m_id = line.split("\t")[0]
        else:
            m = p.search(line)
            if m:
                # Percentages -> frequencies in [0, 1].
                pwm.append([float(m.group(x)) / 100 for x in [2, 3, 4, 5]])
            m = pf.search(line)
            if m:
                # Lazily size the count matrix from the first site's length.
                if not pfm:
                    pfm = [[0 for x in range(4)] for x in range(len(m.group(1)))]
                for i in range(len(m.group(1))):
                    pfm[i][nucs[m.group(1)[i]]] += 1
                align.append(m.group(1))
    # Flush the final motif (no trailing "Motif" header follows it).
    if pwm:
        motifs.append(Motif())
        motifs[-1].id = m_id
        motifs[-1].pwm = pwm
        motifs[-1].pfm = pfm
        motifs[-1].align = align
    return motifs
def regex(self, regex=None):
    """Regex

    Sets or gets the regular expression used to validate the Node.

    Arguments:
        regex {str} -- A standard regular expression string

    Raises:
        ValueError

    Returns:
        None | str
    """
    # Getter: no argument means return the stored expression.
    if regex is None:
        return self._regex

    # Regexes only make sense for string-typed nodes; warn and bail.
    if self._type != 'string':
        sys.stderr.write('can not set __regex__ for %s' % self._type)
        return

    # Accept plain strings or pre-compiled pattern objects only.
    if isinstance(regex, (basestring, _REGEX_TYPE)):
        self._regex = regex
    else:
        raise ValueError('__regex__')
def normalize_rgb_colors_to_hex(css):
    """Convert `rgb(51,102,153)` to `#336699`."""
    def _to_hex(match):
        # Split the captured channel list and render as lowercase hex.
        channels = [part.strip() for part in match.group(1).split(",")]
        return '#%.2x%.2x%.2x' % tuple(map(int, channels))
    return re.sub(r"rgb\s*\(\s*([0-9,\s]+)\s*\)", _to_hex, css)
def public_ip_prefixes(self):
    """Instance depends on the API version:

       * 2018-07-01: :class:`PublicIPPrefixesOperations<azure.mgmt.network.v2018_07_01.operations.PublicIPPrefixesOperations>`
       * 2018-08-01: :class:`PublicIPPrefixesOperations<azure.mgmt.network.v2018_08_01.operations.PublicIPPrefixesOperations>`
    """
    # Resolve which API version the client profile selects for this
    # operation group.
    api_version = self._get_api_version('public_ip_prefixes')
    # Import the operations class lazily so only the chosen version's
    # module is ever loaded.
    if api_version == '2018-07-01':
        from .v2018_07_01.operations import PublicIPPrefixesOperations as OperationClass
    elif api_version == '2018-08-01':
        from .v2018_08_01.operations import PublicIPPrefixesOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    # Build the operations object with the shared client/config and
    # (de)serializers over the version-specific model set.
    return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def static(base, mapping=None, far=('js', 'css', 'gif', 'jpg', 'jpeg', 'png', 'ttf', 'woff')):
    """Serve files from disk.

    This utility endpoint factory is meant primarily for use in development
    environments; in production it is better (more efficient, secure, etc.)
    to serve static content from a front-end server such as Nginx.

    :param base: Base directory to serve files from; path elements below the
        attachment point are joined onto it to locate the file.
    :param mapping: Optional dict mapping filename extensions to template
        engines; a mapped path returns ``("{mapping}:{path}", dict())``
        instead of the open file. E.g. ``static('/pages', dict(html='mako'))``.
    :param far: Iterable of extensions given far-future cache-expiry headers;
        assign any falsy value to disable.
    :returns: A handler suitable for attachment as a class attribute.
    """
    from os.path import sep  # needed for the base-path boundary check below

    base = abspath(base)

    @staticmethod
    def static_handler(context, *parts, **kw):
        path = normpath(pathjoin(base, *parts))

        if __debug__:
            log.debug("Attempting to serve static file.",
                      extra=dict(request=id(context), base=base, path=path))

        # Bug fix: a bare startswith(base) also admits *sibling* paths that
        # merely share the prefix (base="/srv/www" would admit
        # "/srv/www-private/secret"). Require the resolved path to be base
        # itself or to live strictly below base + path separator.
        if path != base and not path.startswith(base + sep):
            raise HTTPForbidden("Cowardly refusing to violate base path policy." if __debug__ else None)

        if not exists(path):
            # Do the right thing if the file doesn't actually exist.
            raise HTTPNotFound()

        if not isfile(path):
            # Only serve normal files; no UNIX domain sockets, FIFOs, etc.
            raise HTTPForbidden("Cowardly refusing to open a non-file." if __debug__ else None)

        if far and path.rpartition('.')[2] in far:
            # Far-future cache expiry (one year).
            context.response.cache_expires = 60 * 60 * 24 * 365

        if mapping:
            # Handle the mapping of filename extensions to 2-tuples.
            # NOTE: partition('.') keys on the *compound* extension
            # ("tar.gz"), while the `far` check above uses the last
            # component only — preserved as-is.
            _, _, extension = basename(path).partition('.')
            if extension in mapping:
                return mapping[extension] + ':' + path, dict()

        return open(path, 'rb')

    return static_handler
def load(self, field_xso):
    """Load the field information from a data field.

    :param field_xso: XSO describing the field.
    :type field_xso: :class:`~.Field`

    Copies :attr:`desc` and :attr:`label` from `field_xso` when they are
    set (truthy), shadowing the class-level declaration, and always copies
    :attr:`required`. Subclasses extend this to also load options/values
    and call this implementation via :func:`super`.
    """
    desc = field_xso.desc
    if desc:
        self._desc = desc

    label = field_xso.label
    if label:
        self._label = label

    # `required` is unconditional: absence in the XSO means False.
    self._required = field_xso.required
def _handle_exception(self, exception):
    """Called within an except block to allow converting exceptions
    to arbitrary responses. Anything returned (except None) will
    be used as response.

    Delegates to the parent implementation first; if the parent
    re-raises (or raises anything), the exception is serialized into a
    JSON-RPC-style error payload and returned via ``_json_response``.
    """
    try:
        # Give the base request class first chance to turn the exception
        # into a response of its own.
        return super(WebSocketRpcRequest, self)._handle_exception(exception)
    except Exception:
        # "Expected" Odoo business exceptions are not logged as errors;
        # anything else gets a full traceback in the server log.
        if not isinstance(exception, (odoo.exceptions.Warning, odoo.http.SessionExpiredException, odoo.exceptions.except_orm)):
            _logger.exception("Exception during JSON request handling.")
        # Default error payload: generic server error (code 200 is the
        # JSON-RPC application-level convention used by Odoo, not HTTP 200).
        error = {'code': 200, 'message': "Odoo Server Error", 'data': odoo.http.serialize_exception(exception)}
        # Session-related failures are reported with code 100 so the
        # client can prompt for re-authentication. NOTE(review): the
        # SessionExpiredException check intentionally runs after the
        # AuthenticationError check and may overwrite its message.
        if isinstance(exception, odoo.http.AuthenticationError):
            error['code'] = 100
            error['message'] = "Odoo Session Invalid"
        if isinstance(exception, odoo.http.SessionExpiredException):
            error['code'] = 100
            error['message'] = "Odoo Session Expired"
        return self._json_response(error=error)
def rotate(compound, theta, around):
    """Rotate a compound around an arbitrary vector.

    Parameters
    ----------
    compound : mb.Compound
        The compound being rotated (modified in place).
    theta : float
        The angle by which to rotate the compound, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The vector about which to rotate the compound.

    Raises
    ------
    ValueError
        If `around` is the zero vector.
    """
    axis = np.asarray(around).reshape(3)
    # A zero axis does not define a rotation.
    if not axis.any():
        raise ValueError('Cannot rotate around a zero vector')
    coords = compound.xyz_with_ports
    compound.xyz_with_ports = Rotation(theta, axis).apply_to(coords)
def cut ( x , bins , right = True , labels = None , retbins = False , precision = 3 , include_lowest = False , duplicates = 'raise' ) : """Bin values into discrete intervals . Use ` cut ` when you need to segment and sort data values into bins . This function is also useful for going from a continuous variable to a categorical variable . For example , ` cut ` could convert ages to groups of age ranges . Supports binning into an equal number of bins , or a pre - specified array of bins . Parameters x : array - like The input array to be binned . Must be 1 - dimensional . bins : int , sequence of scalars , or IntervalIndex The criteria to bin by . * int : Defines the number of equal - width bins in the range of ` x ` . The range of ` x ` is extended by . 1 % on each side to include the minimum and maximum values of ` x ` . * sequence of scalars : Defines the bin edges allowing for non - uniform width . No extension of the range of ` x ` is done . * IntervalIndex : Defines the exact bins to be used . Note that IntervalIndex for ` bins ` must be non - overlapping . right : bool , default True Indicates whether ` bins ` includes the rightmost edge or not . If ` ` right = = True ` ` ( the default ) , then the ` bins ` ` ` [ 1 , 2 , 3 , 4 ] ` ` indicate ( 1,2 ] , ( 2,3 ] , ( 3,4 ] . This argument is ignored when ` bins ` is an IntervalIndex . labels : array or bool , optional Specifies the labels for the returned bins . Must be the same length as the resulting bins . If False , returns only integer indicators of the bins . This affects the type of the output container ( see below ) . This argument is ignored when ` bins ` is an IntervalIndex . retbins : bool , default False Whether to return the bins or not . Useful when bins is provided as a scalar . precision : int , default 3 The precision at which to store and display the bins labels . include _ lowest : bool , default False Whether the first interval should be left - inclusive or not . 
duplicates : { default ' raise ' , ' drop ' } , optional If bin edges are not unique , raise ValueError or drop non - uniques . . . versionadded : : 0.23.0 Returns out : Categorical , Series , or ndarray An array - like object representing the respective bin for each value of ` x ` . The type depends on the value of ` labels ` . * True ( default ) : returns a Series for Series ` x ` or a Categorical for all other inputs . The values stored within are Interval dtype . * sequence of scalars : returns a Series for Series ` x ` or a Categorical for all other inputs . The values stored within are whatever the type in the sequence is . * False : returns an ndarray of integers . bins : numpy . ndarray or IntervalIndex . The computed or specified bins . Only returned when ` retbins = True ` . For scalar or sequence ` bins ` , this is an ndarray with the computed bins . If set ` duplicates = drop ` , ` bins ` will drop non - unique bin . For an IntervalIndex ` bins ` , this is equal to ` bins ` . See Also qcut : Discretize variable into equal - sized buckets based on rank or based on sample quantiles . Categorical : Array type for storing data that come from a fixed set of values . Series : One - dimensional array with axis labels ( including time series ) . IntervalIndex : Immutable Index implementing an ordered , sliceable set . Notes Any NA values will be NA in the result . Out of bounds values will be NA in the resulting Series or Categorical object . Examples Discretize into three equal - sized bins . > > > pd . cut ( np . array ( [ 1 , 7 , 5 , 4 , 6 , 3 ] ) , 3) . . . # doctest : + ELLIPSIS [ ( 0.994 , 3.0 ] , ( 5.0 , 7.0 ] , ( 3.0 , 5.0 ] , ( 3.0 , 5.0 ] , ( 5.0 , 7.0 ] , . . . Categories ( 3 , interval [ float64 ] ) : [ ( 0.994 , 3.0 ] < ( 3.0 , 5.0 ] . . . > > > pd . cut ( np . array ( [ 1 , 7 , 5 , 4 , 6 , 3 ] ) , 3 , retbins = True ) . . . # doctest : + ELLIPSIS ( [ ( 0.994 , 3.0 ] , ( 5.0 , 7.0 ] , ( 3.0 , 5.0 ] , ( 3.0 , 5.0 ] , ( 5.0 , 7.0 ] , . . . 
Categories ( 3 , interval [ float64 ] ) : [ ( 0.994 , 3.0 ] < ( 3.0 , 5.0 ] . . . array ( [ 0.994 , 3 . , 5 . , 7 . ] ) ) Discovers the same bins , but assign them specific labels . Notice that the returned Categorical ' s categories are ` labels ` and is ordered . > > > pd . cut ( np . array ( [ 1 , 7 , 5 , 4 , 6 , 3 ] ) , . . . 3 , labels = [ " bad " , " medium " , " good " ] ) [ bad , good , medium , medium , good , bad ] Categories ( 3 , object ) : [ bad < medium < good ] ` ` labels = False ` ` implies you just want the bins back . > > > pd . cut ( [ 0 , 1 , 1 , 2 ] , bins = 4 , labels = False ) array ( [ 0 , 1 , 1 , 3 ] ) Passing a Series as an input returns a Series with categorical dtype : > > > s = pd . Series ( np . array ( [ 2 , 4 , 6 , 8 , 10 ] ) , . . . index = [ ' a ' , ' b ' , ' c ' , ' d ' , ' e ' ] ) > > > pd . cut ( s , 3) . . . # doctest : + ELLIPSIS a ( 1.992 , 4.667] b ( 1.992 , 4.667] c ( 4.667 , 7.333] d ( 7.333 , 10.0] e ( 7.333 , 10.0] dtype : category Categories ( 3 , interval [ float64 ] ) : [ ( 1.992 , 4.667 ] < ( 4.667 , . . . Passing a Series as an input returns a Series with mapping value . It is used to map numerically to intervals based on bins . > > > s = pd . Series ( np . array ( [ 2 , 4 , 6 , 8 , 10 ] ) , . . . index = [ ' a ' , ' b ' , ' c ' , ' d ' , ' e ' ] ) > > > pd . cut ( s , [ 0 , 2 , 4 , 6 , 8 , 10 ] , labels = False , retbins = True , right = False ) . . . # doctest : + ELLIPSIS ( a 0.0 b 1.0 c 2.0 d 3.0 e 4.0 dtype : float64 , array ( [ 0 , 2 , 4 , 6 , 8 ] ) ) Use ` drop ` optional when bins is not unique > > > pd . cut ( s , [ 0 , 2 , 4 , 6 , 10 , 10 ] , labels = False , retbins = True , . . . right = False , duplicates = ' drop ' ) . . . # doctest : + ELLIPSIS ( a 0.0 b 1.0 c 2.0 d 3.0 e 3.0 dtype : float64 , array ( [ 0 , 2 , 4 , 6 , 8 ] ) ) Passing an IntervalIndex for ` bins ` results in those categories exactly . Notice that values not covered by the IntervalIndex are set to NaN . 
0 is to the left of the first bin ( which is closed on the right ) , and 1.5 falls between two bins . > > > bins = pd . IntervalIndex . from _ tuples ( [ ( 0 , 1 ) , ( 2 , 3 ) , ( 4 , 5 ) ] ) > > > pd . cut ( [ 0 , 0.5 , 1.5 , 2.5 , 4.5 ] , bins ) [ NaN , ( 0 , 1 ] , NaN , ( 2 , 3 ] , ( 4 , 5 ] ] Categories ( 3 , interval [ int64 ] ) : [ ( 0 , 1 ] < ( 2 , 3 ] < ( 4 , 5 ] ]"""
# NOTE : this binning code is changed a bit from histogram for var ( x ) = = 0 # for handling the cut for datetime and timedelta objects x_is_series , series_index , name , x = _preprocess_for_cut ( x ) x , dtype = _coerce_to_type ( x ) if not np . iterable ( bins ) : if is_scalar ( bins ) and bins < 1 : raise ValueError ( "`bins` should be a positive integer." ) try : # for array - like sz = x . size except AttributeError : x = np . asarray ( x ) sz = x . size if sz == 0 : raise ValueError ( 'Cannot cut empty array' ) rng = ( nanops . nanmin ( x ) , nanops . nanmax ( x ) ) mn , mx = [ mi + 0.0 for mi in rng ] if np . isinf ( mn ) or np . isinf ( mx ) : # GH 24314 raise ValueError ( 'cannot specify integer `bins` when input data ' 'contains infinity' ) elif mn == mx : # adjust end points before binning mn -= .001 * abs ( mn ) if mn != 0 else .001 mx += .001 * abs ( mx ) if mx != 0 else .001 bins = np . linspace ( mn , mx , bins + 1 , endpoint = True ) else : # adjust end points after binning bins = np . linspace ( mn , mx , bins + 1 , endpoint = True ) adj = ( mx - mn ) * 0.001 # 0.1 % of the range if right : bins [ 0 ] -= adj else : bins [ - 1 ] += adj elif isinstance ( bins , IntervalIndex ) : if bins . is_overlapping : raise ValueError ( 'Overlapping IntervalIndex is not accepted.' ) else : if is_datetime64tz_dtype ( bins ) : bins = np . asarray ( bins , dtype = _NS_DTYPE ) else : bins = np . asarray ( bins ) bins = _convert_bin_to_numeric_type ( bins , dtype ) # GH 26045 : cast to float64 to avoid an overflow if ( np . diff ( bins . astype ( 'float64' ) ) < 0 ) . any ( ) : raise ValueError ( 'bins must increase monotonically.' ) fac , bins = _bins_to_cuts ( x , bins , right = right , labels = labels , precision = precision , include_lowest = include_lowest , dtype = dtype , duplicates = duplicates ) return _postprocess_for_cut ( fac , bins , retbins , x_is_series , series_index , name , dtype )
def withdraw(self, amount, currency, payment_method_id):
    """Withdraw funds to a payment method.

    See AuthenticatedClient.get_payment_methods() for information about
    available payment methods.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): Currency type (eg. 'BTC').
        payment_method_id (str): ID of the payment method.

    Returns:
        dict: Withdraw details, e.g.::

            {
                "id": "593533d2-ff31-46e0-b22e-ca754147a96a",
                "amount": "10.00",
                "currency": "USD",
                "payout_at": "2016-08-20T00:31:09Z"
            }
    """
    payload = json.dumps({
        'amount': amount,
        'currency': currency,
        'payment_method_id': payment_method_id,
    })
    return self._send_message('post', '/withdrawals/payment-method',
                              data=payload)
def resendLast(self):
    """Resend the last sent packet due to a timeout.

    Re-transmits ``context.last_pkt`` to the remote host, updates the
    resent-bytes/duplicate metrics, and invokes ``context.packethook``
    (if set) with the re-sent packet.
    """
    # Idiom fix: pass lazy %-style arguments to the logger instead of
    # eagerly formatting with `%`, so the string is only built when the
    # WARNING level is enabled.
    log.warning("Resending packet %s on sessions %s",
                self.context.last_pkt, self)
    self.context.metrics.resent_bytes += len(self.context.last_pkt.buffer)
    self.context.metrics.add_dup(self.context.last_pkt)
    sendto_port = self.context.tidport
    if not sendto_port:
        # If the tidport wasn't set, then the remote end hasn't even
        # started talking to us yet. That's not good. Maybe it's not
        # there.
        sendto_port = self.context.port
    self.context.sock.sendto(self.context.last_pkt.encode().buffer,
                             (self.context.host, sendto_port))
    if self.context.packethook:
        self.context.packethook(self.context.last_pkt)
def apt_install_from_url(pkg_name, url, log=False):
    """Install a .deb package from a URL on an Ubuntu host.

    :param pkg_name: the name of the package to install
    :param url: the full URL for the .deb package
        (NOTE(review): the original docstring said "rpm package", but the
        commands below download a ``.deb`` and install it with ``dpkg``)
    :param log: when True, announce the install via ``log_green``
    :returns: True when the package was downloaded and installed;
        implicitly returns None when the package was already installed
        — presumably callers treat any non-False value as success; verify.
    """
    # Only act when apt reports the package as not yet installed.
    if is_package_installed(distribution='ubuntu', pkg=pkg_name) is False:
        if log:
            log_green("installing %s from %s" % (pkg_name, url))
        # Suppress fabric's warning/running/stdout noise while we
        # download (resumable via -c) and install the package.
        with settings(hide('warnings', 'running', 'stdout'), capture=True):
            sudo("wget -c -O %s.deb %s" % (pkg_name, url))
            sudo("dpkg -i %s.deb" % pkg_name)
            # if we didn't abort above, we should return True
            return True
def create_routes(routes: Tuple[Route]) -> List[Tuple[str, Resource]]:
    """A thin wrapper around create_routes that passes in flask specific
    values.

    :param routes: A tuple containing the route and another tuple with
        all http methods allowed for the route.
    :returns: A list of tuples containing the route and generated handler.
    """
    generated = doctor_create_routes(
        routes,
        handle_http,
        default_base_handler_class=Resource,
    )
    return generated
def _request_sender ( self , packet : dict ) : """Sends a request to a server from a ServiceClient auto dispatch method called from self . send ( )"""
node_id = self . _get_node_id_for_packet ( packet ) client_protocol = self . _client_protocols . get ( node_id ) if node_id and client_protocol : if client_protocol . is_connected ( ) : packet [ 'to' ] = node_id client_protocol . send ( packet ) return True else : self . _logger . error ( 'Client protocol is not connected for packet %s' , packet ) raise ClientDisconnected ( ) else : # No node found to send request self . _logger . error ( 'Out of %s, Client Not found for packet %s' , self . _client_protocols . keys ( ) , packet ) raise ClientNotFoundError ( )