signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def serialize(pagination, **kwargs):
    """Build and sign a resumption token for the next result page.

    Returns ``None`` when the pagination has no next page; otherwise a
    signed, URL-safe token string encoding the next page number, the
    (re-)dumped request kwargs, and — when present on the pagination —
    an Elasticsearch scroll id.
    """
    if not pagination.has_next:
        return

    verb = kwargs['verb']
    signer = URLSafeTimedSerializer(
        current_app.config['SECRET_KEY'],
        salt=verb,
    )
    schema = _schema_from_verb(verb, partial=False)
    payload = {
        'seed': random.random(),
        'page': pagination.next_num,
        'kwargs': schema.dump(kwargs).data,
    }
    # Scroll-based paginations carry an opaque scroll id; forward it so the
    # next request can resume the same scroll context.
    scroll_id = getattr(pagination, '_scroll_id', None)
    if scroll_id:
        payload['scroll_id'] = scroll_id
    return signer.dumps(payload)
def _from_pb(cls, pb, set_key=True, ent=None, key=None):
    """Internal helper to create an entity from an EntityProto protobuf.

    Args:
      pb: The ``entity_pb.EntityProto`` to deserialize.
      set_key: If True, always copy the key onto the entity; if False, a
        trivial incomplete key (no id and no parent) is skipped.
      ent: Optionally, an existing entity instance to populate instead of
        constructing a fresh ``cls()``.
      key: Optionally, a Key that overrides any key stored in ``pb``.

    Returns:
      The populated entity instance.

    Raises:
      TypeError: if ``pb`` is not an EntityProto.
    """
    if not isinstance(pb, entity_pb.EntityProto):
        raise TypeError('pb must be a EntityProto; received %r' % pb)
    if ent is None:
        ent = cls()

    # A key passed in overrides a key in the pb.
    if key is None and pb.key().path().element_size():
        key = Key(reference=pb.key())
    # If set_key is not set, skip a trivial incomplete key.
    if key is not None and (set_key or key.id() or key.parent()):
        ent._key = key

    # NOTE(darke): Keep a map from (property name, indexed) to the property.
    # This allows us to skip the (relatively) expensive call to
    # _get_property_for for repeated fields.
    _property_map = {}
    projection = []
    # Indexed properties first, then raw (unindexed) properties.
    for indexed, plist in ((True, pb.property_list()), (False, pb.raw_property_list())):
        for p in plist:
            # INDEX_VALUE marks a projected property; collect its name so
            # the projection can be recorded on the entity below.
            if p.meaning() == entity_pb.Property.INDEX_VALUE:
                projection.append(p.name())
            property_map_key = (p.name(), indexed)
            if property_map_key not in _property_map:
                _property_map[property_map_key] = ent._get_property_for(p, indexed)
            _property_map[property_map_key]._deserialize(ent, p)

    ent._set_projection(projection)
    return ent
def setAccessRules(self, pid, public=False):
    """Set access rules for a resource.

    Currently only allows setting the public/private flag.

    :param pid: The HydroShare ID of the resource.
    :param public: True if the resource should be made public.
    :return: the resource id echoed back by the server.
    :raises HydroShareNotAuthorized: on HTTP 403.
    :raises HydroShareNotFound: on HTTP 404.
    :raises HydroShareHTTPException: on any other non-200 status.
    """
    url = "{url_base}/resource/accessRules/{pid}/".format(
        url_base=self.url_base, pid=pid)
    payload = {'public': public}
    response = self._request('PUT', url, data=payload)
    status = response.status_code
    # Map error statuses onto the library's exception hierarchy.
    if status == 403:
        raise HydroShareNotAuthorized(('PUT', url))
    if status == 404:
        raise HydroShareNotFound((pid,))
    if status != 200:
        raise HydroShareHTTPException((url, 'PUT', status, payload))
    resource = response.json()
    assert resource['resource_id'] == pid
    return resource['resource_id']
def _parse_raw ( self , raw ) : """Parse a raw dictionary to create a resource . : type raw : Dict [ str , Any ]"""
self . raw = raw if not raw : raise NotImplementedError ( "We cannot instantiate empty resources: %s" % raw ) dict2resource ( raw , self , self . _options , self . _session )
def to_string(mnemonic):
    """Return the string representation of the given REIL mnemonic."""
    mnemonic_names = {
        # Arithmetic instructions
        ReilMnemonic.ADD: "add",
        ReilMnemonic.SUB: "sub",
        ReilMnemonic.MUL: "mul",
        ReilMnemonic.DIV: "div",
        ReilMnemonic.MOD: "mod",
        ReilMnemonic.BSH: "bsh",
        # Bitwise instructions
        ReilMnemonic.AND: "and",
        ReilMnemonic.OR: "or",
        ReilMnemonic.XOR: "xor",
        # Data transfer instructions
        ReilMnemonic.LDM: "ldm",
        ReilMnemonic.STM: "stm",
        ReilMnemonic.STR: "str",
        # Conditional instructions
        ReilMnemonic.BISZ: "bisz",
        ReilMnemonic.JCC: "jcc",
        # Other instructions
        ReilMnemonic.UNKN: "unkn",
        ReilMnemonic.UNDEF: "undef",
        ReilMnemonic.NOP: "nop",
        # Extensions
        ReilMnemonic.SEXT: "sext",
        ReilMnemonic.SDIV: "sdiv",
        ReilMnemonic.SMOD: "smod",
        ReilMnemonic.SMUL: "smul",
    }
    # KeyError for unknown mnemonics, matching a plain dict lookup.
    return mnemonic_names[mnemonic]
def _deserialize_datetime ( self , data ) : """Take any values coming in as a datetime and deserialize them"""
for key in data : if isinstance ( data [ key ] , dict ) : if data [ key ] . get ( 'type' ) == 'datetime' : data [ key ] = datetime . datetime . fromtimestamp ( data [ key ] [ 'value' ] ) return data
def _ipc_send ( self , sock , message_type , payload ) : '''Send and receive a message from the ipc . NOTE : this is not thread safe'''
sock . sendall ( self . _pack ( message_type , payload ) ) data , msg_type = self . _ipc_recv ( sock ) return data
def derivatives(self, x, y, Rs, theta_Rs, e1, e2, center_x=0, center_y=0):
    """Return df/dx and df/dy of the function (integral of NFW).

    Deflection of an elliptical NFW profile: coordinates are rotated into
    the frame aligned with the major axis and rescaled by the ellipticity,
    the spherical NFW deflection is evaluated there, and the result is
    rotated back to the original frame.

    :param x: x-coordinate(s) in the image plane
    :param y: y-coordinate(s) in the image plane
    :param Rs: NFW scale radius
    :param theta_Rs: deflection angle at Rs (converted to rho0 internally)
    :param e1: first ellipticity component
    :param e2: second ellipticity component
    :param center_x: profile center x (default 0)
    :param center_y: profile center y (default 0)
    :return: tuple (f_x, f_y) of deflection components
    """
    # Convert (e1, e2) into position angle phi_G and axis ratio q.
    phi_G, q = param_util.ellipticity2phi_q(e1, e2)
    x_shift = x - center_x
    y_shift = y - center_y
    cos_phi = np.cos(phi_G)
    sin_phi = np.sin(phi_G)
    # Cap ellipticity at 0.99 so the sqrt scalings stay well-behaved.
    e = min(abs(1. - q), 0.99)
    # Rotate into the ellipse frame and stretch the two axes.
    xt1 = (cos_phi * x_shift + sin_phi * y_shift) * np.sqrt(1 - e)
    xt2 = (-sin_phi * x_shift + cos_phi * y_shift) * np.sqrt(1 + e)
    R_ = np.sqrt(xt1 ** 2 + xt2 ** 2)
    rho0_input = self.nfw._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs)
    # Guard against a vanishing scale radius.
    if Rs < 0.0000001:
        Rs = 0.0000001
    f_x_prim, f_y_prim = self.nfw.nfwAlpha(R_, Rs, rho0_input, xt1, xt2)
    # Undo the axis stretch, then rotate back to the original frame.
    f_x_prim *= np.sqrt(1 - e)
    f_y_prim *= np.sqrt(1 + e)
    f_x = cos_phi * f_x_prim - sin_phi * f_y_prim
    f_y = sin_phi * f_x_prim + cos_phi * f_y_prim
    return f_x, f_y
def compile(**kwargs):
    r"""Compile Sass/SCSS to CSS.

    Exactly one of three exclusive source parameters must be given:

    :param string: a string of Sass source code to compile; returns the
        compiled CSS string
    :param filename: path of a Sass file to compile; returns the compiled
        CSS string, or a ``(css, source_map)`` pair when
        ``source_map_filename`` is set
    :param dirname: a ``(source_dir, output_dir)`` pair; compiles every
        source file under ``source_dir`` into ``output_dir`` (directory
        tree preserved) and returns :const:`None`

    Common optional keyword arguments:

    :param output_style: one of ``'nested'`` (default), ``'expanded'``,
        ``'compact'``, ``'compressed'``
    :param source_comments: whether to add comments about source lines;
        :const:`False` by default (the old string values are deprecated)
    :param source_map_filename: (``filename`` mode only) emit a source map
        and use this as its output filename
    :param output_filename_hint: (``filename`` mode only) output filename
        hint recorded in the source map
    :param source_map_contents: embed include contents in map
    :param source_map_embed: embed sourceMappingUrl as data URI
    :param omit_source_map_url: omit source map URL comment from output
    :param source_map_root: base path, emitted in the source map as is
    :param include_paths: paths to search for ``@import``\ ed files; the
        current working directory is always included
    :param precision: numeric precision, :const:`5` by default
    :param custom_functions: custom Sass functions — a set/sequence of
        :class:`SassFunction`\ s, a mapping of names to lambdas, or a
        set/sequence of named functions
    :param importers: ``(priority, callback)`` pairs given a chance to
        resolve ``@import`` directives; callbacks may return ``None`` or a
        list of 1/2/3-tuples of strings
    :param indented: (``string`` mode only) the source is indented Sass,
        not SCSS; :const:`False` by default
    :param custom_import_extensions: ignored, kept for backward
        compatibility
    :raises sass.CompileError: when compilation fails for any reason
        (for example the given Sass has broken syntax)
    :raises TypeError: on missing/conflicting modes or wrongly-typed
        arguments
    """
    # Determine which (exactly one) of the exclusive modes was requested.
    modes = set()
    for mode_name in MODES:
        if mode_name in kwargs:
            modes.add(mode_name)
    if not modes:
        raise TypeError('choose one at least in ' + and_join(MODES))
    elif len(modes) > 1:
        raise TypeError(
            and_join(modes) + ' are exclusive each other; '
            'cannot be used at a time',
        )
    precision = kwargs.pop('precision', 5)
    output_style = kwargs.pop('output_style', 'nested')
    if not isinstance(output_style, string_types):
        raise TypeError(
            'output_style must be a string, not ' + repr(output_style),
        )
    try:
        output_style = OUTPUT_STYLES[output_style]
    except KeyError:
        raise CompileError(
            '{} is unsupported output_style; choose one of {}'
            ''.format(output_style, and_join(OUTPUT_STYLES)),
        )
    source_comments = kwargs.pop('source_comments', False)
    # Legacy string values for source_comments are still accepted but
    # deprecated; normalize them to bool and warn.
    if source_comments in SOURCE_COMMENTS:
        if source_comments == 'none':
            deprecation_message = (
                'you can simply pass False to '
                "source_comments instead of 'none'"
            )
            source_comments = False
        elif source_comments in ('line_numbers', 'default'):
            deprecation_message = (
                'you can simply pass True to '
                'source_comments instead of ' + repr(source_comments)
            )
            source_comments = True
        else:
            deprecation_message = (
                "you don't have to pass 'map' to "
                'source_comments but just need to '
                'specify source_map_filename'
            )
            source_comments = False
        warnings.warn(
            "values like 'none', 'line_numbers', and 'map' for "
            'the source_comments parameter are deprecated; ' +
            deprecation_message,
            FutureWarning,
        )
    if not isinstance(source_comments, bool):
        raise TypeError(
            'source_comments must be bool, not ' + repr(source_comments),
        )
    fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()

    def _get_file_arg(key):
        # Pop a path-valued kwarg, encode it, and reject it outside
        # filename mode (these options only make sense there).
        ret = kwargs.pop(key, None)
        if ret is not None and not isinstance(ret, string_types):
            raise TypeError('{} must be a string, not {!r}'.format(key, ret))
        elif isinstance(ret, text_type):
            ret = ret.encode(fs_encoding)
        if ret and 'filename' not in modes:
            raise CompileError(
                '{} is only available with filename= keyword argument since '
                'has to be aware of it'.format(key),
            )
        return ret

    source_map_filename = _get_file_arg('source_map_filename')
    output_filename_hint = _get_file_arg('output_filename_hint')

    source_map_contents = kwargs.pop('source_map_contents', False)
    source_map_embed = kwargs.pop('source_map_embed', False)
    omit_source_map_url = kwargs.pop('omit_source_map_url', False)
    source_map_root = kwargs.pop('source_map_root', None)
    if isinstance(source_map_root, text_type):
        source_map_root = source_map_root.encode('utf-8')

    # #208: cwd is always included in include paths
    include_paths = (os.getcwd(),)
    include_paths += tuple(kwargs.pop('include_paths', ()) or ())
    include_paths = os.pathsep.join(include_paths)
    if isinstance(include_paths, text_type):
        include_paths = include_paths.encode(fs_encoding)

    # Normalize the three accepted custom_functions forms into a list of
    # SassFunction objects.
    custom_functions = kwargs.pop('custom_functions', ())
    if isinstance(custom_functions, collections_abc.Mapping):
        custom_functions = [
            SassFunction.from_lambda(name, lambda_)
            for name, lambda_ in custom_functions.items()
        ]
    elif isinstance(
            custom_functions, (collections_abc.Set, collections_abc.Sequence),
    ):
        custom_functions = [
            func if isinstance(func, SassFunction)
            else SassFunction.from_named_function(func)
            for func in custom_functions
        ]
    else:
        raise TypeError(
            'custom_functions must be one of:\n'
            '- a set/sequence of {0.__module__}.{0.__name__} objects,\n'
            '- a mapping of function name strings to lambda functions,\n'
            '- a set/sequence of named functions,\n'
            'not {1!r}'.format(SassFunction, custom_functions),
        )

    if kwargs.pop('custom_import_extensions', None) is not None:
        warnings.warn(
            '`custom_import_extensions` has no effect and will be removed in '
            'a future version.',
            FutureWarning,
        )

    importers = _validate_importers(kwargs.pop('importers', None))

    if 'string' in modes:
        string = kwargs.pop('string')
        if isinstance(string, text_type):
            string = string.encode('utf-8')
        indented = kwargs.pop('indented', False)
        if not isinstance(indented, bool):
            # Fixed: previously reported repr(source_comments) here, which
            # showed the wrong value in the error message.
            raise TypeError(
                'indented must be bool, not ' + repr(indented),
            )
        _check_no_remaining_kwargs(compile, kwargs)
        s, v = _sass.compile_string(
            string, output_style, source_comments, include_paths, precision,
            custom_functions, indented, importers,
            source_map_contents, source_map_embed, omit_source_map_url,
            source_map_root,
        )
        if s:
            return v.decode('utf-8')
    elif 'filename' in modes:
        filename = kwargs.pop('filename')
        if not isinstance(filename, string_types):
            raise TypeError('filename must be a string, not ' + repr(filename))
        elif not os.path.isfile(filename):
            raise IOError('{!r} seems not a file'.format(filename))
        elif isinstance(filename, text_type):
            filename = filename.encode(fs_encoding)
        _check_no_remaining_kwargs(compile, kwargs)
        s, v, source_map = _sass.compile_filename(
            filename, output_style, source_comments, include_paths, precision,
            source_map_filename, custom_functions, importers,
            output_filename_hint,
            source_map_contents, source_map_embed, omit_source_map_url,
            source_map_root,
        )
        if s:
            v = v.decode('utf-8')
            if source_map_filename:
                source_map = source_map.decode('utf-8')
                v = v, source_map
            return v
    elif 'dirname' in modes:
        try:
            search_path, output_path = kwargs.pop('dirname')
        except ValueError:
            raise ValueError(
                'dirname must be a pair of (source_dir, output_dir)',
            )
        _check_no_remaining_kwargs(compile, kwargs)
        s, v = compile_dirname(
            search_path, output_path, output_style, source_comments,
            include_paths, precision, custom_functions, importers,
            source_map_contents, source_map_embed, omit_source_map_url,
            source_map_root,
        )
        if s:
            return
    else:
        raise TypeError('something went wrong')
    # Any successful mode returned above; s falsy means libsass failed and
    # v carries the error message.
    assert not s
    raise CompileError(v)
def score_meaning(text):
    """Heuristically score how likely *text* is meaningful English.

    Counts letters and spaces; the score grows with the number of such
    characters and shrinks with character diversity and total length.
    Returns 0 when the text contains no letters or spaces. Note the score
    is not bounded to [0, 1].
    """
    letters = re.findall('[a-zA-Z ]', text)
    if not letters:
        return 0
    distinct = Counter(letters)
    return len(letters) ** 2 / (len(distinct) + len(text) / 26)
def r2_score(pred: Tensor, targ: Tensor) -> Rank0Tensor:
    "R2 score (coefficient of determination) between `pred` and `targ`."
    pred, targ = flatten_check(pred, targ)
    # 1 - SS_residual / SS_total
    residual_ss = ((targ - pred) ** 2).sum()
    total_ss = ((targ - targ.mean()) ** 2).sum()
    return 1 - residual_ss / total_ss
def check(self, data):
    """Check boxes and select radio buttons in a single call.

    For backwards compatibility this tries each entry of ``data`` first
    as a checkbox, then as a radio button. Unlike the default behavior of
    :func:`~Form.set_checkbox`, it never unchecks other boxes unless
    ``data`` says so explicitly.

    :raises LinkNotFoundError: if a name matches neither a checkbox nor
        a radio button.
    """
    def try_set(setter, *args, **kw):
        # Returns True when the setter accepted the input kind.
        try:
            setter(*args, **kw)
            return True
        except InvalidFormMethod:
            return False

    for name, value in data.items():
        selection = {name: value}
        if try_set(self.set_checkbox, selection, uncheck_other_boxes=False):
            continue
        if try_set(self.set_radio, selection):
            continue
        raise LinkNotFoundError("No input checkbox/radio named " + name)
def __sort_analyses(sentence):
    '''Sort the analyses of every word in *sentence* in place.

    Required for consistency, because by default analyses are listed in
    arbitrary order. Words are sorted by the concatenation of their root,
    part-of-speech tag, form and clitic. Raises if any word lacks an
    analysis. Returns the (mutated) sentence.
    '''
    def analysis_key(analysis):
        return "_".join(
            [analysis[ROOT], analysis[POSTAG], analysis[FORM], analysis[CLITIC]])

    for word in sentence:
        if ANALYSIS not in word:
            raise Exception('(!) Error: no analysis found from word: ' + str(word))
        word[ANALYSIS] = sorted(word[ANALYSIS], key=analysis_key)
    return sentence
def _sticker_templates_vocabularies(self):
    """Return the vocabulary used by AdmittedStickerTemplates.small_default.

    If the object has no admitted sticker templates saved, an empty
    DisplayList is returned. Otherwise only the sticker templates whose
    id is in the admitted set are included.

    :return: A DisplayList
    """
    vocabulary = DisplayList()
    admitted = self.getAdmittedStickers()
    if admitted:
        for template in getStickerTemplates():
            if template.get('id') in admitted:
                vocabulary.add(template.get('id'), template.get('title'))
    return vocabulary
def setup_jukebox_logger():
    """Set up the top-level jukebox logger with handlers.

    The logger is named ``"jb"`` and is the top-level logger for all other
    jukebox loggers. It does not propagate to the root logger, because it
    has its own StreamHandler and propagation would cause double output.

    The default level is :data:`jukeboxcore.constants.DEFAULT_LOGGING_LEVEL`
    but can be overridden with the ``JUKEBOX_LOG_LEVEL`` environment
    variable (a standard level name such as ``"DEBUG"``).

    :returns: None
    :rtype: None
    :raises: None
    """
    import os  # local import: only needed for the env-var override

    log = logging.getLogger("jb")
    # Own StreamHandler below; propagating too would duplicate messages.
    log.propagate = False
    handler = logging.StreamHandler(sys.stdout)
    fmt = "%(levelname)-8s:%(name)s: %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    log.addHandler(handler)
    # Honor the override promised in the docstring; Logger.setLevel accepts
    # level names ("DEBUG") as well as numeric levels.
    level = os.environ.get("JUKEBOX_LOG_LEVEL", DEFAULT_LOGGING_LEVEL)
    log.setLevel(level)
def stem(u, v, dfs_data):
    """The stem of Bu(v) is the edge uv in Bu(v).

    Returns the first edge id connecting u and v that belongs to
    Bu(v), or None when no such edge exists.
    """
    candidate_edges = dfs_data['graph'].get_edge_ids_by_node_ids(u, v)
    branch_edges = B(u, v, dfs_data)
    return next(
        (edge_id for edge_id in candidate_edges if edge_id in branch_edges),
        None,
    )
def removeItem(self, index):
    """Alias for removeComponent: remove the component at *index*."""
    row = index.row()
    column = index.column()
    self._stim.removeComponent(row, column)
def _init_ubuntu_user ( self ) : """Initialize the ubuntu user . : return : bool : If the initialization was successful : raises : : class : ` paramiko . ssh _ exception . AuthenticationException ` if the authentication fails"""
# TODO : Test this on an image without the ubuntu user setup . auth_user = self . user ssh = None try : # Run w / o allocating a pty , so we fail if sudo prompts for a passwd ssh = self . _get_ssh_client ( self . host , "ubuntu" , self . private_key_path , ) stdout , stderr = self . _run_command ( ssh , "sudo -n true" , pty = False ) except paramiko . ssh_exception . AuthenticationException as e : raise e else : auth_user = "ubuntu" finally : if ssh : ssh . close ( ) # if the above fails , run the init script as the authenticated user # Infer the public key public_key = None public_key_path = "{}.pub" . format ( self . private_key_path ) if not os . path . exists ( public_key_path ) : raise FileNotFoundError ( "Public key '{}' doesn't exist." . format ( public_key_path ) ) with open ( public_key_path , "r" ) as f : public_key = f . readline ( ) script = INITIALIZE_UBUNTU_SCRIPT . format ( public_key ) try : ssh = self . _get_ssh_client ( self . host , auth_user , self . private_key_path , ) self . _run_command ( ssh , [ "sudo" , "/bin/bash -c " + shlex . quote ( script ) ] , pty = True ) except paramiko . ssh_exception . AuthenticationException as e : raise e finally : ssh . close ( ) return True
def _matches(self, entities=None, extensions=None, domains=None,
             regex_search=False):
    """Checks whether the file matches all of the passed entities and
    extensions.

    Args:
        entities (dict): A dictionary of entity names -> regex patterns.
            A value of ``None`` means the entity must be *absent* from
            the file's tags.
        extensions (str, list): One or more file extensions to allow.
        domains (str, list): One or more domains the file must match.
        regex_search (bool): Whether to require exact match (False) or
            regex search (True) when comparing the query string to each
            entity.

    Returns:
        True if *all* entities and extensions match; False otherwise.
    """
    if extensions is not None:
        if isinstance(extensions, six.string_types):
            extensions = [extensions]
        # Build an alternation anchored at the end of the filename.
        extensions = '(' + '|'.join(extensions) + ')$'
        if re.search(extensions, self.filename) is None:
            return False
    if domains is not None:
        domains = listify(domains)
        # The file must share at least one domain with the query.
        if not set(self.domains) & set(domains):
            return False
    if entities is not None:
        for name, val in entities.items():
            # XOR: fail when the entity is missing but a value was
            # required, or when it is present but None (= "must be
            # absent") was requested.
            if (name not in self.tags) ^ (val is None):
                return False
            if val is None:
                continue

            def make_patt(x):
                patt = '%s' % x
                if isinstance(x, (int, float)):
                    # allow for leading zeros if a number was specified
                    # regardless of regex_search
                    patt = '0*' + patt
                if not regex_search:
                    # Exact-match mode: anchor the pattern on both ends.
                    patt = '^%s$' % patt
                return patt

            # Any one of the listified values matching is sufficient.
            ent_patts = [make_patt(x) for x in listify(val)]
            patt = '|'.join(ent_patts)
            if re.search(patt, str(self.tags[name].value)) is None:
                return False
    return True
def calibrate_threshold(self, pairs_valid, y_valid, strategy='accuracy',
                        min_rate=None, beta=1.):
    """Decision threshold calibration for pairwise binary classification.

    Calibrates the decision threshold (cutoff point) of the metric
    learner; the threshold is then used by ``predict``. The cutoff is
    chosen to maximize the requested score on the validation set
    ``(pairs_valid, y_valid)``.

    Parameters
    ----------
    pairs_valid : array-like, shape=(n_pairs_valid, 2, n_features)
        The validation set of pairs to use to set the threshold.
    y_valid : array-like, shape=(n_pairs_valid,)
        Labels of the validation pairs: +1 for positive pairs and -1
        for negative pairs.
    strategy : str, optional (default='accuracy')
        One of 'accuracy', 'f_beta', 'max_tpr', 'max_tnr'.
    min_rate : float in [0, 1] or None, (default=None)
        For 'max_tpr'/'max_tnr': the minimal true negative / true
        positive rate that must be achieved.
    beta : float, optional (default=1.)
        Beta value used when strategy == 'f_beta'.

    Returns
    -------
    self : object
        The fitted estimator with ``threshold_`` set.

    References
    ----------
    .. [1] Receiver-operating characteristic (ROC) plots, MH Zweig,
           G Campbell - Clinical chemistry, 1993
    .. [2] Most of this code comes from scikit-learn's PR #10117
    """
    self._validate_calibration_params(strategy, min_rate, beta)

    pairs_valid, y_valid = self._prepare_inputs(pairs_valid, y_valid,
                                                type_of_inputs='tuples')

    n_samples = pairs_valid.shape[0]
    if strategy == 'accuracy':
        scores = self.decision_function(pairs_valid)
        scores_sorted_idces = np.argsort(scores)[::-1]
        scores_sorted = scores[scores_sorted_idces]
        # true labels ordered by decision_function value (higher first)
        y_ordered = y_valid[scores_sorted_idces]
        # we need to add a threshold that will reject all points
        scores_sorted = np.concatenate([[scores_sorted[0] + 1],
                                        scores_sorted])
        # finds the threshold that maximizes the accuracy:
        cum_tp = stable_cumsum(y_ordered == 1)  # cumulative number of
        # true positives
        # we need to add the point where all samples are rejected:
        cum_tp = np.concatenate([[0.], cum_tp])
        cum_tn_inverted = stable_cumsum(y_ordered[::-1] == -1)
        cum_tn = np.concatenate([[0.], cum_tn_inverted])[::-1]
        cum_accuracy = (cum_tp + cum_tn) / n_samples
        imax = np.argmax(cum_accuracy)
        # we set the threshold to the lowest accepted score
        # note: we are working with negative distances but we want the
        # threshold to be with respect to the actual distances so we
        # take minus sign
        self.threshold_ = - scores_sorted[imax]
        # note: if the best is to reject all points it's already one of
        # the thresholds (scores_sorted[0])
        return self

    if strategy == 'f_beta':
        precision, recall, thresholds = precision_recall_curve(
            y_valid, self.decision_function(pairs_valid), pos_label=1)
        # here the thresholds are decreasing
        # We ignore the warnings here, in the same taste as
        # https://github.com/scikit-learn/scikit-learn/blob/62d205980446a
        # 1abc1065f4332fd74eee57fcf73/sklearn/metrics/classification.py#L1284
        with np.errstate(divide='ignore', invalid='ignore'):
            f_beta = ((1 + beta ** 2) * (precision * recall) /
                      (beta ** 2 * precision + recall))
        # We need to set nans to zero otherwise they will be considered
        # higher than the others (also discussed in
        # https://github.com/scikit-learn/scikit-learn/pull/10117/files
        # #r262115773)
        f_beta[np.isnan(f_beta)] = 0.
        imax = np.argmax(f_beta)
        # we set the threshold to the lowest accepted score
        # note: we are working with negative distances but we want the
        # threshold to be with respect to the actual distances so we
        # take minus sign
        self.threshold_ = - thresholds[imax]
        # Note: we don't need to deal with rejecting all points (i.e.
        # threshold = max_scores + 1), since this can never happen to be
        # optimal (see a more detailed discussion in
        # test_calibrate_threshold_extreme)
        return self

    fpr, tpr, thresholds = roc_curve(y_valid,
                                     self.decision_function(pairs_valid),
                                     pos_label=1)
    # here the thresholds are decreasing
    # NOTE(review): the following line is a no-op kept from the
    # original source.
    fpr, tpr, thresholds = fpr, tpr, thresholds

    if strategy in ['max_tpr', 'max_tnr']:
        if strategy == 'max_tpr':
            # candidate thresholds whose TNR (= 1 - FPR) meets min_rate
            indices = np.where(1 - fpr >= min_rate)[0]
            imax = np.argmax(tpr[indices])

        if strategy == 'max_tnr':
            # candidate thresholds whose TPR meets min_rate
            indices = np.where(tpr >= min_rate)[0]
            imax = np.argmax(1 - fpr[indices])

        imax_valid = indices[imax]
        # note: we are working with negative distances but we want the
        # threshold to be with respect to the actual distances so we
        # take minus sign
        if indices[imax] == len(thresholds):  # we want to accept everything
            self.threshold_ = - (thresholds[imax_valid] - 1)
        else:
            # thanks to roc_curve, the first point will always be
            # max_scores + 1, see:
            # https://github.com/scikit-learn/scikit-learn/pull/13523
            self.threshold_ = - thresholds[imax_valid]
    return self
def exciter(self, Xexc, Pexc, Vexc):
    """Exciter model.

    Based on Exciter.m from MatDyn by Stijn Cole, developed at
    Katholieke Universiteit Leuven. See
    http://www.esat.kuleuven.be/electa/teaching/matdyn/ for more
    information.

    Args:
        Xexc: exciter state matrix, one row per exciter
            (columns: Efd, Uf, Ur for IEEE DC1A).
        Pexc: exciter parameter matrix, one row per exciter.
        Vexc: exciter input matrix (column 1 holds terminal voltage).

    Returns:
        Matrix of state derivatives, same shape as ``Xexc``.
    """
    exciters = self.exciters
    F = zeros(Xexc.shape)

    typ1 = [e.generator._i for e in exciters if e.model == CONST_EXCITATION]
    typ2 = [e.generator._i for e in exciters if e.model == IEEE_DC1A]

    # Exciter type 1: constant excitation -- no dynamics.
    F[typ1, :] = 0.0

    # Exciter type 2: IEEE DC1A.
    Efd = Xexc[typ2, 0]
    Uf = Xexc[typ2, 1]
    Ur = Xexc[typ2, 2]

    Ka = Pexc[typ2, 0]
    Ta = Pexc[typ2, 1]
    Ke = Pexc[typ2, 2]
    Te = Pexc[typ2, 3]
    Kf = Pexc[typ2, 4]
    Tf = Pexc[typ2, 5]
    Aex = Pexc[typ2, 6]
    Bex = Pexc[typ2, 7]
    Ur_min = Pexc[typ2, 8]
    Ur_max = Pexc[typ2, 9]
    Uref = Pexc[typ2, 10]
    Uref2 = Pexc[typ2, 11]

    U = Vexc[typ2, 1]

    Ux = Aex * exp(Bex * Efd)  # saturation function
    dUr = 1 / Ta * (Ka * (Uref - U + Uref2 - Uf) - Ur)
    dUf = 1 / Tf * (Kf / Te * (Ur - Ux - Ke * Efd) - Uf)

    # Clamp the regulator output to its limits (anti-windup).
    # BUGFIX 1: the lower-limit branch previously tested
    # ``Ur < Ur_max`` instead of ``Ur < Ur_min``.
    # BUGFIX 2: ``sum(flatnonzero(...)) >= 1`` summed the *indices* of
    # violating elements, so a violation at index 0 alone (sum == 0)
    # was silently missed; test the number of violations instead.
    # NOTE(review): Ur2 is still set to the scalar limit for *all*
    # type-2 exciters when any one violates, as in the original; a
    # per-element clip may be intended -- verify against MatDyn.
    if flatnonzero(Ur > Ur_max).size >= 1:
        Ur2 = Ur_max
    elif flatnonzero(Ur < Ur_min).size >= 1:
        Ur2 = Ur_min
    else:
        Ur2 = Ur

    dEfd = 1 / Te * (Ur2 - Ux - Ke * Efd)
    F[typ2, :] = c_[dEfd, dUf, dUr]

    # Exciter types 3 and 4 are not implemented.
    return F
def has_u_umlaut(word: str) -> bool:
    """Does the word show the effect of u-umlaut?

    >>> has_u_umlaut("höfn")
    True
    >>> has_u_umlaut("börnum")
    True
    >>> has_u_umlaut("barn")
    False

    :param word: Old Norse word
    :return: has an u-umlaut occurred?
    """
    umlauted_nuclei = ("ö", "ǫ")
    syllables = [Syllable(chunk, VOWELS, CONSONANTS)
                 for chunk in s.syllabify_ssp(word)]
    count = len(syllables)
    if count == 1:
        # Monosyllable: the umlaut shows up directly in the nucleus.
        return syllables[-1].nucleus[0] in umlauted_nuclei
    if count >= 2 and syllables[-1].nucleus[0] == "u":
        # A final "u" syllable triggers umlaut in the preceding one.
        return syllables[-2].nucleus[0] in umlauted_nuclei
    return False
def process(self, metric):
    """Process a metric by sending it to TSDB.

    The metric is converted into an OpenTSDB entry, buffered in
    ``self.entrys``, and the whole batch is flushed (optionally
    gzip-compressed) once it reaches ``self.batch`` entries.
    """
    # Base OpenTSDB entry for this metric.
    entry = {'timestamp': metric.timestamp, 'value': metric.value,
             "tags": {}}
    entry["tags"]["hostname"] = metric.host

    if self.cleanMetrics:
        # Wrap the metric to normalise its paths and expose tags.
        metric = MetricWrapper(metric, self.log)
        if self.skipAggregates and metric.isAggregate():
            return
        for tagKey in metric.getTags():
            entry["tags"][tagKey] = metric.getTags()[tagKey]

    entry['metric'] = (self.prefix + metric.getCollectorPath() +
                       '.' + metric.getMetricPath())

    # Static tags configured on this handler.
    for [key, value] in self.tags:
        entry["tags"][key] = value

    # sic: the batch buffer attribute is named "entrys" elsewhere too.
    self.entrys.append(entry)

    # send data if list is long enough
    if (len(self.entrys) >= self.batch):
        # Compress data
        if self.compression >= 1:
            data = StringIO.StringIO()
            with contextlib.closing(gzip.GzipFile(
                    fileobj=data,
                    compresslevel=self.compression,
                    mode="w")) as f:
                f.write(json.dumps(self.entrys))
            self._send(data.getvalue())
        else:
            # no compression
            data = json.dumps(self.entrys)
            self._send(data)
def _get_tgt_length ( self , var ) : """Get the total length of the whole reference sequence"""
if var . type == "g" or var . type == "m" : return float ( "inf" ) else : # Get genomic sequence access number for this transcript identity_info = self . hdp . get_tx_identity_info ( var . ac ) if not identity_info : raise HGVSDataNotAvailableError ( "No identity info available for {ac}" . format ( ac = var . ac ) ) tgt_len = sum ( identity_info [ "lengths" ] ) return tgt_len
def retrieveVals(self):
    """Retrieve values for graphs."""
    for iface in self._ifaceList:
        stats = self._ifaceStats.get(iface)
        traffic_graph = 'netiface_traffic_%s' % iface
        if self.hasGraph(traffic_graph):
            # Counters are in bytes; the graphs plot bits.
            self.setGraphVal(traffic_graph, 'rx',
                             stats.get('rxbytes') * 8)
            self.setGraphVal(traffic_graph, 'tx',
                             stats.get('txbytes') * 8)
        errors_graph = 'netiface_errors_%s' % iface
        if self.hasGraph(errors_graph):
            for field in ('rxerrs', 'txerrs', 'rxframe', 'txcarrier',
                          'rxdrop', 'txdrop', 'rxfifo', 'txfifo'):
                self.setGraphVal(errors_graph, field, stats.get(field))
def reshape(self, *shape):
    """Reshape the Series object.

    The last dimension (the length of each constituent series) cannot
    change.

    Parameters
    ----------
    shape : one or more ints
        New shape
    """
    if prod(self.shape) != prod(shape):
        raise ValueError("Reshaping must leave the number of elements unchanged")

    if self.shape[-1] != shape[-1]:
        raise ValueError("Reshaping cannot change the size of the constituent series (last dimension)")

    # Labels index everything but the last (series) dimension.
    newlabels = self.labels.reshape(*shape[:-1]) if self.labels is not None else None

    reshaped = self._constructor(self.values.reshape(shape), labels=newlabels)
    return reshaped.__finalize__(self, noprop=('labels',))
def bestscan(self, seq):
    """m.bestscan(seq) -- Return the score of the best match to the
    motif in the supplied sequence.

    Returns -100 (the sentinel used as the scan floor) when the
    sequence contains no match at all.
    """
    matches, endpoints, scores = self.scan(seq, -100)
    if not scores:
        return -100
    # max() is O(n) and, unlike the previous sort-then-take-last
    # approach, does not mutate the list returned by scan().
    return max(scores)
def npy_to_numpy(npy_array):
    # type: (object) -> np.array
    """Convert an NPY-serialized payload into a numpy array.

    Args:
        npy_array (npy array): bytes to be converted to a numpy array.

    Returns:
        (np.array): the deserialized numpy array.
    """
    buffer = BytesIO(npy_array)
    array = np.load(buffer, allow_pickle=True)
    return array
def auto_inline_code(self, node):
    """Try to automatically generate nodes for inline literals.

    Parameters
    ----------
    node : nodes.literal
        Original inline literal node.

    Returns
    -------
    tocnode : docutils node
        The converted node, or None if conversion is not possible.
    """
    assert isinstance(node, nodes.literal)
    if len(node.children) != 1:
        return None
    content = node.children[0]
    if not isinstance(content, nodes.Text):
        return None
    text = content.astext().strip()
    # Only `$...$` spans are candidates for inline math conversion.
    if not (text.startswith('$') and text.endswith('$')):
        return None
    if not self.config['enable_inline_math']:
        return None
    inner = text[1:-1]
    self.state_machine.reset(self.document, node.parent,
                             self.current_level)
    return self.state_machine.run_role('math', content=inner)
def inverse_kinematics(self,
                       target_position_right,
                       target_orientation_right,
                       target_position_left,
                       target_orientation_left,
                       rest_poses):
    """Helper function to do inverse kinematics for given target
    positions and orientations in the PyBullet world frame.

    Args:
        target_position_{right,left}: A tuple, list, or numpy array of
            size 3 for position.
        target_orientation_{right,left}: A tuple, list, or numpy array
            of size 4 for an orientation quaternion.
        rest_poses: A list of size @num_joints to favor ik solutions
            close by.

    Returns:
        A list of size @num_joints corresponding to the joint angle
        solution.
    """
    ndof = 48
    common_kwargs = dict(
        lowerLimits=self.lower,
        upperLimits=self.upper,
        jointRanges=self.ranges,
        jointDamping=[0.7] * ndof,
    )
    right_solution = list(p.calculateInverseKinematics(
        self.ik_robot,
        self.effector_right,
        target_position_right,
        targetOrientation=target_orientation_right,
        restPoses=rest_poses[:7],
        **common_kwargs))
    left_solution = list(p.calculateInverseKinematics(
        self.ik_robot,
        self.effector_left,
        target_position_left,
        targetOrientation=target_orientation_left,
        restPoses=rest_poses[7:],
        **common_kwargs))
    # Overwrite the left-arm joints (indices 8..14) in the combined
    # solution with the left-arm IK result.
    right_solution[8:15] = left_solution[8:15]
    return right_solution[1:]
def check_name_not_on_dapi(cls, dap):
    '''Check that the package_name is not already registered on Dapi.

    Return a list of problems (empty when the name is free).'''
    problems = []
    name = dap.meta['package_name']
    if name:
        from . import dapicli
        if dapicli.metadap(name):
            problems.append(DapProblem(
                'This dap name is already registered on Dapi',
                level=logging.WARNING))
    return problems
def detect_tag(filename):
    """Return type and position of ID3v2 tag in ``filename``.

    Returns (tag_class, offset, length), where tag_class is either
    Tag22, Tag23, or Tag24, and (offset, length) is the position of
    the tag in the file.

    Raises NoTagError when no ID3v2 tag is present and TagError for an
    unsupported ID3v2 version.
    """
    with fileutil.opened(filename, "rb") as file:
        file.seek(0)
        header = file.read(10)
        file.seek(0)
        if len(header) < 10:
            raise NoTagError("File too short")
        if header[0:3] != b"ID3":
            raise NoTagError("ID3v2 tag not found")
        major, revision = header[3], header[4]
        if major not in _tag_versions or revision != 0:
            raise TagError("Unknown ID3 version: 2.{0}.{1}"
                           .format(*header[3:5]))
        cls = _tag_versions[major]
        # Tag size is a syncsafe integer that excludes the 10-byte
        # header itself.
        length = Syncsafe.decode(header[6:10]) + 10
        # An ID3v2.4 tag may carry an extra 10-byte footer.
        if major == 4 and header[5] & _TAG24_FOOTER:
            length += 10
        return (cls, 0, length)
def call(self, additional_fields, restriction, shape, depth, max_items,
         offset):
    """Find subfolders of a folder.

    :param additional_fields: the extra fields that should be returned
        with the folder, as FieldPath objects
    :param restriction: the restriction to apply to the search
    :param shape: the set of attributes to return
    :param depth: how deep in the folder structure to search
    :param max_items: the maximum number of items to return
    :param offset: the offset relative to the first item in the item
        collection (usually 0)
    :return: XML elements for the matching folders
    """
    from .folders import Folder
    roots = {f.root for f in self.folders}
    if len(roots) != 1:
        raise ValueError(
            'FindFolder must be called with folders in the same root hierarchy (%r)'
            % roots)
    root = roots.pop()
    payload_kwargs = dict(
        additional_fields=additional_fields,
        restriction=restriction,
        shape=shape,
        depth=depth,
        page_size=self.chunk_size,
        offset=offset,
    )
    for elem in self._paged_call(payload_func=self.get_payload,
                                 max_items=max_items,
                                 **payload_kwargs):
        if isinstance(elem, Exception):
            # Propagate per-page errors to the caller as items.
            yield elem
        else:
            yield Folder.from_xml(elem=elem, root=root)
def glob(patterns, *, flags=0):
    """Glob ``patterns`` and return all matches as a list."""
    pattern_tuple = util.to_tuple(patterns)
    matches = iglob(pattern_tuple, flags=flags)
    return list(matches)
def close(self):
    """Turn off the stream and close the socket."""
    if not self.streamSock:
        return
    # Stop streaming before tearing the socket down.
    self.watch(enable=False)
    self.streamSock.close()
    self.streamSock = None
def run(scenario, magicc_version=6, **kwargs):
    """Run a MAGICC scenario and return output data and (optionally)
    config parameters.

    As a reminder, putting ``out_parameters=1`` will cause MAGICC to
    write out its parameters into ``out/PARAMETERS.OUT`` and they will
    then be read into ``output.metadata["parameters"]`` where
    ``output`` is the returned object.

    Parameters
    ----------
    scenario : :obj:`pymagicc.io.MAGICCData`
        Scenario to run
    magicc_version : int
        MAGICC version to use for the run
    **kwargs
        Parameters overwriting default parameters

    Raises
    ------
    ValueError
        If the magicc_version is not available

    Returns
    -------
    output : :obj:`pymagicc.io.MAGICCData`
        Output of the run with the data in the ``df`` attribute and
        parameters and other metadata in the ``metadata`` attribute
    """
    available_versions = {6: MAGICC6, 7: MAGICC7}
    if magicc_version not in available_versions:
        raise ValueError(
            "MAGICC version {} is not available".format(magicc_version))
    magicc_cls = available_versions[magicc_version]
    with magicc_cls() as magicc:
        return magicc.run(scenario=scenario, **kwargs)
def insert_rows(fc, features, fields, includeOIDField=False, oidField=None):
    """Insert rows into a feature class based on a list of feature
    objects.

    Args:
        fc: path to the target feature class.
        features: list of feature objects (each exposing ``asRow``,
            ``fields`` and ``geometry``).
        fields: list of field names to populate; "SHAPE@" is appended
            here when there are features to insert.
        includeOIDField: if True, the source OID is preserved in a new
            "FSL_OID" LONG field.
        oidField: name of the source OID field -- presumably required
            when includeOIDField is True; TODO confirm with callers.

    Returns:
        The feature class path ``fc``.
    """
    if arcpyFound == False:
        raise Exception("ArcPy is required to use this function")
    icur = None
    if includeOIDField:
        # Preserve the source OID in a dedicated field.
        arcpy.AddField_management(fc, "FSL_OID", "LONG")
        fields.append("FSL_OID")
    if len(features) > 0:
        fields.append("SHAPE@")
        workspace = os.path.dirname(fc)
        # Wrap the inserts in an edit session.
        with arcpy.da.Editor(workspace) as edit:
            date_fields = getDateFields(fc)
            icur = arcpy.da.InsertCursor(fc, fields)
            for feat in features:
                # Start with empty strings; fields missing from the
                # source stay "".
                row = [""] * len(fields)
                drow = feat.asRow[0]
                dfields = feat.fields
                for field in fields:
                    if field in dfields or \
                            (includeOIDField and field == "FSL_OID"):
                        if field in date_fields:
                            # Source stores dates numerically; convert.
                            row[fields.index(field)] = \
                                toDateTime(drow[dfields.index(field)])
                        elif field == "FSL_OID":
                            row[fields.index("FSL_OID")] = \
                                drow[dfields.index(oidField)]
                        else:
                            row[fields.index(field)] = \
                                drow[dfields.index(field)]
                    # NOTE(review): these del statements only drop
                    # local names; they free no meaningful memory.
                    del field
                row[fields.index("SHAPE@")] = feat.geometry
                icur.insertRow(row)
                del row
                del drow
                del dfields
                del feat
            del features
            icur = None
            del icur
            del fields
        return fc
    else:
        return fc
def arrays_to_hdf5(filename="cache.hdf5"):
    """Return a registry for serialising numpy arrays to an HDF5
    reference."""
    array_serializer = SerNumpyArrayToHDF5(filename, "cache.lock")
    return Registry(
        types={numpy.ndarray: array_serializer},
        hooks={'<ufunc>': SerUFunc()},
        hook_fn=_numpy_hook,
    )
def confirmation_view(template, doc="Display a confirmation view."):
    """Confirmation view generator for the "comment was
    posted/flagged/deleted/approved" views."""
    def confirmed(request):
        comment = None
        pk = request.GET.get('c')
        if pk is not None:
            try:
                comment = comments.get_model().objects.get(pk=pk)
            except (ObjectDoesNotExist, ValueError):
                # Invalid or unknown pk: render without a comment.
                pass
        return render(request, template, {'comment': comment})

    confirmed.__doc__ = textwrap.dedent(
        """\
        %s

        Templates: :template:`%s``
        Context:
            comment
                The posted comment
        """ % (doc, template)
    )
    return confirmed
def _do_if_else_condition ( self , condition ) : """Common logic for evaluating the conditions on # if , # ifdef and # ifndef lines ."""
self . save ( ) d = self . dispatch_table if condition : self . start_handling_includes ( ) d [ 'elif' ] = self . stop_handling_includes d [ 'else' ] = self . stop_handling_includes else : self . stop_handling_includes ( ) d [ 'elif' ] = self . do_elif d [ 'else' ] = self . start_handling_includes
def PushItem(self, item, block=True):
    """Push an item on to the queue.

    If no ZeroMQ socket has been created, one will be created the
    first time this method is called.

    Args:
        item (object): item to push on the queue.
        block (Optional[bool]): whether the push should be performed
            in blocking or non-blocking mode.

    Raises:
        KeyboardInterrupt: if the process is sent a KeyboardInterrupt
            while pushing an item.
        QueueFull: if it was not possible to push the item to the
            queue within the timeout.
        RuntimeError: if terminate event is missing.
        zmq.error.ZMQError: if a ZeroMQ specific error occurs.
    """
    if not self._zmq_socket:
        # Lazily create the socket on first push.
        self._CreateZMQSocket()
    if not self._terminate_event:
        raise RuntimeError('Missing terminate event.')
    logger.debug('Push on {0:s} queue, port {1:d}'.format(
        self.name, self.port))
    # NOTE(review): despite the name, this is the *deadline* for the
    # whole retry loop, timeout_seconds into the future.
    last_retry_timestamp = time.time() + self.timeout_seconds
    while not self._terminate_event.is_set():
        try:
            send_successful = self._SendItem(self._zmq_socket, item, block)
            if send_successful:
                break
            if time.time() > last_retry_timestamp:
                logger.error('{0:s} unable to push item, raising.'.format(
                    self.name))
                raise errors.QueueFull
        except KeyboardInterrupt:
            # Abort the socket so the process does not hang, then
            # re-raise for the caller.
            self.Close(abort=True)
            raise
def _strip_placeholder_braces ( p_matchobj ) : """Returns string with conditional braces around placeholder stripped and percent sign glued into placeholder character . Returned string is composed from ' start ' , ' before ' , ' placeholder ' , ' after ' , ' whitespace ' , and ' end ' match - groups of p _ matchobj . Conditional braces are stripped from ' before ' and ' after ' groups . ' whitespace ' , ' start ' , and ' end ' groups are preserved without any change . Using this function as an ' repl ' argument in re . sub it is possible to turn : into :"""
before = p_matchobj . group ( 'before' ) or '' placeholder = p_matchobj . group ( 'placeholder' ) after = p_matchobj . group ( 'after' ) or '' whitespace = p_matchobj . group ( 'whitespace' ) or '' return before + '%' + placeholder + after + whitespace
def disambiguate(self, symclasses):
    """Refine a symmetry classification by one neighbourhood pass.

    For each vertex, multiply the symmetry classes of the atoms around
    it (weighted by bond type) to produce a new, more discriminating
    class value.

    Note: the original implementation caught OverflowError and retried
    with a Python 2 ``long`` literal (``1L``). Python ints are
    arbitrary-precision, so that fallback (a syntax error on Python 3)
    has been removed.
    """
    offsets = self.offsets
    result = symclasses[:]
    for index in self.range:
        val = 1
        for offset, bondtype in offsets[index]:
            val *= symclasses[offset] * bondtype
        result[index] = val
    return result
def visit_Name(self, node):
    """Return dependencies for the given variable.

    The variable has to be registered first (in ``naming``, as a
    global declaration, or as a parameter).
    """
    name = node.id
    if name in self.naming:
        return self.naming[name]
    if name in self.global_declarations:
        return [frozenset([self.global_declarations[name]])]
    if isinstance(node.ctx, ast.Param):
        # Parameters start with no dependencies; register lazily.
        deps = [frozenset()]
        self.naming[name] = deps
        return deps
    raise PythranInternalError(
        "Variable '{}' use before assignment".format(name))
def try_again_later(self, seconds):
    """Put this cluster into retry-wait (or, if it has already failed,
    consider it dead and leave it untouched)."""
    if self.failed:
        return
    self.fails += 1
    self.retry_at = dt.datetime.now() + timedelta(seconds=seconds)
def main():
    """Main method: print the project's GitLab variables as JSON."""
    run_config = _parse_args(sys.argv[1:])
    gitlab_config = GitLabConfig(run_config.url, run_config.token)
    manager = ProjectVariablesManager(gitlab_config, run_config.project)
    variables = manager.get()
    formatted = json.dumps(variables, sort_keys=True, indent=4,
                           separators=(",", ": "))
    print(formatted)
def records(account_id):
    """Fetch locks data and pretty-print it as a table."""
    session = boto3.Session()
    table = session.resource('dynamodb').Table('Sphere11.Dev.ResourceLocks')
    items = table.scan()['Items']
    # Convert epoch timestamps into datetimes for readable output.
    for item in items:
        for key in ('LockDate', 'RevisionDate'):
            if key in item:
                item[key] = datetime.fromtimestamp(item[key])
    print(tabulate.tabulate(items, headers="keys", tablefmt='fancy_grid'))
def modis_filename2modisdate(modis_fname):
    """Convert MODIS file name(s) to MODIS date string(s).

    The date is characters 1..11 of the basename with the extension
    stripped, i.e. the stamp that follows the leading sensor letter.

    @author: Renaud DUSSURGET (LER PAC/IFREMER)
    @history: Created by RD on 29/10/2012
    """
    names = modis_fname if isinstance(modis_fname, list) else [modis_fname]
    dates = []
    for name in names:
        stem = os.path.splitext(os.path.basename(name))[0]
        dates.append(stem[1:12])
    return dates
def _phir ( self , rho , T , x ) : """Residual contribution to the free Helmholtz energy Parameters rho : float Density , [ kg / m3] T : float Temperature , [ K ] x : float Mole fraction of ammonia in mixture , [ mol / mol ] Returns prop : dict dictionary with residual adimensional helmholtz energy and derivatives : * tau : the adimensional temperature variable , [ - ] * delta : the adimensional density variable , [ - ] * fir , [ - ] * firt : [ ∂ fir / ∂ τ ] δ , x [ - ] * fird : [ ∂ fir / ∂ δ ] τ , x [ - ] * firtt : [ ∂ 2fir / ∂ τ2 ] δ , x [ - ] * firdt : [ ∂ 2fir / ∂ τ ∂ δ ] x [ - ] * firdd : [ ∂ 2fir / ∂ δ2 ] τ , x [ - ] * firx : [ ∂ fir / ∂ x ] τ , δ [ - ] * F : Function for fugacity calculation , [ - ] References IAPWS , Guideline on the IAPWS Formulation 2001 for the Thermodynamic Properties of Ammonia - Water Mixtures , http : / / www . iapws . org / relguide / nh3h2o . pdf , Eq 3"""
# Temperature reducing value , Eq 4 Tc12 = 0.9648407 / 2 * ( IAPWS95 . Tc + NH3 . Tc ) Tn = ( 1 - x ) ** 2 * IAPWS95 . Tc + x ** 2 * NH3 . Tc + 2 * x * ( 1 - x ** 1.125455 ) * Tc12 dTnx = - 2 * IAPWS95 . Tc * ( 1 - x ) + 2 * x * NH3 . Tc + 2 * Tc12 * ( 1 - x ** 1.125455 ) - 2 * Tc12 * 1.12455 * x ** 1.12455 # Density reducing value , Eq 5 b = 0.8978069 rhoc12 = 1 / ( 1.2395117 / 2 * ( 1 / IAPWS95 . rhoc + 1 / NH3 . rhoc ) ) rhon = 1 / ( ( 1 - x ) ** 2 / IAPWS95 . rhoc + x ** 2 / NH3 . rhoc + 2 * x * ( 1 - x ** b ) / rhoc12 ) drhonx = - ( 2 * b * x ** b / rhoc12 + 2 * ( 1 - x ** b ) / rhoc12 + 2 * x / NH3 . rhoc - 2 * ( 1 - x ) / IAPWS95 . rhoc ) / ( 2 * x * ( 1 - x ** b ) / rhoc12 + x ** 2 / NH3 . rhoc + ( 1 - x ) ** 2 / IAPWS95 . rhoc ) ** 2 tau = Tn / T delta = rho / rhon water = IAPWS95 ( ) phi1 = water . _phir ( tau , delta ) ammonia = NH3 ( ) phi2 = ammonia . _phir ( tau , delta ) Dphi = self . _Dphir ( tau , delta , x ) prop = { } prop [ "tau" ] = tau prop [ "delta" ] = delta prop [ "fir" ] = ( 1 - x ) * phi1 [ "fir" ] + x * phi2 [ "fir" ] + Dphi [ "fir" ] prop [ "firt" ] = ( 1 - x ) * phi1 [ "firt" ] + x * phi2 [ "firt" ] + Dphi [ "firt" ] prop [ "firtt" ] = ( 1 - x ) * phi1 [ "firtt" ] + x * phi2 [ "firtt" ] + Dphi [ "firtt" ] prop [ "fird" ] = ( 1 - x ) * phi1 [ "fird" ] + x * phi2 [ "fird" ] + Dphi [ "fird" ] prop [ "firdd" ] = ( 1 - x ) * phi1 [ "firdd" ] + x * phi2 [ "firdd" ] + Dphi [ "firdd" ] prop [ "firdt" ] = ( 1 - x ) * phi1 [ "firdt" ] + x * phi2 [ "firdt" ] + Dphi [ "firdt" ] prop [ "firx" ] = - phi1 [ "fir" ] + phi2 [ "fir" ] + Dphi [ "firx" ] prop [ "F" ] = prop [ "firx" ] - delta / rhon * drhonx * prop [ "fird" ] + tau / Tn * dTnx * prop [ "firt" ] return prop
def locale_export():
    """Return shell exports that configure a UTF-8 locale for
    Click-based programs.

    Click aborts when Python 3 is configured to use ASCII as the
    environment encoding (see
    https://click.palletsprojects.com/en/7.x/python3/ for mitigation
    steps). This looks up available locales on the system to find an
    appropriate one, defaulting to C.UTF-8 which is globally available
    on newer systems.
    """
    try:
        available = subprocess.check_output(
            ["locale", "-a"]).decode(errors="ignore").split("\n")
    except subprocess.CalledProcessError:
        available = []
    # Pick the first UTF-8 locale, falling back to C.UTF-8.
    chosen = next(
        (loc for loc in available
         if loc.lower().endswith(("utf-8", "utf8"))),
        "C.UTF-8",
    )
    return "export LC_ALL=%s && export LANG=%s && " % (chosen, chosen)
def landing_target_send(self, time_usec, target_num, frame, angle_x,
                        angle_y, distance, size_x, size_y,
                        force_mavlink1=False):
    '''The location of a landing area captured from a downward facing
    camera.

    time_usec  : Timestamp (micros since boot or Unix epoch) (uint64_t)
    target_num : The ID of the target if multiple targets are present (uint8_t)
    frame      : MAV_FRAME enum specifying whether the following fields are
                 earth-frame, body-frame, etc. (uint8_t)
    angle_x    : X-axis angular offset (in radians) of the target from the
                 center of the image (float)
    angle_y    : Y-axis angular offset (in radians) of the target from the
                 center of the image (float)
    distance   : Distance to the target from the vehicle in meters (float)
    size_x     : Size in radians of target along x-axis (float)
    size_y     : Size in radians of target along y-axis (float)
    '''
    message = self.landing_target_encode(
        time_usec, target_num, frame, angle_x, angle_y, distance,
        size_x, size_y)
    return self.send(message, force_mavlink1=force_mavlink1)
def get_locales(self):
    """Get a list of supported locales.

    Computes the list from the ``I18N_LANGUAGES`` configuration
    variable and caches it on first use.
    """
    if self._locales_cache is None:
        locales = [self.babel.default_locale]
        configured = current_app.config.get('I18N_LANGUAGES', [])
        for code, _title in configured:
            locales.append(self.babel.load_locale(code))
        self._locales_cache = locales
    return self._locales_cache
def transition(value, maximum, start, end):
    """Transition (linearly interpolate) between two values.

    :param value: Current iteration.
    :param maximum: Maximum number of iterations.
    :param start: Start value.
    :param end: End value.
    :returns: Transitional value, rounded to 2 decimal places.
    """
    progress = (end - start) * value
    return round(start + progress / maximum, 2)
def rr_absent(name, HostedZoneId=None, DomainName=None, PrivateZone=False,
              Name=None, Type=None, SetIdentifier=None, region=None,
              key=None, keyid=None, profile=None):
    '''Ensure the Route53 record is deleted.

    name
        The name of the state definition. This will be used for Name
        if the latter is not provided.
    HostedZoneId
        The ID of the zone to delete the record from. Exclusive with
        DomainName.
    DomainName
        The domain name of the zone to delete the record from.
        Exclusive with HostedZoneId.
    PrivateZone
        Set to True if the RR to be removed is in a private zone,
        False if public.
    Name
        Name of the resource record.
    Type
        The record type (A, NS, MX, TXT, etc.)
    SetIdentifier
        Valid for Weighted, Latency, Geolocation, and Failover
        resource record sets only. An identifier that differentiates
        among multiple resource record sets that have the same
        combination of DNS name and type. Omit SetIdentifier for any
        other types of record sets.
    region
        The region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        Dict, or pillar key pointing to a dict, containing AWS
        region/key/keyid.
    '''
    # Fall back to the state name when no record Name is given.
    Name = Name if Name else name
    if Type is None:
        raise SaltInvocationError(
            "'Type' is a required parameter when deleting resource records.")
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    # Resolve the hosted zone from either its Id or its DomainName.
    args = {'Id': HostedZoneId, 'Name': DomainName,
            'PrivateZone': PrivateZone, 'region': region, 'key': key,
            'keyid': keyid, 'profile': profile}
    zone = __salt__['boto3_route53.find_hosted_zone'](**args)
    if not zone:
        ret['comment'] = 'Route 53 {} hosted zone {} not found'.format(
            'private' if PrivateZone else 'public', DomainName)
        log.info(ret['comment'])
        return ret
    zone = zone[0]
    HostedZoneId = zone['HostedZone']['Id']

    recordsets = __salt__['boto3_route53.get_resource_records'](
        HostedZoneId=HostedZoneId, StartRecordName=Name,
        StartRecordType=Type, region=region, key=key, keyid=keyid,
        profile=profile)
    if SetIdentifier and recordsets:
        # Narrow weighted/latency/geo/failover sets to the requested one.
        log.debug('Filter recordsets %s by SetIdentifier %s.',
                  recordsets, SetIdentifier)
        recordsets = [r for r in recordsets
                      if r.get('SetIdentifier') == SetIdentifier]
        log.debug('Resulted in recordsets %s.', recordsets)

    if not recordsets:
        # Nothing to do -- the record is already gone.
        ret['comment'] = ('Route 53 resource record {} with type {} '
                          'already absent.'.format(Name, Type))
        return ret
    elif len(recordsets) > 1:
        ret['comment'] = 'Given criteria matched more than one ResourceRecordSet.'
        log.error(ret['comment'])
        ret['result'] = False
        return ret
    ResourceRecordSet = recordsets[0]

    if __opts__['test']:
        # Dry-run mode: report what would happen without changing anything.
        ret['comment'] = ('Route 53 resource record {} with type {} '
                          'would be deleted.'.format(Name, Type))
        ret['result'] = None
        return ret

    ChangeBatch = {
        'Changes': [
            {
                'Action': 'DELETE',
                'ResourceRecordSet': ResourceRecordSet,
            }
        ]
    }
    if __salt__['boto3_route53.change_resource_record_sets'](
            HostedZoneId=HostedZoneId, ChangeBatch=ChangeBatch,
            region=region, key=key, keyid=keyid, profile=profile):
        ret['comment'] = 'Route 53 resource record {} with type {} deleted.'.format(
            Name, Type)
        log.info(ret['comment'])
        ret['changes']['old'] = ResourceRecordSet
        ret['changes']['new'] = None
    else:
        ret['comment'] = 'Failed to delete Route 53 resource record {} with type {}.'.format(
            Name, Type)
        log.error(ret['comment'])
        ret['result'] = False

    return ret
def pad(x, p=3):
    """Explicitly zero-pad a tensor along its H and W dimensions.

    Remarks:
        TensorFlow uses "ceil(input_spatial_shape[i] / strides[i])"
        rather than the explicit padding Caffe and PyTorch apply, so the
        symmetric padding has to be performed here beforehand.

    Args:
        x (tf.Tensor): incoming tensor; H and W are assumed to be the
            last two dimensions -- TODO confirm layout against callers
        p (int, optional): amount of padding added on each side of H and W

    Returns:
        tf.Tensor: the padded tensor
    """
    no_pad = [0, 0]
    spatial_pad = [p, p]
    paddings = [no_pad, no_pad, spatial_pad, spatial_pad]
    return tf.pad(x, paddings)
def _include_environment_variables ( self , program , executor_vars ) : """Define environment variables ."""
env_vars = { 'RESOLWE_HOST_URL' : self . settings_actual . get ( 'RESOLWE_HOST_URL' , 'localhost' ) , } set_env = self . settings_actual . get ( 'FLOW_EXECUTOR' , { } ) . get ( 'SET_ENV' , { } ) env_vars . update ( executor_vars ) env_vars . update ( set_env ) export_commands = [ 'export {}={}' . format ( key , shlex . quote ( value ) ) for key , value in env_vars . items ( ) ] return os . linesep . join ( export_commands ) + os . linesep + program
def wait_for_path_blocking(path: pathlib.Path, timeout: int = 30) -> None:
    """Block until ``path`` exists, polling every 50 ms.

    The path is always checked at least once, so a pre-existing path
    succeeds even with ``timeout=0`` (the previous loop condition
    skipped the check entirely in that case and raised spuriously).

    :param path: filesystem path to wait for
    :param timeout: maximum number of seconds to wait
    :raises TimeoutError: if the path has not appeared within
        ``timeout`` seconds
    """
    deadline = time.monotonic() + timeout
    while True:
        if path.exists():
            return
        if time.monotonic() >= deadline:
            raise TimeoutError(
                f"IPC socket file {path} has not appeared in {timeout} seconds"
            )
        time.sleep(0.05)
def mux(index, *mux_ins, **kwargs):
    """Multiplexer returning the value of the wire selected by ``index``.

    :param WireVector index: used as the select input to the multiplexer
    :param WireVector mux_ins: additional WireVector arguments selected
        when select > 1
    :param WireVector kwargs: additional WireVectors, keyword arg "default".
        If you are selecting between fewer items than your index can
        address, you can use the "default" keyword argument to
        auto-expand those terms.
    :return: WireVector of length of the longest input (not including select)

    To avoid confusion, if you are using the mux where the select is a
    "predicate" (something whose truth value you are checking rather
    than using it as a number) use the select function instead, as its
    argument ordering differs from the classic ternary operator of some
    languages.

    Example of mux as "selector" to pick between a0 and a1::

        index = WireVector(1)
        mux(index, a0, a1)

    Example of mux as "selector" to pick between a0 ... a3::

        index = WireVector(2)
        mux(index, a0, a1, a2, a3)

    Example of "default" to specify additional arguments::

        index = WireVector(3)
        mux(index, a0, a1, a2, a3, a4, a5, default=0)
    """
    if kwargs:  # only "default" is allowed as kwarg.
        if len(kwargs) != 1 or 'default' not in kwargs:
            # Legacy call style: treat the kwargs as select() predicates.
            # This path is deprecated; truly unknown keywords raise below.
            try:
                result = select(index, **kwargs)
                import warnings
                warnings.warn("Predicates are being deprecated in Mux. "
                              "Use the select operator instead.", stacklevel=2)
                return result
            except Exception:
                bad_args = [k for k in kwargs.keys() if k != 'default']
                raise PyrtlError('unknown keywords %s applied to mux' % str(bad_args))
        default = kwargs['default']
    else:
        default = None

    # find the diff between the addressable range and number of inputs given
    short_by = 2 ** len(index) - len(mux_ins)
    if short_by > 0:
        if default is not None:  # extend the list to appropriate size
            mux_ins = list(mux_ins)
            extention = [default] * short_by
            mux_ins.extend(extention)

    # After any default-padding, the input count must exactly fill the
    # select line's addressable range.
    if 2 ** len(index) != len(mux_ins):
        raise PyrtlError('Mux select line is %d bits, but selecting from %d inputs. '
                         % (len(index), len(mux_ins)))

    # Base case: a 1-bit select is a plain two-way select.
    if len(index) == 1:
        return select(index, falsecase=mux_ins[0], truecase=mux_ins[1])

    # Recursive case: split on the most-significant select bit and mux
    # each half with the remaining low-order bits.
    half = len(mux_ins) // 2
    return select(index[-1],
                  falsecase=mux(index[0:-1], *mux_ins[:half]),
                  truecase=mux(index[0:-1], *mux_ins[half:]))
def hash_sha256(buf):
    """Return the SHA-256 digest of ``buf`` as a 64-character hex string.

    The hexdigest is left-padded with ``'0'`` up to 64 characters
    (AuthenticationHelper.hash).
    """
    digest = hashlib.sha256(buf).hexdigest()
    return digest.rjust(64, '0')
def convert_numeric_id_to_id36(numeric_id):
    """Return the base36 string representation of a non-negative integer.

    For more info see:
    https://github.com/reddit/reddit/blob/master/r2/r2/lib/utils/_utils.pyx
    https://www.reddit.com/r/redditdev/comments/n624n/submission_ids_question/
    https://en.wikipedia.org/wiki/Base36

    :raises ValueError: if ``numeric_id`` is not a non-negative integer
        (base36 itself allows negative numbers, but reddit does not).
    """
    if not isinstance(numeric_id, six.integer_types) or numeric_id < 0:
        raise ValueError("must supply a positive int/long")

    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    base = len(digits)

    # Values smaller than the base map directly onto the alphabet.
    if numeric_id < base:
        return digits[numeric_id]

    # Collect digits least-significant first, then reverse at the end.
    pieces = []
    remaining = numeric_id
    while remaining:
        remaining, digit = divmod(remaining, base)
        pieces.append(digits[digit])
    return ''.join(reversed(pieces))
def encode_intervals(self, duration, intervals, values, dtype=bool,
                     multi=True, fill=None):
    '''Encode labeled intervals as a time-series matrix.

    Parameters
    ----------
    duration : number
        The duration (in seconds) of the track
    intervals : np.ndarray, shape=(n, 2)
        The list of intervals (in seconds)
    values : np.ndarray, shape=(n, m)
        The (encoded) values corresponding to each interval
    dtype : np.dtype
        The desired output type.  Note: the default was ``np.bool``,
        an alias removed in NumPy 1.24; the builtin ``bool`` is the
        drop-in replacement and behaves identically as a dtype.
    multi : bool
        If `True`, allow multiple labels per interval (overlapping
        intervals are accumulated with ``+=``); otherwise the last
        interval written wins.
    fill : dtype (optional)
        Optional default fill value for missing data.  If not provided,
        the default is inferred from `dtype`.

    Returns
    -------
    target : np.ndarray, shape=(duration * sr / hop_length, m)
        The labeled interval encoding, sampled at the desired frame rate
    '''
    if fill is None:
        fill = fill_value(dtype)

    # Convert interval times (seconds) to frame indices.
    frames = time_to_frames(intervals, sr=self.sr, hop_length=self.hop_length)
    n_total = int(time_to_frames(duration, sr=self.sr, hop_length=self.hop_length))

    values = values.astype(dtype)

    # Allocate enough rows for intervals that extend past ``duration``;
    # the output is truncated back to ``n_total`` on return.
    n_alloc = n_total
    if np.any(frames):
        n_alloc = max(n_total, 1 + int(frames.max()))

    target = np.empty((n_alloc, values.shape[1]), dtype=dtype)
    target.fill(fill)
    for column, interval in zip(values, frames):
        if multi:
            target[interval[0]:interval[1]] += column
        else:
            target[interval[0]:interval[1]] = column

    return target[:n_total]
def parse_args():
    """Parse and return the command line arguments."""
    global default_device
    parser = argparse.ArgumentParser(
        description='Initialize OATH token for use with yhsm-validation-server',
        add_help=True,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    add = parser.add_argument
    add('-D', '--device',
        dest='device', default=default_device, required=False,
        help='YubiHSM device')
    add('-v', '--verbose',
        dest='verbose', action='store_true', default=False,
        help='Enable verbose operation')
    add('--debug',
        dest='debug', action='store_true', default=False,
        help='Enable debug operation')
    add('--force',
        dest='force', action='store_true', default=False,
        help='Overwrite any present entry')
    add('--key-handle',
        dest='key_handle', required=True, metavar='HANDLE',
        help='Key handle to create AEAD')
    add('--uid',
        dest='uid', required=True, metavar='STR',
        help='User ID')
    add('--oath-c',
        dest='oath_c', required=False, default=0, metavar='INT',
        help='Initial OATH counter value')
    add('--test-oath-window',
        dest='look_ahead', required=False, default=10, metavar='INT',
        help='Number of codes to search with --test-code')
    add('--test-code',
        dest='test_code', type=int, required=False, metavar='INT',
        help='Optional OTP from token for verification')
    add('--oath-k',
        dest='oath_k', required=False, metavar='HEXSTR',
        help='The secret key of the token, hex encoded')
    add('--db-file',
        dest='db_file', default=default_db_file, required=False, metavar='FN',
        help='DB file for storing AEAD\'s for --pwhash and --oath in the yhsm-validation-server')
    return parser.parse_args()
def _strip_nones(d: Dict[str, Any]) -> Dict[str, Any]:
    """Remove internal/absent attributes and normalise JSON null values.

    An attribute with type None is equivalent to an absent attribute, so
    such entries (and underscore-prefixed keys) are dropped; JSGNull
    values are mapped to a literal None in the result.

    :param d: Object attribute dictionary
    :return: Object dictionary w/ Nones and underscores removed
    """
    # NOTE(review): operator precedence matters in the filter below --
    # ``and`` binds tighter than ``or``, so a value is kept when it is a
    # JSGObject subclass, OR when it passes BOTH the JSGString check
    # (not a JSGString, or its .val is not None) AND the AnyType check
    # (not an AnyType, or its .val is not Empty).  Presumably
    # intentional -- confirm against the JSG serialization tests.
    return OrderedDict({k: None if isinstance(v, JSGNull) else v
                        for k, v in d.items()
                        if not k.startswith("_") and v is not None and v is not Empty and
                        (issubclass(type(v), JSGObject) or
                         (not issubclass(type(v), JSGString) or v.val is not None) and
                         (not issubclass(type(v), AnyType) or v.val is not Empty))})
def geodetic2ecef(lat: float, lon: float, alt: float,
                  ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]:
    """Transform a geodetic point (default WGS-84 ellipsoid) to ECEF.

    Parameters
    ----------
    lat : float or numpy.ndarray of float
        target geodetic latitude
    lon : float or numpy.ndarray of float
        target geodetic longitude
    alt : float or numpy.ndarray of float
        target altitude above the geodetic ellipsoid (meters)
    ell : Ellipsoid, optional
        reference ellipsoid
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Returns
    -------
    x, y, z : float or numpy.ndarray of float
        target ECEF (Earth centered, Earth fixed) coordinates (meters)

    Raises
    ------
    ValueError
        if any latitude lies outside [-90, 90] degrees
    """
    if ell is None:
        ell = Ellipsoid()

    if deg:
        lat = radians(lat)
        lon = radians(lon)

    # np.any() copes with both scalar and array input.
    with np.errstate(invalid='ignore'):
        if np.any((lat < -pi / 2) | (lat > pi / 2)):
            raise ValueError('-90 <= lat <= 90')

    # Radius of curvature of the prime vertical section.
    N = get_radius_normal(lat, ell)

    # Cartesian (geocentric) coordinates from the (curvilinear)
    # geodetic coordinates.
    cos_lat = cos(lat)
    x = (N + alt) * cos_lat * cos(lon)
    y = (N + alt) * cos_lat * sin(lon)
    z = (N * (ell.b / ell.a) ** 2 + alt) * sin(lat)
    return x, y, z
def listener_create_event(self, listener_info):
    """Process a listener create event.

    This is LBaaS v2: the VIP's vif is plugged into OVS when the first
    listener is created and unplugged when the last listener is
    deleted, so for each parent loadbalancer that has no known instance
    yet, its VIP port is looked up and added here.
    """
    listener_data = listener_info.get('listener')
    lb_list = listener_data.get('loadbalancers')
    for lb in lb_list:
        lb_id = lb.get('id')
        # Instance IDs are the loadbalancer UUID with dashes stripped.
        req = dict(instance_id=(lb_id.replace('-', '')))
        instances = self.get_vms_for_this_req(**req)
        if not instances:
            lb_info = self.neutronclient.show_loadbalancer(lb_id)
            if lb_info:
                port_id = lb_info["loadbalancer"]["vip_port_id"]
                self.add_lbaas_port(port_id, lb_id)
        else:
            # Fix: use lazy %-style logging args instead of eager string
            # formatting, so the message is only built when INFO is on.
            LOG.info("lbaas port for lb %s already added", lb_id)
def simple_moving_matrix(x, n=10):
    """Create simple moving matrix.

    Each row of the result is a length-``n`` window of ``x`` centred on
    successive sample points, which is useful for calculating the
    confidence interval of a simple moving average.

    Parameters
    ----------
    x : ndarray
        A numpy array.  Multi-column input is first averaged along axis 1.
    n : integer
        The number of sample points used to make average

    Returns
    -------
    ndarray
        A (len(x) - 2*(n//2)) x n numpy array of sliding windows.
    """
    if x.ndim > 1 and len(x[0]) > 1:
        x = np.average(x, axis=1)
    # BUG FIX: use floor division -- ``n / 2`` is a float on Python 3,
    # which breaks both ``range()`` and the slice bounds below.
    h = n // 2
    # Odd window sizes need one extra trailing sample per window.
    o = 0 if h * 2 == n else 1
    windows = []
    for i in range(h, len(x) - h):
        windows.append(x[i - h:i + h + o])
    return np.array(windows)
def _add_lines(specification, module):
    """Return autodoc commands for a basemodel's docstring.

    Note that `collection classes` (e.g. `Model`, `ControlParameters`,
    `InputSequences`) are placed on top of the respective section and
    the `contained classes` (e.g. model methods, `ControlParameter`
    instances, `InputSequence` instances) at the bottom.  This differs
    from the order of their definition in the respective modules, but
    results in a better documentation structure.
    """
    caption = _all_spec2capt.get(specification, 'dummy')
    # Only parameter/sequence/mask sections have a collection class to
    # document ahead of their members.
    if caption.split()[-1] in ('parameters', 'sequences', 'Masks'):
        exists_collectionclass = True
        name_collectionclass = caption.title().replace(' ', '')
    else:
        exists_collectionclass = False
    lines = []
    # NOTE(review): the RST option lines below carry a single leading
    # space as extracted here; Sphinx expects consistent option
    # indentation -- confirm the literal spacing against the original file.
    if specification == 'model':
        lines += [f'',
                  f'.. autoclass:: {module.__name__}.Model',
                  f' :members:',
                  f' :show-inheritance:',
                  f' :exclude-members: {", ".join(EXCLUDE_MEMBERS)}']
    elif exists_collectionclass:
        lines += [f'',
                  f'.. autoclass:: {module.__name__}.{name_collectionclass}',
                  f' :members:',
                  f' :show-inheritance:',
                  f' :exclude-members: {", ".join(EXCLUDE_MEMBERS)}']
    # Document the module members last, excluding whichever collection
    # class was already documented above.
    lines += ['',
              '.. automodule:: ' + module.__name__,
              ' :members:',
              ' :show-inheritance:']
    if specification == 'model':
        lines += [' :exclude-members: Model']
    elif exists_collectionclass:
        lines += [' :exclude-members: ' + name_collectionclass]
    return lines
def _parse_networks(networks):
    '''
    Common logic for parsing the networks.

    Normalizes the ``networks`` argument into ``{net_name: {option:
    value}}`` form via ``repack_dictlist``, validates each network's
    configuration, and verifies that every named network actually
    exists on the docker host.  Raises ``CommandExecutionError`` on any
    problem.
    '''
    networks = salt.utils.args.split_input(networks or [])
    if not networks:
        networks = {}
    else:
        # We don't want to recurse the repack, as the values of the kwargs
        # being passed when connecting to the network will not be dictlists.
        networks = salt.utils.data.repack_dictlist(networks)
        if not networks:
            raise CommandExecutionError(
                'Invalid network configuration (see documentation)')
        for net_name, net_conf in six.iteritems(networks):
            if net_conf is None:
                networks[net_name] = {}
            else:
                networks[net_name] = salt.utils.data.repack_dictlist(net_conf)
                if not networks[net_name]:
                    raise CommandExecutionError(
                        'Invalid configuration for network \'{0}\' '
                        '(see documentation)'.format(net_name))
                # links/aliases accept list-style input; normalize them.
                # NOTE(review): nesting of this loop inside the per-network
                # else-branch is inferred from upstream salt -- confirm.
                for key in ('links', 'aliases'):
                    try:
                        networks[net_name][key] = salt.utils.args.split_input(
                            networks[net_name][key])
                    except KeyError:
                        continue
        # Iterate over the networks again now, looking for
        # incorrectly-formatted arguments
        errors = []
        for net_name, net_conf in six.iteritems(networks):
            if net_conf is not None:
                for key, val in six.iteritems(net_conf):
                    if val is None:
                        errors.append(
                            'Config option \'{0}\' for network \'{1}\' is '
                            'missing a value'.format(key, net_name))
        if errors:
            raise CommandExecutionError(
                'Invalid network configuration', info=errors)
    if networks:
        try:
            # Confirm each requested network exists on the docker host.
            all_networks = [
                x['Name'] for x in __salt__['docker.networks']()
                if 'Name' in x]
        except CommandExecutionError as exc:
            raise CommandExecutionError(
                'Failed to get list of existing networks: {0}.'.format(exc))
        else:
            missing_networks = [
                x for x in sorted(networks) if x not in all_networks]
            if missing_networks:
                raise CommandExecutionError(
                    'The following networks are not present: {0}'.format(
                        ', '.join(missing_networks)))
    return networks
def save ( self , * args , ** kwargs ) : """Customized to generate an image from the pdf file ."""
# open image from pdf img = Image ( filename = self . file . path + '[0]' ) # make new filename filename = os . path . basename ( self . file . path ) . split ( '.' ) [ : - 1 ] if type ( filename ) == list : filename = '' . join ( filename ) # TODO : Would be better to compute this path from the upload _ to # setting which is already set on the model field image_dir = os . path . join ( django_settings . MEDIA_ROOT , UPLOAD_TO_DIR ) if not os . path . exists ( image_dir ) : os . makedirs ( image_dir ) image_path = os . path . join ( image_dir , '{}.jpg' . format ( filename ) ) tmp_image_path = os . path . join ( image_dir , '{}.tmp.jpg' . format ( filename ) ) # we remove the old image befor we save because the cover might have # changed when we upload a new PDF file - even when that file has the # same filename as the old one try : os . remove ( image_path ) except OSError : # file is already gone pass # and we also remove the thumbnails old_files = glob . glob ( '{}.*' . format ( image_path ) ) for old_file in old_files : try : os . remove ( old_file ) except OSError : pass # save as image under a temporary filename so that we can read it with # File ( ) img . save ( filename = tmp_image_path ) # attach it to image field with open ( tmp_image_path , 'r' ) as f : self . image . save ( '{}.jpg' . format ( filename ) , File ( f ) , save = False ) super ( PDFPluginModel , self ) . save ( * args , ** kwargs ) # remove temp file try : os . remove ( tmp_image_path ) except OSError : pass
def _send ( self ) : """Send data to graphite . Data that can not be sent will be queued ."""
# Check to see if we have a valid socket . If not , try to connect . try : try : if self . socket is None : self . log . debug ( "GraphiteHandler: Socket is not connected. " "Reconnecting." ) self . _connect ( ) if self . socket is None : self . log . debug ( "GraphiteHandler: Reconnect failed." ) else : # Send data to socket self . _send_data ( '' . join ( self . metrics ) ) self . metrics = [ ] if self . _time_to_reconnect ( ) : self . _close ( ) except Exception : self . _close ( ) self . _throttle_error ( "GraphiteHandler: Error sending metrics." ) raise finally : if len ( self . metrics ) >= ( self . batch_size * self . max_backlog_multiplier ) : trim_offset = ( self . batch_size * self . trim_backlog_multiplier * - 1 ) self . log . warn ( 'GraphiteHandler: Trimming backlog. Removing' + ' oldest %d and keeping newest %d metrics' , len ( self . metrics ) - abs ( trim_offset ) , abs ( trim_offset ) ) self . metrics = self . metrics [ trim_offset : ]
def logged_in(f):
    """Decorator for Page methods that require an authenticated user.

    When the reddit session is not an OAuth session, a "Not logged in"
    notification is shown and the wrapped method is skipped, returning
    None; otherwise the method runs normally.
    """
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        if self.reddit.is_oauth_session():
            return f(self, *args, **kwargs)
        self.term.show_notification('Not logged in')
        return None
    return wrapper
def filter(self, read):
    """Check if a read passes the filter.

    The checks run in a fixed order (sequence-number wanting, random
    sampling, subset selection, head limit, length bounds, gap removal,
    title/index filters, de-duplication, user-supplied modifiers, site
    selection, id/read lambdas, description stripping, reversal) and
    several of them mutate filter state, so the ordering is significant.

    @param read: A C{Read} instance.
    @return: C{read} (possibly transformed) if C{read} passes the
        filter, C{False} if not.
    """
    self.readIndex += 1
    # Short-circuit flags set by earlier, terminal decisions.
    if self.alwaysFalse:
        return False
    if self.wantedSequenceNumberGeneratorExhausted:
        return False
    if self.nextWantedSequenceNumber is not None:
        if self.readIndex + 1 == self.nextWantedSequenceNumber:
            # We want this sequence.
            try:
                self.nextWantedSequenceNumber = next(
                    self.wantedSequenceNumberGenerator)
            except StopIteration:
                # The sequence number iterator ran out of sequence
                # numbers. We must let the rest of the filtering
                # continue for the current sequence in case we
                # throw it out for other reasons (as we might have
                # done for any of the earlier wanted sequence
                # numbers).
                self.wantedSequenceNumberGeneratorExhausted = True
        else:
            # This sequence isn't one of the ones that's wanted.
            return False
    if (self.sampleFraction is not None and
            uniform(0.0, 1.0) > self.sampleFraction):
        # Note that we don't have to worry about the 0.0 or 1.0
        # cases in the above 'if', as they have been dealt with
        # in self.__init__.
        return False
    if self.randomSubset is not None:
        if self.yieldCount == self.randomSubset:
            # The random subset has already been fully returned.
            # There's no point in going any further through the input.
            self.alwaysFalse = True
            return False
        elif uniform(0.0, 1.0) > ((self.randomSubset - self.yieldCount) /
                                  (self.trueLength - self.readIndex)):
            # Accept with probability (still needed) / (still remaining).
            return False
    if self.head is not None and self.readIndex == self.head:
        # We're completely done.
        self.alwaysFalse = True
        return False
    readLen = len(read)
    if ((self.minLength is not None and readLen < self.minLength) or
            (self.maxLength is not None and readLen > self.maxLength)):
        return False
    if self.removeGaps:
        # Strip '-' gap characters, keeping quality in sync if present.
        if read.quality is None:
            read = read.__class__(read.id, read.sequence.replace('-', ''))
        else:
            newSequence = []
            newQuality = []
            for base, quality in zip(read.sequence, read.quality):
                if base != '-':
                    newSequence.append(base)
                    newQuality.append(quality)
            read = read.__class__(read.id, ''.join(newSequence),
                                  ''.join(newQuality))
    if (self.titleFilter and
            self.titleFilter.accept(read.id) == TitleFilter.REJECT):
        return False
    if (self.keepSequences is not None and
            self.readIndex not in self.keepSequences):
        return False
    if (self.removeSequences is not None and
            self.readIndex in self.removeSequences):
        return False
    if self.removeDuplicates:
        if read.sequence in self.sequencesSeen:
            return False
        self.sequencesSeen.add(read.sequence)
    if self.removeDuplicatesById:
        if read.id in self.idsSeen:
            return False
        self.idsSeen.add(read.id)
    if self.modifier:
        # A modifier returning None rejects the read entirely.
        modified = self.modifier(read)
        if modified is None:
            return False
        else:
            read = modified
    # We have to use 'is not None' in the following tests so the empty set
    # is processed properly.
    if self.keepSites is not None:
        read = read.newFromSites(self.keepSites)
    elif self.removeSites is not None:
        read = read.newFromSites(self.removeSites, exclude=True)
    if self.idLambda:
        newId = self.idLambda(read.id)
        if newId is None:
            return False
        else:
            read.id = newId
    if self.readLambda:
        newRead = self.readLambda(read)
        if newRead is None:
            return False
        else:
            read = newRead
    if self.removeDescriptions:
        # Keep only the first whitespace-separated token of the id.
        read.id = read.id.split()[0]
    if self.reverse:
        read = read.reverse()
    elif self.reverseComplement:
        read = read.reverseComplement()
    self.yieldCount += 1
    return read
def print_err(*args, **kwargs):
    """print_err(*args, flush=False)

    Same as *print*, but outputs to stderr.  If *flush* is *True*,
    stderr is flushed after printing.  Other keyword arguments are
    ignored.
    """
    print(*args, file=sys.stderr, flush=kwargs.get("flush", False))
def is_ipv6_filter(ip, options=None):
    '''
    Return a bool telling whether the value passed is a valid IPv6
    address.

    ip
        The IP address.

    options
        CSV of options regarding the nature of the IP address,
        e.g. loopback, multicast, private etc.
    '''
    # _is_ipv returns the address string on success, so any string
    # result means the address validated as IPv6.
    return isinstance(_is_ipv(ip, 6, options=options), six.string_types)
def _get_answer(self, part):
    """Return the saved correct answer for puzzle ``part`` ("a" or "b").

    Note: answers are only revealed after a correct submission.  If the
    local cache file is missing, the puzzle page is scraped for any
    previously solved answers (both parts are cached when present).  If
    you have not already solved the puzzle, PuzzleUnsolvedError is
    raised.
    """
    answer_fname = getattr(self, "answer_{}_fname".format(part))
    if os.path.isfile(answer_fname):
        with open(answer_fname) as f:
            return f.read().strip()
    # scrape puzzle page for any previously solved answers
    response = requests.get(self.url, cookies=self._cookies,
                            headers=self._headers)
    response.raise_for_status()
    soup = bs4.BeautifulSoup(response.text, "html.parser")
    if not self._title:
        # may as well save this while we're here
        self._save_title(soup=soup)
    hit = "Your puzzle answer was"
    paras = [p for p in soup.find_all("p") if p.text.startswith(hit)]
    if paras:
        parta_correct_answer = paras[0].code.text
        self._save_correct_answer(value=parta_correct_answer, part="a")
        if len(paras) > 1:
            _p1, p2 = paras
            partb_correct_answer = p2.code.text
            self._save_correct_answer(value=partb_correct_answer, part="b")
    # The scrape may have just created the cache file for this part.
    if os.path.isfile(answer_fname):
        with open(answer_fname) as f:
            return f.read().strip()
    msg = "Answer {}-{}{} is not available".format(self.year, self.day, part)
    raise PuzzleUnsolvedError(msg)
def lemmatise(self, word):
    '''
    Return the potential base forms (lemmas) of the given word, using
    the data provided by the Projekt deutscher Wortschatz.  If no lemma
    is known, the word itself is returned as the only candidate.

    >>> gn.lemmatise(u'Männer')
    [u'Mann']
    >>> gn.lemmatise(u'XYZ123')
    [u'XYZ123']
    '''
    found = [entry['lemma']
             for entry in self._mongo_db.lemmatiser.find({'word': word})]
    return found if found else [word]
def _pad_added ( self , element , pad ) : """The callback for GstElement ' s " pad - added " signal ."""
# Decoded data is ready . Connect up the decoder , finally . name = pad . query_caps ( None ) . to_string ( ) if name . startswith ( 'audio/x-raw' ) : nextpad = self . conv . get_static_pad ( 'sink' ) if not nextpad . is_linked ( ) : self . _got_a_pad = True pad . link ( nextpad )
def get_projection_on_elements(self):
    """Return the band-structure projections summed per element.

    Returns:
        a dictionary in the {Spin.up: [][]{Element: values},
        Spin.down: [][]{Element: values}} format, indexed by
        [band][kpoint]; an empty dict if the band structure carries no
        projections.
    """
    structure = self.structure
    n_bands = self.nb_bands
    n_kpoints = len(self.kpoints)
    result = {}
    for spin, proj in self.projections.items():
        per_spin = [[collections.defaultdict(float) for _ in range(n_kpoints)]
                    for _ in range(n_bands)]
        for band in range(n_bands):
            for kpt in range(n_kpoints):
                for site in range(structure.num_sites):
                    element = str(structure[site].specie)
                    # Sum over the orbital axis for this site.
                    per_spin[band][kpt][element] += np.sum(proj[band, kpt, :, site])
        result[spin] = per_spin
    return result
def coupling_constant(self, specie):
    """Compute the quadrupolar coupling constant C_q for this EFG tensor.

    As defined in: Wasylishen R E, Ashbrook S E, Wimperis S. NMR of
    quadrupolar nuclei in solid materials. John Wiley & Sons, 2012
    (Chapter 3.2):

        C_q = e * Q * V_zz / h

    where h is Planck's constant, Q the nuclear electric quadrupole
    moment in millibarn, and e the elementary proton charge.

    Args:
        specie: flexible input to specify the species at this site.
            Can take an isotope or element string, Specie object,
            or Site object.

    Returns:
        the coupling constant as a FloatWithUnit in MHz

    Raises:
        ValueError: if ``specie`` is not one of the accepted types.
    """
    planck_constant = FloatWithUnit(6.62607004E-34, "m^2 kg s^-1")
    Vzz = FloatWithUnit(self.V_zz, "V ang^-2")
    e = FloatWithUnit(-1.60217662E-19, "C")

    # Convert from string to Specie object
    if isinstance(specie, str):
        # Accept "El-isotope" strings (e.g. "Cl-35") as well as bare symbols.
        if len(specie.split("-")) > 1:
            isotope = str(specie)
            specie = Specie(specie.split("-")[0])
            Q = specie.get_nmr_quadrupole_moment(isotope)
        else:
            specie = Specie(specie)
            Q = specie.get_nmr_quadrupole_moment()
    elif isinstance(specie, Site):
        specie = specie.specie
        Q = specie.get_nmr_quadrupole_moment()
    elif isinstance(specie, Specie):
        Q = specie.get_nmr_quadrupole_moment()
    else:
        # Typo fix: message previously read "speciie ... calcuations".
        raise ValueError(
            "Invalid specie provided for quadrupolar coupling constant calculations")

    return (e * Q * Vzz / planck_constant).to("MHz")
def compute_search_efficiency_in_bins(found, total, ndbins,
                                      sim_to_bins_function=lambda sim: (sim.distance,)):
    """Calculate search efficiency in the given ndbins.

    The first dimension of ndbins must be bins over injected distance.
    sim_to_bins_function must map an object to a tuple indexing the
    ndbins.

    Returns:
        (eff, err): BinnedArrays holding the found/total ratio per bin
        and its binomial uncertainty.
    """
    bins = bin_utils.BinnedRatios(ndbins)

    # Increment the numerator and denominator with found / found+missed
    # injections.  (Plain loops: the previous list comprehensions built
    # throwaway lists purely for their side effects.)
    for sim in found:
        bins.incnumerator(sim_to_bins_function(sim))
    for sim in total:
        bins.incdenominator(sim_to_bins_function(sim))

    # regularize by setting denoms to 1 to avoid nans
    bins.regularize()

    # efficiency array is the ratio
    eff = bin_utils.BinnedArray(bin_utils.NDBins(ndbins), array=bins.ratio())

    # compute binomial uncertainties in each bin
    err_arr = numpy.sqrt(eff.array * (1 - eff.array) / bins.denominator.array)
    err = bin_utils.BinnedArray(bin_utils.NDBins(ndbins), array=err_arr)

    return eff, err
def terminate(self):
    """Terminate the Qutepart instance, freeing its resources.

    This method MUST be called before the application stops to avoid
    crashes and some other interesting effects.  It clears the text,
    shuts down the completer, and stops background highlighting and vim
    mode when active.
    """
    self.text = ''
    self._completer.terminate()

    highlighter = self._highlighter
    if highlighter is not None:
        highlighter.terminate()

    vim = self._vim
    if vim is not None:
        vim.terminate()
def validate(self):
    """Validate the workflow object with cwltool.

    The workflow is written to a temporary CWL file (using absolute
    paths to the steps), read back and validated, and the temporary
    file is removed again afterwards.
    """
    handle, tmpfile = tempfile.mkstemp()
    os.close(handle)
    try:
        # Save without recursively calling validate() again.
        self.save(tmpfile, mode='abs', validate=False)
        # Loading the document performs the validation.
        document_loader, processobj, metadata, uri = load_cwl(tmpfile)
    finally:
        # cleanup tmpfile
        os.remove(tmpfile)
def userstream_user(self, delegate, stall_warnings=None, with_='followings',
                    replies=None):
    """Streams messages for a single user.

    https://dev.twitter.com/docs/api/1.1/get/user

    The ``stringify_friend_ids`` parameter is always set to ``'true'``
    for consistency with the use of string identifiers elsewhere.

    :param delegate:
        A delegate function called for each message in the stream with
        the message dict as its only parameter.  It may receive any
        message type and is responsible for any required dispatch
        (:mod:`txtwitter.messagetools` may be helpful here).
    :param bool stall_warnings:
        Specifies whether stall warnings should be delivered.
    :param str with_:
        ``'followings'`` (the default) includes messages from both the
        authenticated user and their followers; ``'user'`` includes only
        messages from (or mentioning) the authenticated user.  All other
        values are invalid.  (The trailing underscore avoids clashing
        with Python's ``with`` keyword.)
    :param str replies:
        If ``'all'``, replies to tweets are included even if the
        authenticated user does not follow both parties.
    :returns: An unstarted :class:`TwitterStreamService`.
    """
    params = {'stringify_friend_ids': 'true'}
    set_bool_param(params, 'stall_warnings', stall_warnings)
    set_str_param(params, 'with', with_)
    set_str_param(params, 'replies', replies)

    def connect():
        return self._get_userstream('user.json', params)

    return TwitterStreamService(connect, delegate)
def setHoverable(self, state):
    """Set whether or not this is a hoverable button.

    When in a hoverable state, the icon will only be visible when the
    button is hovered on; the button's current icon is captured as the
    hover icon.

    :param state: <bool>
    """
    self._hoverable = state
    self._hoverIcon = self.icon()
def copy(string):
    """Copy the given string onto the system (Windows) clipboard.

    The clipboard is always closed again, even when setting the text
    fails; previously an exception from EmptyClipboard/SetClipboardText
    left the clipboard open, blocking clipboard access for every other
    application.
    """
    win32clipboard.OpenClipboard()
    try:
        win32clipboard.EmptyClipboard()
        win32clipboard.SetClipboardText(string)
    finally:
        win32clipboard.CloseClipboard()
def compute_merkle_tree(items: Iterable[bytes]) -> MerkleTree:
    """Calculate the merkle tree for a given iterable of 32-byte hashes.

    :param items: iterable of unique 32-byte hash values (the leaves).
    :returns: a :class:`MerkleTree` whose first layer is the sorted leaves
        and whose last layer is the single root (``EMPTY_MERKLE_ROOT`` for
        empty input).
    :raises ValueError: if any item is not a 32-byte ``bytes`` value, or if
        the leaves contain duplicates.
    """
    # Materialize first: ``items`` may be a one-shot iterator, and the
    # validation pass below would otherwise exhaust it before sorting,
    # silently producing an empty tree.
    items = list(items)
    if not all(isinstance(l, bytes) and len(l) == 32 for l in items):
        raise ValueError('Not all items are hashes')

    leaves = sorted(items)
    if len(leaves) == 0:
        return MerkleTree(layers=[[EMPTY_MERKLE_ROOT]])

    if not len(leaves) == len(set(leaves)):
        raise ValueError('The leaves items must not contain duplicate items')

    tree = [leaves]
    layer = leaves
    while len(layer) > 1:
        # [a, b, c, d, e] -> [(a, b), (c, d), (e, None)]
        iterator = iter(layer)
        paired_items = zip_longest(iterator, iterator)
        layer = [_hash_pair(a, b) for a, b in paired_items]
        tree.append(layer)

    return MerkleTree(layers=tree)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data.

    This test is usually conducted post hoc after significant results of
    the Friedman test.  The statistics refer to upper quantiles of the
    studentized range distribution (Tukey) [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface, or a pandas
        DataFrame.  If `melted` is False (default), `a` is a block-design
        matrix: rows are blocks, columns are groups, and the col arguments
        are not needed.  If `melted` is True, `y_col`, `block_col` and
        `group_col` must give the column indices (array input) or column
        names (DataFrame input) holding the corresponding values.
    y_col : str or int
        Column containing the y data (melted input only).
    block_col : str or int
        Column containing the blocking-factor values (melted input only).
    group_col : str or int
        Column containing the treatment (group) factor values (melted
        input only).
    melted : bool, optional
        Whether data are given as melted columns "y", "blocks", "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures (unreplicated block design) can
    be conducted via the Friedman test; this function performs the
    consequent Nemenyi pairwise multiple-comparison test.

    This function does not test for ties.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons.
        Ph.D. thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Studentized-range statistic for groups i and j from the mean
        # within-block ranks R; k groups, n blocks.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    # Normalize the input into a melted DataFrame plus resolved column names.
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    # NOTE(review): the `sort` parameter is currently ignored — the
    # original Categorical-based ordering is commented out below and the
    # data are always sorted.  Confirm whether this is intentional.
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[_group_col].unique()
    k = groups.size
    n = x[_block_col].unique().size

    # Rank the y values within each block, then average those ranks per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].mean()
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    # Fill the upper triangle with the pairwise q statistics.
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Scale to the studentized range convention, convert to p values via
    # the studentized range distribution, and mirror to the lower triangle.
    vs *= np.sqrt(2.)
    vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
    vs[tri_lower] = vs.T[tri_lower]
    # Diagonal is marked -1 (self-comparison has no p value).
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def rotation_matrix(axis, theta):
    """Rotation matrix from the Euler-Rodrigues formula.

    Return the 3x3 matrix for a counterclockwise rotation of ``theta``
    radians about ``axis``.

    Parameters
    ----------
    axis : vector to rotate around
    theta : rotation angle, in rad
    """
    axis = np.asarray(axis)
    axis = axis / np.linalg.norm(axis)
    # Quaternion-like components (w, x, y, z) of the half-angle rotation.
    w = np.cos(theta / 2)
    x, y, z = -axis * np.sin(theta / 2)
    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    xy, wz, wy, wx, xz, yz = x * y, w * z, w * y, w * x, x * z, y * z
    return np.array([
        [ww + xx - yy - zz, 2 * (xy + wz), 2 * (xz - wy)],
        [2 * (xy - wz), ww + yy - xx - zz, 2 * (yz + wx)],
        [2 * (xz + wy), 2 * (yz - wx), ww + zz - xx - yy],
    ])
def _on_message(self, delivery_frame, properties, body, consumer):
    """Callback when a message is received from the server.

    Wraps a user-registered callback for message delivery: decodes and
    validates the message body, then invokes the consumer callback and
    acks, nacks, or rejects the delivery based on the exception (if any)
    the callback raises.  For detailed documentation on the user-provided
    callback, see the user guide on consuming.

    NOTE(review): the ``yield`` statements indicate this method runs under
    an ``@defer.inlineCallbacks``-style decorator — confirm at the
    definition site above this chunk.

    Args:
        delivery_frame (pika.spec.Deliver): The delivery frame, which
            includes details about the message such as its delivery tag.
        properties (pika.spec.BasicProperties): The message properties,
            such as the message headers.
        body (bytes): The message payload.
        consumer (dict): A dictionary describing the consumer of the
            message.

    Returns:
        Deferred: fired when the message has been handled.
    """
    _legacy_twisted_log.msg(
        "Message arrived with delivery tag {tag} for {consumer}",
        tag=delivery_frame.delivery_tag, consumer=consumer, logLevel=logging.DEBUG,
    )
    try:
        # Decode and schema-validate the payload before it reaches the
        # user callback.
        message = get_message(delivery_frame.routing_key, properties, body)
        message.queue = consumer.queue
    except ValidationError:
        # Invalid payload: discard it (nack without requeue) so it is not
        # redelivered forever.
        _legacy_twisted_log.msg(
            "Message id {msgid} did not pass validation; ignoring message",
            msgid=properties.message_id, logLevel=logging.WARNING,
        )
        yield consumer.channel.basic_nack(
            delivery_tag=delivery_frame.delivery_tag, requeue=False)
        return

    try:
        _legacy_twisted_log.msg(
            "Consuming message from topic {topic!r} (id {msgid})",
            topic=message.topic, msgid=properties.message_id,
        )
        # maybeDeferred lets the callback be either synchronous or
        # Deferred-returning.
        yield defer.maybeDeferred(consumer.callback, message)
    except Nack:
        # Consumer asked for redelivery: nack with requeue.
        _legacy_twisted_log.msg(
            "Returning message id {msgid} to the queue",
            msgid=properties.message_id, logLevel=logging.WARNING,
        )
        yield consumer.channel.basic_nack(
            delivery_tag=delivery_frame.delivery_tag, requeue=True)
    except Drop:
        # Consumer asked for the message to be discarded: nack, no requeue.
        _legacy_twisted_log.msg(
            "Consumer requested message id {msgid} be dropped",
            msgid=properties.message_id, logLevel=logging.WARNING,
        )
        yield consumer.channel.basic_nack(
            delivery_tag=delivery_frame.delivery_tag, requeue=False)
    except HaltConsumer:
        # Graceful shutdown: ack this message, then cancel the consumer.
        _legacy_twisted_log.msg(
            "Consumer indicated it wishes consumption to halt, shutting down")
        yield consumer.channel.basic_ack(delivery_tag=delivery_frame.delivery_tag)
        yield self.cancel(consumer.queue)
    except Exception:
        # Unexpected failure: requeue every unacked message on this channel
        # (delivery_tag=0 with multiple=True) and stop the consumer.
        _legacy_twisted_log.msg(
            "Received unexpected exception from consumer {c}",
            c=consumer, logLevel=logging.ERROR,
        )
        yield consumer.channel.basic_nack(delivery_tag=0, multiple=True, requeue=True)
        yield self.cancel(consumer.queue)
    else:
        # Success path: acknowledge the delivery.
        yield consumer.channel.basic_ack(delivery_tag=delivery_frame.delivery_tag)
def temperature(temp: Number, unit: str = 'C') -> str:
    """Format a temperature element into a string with both C and F values.

    Used for both Temp and Dew.  Returns '' for a missing temperature or
    an unknown unit.

    Ex: 34°C (93°F)
    """
    unit = unit.upper()
    # Bail out on a missing temperature object or an unsupported unit.
    if not temp or unit not in ('C', 'F'):
        return ''
    if unit == 'C':
        fahrenheit = int(round(temp.value * 1.8 + 32))
        converted = str(fahrenheit) + '°F'  # type: ignore
    elif unit == 'F':
        celsius = int(round((temp.value - 32) / 1.8))
        converted = str(celsius) + '°C'  # type: ignore
    return f'{temp.value}°{unit} ({converted})'
def close(self):
    """Close every active poll instance and drop all references to them.

    Safe to call more than once: a second call is a no-op.
    """
    mpolls = self._mpoll
    if mpolls is None:
        # Already closed.
        return
    for poller in mpolls.values():
        poller.close()
    mpolls.clear()
    self._mpoll = None
def get_class_paths(_class, saltclass_path):
    '''Convert the dotted notation of a saltclass class to its possible
    file counterparts.

    :param str _class: Dotted notation of the class
    :param str saltclass_path: Root to saltclass storage
    :return: 3-tuple of possible file counterparts
    :rtype: tuple(str)
    '''
    classes_dir = os.path.join(saltclass_path, 'classes')
    as_subpath = _class.replace('.', os.sep)
    # e.g. a.b -> classes/a.b.yml
    flat = os.path.join(classes_dir, '{0}.yml'.format(_class))
    # e.g. a.b -> classes/a/b.yml
    nested = os.path.join(classes_dir, '{0}.yml'.format(as_subpath))
    # e.g. a.b -> classes/a/b/init.yml
    as_init = os.path.join(classes_dir, as_subpath, 'init.yml')
    return flat, as_init, nested
def resolve_url_ext(to, params_=None, anchor_=None, args=None, kwargs=None):
    """Advanced resolve_url that can also append GET parameters and an anchor."""
    positional = args or ()
    keyword = kwargs or {}
    url = resolve_url(to, *positional, **keyword)
    if params_:
        # Encode the query-string parameters and append them.
        url = url + '?' + urllib.urlencode(encode_url_query_params(params_))
    if anchor_:
        url = url + '#' + anchor_
    return url
def add_proxy_for(self, name, widget):
    """Create a proxy for a widget and add it to this group.

    :param name: The name or key of the proxy, which will be emitted with
        the changed signal
    :param widget: The widget to create a proxy for
    """
    self.add_proxy(name, proxy_for(widget))
def _remove_double_brackets(text):
    """Strip double-bracket (internal link) markup, keeping the display text.

    Args:
        text: a unicode string

    Returns:
        a unicode string
    """
    def replacement_fn(s):
        if u":" in s:
            # Probably a category or similar namespaced link: drop it.
            return ""
        # Piped link: keep only the display text after the bar.
        bar_pos = s.find(u"|")
        return s if bar_pos == -1 else s[bar_pos + 1:]
    return _find_and_replace(text, u"[[", u"]]", replacement_fn)
def replace_nones(dict_or_list):
    """Update a dict or list in place, replacing 'none' string values
    (any case) with Python None.

    Nested dicts and lists are processed recursively.

    :param dict_or_list: the container to rewrite in place.
    """
    # Portable string check: the original used the Python 2-only
    # ``basestring`` builtin, which raises NameError on Python 3.
    try:
        string_types = basestring  # noqa: F821 - Python 2
    except NameError:
        string_types = str  # Python 3

    def replace_none_in_value(value):
        if isinstance(value, string_types) and value.lower() == "none":
            return None
        return value

    # list() snapshots the items so assignment below cannot disturb
    # iteration; .items() works on both Python 2 and 3 (iteritems() did not).
    items = list(dict_or_list.items()) if isinstance(dict_or_list, dict) else enumerate(dict_or_list)
    for accessor, value in items:
        if isinstance(value, (dict, list)):
            replace_nones(value)
        else:
            dict_or_list[accessor] = replace_none_in_value(value)
def replace_namespaced_replication_controller_dummy_scale(self, name, namespace, body, **kwargs):
    """Replace scale of the specified ReplicationControllerDummy.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead:

    >>> thread = api.replace_namespaced_replication_controller_dummy_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Scale (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param ExtensionsV1beta1Scale body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should
        not be persisted.  An invalid or unrecognized dryRun directive
        will result in an error response and no further processing of the
        request.  Valid values are: - All: all dry run stages will be
        processed
    :param str field_manager: fieldManager is a name associated with the
        actor or entity that is making these changes.  The value must be
        less than or 128 characters long, and only contain printable
        characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :return: ExtensionsV1beta1Scale
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The _with_http_info variant already returns either the data (sync)
    # or the request thread (async_req=True), so a single delegation
    # covers both branches.
    return self.replace_namespaced_replication_controller_dummy_scale_with_http_info(
        name, namespace, body, **kwargs)
def _export(self, args, **extra_args):
    """Entry point for exporting docker images.

    :param args: :class:`argparse.Namespace` of parsed CLI arguments
        (requires ``no_validation``, ``y``, ``data_center``,
        ``environment``, ``service``, ``no_tagging_scheme``, ``clean``,
        ``configs``, ``tag``, ``test`` and ``use_cache`` attributes).
    :param extra_args: unused; accepted for interface compatibility.
    :raises TypeError: if ``args`` is not an argparse.Namespace.
    :raises ValueError: if the user does not confirm an unvalidated export.
    """
    if not isinstance(args, argparse.Namespace):
        # Fix: the previous code raised TypeError(logger.error(...)),
        # which raised TypeError(None) because logger.error() returns
        # None — the message was lost.  Log it, then raise it.
        message = "args should be an instance of argparse.Namespace"
        logger.error(message)
        raise TypeError(message)

    # Warn the consumer about unsafe Docker practices.
    if args.no_validation:
        logger.warning("#######################################################\n"
                       "Validation has been disabled for this export operation.\n"
                       "This is an unsafe operation and does not verify the "
                       "run time nature of the container.\n"
                       "Any docker image created in this manner will not "
                       "be verified to start. Do not ship broken code.\n"
                       "#######################################################\n",
                       extra={'formatter': 'cli-warning'})

        # Require the consumer to explicitly confirm the unvalidated export.
        if not args.y:
            validation_input = six.moves.input(
                "Please type \'yes\' to export the container without validation: ")
            if not (isinstance(validation_input, six.string_types) and ('yes' == validation_input)):
                raise ValueError("Incorrect type defined. Required value: yes")

    # Create a new freight forwarder to create a commercial_invoice and
    # export goods.
    freight_forwarder = FreightForwarder()

    # Create the commercial invoice — the contract the freight forwarder
    # uses to dispatch containers and images.
    commercial_invoice = freight_forwarder.commercial_invoice(
        'export',
        args.data_center,
        args.environment,
        args.service,
        tagging_scheme=not args.no_tagging_scheme
    )

    # Perform the export; the bill of lading is truthy on success.
    bill_of_lading = freight_forwarder.export(
        commercial_invoice,
        clean=args.clean,
        configs=args.configs,
        tags=args.tag,
        test=args.test,
        use_cache=args.use_cache,
        validate=not args.no_validation
    )

    # TODO(review): return values through the app should be made
    # consistent instead of exiting here.
    exit_code = 0 if bill_of_lading else 1
    if exit_code != 0:
        exit(exit_code)
def remove_logger(self, cb_id):
    '''Remove a logger.

    Removes the service profile for the logger from the object's
    configuration, then drops the local record.

    @param cb_id The ID of the logger to remove.
    @raises NoLoggerError if no logger with that ID is registered.
    '''
    if cb_id not in self._loggers:
        raise exceptions.NoLoggerError(cb_id, self.name)
    conf = self.object.get_configuration()
    # The return value is not used (the previous code bound it to an
    # unused local `res`).
    conf.remove_service_profile(cb_id.get_bytes())
    del self._loggers[cb_id]
def log_param(name, value):
    '''Log a parameter value to the console.

    Parameters
    ----------
    name : str
        Name of the parameter being logged.
    value : any
        Value of the parameter being logged.
    '''
    styled_name = click.style(str(name))
    # The value is highlighted in yellow to stand out in the console.
    styled_value = click.style(str(value), fg='yellow')
    log('setting {} = {}', styled_name, styled_value)