signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def hypergeometric_like(x, n, m, N):
    r"""Hypergeometric log-likelihood.

    Discrete probability distribution that describes the number of
    successes in a sequence of draws from a finite population without
    replacement.

    .. math::
        f(x \mid n, m, N) =
            \frac{\binom{m}{x} \binom{N-m}{n-x}}{\binom{N}{n}}

    :Parameters:
      - `x` : [int] Number of successes in a sample drawn from a population.
      - `n` : [int] Size of sample drawn from the population.
      - `m` : [int] Number of successes in the population.
      - `N` : [int] Total number of units in the population.

    .. note::
       :math:`E(X) = \frac{n m}{N}`
    """
    # Delegate the actual computation to the compiled (Fortran) routine.
    return flib.hyperg(x, n, m, N)
def is_user(value, min=None, max=None):
    """Check whether the username or uid given as argument exists.

    If given a username, it is converted to the corresponding uid.

    :param value: username (str) or uid (int) to validate
    :param min: unused; kept for the configobj ``validate`` check signature
    :param max: unused; kept for the configobj ``validate`` check signature
    :return: the uid as an int
    :raises validate.VdtValueError: if the user does not exist
    :raises validate.VdtTypeError: if ``value`` is neither str nor int
    """
    # isinstance() is the idiomatic type check; bool is explicitly excluded
    # so that True/False are still rejected as they were with type() == int.
    if isinstance(value, str):
        try:
            # Convert the username to its uid.
            value = pwd.getpwnam(value).pw_uid
        except KeyError:
            err_message = ('{0}: No such user.'.format(value))
            raise validate.VdtValueError(err_message)
        return value
    elif isinstance(value, int) and not isinstance(value, bool):
        try:
            pwd.getpwuid(value)
        except KeyError:
            err_message = ('{0}: No such user.'.format(value))
            raise validate.VdtValueError(err_message)
        return value
    else:
        err_message = ('Please, use str or int to "user" parameter.')
        raise validate.VdtTypeError(err_message)
def fetch_data(self, stock_no, nowdatetime):
    """Fetch monthly OTC data from gretai.org.tw and return a csv.reader.

    The returned rows contain:
    0. date  1. traded shares  2. traded value  3. open  4. high
    5. low   6. close          7. price change  8. number of trades

    :param str stock_no: stock code
    :param datetime nowdatetime: current time (used for year/month)
    :rtype: list
    """
    # The site expects the year in the ROC (Minguo) calendar.
    params = {
        'year': nowdatetime.year - 1911,
        'mon': nowdatetime.month,
        'stock': stock_no,
        'rand': random.randrange(1, 1000000),
    }
    url = ('/ch/stock/aftertrading/daily_trading_info/'
           'st43_download.php?d=%(year)d/%(mon)02d&'
           'stkno=%(stock)s&r=%(rand)s') % params
    logging.info(url)
    response = GRETAI_CONNECTIONS.urlopen('GET', url)
    # Remember the full URL that was fetched.
    self.__url.append(GRETAI_HOST + url)
    return csv.reader(StringIO(response.data))
def replicate_no_merge(source, model, cache=None):
    '''Replicates the `source` object to `model` class and returns its
    reflection (without merging it into the session).

    `cache` breaks circular dependencies: attributes must be replicated
    before the target is merged into the session, but replicating some
    attributes may itself require the target, so already-created targets
    are looked up here to avoid an infinite loop.
    '''
    if source is None:
        return None
    if cache is None:
        cache = {}
    elif source in cache:
        return cache[source]
    session = object_session(source)
    _, ident = identity_key(instance=source)
    target = session.query(model).get(ident)
    if target is None:
        target = model()
    cache[source] = target
    try:
        replicate_attributes(source, target, cache=cache)
    except _PrimaryKeyIsNull:
        return None
    return target
def _compute_style_of_faulting_term ( self , rup , C ) : """Computes the coefficient to scale for reverse or strike - slip events Fault type ( Strike - slip , Normal , Thrust / reverse ) is derived from rake angle . Rakes angles within 30 of horizontal are strike - slip , angles from 30 to 150 are reverse , and angles from -30 to - 150 are normal . See paragraph ' Predictor Variables ' pag 103. Note that ' Unspecified ' case is used to refer to all other rake angles ."""
if np . abs ( rup . rake ) <= 30.0 or ( 180.0 - np . abs ( rup . rake ) ) <= 30.0 : # strike - slip return C [ 'B1ss' ] elif rup . rake > 30.0 and rup . rake < 150.0 : # reverse return C [ 'B1rv' ] else : # unspecified ( also includes Normal faulting ! ) return C [ 'B1all' ]
def variable_names(self):
    """Get all variable names required for this query (computed lazily
    and cached on the instance)."""
    if self._variable_names is not None:
        return self._variable_names
    if self._operator is None:
        if self._operands is None:
            names = tuple()
        else:
            names = self._get_variable_names(self._operands)
    elif self._operator == 'NOT':
        # NOT wraps a single sub-query.
        names = self._operands.variable_names
    else:
        # Binary operators: union of the operands' variable names.
        collected = []
        for operand in self._operands:
            collected.extend(operand.variable_names)
        names = tuple(set(collected))
    self._variable_names = names
    return names
def update(self):
    "Updates cartesian coordinates for drawing the tree graph."
    # Reset vertex/edge arrays and cached drawing data to fresh shapes.
    nnodes = self.ttree.nnodes
    self.edges = np.zeros((nnodes - 1, 2), dtype=int)
    self.verts = np.zeros((nnodes, 2), dtype=float)
    self.lines = []
    self.coords = []
    # Recompute the layout; the calls below depend on each other's results,
    # so the order matters.
    self.update_idxs()           # get dimensions of tree
    self.update_fixed_order()    # in case ntips changed
    self.assign_vertices()       # get node locations
    self.assign_coordinates()    # get edge locations
    self.reorient_coordinates()
def normalized_hypergraph_cut(H, threshold=0):
    """Execute the min-cut algorithm described in:

    Zhou, Dengyong, Jiayuan Huang, and Bernhard Scholkopf. "Learning with
    hypergraphs: Clustering, classification, and embedding." Advances in
    neural information processing systems. 2006.

    Partitions the hypergraph into two disjoint components using the
    normalized Laplacian.

    :param H: the hypergraph to perform the hypergraph-cut algorithm on.
    :param threshold: threshold value for the partitioning algorithm
                      (typically zero).
    :returns: set -- the S set of nodes in the S-T partition
              set -- the T set of nodes in the S-T partition
    :raises: TypeError -- Algorithm only applicable to undirected hypergraphs
    """
    if not isinstance(H, UndirectedHypergraph):
        raise TypeError("Algorithm only applicable to undirected hypergraphs")

    # TODO: make sure that the hypergraph is connected
    # Index <-> node and index <-> hyperedge_id mappings for the matrices.
    indices_to_nodes, nodes_to_indices = umat.get_node_mapping(H)
    indices_to_hyperedge_ids, hyperedge_ids_to_indices = \
        umat.get_hyperedge_id_mapping(H)

    delta = _compute_normalized_laplacian(H,
                                          nodes_to_indices,
                                          hyperedge_ids_to_indices)

    # The sparse eigs() solver does not return all eigenpairs and hence no
    # exact solution, so the dense numpy solver is used instead.  This may
    # be problematic for large graphs; only the 2 smallest eigenvalues are
    # actually needed, so a sparse solver could be revisited if run-time
    # becomes an issue.
    eigenvalues, eigenvectors = np.linalg.eig(delta.todense())
    second_min_index = np.argsort(eigenvalues)[1]
    second_eigenvector = eigenvectors[:, second_min_index]
    partition_index = [
        i for i in range(len(second_eigenvector))
        if second_eigenvector[i] >= threshold
    ]

    S, T = set(), set()
    for node, index in nodes_to_indices.items():
        (S if index in partition_index else T).add(node)
    return S, T
def hamsterday_time_to_datetime(hamsterday, time):
    """Return the civil datetime corresponding to a given hamster day
    and time, taking the configured hamster day start into account.

    Times earlier than ``conf.day_start`` fall in the early morning and
    therefore belong to the *next* civil day.
    """
    # Local import works around cyclic imports.
    from hamster.lib.configuration import conf
    civil_date = hamsterday
    if time < conf.day_start:
        # Between midnight and day_start => next civil day.
        civil_date = hamsterday + dt.timedelta(days=1)
    return dt.datetime.combine(civil_date, time)
def _get_gecos(name):
    '''Retrieve GECOS field info and return it in dictionary form'''
    try:
        fields = pwd.getpwnam(name).pw_gecos.split(',', 3)
    except KeyError:
        raise CommandExecutionError(
            'User \'{0}\' does not exist'.format(name)
        )
    if not fields:
        return {}
    # Pad with empty strings for any unspecified trailing GECOS fields.
    fields += [''] * (4 - len(fields))
    return {'fullname': salt.utils.data.decode(fields[0]),
            'roomnumber': salt.utils.data.decode(fields[1]),
            'workphone': salt.utils.data.decode(fields[2]),
            'homephone': salt.utils.data.decode(fields[3])}
def read_json(self):
    """Read metadata from JSON and set all the found properties.

    When overriding, remember to wrap your calls in
    ``reading_ancillary_files``.

    :return: the read metadata
    :rtype: dict
    """
    with reading_ancillary_files(self):
        if self.json_uri is None:
            metadata = self._read_json_db()
        else:
            metadata = self._read_json_file()
        # Apply every property present in the JSON; incomplete entries
        # are skipped so that as much as possible is read.
        for prop in list(metadata.get('properties', {}).values()):
            try:
                self.set(prop['name'], prop['value'], prop['xml_path'])
            except KeyError:
                pass
        return metadata
def validators(self):
    """Gets or creates the validator wrapper, caching it on the instance."""
    try:
        return self._validators
    except AttributeError:
        # First access: build and cache the wrapper.
        self._validators = ValidatorList(self)
        return self._validators
def get_final_version_string(release_mode, semver, commit_count=0):
    """Generate update-dictionary entries for the version string.

    :param release_mode: True for production releases
    :param semver: iterable of semantic-version components
    :param commit_count: dev-build counter used in the dev marker
    :return: dict of field updates
    """
    version_string = ".".join(semver)
    updates = {}
    if release_mode:
        # Production: plain `1.2.3` plus a released flag, e.g. PRODUCTION = True.
        updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
        maybe_dev_version_string = version_string
    else:
        # Dev mode: append a dev marker, e.g. `1.2.3.dev678`.
        maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
            version=version_string, count=commit_count)
    # Expose both the (possibly dev-marked) full string and the strict one.
    updates[Constants.VERSION_FIELD] = maybe_dev_version_string
    updates[Constants.VERSION_STRICT_FIELD] = version_string
    return updates
def create(self, equipments):
    """Create the given equipments on the database.

    :param equipments: list containing equipments to be created
    :return: result of the POST request
    """
    payload = {'equipments': equipments}
    return super(ApiV4Equipment, self).post('api/v4/equipment/', payload)
def name(self):
    """Name attribute of rule element; falls back to a tag-based label."""
    if self._meta.name:
        return self._meta.name
    return 'Rule @%s' % self.tag
def json_template(data, template_name, template_context):
    """Old style, use JSONTemplateResponse instead of this."""
    payload = data or {}
    # Render the template and attach it to the JSON payload.
    payload['html'] = render_to_string(template_name, template_context)
    return HttpResponse(json_encode(payload), content_type='application/json')
def to_backward_slashes(data):
    """Convert forward slashes in ``data`` to backward slashes.

    :param data: Data to convert.
    :type data: unicode
    :return: Converted path.
    :rtype: unicode
    """
    converted = data.replace("/", "\\")
    LOGGER.debug("> Data: '{0}' to backward slashes.".format(converted))
    return converted
def detach(self):
    """Detach from parent.

    @return: This element, removed from its parent's child list and with
        I{parent} = I{None}
    @rtype: L{Element}
    """
    parent = self.parent
    if parent is not None:
        self.parent = None
        if self in parent.children:
            parent.children.remove(self)
    return self
def search(query, stats):
    """Perform issue search for given stats instance."""
    log.debug("Search query: {0}".format(query))
    issues = []
    # Fetch data from the server in batches of MAX_RESULTS issues.
    for batch in range(MAX_BATCHES):
        query_string = urllib.urlencode({
            "jql": query,
            "fields": "summary,comment",
            "maxResults": MAX_RESULTS,
            "startAt": batch * MAX_RESULTS})
        response = stats.parent.session.get(
            "{0}/rest/api/latest/search?{1}".format(
                stats.parent.url, query_string))
        data = response.json()
        log.debug("Batch {0} result: {1} fetched".format(
            batch, listed(data["issues"], "issue")))
        log.data(pretty(data))
        issues.extend(data["issues"])
        # Stop once the server reports everything has been fetched.
        if len(issues) >= data["total"]:
            break
    # Wrap the raw issues into Issue objects.
    return [Issue(issue, prefix=stats.parent.prefix) for issue in issues]
def _accept_header ( self ) : """Method for determining correct ` Accept ` header . Different resources and different GoCD version servers prefer a diverse headers . In order to manage all of them , this method tries to help : if ` VERSION _ TO _ ACCEPT _ HEADER ` is not provided , if would simply return default ` ACCEPT _ HEADER ` . Though if some manager specifies ` VERSION _ TO _ ACCEPT _ HEADER ` class variable , then it should be a dictionary : keys should be a versions and values should be desired accept headers . Choosing is pessimistic : if version of a server is less or equal to one of the dictionary , the value of that key would be used . : return : accept header to use in request ."""
if not self . VERSION_TO_ACCEPT_HEADER : return self . ACCEPT_HEADER return YagocdUtil . choose_option ( version_to_options = self . VERSION_TO_ACCEPT_HEADER , default = self . ACCEPT_HEADER , server_version = self . _session . server_version )
def _parse_view_results(self, rows, factory, options):
    '''Parse raw view rows via the factory.

    ``rows`` should be a list of tuples:
      - (key, value) for reduce views
      - (key, value, id) for nonreduce views without include_docs
      - (key, value, id, doc) for nonreduce views with include_docs
    '''
    kwargs = {
        'reduced': factory.use_reduce and options.get('reduce', True),
        'include_docs': options.get('include_docs', False),
    }
    # Pass the unserialize helpers only when parse_view_result expects
    # them; these bound methods parse and, if necessary, upgrade the
    # loaded documents.
    accepted = inspect.getargspec(factory.parse_view_result).args
    if 'unserialize' in accepted:
        kwargs['unserialize'] = self.unserialize_document
    if 'unserialize_list' in accepted:
        kwargs['unserialize_list'] = self.unserialize_list_of_documents
    return factory.parse_view_result(rows, **kwargs)
def parse(self, data):
    """Parse a 17 bytes packet in the Wind format and return a dictionary
    containing the data extracted.

    An example of a return value would be:

    .. code-block:: python

        'id': "0x2EB2",
        'packet_length': 16,
        'packet_type': 86,
        'packet_type_name': 'Wind sensors',
        'sequence_number': 0,
        'packet_subtype': 4,
        'packet_subtype_name': "TFA",
        'temperature': 17.3,
        'direction': 120,
        'wind_gust': 11,
        'av_speed': 12,
        'wind_chill': 10,
        'signal_level': 9,
        'battery_level': 6,

    :param data: bytearray to be parsed
    :type data: bytearray
    :return: Data dictionary containing the parsed values
    :rtype: dict
    """
    self.validate_packet(data)
    results = self.parse_header_part(data)
    sub_type = results['packet_subtype']
    # Sensor id: bytes 4-5 rendered as a hex string.
    id_ = self.dump_hex(data[4:6])
    # Wind direction: 16-bit big-endian value from bytes 6-7.
    direction = data[6] * 256 + data[7]
    # Average speed (bytes 8-9, in 0.1 steps); subtype 0x05 has no reading.
    if sub_type != 0x05:
        av_speed = (data[8] * 256 + data[9]) * 0.1
    else:
        av_speed = '--??--'
    # Wind gust: bytes 10-11 in 0.1 steps.
    gust = (data[10] * 256 + data[11]) * 0.1
    # Temperature (bytes 12-13) only for subtype 0x04; high bit of
    # byte 12 is the sign bit.
    if sub_type == 0x04:
        temperature = ((data[12] & 0x7f) * 256 + data[13]) / 10
        signbit = data[12] & 0x80
        if signbit != 0:
            temperature = -temperature
    else:
        temperature = '--??--'
    # Wind chill (bytes 14-15) only for subtype 0x04; high bit of
    # byte 14 is the sign bit.
    if sub_type == 0x04:
        wind_chill = ((data[14] & 0x7f) * 256 + data[15]) / 10
        signbit = data[14] & 0x80
        if signbit != 0:
            wind_chill = -wind_chill
    else:
        wind_chill = '--??--'
    sensor_specific = {
        'id': id_,
        'direction': direction,
        'wind_gust': gust
    }
    # Only include the optional readings this subtype actually reports.
    if av_speed != '--??--':
        sensor_specific['av_speed'] = av_speed
    if temperature != '--??--':
        sensor_specific['temperature'] = temperature
    if wind_chill != '--??--':
        sensor_specific['wind_chill'] = wind_chill
    # Byte 16 carries the signal and battery levels.
    results.update(RfxPacketUtils.parse_signal_and_battery(data[16]))
    results.update(sensor_specific)
    return results
def median(self, func=lambda x: x):
    """Return the median value of data elements.

    :param func: lambda expression to project and sort data
    :return: median value
    :raises NoElementsError: if the iterable contains no elements
    """
    if self.count() == 0:
        raise NoElementsError(u"Iterable contains no elements")
    ordered = self.order_by(func).select(func).to_list()
    n = len(ordered)
    mid = int(n / 2)
    if n % 2 == 1:
        return ordered[mid]
    # Even count: average the two middle elements.
    return (float(ordered[mid - 1]) + float(ordered[mid])) / float(2)
def multi_component_layout(
    data,
    graph,
    n_components,
    component_labels,
    dim,
    random_state,
    metric="euclidean",
    metric_kwds=None,
):
    """Specialised layout algorithm for dealing with graphs with many
    connected components.

    This will first find relative positions for the components by
    spectrally embedding their centroids, then spectrally embed each
    individual connected component, positioning them according to the
    centroid embeddings.  This provides a decent embedding of each
    component while placing the components in good relative positions
    to one another.

    Parameters
    ----------
    data: array of shape (n_samples, n_features)
        The source data -- required so we can generate centroids for each
        connected component of the graph.
    graph: sparse matrix
        The adjacency matrix of the graph to be embedded.
    n_components: int
        The number of distinct components to be laid out.
    component_labels: array of shape (n_samples)
        For each vertex in the graph the label of the component to
        which the vertex belongs.
    dim: int
        The chosen embedding dimension.
    random_state: numpy RandomState
        Source of randomness for the fallback random placement.
    metric: string or callable (optional, default 'euclidean')
        The metric used to measure distances among the source data points.
    metric_kwds: dict (optional, default None)
        Keyword arguments to be passed to the metric function.
        (``None`` instead of ``{}`` avoids a mutable default argument.)

    Returns
    -------
    embedding: array of shape (n_samples, dim)
        The initial embedding of ``graph``.
    """
    # Fix: a mutable default argument ({}) is shared across calls; use
    # None as the sentinel and create a fresh dict per call instead.
    if metric_kwds is None:
        metric_kwds = {}

    result = np.empty((graph.shape[0], dim), dtype=np.float32)

    # Place the component centroids: spectrally when there are many
    # components, otherwise on the corners of a hypercube-like frame.
    if n_components > 2 * dim:
        meta_embedding = component_layout(
            data,
            n_components,
            component_labels,
            dim,
            metric=metric,
            metric_kwds=metric_kwds,
        )
    else:
        k = int(np.ceil(n_components / 2.0))
        base = np.hstack([np.eye(k), np.zeros((k, dim - k))])
        meta_embedding = np.vstack([base, -base])[:n_components]

    for label in range(n_components):
        # Extract the subgraph for this component.
        component_graph = graph.tocsr()[component_labels == label, :].tocsc()
        component_graph = component_graph[:, component_labels == label].tocoo()

        # Scale each component's spread to half the distance to its
        # nearest neighbouring centroid.
        distances = pairwise_distances([meta_embedding[label]], meta_embedding)
        data_range = distances[distances > 0.0].min() / 2.0

        if component_graph.shape[0] < 2 * dim:
            # Too small for a spectral embedding: place points randomly
            # around the component centroid.
            result[component_labels == label] = (
                random_state.uniform(
                    low=-data_range,
                    high=data_range,
                    size=(component_graph.shape[0], dim),
                )
                + meta_embedding[label]
            )
            continue

        diag_data = np.asarray(component_graph.sum(axis=0))
        # Normalized Laplacian: L = I - D^{-1/2} A D^{-1/2}
        # (standard Laplacian L = D - A commented out upstream).
        I = scipy.sparse.identity(component_graph.shape[0], dtype=np.float64)
        D = scipy.sparse.spdiags(
            1.0 / np.sqrt(diag_data),
            0,
            component_graph.shape[0],
            component_graph.shape[0],
        )
        L = I - D * component_graph * D

        k = dim + 1
        num_lanczos_vectors = max(
            2 * k + 1, int(np.sqrt(component_graph.shape[0]))
        )
        try:
            eigenvalues, eigenvectors = scipy.sparse.linalg.eigsh(
                L,
                k,
                which="SM",
                ncv=num_lanczos_vectors,
                tol=1e-4,
                v0=np.ones(L.shape[0]),
                maxiter=graph.shape[0] * 5,
            )
            # Drop the trivial first eigenvector; scale the rest to the
            # component's allotted range and offset by its centroid.
            order = np.argsort(eigenvalues)[1:k]
            component_embedding = eigenvectors[:, order]
            expansion = data_range / np.max(np.abs(component_embedding))
            component_embedding *= expansion
            result[component_labels == label] = (
                component_embedding + meta_embedding[label]
            )
        except scipy.sparse.linalg.ArpackError:
            warn(
                "WARNING: spectral initialisation failed! The eigenvector solver\n"
                "failed. This is likely due to too small an eigengap. Consider\n"
                "adding some noise or jitter to your data.\n\n"
                "Falling back to random initialisation!"
            )
            result[component_labels == label] = (
                random_state.uniform(
                    low=-data_range,
                    high=data_range,
                    size=(component_graph.shape[0], dim),
                )
                + meta_embedding[label]
            )

    return result
def get_reserved_bindings(vlan_id, instance_id, switch_ip=None, port_id=None):
    """Lists reserved bindings matching the supplied filters."""
    LOG.debug("get_reserved_bindings() called")
    filters = {'vlan_id': vlan_id, 'instance_id': instance_id}
    if port_id:
        # Port lookups always include switch_ip, even when it is None.
        filters['switch_ip'] = switch_ip
        filters['port_id'] = port_id
    elif switch_ip:
        filters['switch_ip'] = switch_ip
    return _lookup_all_nexus_bindings(**filters)
def delete_node(self, node_name):
    """Delete this node and all edges referencing it.

    Args:
        node_name (str): The name of the node to delete.

    Raises:
        KeyError: Raised if the node does not exist in the graph.
    """
    graph = self.graph
    if node_name not in graph:
        raise KeyError('node %s does not exist' % node_name)
    graph.pop(node_name)
    # Scrub dangling references from the remaining adjacency entries.
    for edges in graph.values():
        if node_name in edges:
            edges.remove(node_name)
def get_nested_exceptions ( self ) : """Traverses the exception record linked list and builds a Python list . Nested exception records are received for nested exceptions . This happens when an exception is raised in the debugee while trying to handle a previous exception . @ rtype : list ( L { ExceptionEvent } ) @ return : List of ExceptionEvent objects representing each exception record found in this event . There is always at least one exception record , so the list is never empty . All other methods of this class read from the first exception record only , that is , the most recent exception ."""
# The list always begins with ourselves . # Just put a reference to " self " as the first element , # and start looping from the second exception record . nested = [ self ] raw = self . raw dwDebugEventCode = raw . dwDebugEventCode dwProcessId = raw . dwProcessId dwThreadId = raw . dwThreadId dwFirstChance = raw . u . Exception . dwFirstChance record = raw . u . Exception . ExceptionRecord while True : record = record . ExceptionRecord if not record : break raw = win32 . DEBUG_EVENT ( ) raw . dwDebugEventCode = dwDebugEventCode raw . dwProcessId = dwProcessId raw . dwThreadId = dwThreadId raw . u . Exception . ExceptionRecord = record raw . u . Exception . dwFirstChance = dwFirstChance event = EventFactory . get ( self . debug , raw ) nested . append ( event ) return nested
def has_preview(self):
    """Return whether the document has real merged data.

    When True, `topil()` returns pre-composed data.
    """
    version_info = self.image_resources.get_data('version_info')
    if not version_info:
        # No version info resource: assume a composite is present.
        return True
    return version_info.has_composite
def list_team_codes():
    """List team names in alphabetical order of team ID, per league."""
    # Sort teams by league, then alphabetically by code.
    teams_sorted = sorted(
        TEAM_DATA, key=lambda t: (t["league"]["name"], t["code"]))
    # Distinct league names, alphabetically.
    league_names = sorted({team["league"]["name"] for team in teams_sorted})
    for league in league_names:
        click.secho(league, fg="green", bold=True)
        for team in teams_sorted:
            if team["league"]["name"] != league:
                continue
            if team["code"] != "null":
                click.secho(u"{0}: {1}".format(team["code"], team["name"]),
                            fg="yellow")
        click.secho("")
def register_cmdfinalization_hook(
        self,
        func: Callable[[plugin.CommandFinalizationData],
                       plugin.CommandFinalizationData]) -> None:
    """Register a hook to be called after a command is completed,
    whether it completes successfully or not.

    :param func: callable taking and returning a
        ``plugin.CommandFinalizationData`` instance
    """
    # Validate the callable's signature before registering it
    # (validation helper is defined elsewhere on this class).
    self._validate_cmdfinalization_callable(func)
    self._cmdfinalization_hooks.append(func)
def _R2deriv(self, R, z, phi=0., t=0.):
    """
    NAME:
       _R2deriv
    PURPOSE:
       evaluate the second radial derivative for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the second radial derivative
    HISTORY:
       2013-04-10 - Written - Bovy (IAS)
    """
    # Spherical radius and the dimensionless radius x = r / a.
    r = numpy.sqrt(R**2. + z**2.)
    x = r / self.a
    # Closed-form second derivative of the potential with respect to R.
    return -numpy.pi / x**3. / r**2. * (
        -4. * R**2. * r**3. / (self.a**2. + r**2.) / (self.a + r)
        + (z**2. - 2. * R**2.)
        * (numpy.pi - 2. * numpy.arctan(1. / x)
           - 2. * numpy.log(1. + x) - numpy.log(1. + x**2.)))
def to_event(self):
    """Return a deep copy of this message stripped down to an event.

    Removes the id, sign, tunnel, query and param attributes and updates
    the message type.  Notice: this method returns a deepcopy.
    """
    msg = copy.deepcopy(self)
    for attr in ("id", "sign", "tunnel", "query", "param"):
        if hasattr(msg, attr):
            delattr(msg, attr)
    # Recompute the type from the remaining attributes.
    msg._type = Message.get_message_type(msg.__dict__)
    return msg
def description(self):
    """Grab the package short description from SLACKBUILDS.TXT.

    Scans the index line by line, remembering the most recently seen
    package name; when the description line belonging to this package's
    name is found, return it.

    :return: the short description string, or None when not found
    """
    # Fix: initialise sbo_name so a description line appearing before any
    # name line no longer raises NameError (unbound local).
    sbo_name = ''
    for line in self.SLACKBUILDS_TXT.splitlines():
        if line.startswith(self.line_name):
            # Name field payload starts at a fixed column.
            sbo_name = line[17:].strip()
        if line.startswith(self.line_des):
            if sbo_name == self.name:
                # Description field payload starts at a fixed column.
                return line[31:].strip()
def linkcode_resolve(domain, info):
    """Determine the URL corresponding to a Python object.

    :param domain: Sphinx domain; only 'py' is handled
    :param info: dict with 'module' and 'fullname' keys
    :return: GitHub source URL for the object, or None when it cannot
        be resolved
    """
    if domain != 'py':
        return None
    modname = info['module']
    fullname = info['fullname']
    submod = sys.modules.get(modname)
    if submod is None:
        return None
    # Walk the attribute path down to the target object.
    obj = submod
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except Exception:
            # Fix: narrowed from a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt.
            return None
    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        return None
    try:
        source, lineno = inspect.findsource(obj)
    except Exception:
        lineno = None
    if lineno:
        linespec = "#L%d" % (lineno + 1)
    else:
        linespec = ""
    fn = relpath(fn, start='..')
    return "https://github.com/mithrandi/txacme/blob/%s/%s%s" % (
        txacme_version_info['full-revisionid'], fn, linespec)
def isrc(name=None):
    """Create the grammar for an ISRC code.

    ISRC stands for International Standard Recording Code, which is the
    standard ISO 3901.  This stores information identifying a particular
    recording.

    :param name: name for the field
    :return: grammar for an ISRC field
    """
    name = name if name is not None else 'ISRC Field'
    # Accept either the short or the long form of the code.
    field = _isrc_short(name) | _isrc_long(name)
    field.setName(name)
    return field.setResultsName('isrc')
def wait_for_event(self, emptybuffer=False):
    """Wait until a joystick event becomes available and return it as an
    `InputEvent` tuple.

    If *emptybuffer* is `True` (it defaults to `False`), any pending
    events will be thrown away first.  This is most useful if you are
    only interested in "pressed" events.
    """
    if emptybuffer:
        # Drain everything currently queued without blocking.
        while self._wait(0):
            self._read()
    while self._wait():
        event = self._read()
        if event:
            return event
def versional(a, b):
    """Sorts the inputted items by their natural order, trying to extract
    a number from them to sort by.

    :param a: <str>
    :param b: <str>
    :return: <int> 1 || 0 || -1

    :usage
        |>>> from projex import sorting
        |>>> a = ['test-1.1.2', 'test-1.02', 'test-1.2', 'test-1.18']
        |>>> a.sort()
        |>>> print a
        |['test-1.02', 'test-1.1.2', 'test-1.18', 'test-1.2']
        |>>> a.sort(sorting.natural)
        |>>> print a
        |['test-1.1.2', 'test-1.02', 'test-1.2', 'test-1.18']
        |>>> a.sort(sorting.versional)
        |>>> print a
        |['test-1.1.2', 'test-1.02', 'test-1.18', 'test-1.2']
    """
    # NOTE(review): Python 2 comparator (uses cmp); nstr and
    # EXPR_VERSIONAL are module globals defined elsewhere.
    stra = nstr(a).lower()
    strb = nstr(b).lower()
    # look up all the (text, number) fragment pairs for both strings
    aresults = EXPR_VERSIONAL.findall(stra)
    bresults = EXPR_VERSIONAL.findall(strb)
    # make sure we have the same number of results
    bcount = len(bresults)
    for i in range(len(aresults)):
        # make sure we don't exceed the number of elements in b
        if bcount <= i:
            break
        atext, anum = aresults[i]
        btext, bnum = bresults[i]
        # compare the text components first
        if atext != btext:
            return cmp(atext, btext)
        if not anum:
            anum = 0
        if not bnum:
            bnum = 0
        # compare the numeric components; after a '.' separator, compare
        # them as decimal fractions so that e.g. '.18' < '.2'
        if atext == '.':
            anum = int(float('.' + anum) * 10000)
            bnum = int(float('.' + bnum) * 10000)
        else:
            anum = int(anum)
            bnum = int(bnum)
        if anum != bnum:
            return cmp(anum, bnum)
    # b has less characters than a, so should sort before
    return 1
def trimim(fims, affine=None, scale=2, divdim=8 ** 2, int_order=0, fmax=0.05,
           outpath='', fname='', fcomment='', store_avg=False,
           store_img_intrmd=False, store_img=False, imdtype=np.float32,
           memlim=False, verbose=False):
    '''Trim and upsample PET image(s), e.g., for GPU execution,
    PVC correction, ROI sampling, etc.

    The input images 'fims' can be passed in multiple ways:
    1. as a string of the folder containing NIfTI files
    2. as a string of a NIfTI file path (this way a 4D image can be loaded).
    3. as a list of NIfTI file paths.
    4. as a 3D or 4D image

    Parameters:
        affine: affine matrix, 4x4, for all the input images in case when
            images passed as a matrix (all have to have the same shape
            and data type)
        scale: the scaling factor for upsampling has to be greater than 1
        divdim: image divisor, i.e., the dimensions are a multiple of this
            number (default 64)
        int_order: interpolation order (0-nearest neighbour, 1-linear, as
            in scipy)
        fmax: fraction of the max image value used for finding image
            borders for trimming
        outpath: output folder path
        fname: file name when image given as a numpy matrix
        fcomment: part of the name of the output file, left for the user
            as a comment
        store_img_intrmd: stores intermediate images with suffix '_i'
        store_avg: stores the average image (if multiple images are given)
        imdtype: data type for output images
        memlim: True for cases when memory is limited and takes more
            processing time instead.
        verbose: verbose mode [True/False]

    Returns:
        dict with the trimmed/upsampled image(s), the new affine, the
        trimming parameters and (optionally) output file paths.
    '''
    # Set when the input was given as files on disk (folder or list);
    # enables the low-memory re-read path below for >50 frames.
    using_multiple_files = False

    # case when input folder is given
    if isinstance(fims, basestring) and os.path.isdir(fims):
        # list of input images (e.g., PET)
        fimlist = [os.path.join(fims, f) for f in os.listdir(fims)
                   if f.endswith('.nii') or f.endswith('.nii.gz')]
        imdic = imio.niisort(fimlist, memlim=memlim)
        # with many frames and limited memory, do not load all voxel data
        # now; frames are re-read one by one later
        if not (imdic['N'] > 50 and memlim):
            imin = imdic['im']
        imshape = imdic['shape']
        affine = imdic['affine']
        fldrin = fims
        fnms = [os.path.basename(f).split('.nii')[0]
                for f in imdic['files'] if f != None]
        # number of images/frames
        Nim = imdic['N']
        using_multiple_files = True

    # case when input file is a 3D or 4D NIfTI image
    elif isinstance(fims, basestring) and os.path.isfile(fims) \
            and (fims.endswith('nii') or fims.endswith('nii.gz')):
        imdic = imio.getnii(fims, output='all')
        imin = imdic['im']
        # promote a single 3D volume to a 1-frame 4D array
        if imin.ndim == 3:
            imin.shape = (1, imin.shape[0], imin.shape[1], imin.shape[2])
        imdtype = imdic['dtype']
        imshape = imdic['shape'][-3:]
        affine = imdic['affine']
        fldrin = os.path.dirname(fims)
        fnms = imin.shape[0] * [os.path.basename(fims).split('.nii')[0]]
        # number of images/frames
        Nim = imin.shape[0]

    # case when a list of input files is given
    elif isinstance(fims, list) and all([os.path.isfile(k) for k in fims]):
        imdic = imio.niisort(fims, memlim=memlim)
        if not (imdic['N'] > 50 and memlim):
            imin = imdic['im']
        imshape = imdic['shape']
        affine = imdic['affine']
        imdtype = imdic['dtype']
        fldrin = os.path.dirname(fims[0])
        fnms = [os.path.basename(f).split('.nii')[0] for f in imdic['files']]
        # number of images/frames
        Nim = imdic['N']
        using_multiple_files = True

    # case when an array [#frames, zdim, ydim, xdim].  Can be 3D or 4D
    elif isinstance(fims, (np.ndarray, np.generic)) \
            and (fims.ndim == 4 or fims.ndim == 3):
        # check image affine
        if affine.shape != (4, 4):
            raise ValueError('Affine should be a 4x4 array.')
        # create a copy to avoid mutation when only one image (3D)
        imin = np.copy(fims)
        if fims.ndim == 3:
            imin.shape = (1, imin.shape[0], imin.shape[1], imin.shape[2])
        imshape = imin.shape[-3:]
        fldrin = os.path.join(os.path.expanduser('~'), 'NIMPA_output')
        if fname == '':
            fnms = imin.shape[0] * ['NIMPA']
        else:
            fnms = imin.shape[0] * [fname]
        # number of images/frames
        Nim = imin.shape[0]

    else:
        raise TypeError('Wrong data type input.')

    # store images in this folder
    if outpath == '':
        petudir = os.path.join(fldrin, 'trimmed')
    else:
        petudir = os.path.join(outpath, 'trimmed')
    imio.create_dir(petudir)

    # scale is preferred to be integer
    try:
        scale = int(scale)
    except ValueError:
        raise ValueError('e> scale has to be an integer.')
    # scale factor as the inverse of scale
    sf = 1 / float(scale)
    if verbose:
        print 'i> upsampling scale {}, giving resolution scale factor {} for {} images.'.format(scale, sf, Nim)

    # scaled input image and get a sum image as the base for trimming
    if scale > 1:
        newshape = (scale * imshape[0], scale * imshape[1], scale * imshape[2])
        imsum = np.zeros(newshape, dtype=imdtype)
        if not memlim:
            # keep all upscaled frames in memory to reuse them later
            imscl = np.zeros((Nim,) + newshape, dtype=imdtype)
            for i in range(Nim):
                imscl[i, :, :, :] = ndi.interpolation.zoom(
                    imin[i, :, :, :], (scale, scale, scale), order=int_order)
                imsum += imscl[i, :, :, :]
        else:
            # low-memory path: only accumulate the sum; frames are zoomed
            # again (one at a time) in the trimming loop below
            for i in range(Nim):
                if Nim > 50 and using_multiple_files:
                    imin_temp = imio.getnii(imdic['files'][i])
                    imsum += ndi.interpolation.zoom(
                        imin_temp, (scale, scale, scale), order=0)
                    if verbose:
                        print 'i> image sum: read', imdic['files'][i]
                else:
                    imsum += ndi.interpolation.zoom(
                        imin[i, :, :, :], (scale, scale, scale), order=0)
    else:
        imscl = imin
        imsum = np.sum(imin, axis=0)

    # smooth the sum image for improving trimming (if any)
    # imsum = ndi.filters.gaussian_filter(imsum,
    #     imio.fwhm2sig(4.0, voxsize=abs(affine[0,0])), mode='mirror')

    # find the object bounding indexes in x, y and z axes, e.g., ix0-ix1
    # for the x axis, using the fmax fraction of the max profile value
    qx = np.sum(imsum, axis=(0, 1))
    ix0 = np.argmax(qx > (fmax * np.nanmax(qx)))
    ix1 = ix0 + np.argmin(qx[ix0:] > (fmax * np.nanmax(qx)))
    qy = np.sum(imsum, axis=(0, 2))
    iy0 = np.argmax(qy > (fmax * np.nanmax(qy)))
    iy1 = iy0 + np.argmin(qy[iy0:] > (fmax * np.nanmax(qy)))
    qz = np.sum(imsum, axis=(1, 2))
    iz0 = np.argmax(qz > (fmax * np.nanmax(qz)))

    # find the maximum voxel range for x and y axes
    IX = ix1 - ix0 + 1
    IY = iy1 - iy0 + 1
    tmp = max(IX, IY)
    # > get the range such that it is divisible by
    # > divdim (64 by default) for GPU execution
    # NOTE: integer (floor) division, Python 2 semantics
    IXY = divdim * ((tmp + divdim - 1) / divdim)
    div = (IXY - IX) / 2
    ix0 -= div
    ix1 += (IXY - IX) - div
    div = (IXY - IY) / 2
    iy0 -= div
    iy1 += (IXY - IY) - div
    # z-axis is only padded downwards (from iz0 to the top of the image)
    tmp = (len(qz) - iz0 + 1)
    IZ = divdim * ((tmp + divdim - 1) / divdim)
    iz0 -= IZ - tmp + 1

    # save the trimming parameters in a dic
    trimpar = {'x': (ix0, ix1), 'y': (iy0, iy1), 'z': (iz0), 'fmax': fmax}

    # new dims (z, y, x)
    newdims = (imsum.shape[0] - iz0, iy1 - iy0 + 1, ix1 - ix0 + 1)
    imtrim = np.zeros((Nim,) + newdims, dtype=imdtype)
    imsumt = np.zeros(newdims, dtype=imdtype)

    # in case of needed padding (negative indx resulting above)
    # the absolute values are supposed to work like padding in case
    # the indx are negative
    # *s = source index into imsum, *t = target index into imsumt/imtrim
    iz0s, iy0s, ix0s = iz0, iy0, ix0
    iz0t, iy0t, ix0t = 0, 0, 0
    if iz0 < 0:
        iz0s = 0; iz0t = abs(iz0)
        print '-----------------------------------------------------------------'
        print 'w> Correcting for trimming outside the original image (z-axis)'
        print '-----------------------------------------------------------------'
    if iy0 < 0:
        iy0s = 0; iy0t = abs(iy0)
        print '-----------------------------------------------------------------'
        print 'w> Correcting for trimming outside the original image (y-axis)'
        print '-----------------------------------------------------------------'
    if ix0 < 0:
        ix0s = 0; ix0t = abs(ix0)
        print '-----------------------------------------------------------------'
        print 'w> Correcting for trimming outside the original image (x-axis)'
        print '-----------------------------------------------------------------'

    # > in case the upper index goes beyond the scaled but untrimmed image
    # NOTE(review): the correction below subtracts (iy1+1) rather than the
    # overshoot (iy1+1)-imsum.shape[1]; preserved as found -- verify
    # against upstream before relying on this edge case.
    iy1t = imsumt.shape[1]
    if iy1 >= imsum.shape[1]:
        iy1t -= iy1 + 1
    # > the same for x
    ix1t = imsumt.shape[2]
    if ix1 >= imsum.shape[2]:
        ix1t -= ix1 + 1

    # first trim the sum image
    imsumt[iz0t:, iy0t:iy1t, ix0t:ix1t] = \
        imsum[iz0s:, iy0s:iy1 + 1, ix0s:ix1 + 1]

    # > new affine matrix for the scaled and trimmed image
    A = np.diag(sf * np.diag(affine))
    # > note half of new voxel offset is used for the new centre of voxels
    A[0, 3] = affine[0, 3] + A[0, 0] * (ix0 - 0.5)
    A[1, 3] = affine[1, 3] + (affine[1, 1] * (imshape[1] - 1) - A[1, 1] * (iy1 - 0.5))
    A[2, 3] = affine[2, 3] - A[1, 1] * 0.5
    A[3, 3] = 1

    # output dictionary
    dctout = {'affine': A, 'trimpar': trimpar, 'imsum': imsumt}

    # NIfTI image description (to be stored in the header)
    niidescr = 'trimm(x,y,z):' + str(trimpar['x']) + ',' + str(trimpar['y']) \
               + ',' + str((trimpar['z'],)) + ';scale=' + str(scale) \
               + ';fmx=' + str(fmax)

    # store the sum image
    if store_avg:
        fsum = os.path.join(
            petudir,
            'avg_trimmed-upsampled-scale-' + str(scale) + fcomment + '.nii.gz')
        imio.array2nii(imsumt[::-1, ::-1, :], A, fsum, descrip=niidescr)
        if verbose:
            print 'i> saved averaged image to:', fsum
        dctout['fsum'] = fsum

    # list of file names for the upsampled and trimmed images
    fpetu = []
    # perform the trimming and save the intermediate images if requested
    for i in range(Nim):
        # memory saving option, second time doing interpolation
        if memlim:
            if Nim > 50 and using_multiple_files:
                imin_temp = imio.getnii(imdic['files'][i])
                im = ndi.interpolation.zoom(
                    imin_temp, (scale, scale, scale), order=int_order)
                if verbose:
                    print 'i> image scaling:', imdic['files'][i]
            else:
                im = ndi.interpolation.zoom(
                    imin[i, :, :, :], (scale, scale, scale), order=int_order)
        else:
            im = imscl[i, :, :, :]

        # trim the scaled image
        imtrim[i, iz0t:, iy0t:iy1t, ix0t:ix1t] = \
            im[iz0s:, iy0s:iy1 + 1, ix0s:ix1 + 1]

        # save the up-sampled and trimmed PET images
        if store_img_intrmd:
            _frm = '_trmfrm' + str(i)
            _fstr = '_trimmed-upsampled-scale-' + str(scale) \
                    + _frm * (Nim > 1) + fcomment
            fpetu.append(os.path.join(petudir, fnms[i] + _fstr + '_i.nii.gz'))
            imio.array2nii(imtrim[i, ::-1, ::-1, :], A, fpetu[i],
                           descrip=niidescr)
            if verbose:
                print 'i> saved upsampled PET image to:', fpetu[i]

    if store_img:
        _nfrm = '_nfrm' + str(Nim)
        fim = os.path.join(petudir, 'trimmed-upsampled-scale-' + str(scale)) \
              + _nfrm * (Nim > 1) + fcomment + '.nii.gz'
        imio.array2nii(np.squeeze(imtrim[:, ::-1, ::-1, :]), A, fim,
                       descrip=niidescr)
        dctout['fim'] = fim

    # file names (with paths) for the intermediate PET images
    dctout['fimi'] = fpetu
    dctout['im'] = np.squeeze(imtrim)
    dctout['N'] = Nim
    dctout['affine'] = A

    return dctout
def poll(self, timeout):
    """Read from the stream until data arrives or *timeout* elapses.

    :param float timeout: Timeout in seconds.  A timeout that is less
        than the poll_period will still cause a single read that may
        take up to poll_period seconds.
    :raises ExpectTimeout: when no data was received within *timeout*.
    """
    deadline = time.time() + float(timeout)
    saved_timeout = self.stream.gettimeout()
    self.stream.settimeout(self._poll_period)
    try:
        data = None
        while time.time() <= deadline:
            try:
                data = self.stream.recv(self._max_read)
            except socket.timeout:
                # Nothing within one poll period; check the deadline again.
                pass
            if data:
                return data
        raise ExpectTimeout()
    finally:
        # Always restore the caller's original socket timeout.
        self.stream.settimeout(saved_timeout)
def box_show(text, width=100, height=3, corner="+", horizontal="-", vertical="|"):
    """Render *text* inside a formatted ASCII box and print it."""
    rendered = StrTemplate.box(
        text=text,
        width=width,
        height=height,
        corner=corner,
        horizontal=horizontal,
        vertical=vertical,
    )
    print(rendered)
def uniquified_mesh(self):
    """This function returns a copy of the mesh in which vertices are
    copied such that each vertex appears in only one face, and hence
    has only one texture.

    Returns:
        lace.mesh.Mesh: the uniquified copy; ``self`` is untouched except
        that normals are computed (``reset_normals``) when missing.
    """
    import numpy as np
    from lace.mesh import Mesh
    # Hoist the flattened face index array: the original recomputed
    # self.f.flatten() three times.
    flat_f = self.f.flatten()
    # Each face gets its own three brand-new vertices, so the new face
    # array is simply sequential indices (np.arange, not array(range())).
    new_mesh = Mesh(
        v=self.v[flat_f],
        f=np.arange(len(flat_f)).reshape(-1, 3))
    if self.vn is None:
        # lazily compute normals on the source mesh before copying them
        self.reset_normals()
    new_mesh.vn = self.vn[flat_f]
    if self.vt is not None:
        new_mesh.vt = self.vt[self.ft.flatten()]
        new_mesh.ft = new_mesh.f.copy()
    return new_mesh
def edit_event_view(request, event_pk):
    '''The view to edit an event.'''
    page_name = "Edit Event"
    profile = UserProfile.objects.get(user=request.user)
    event = get_object_or_404(Event, pk=event_pk)
    view_url = reverse('events:view', kwargs={"event_pk": event_pk})
    # Only the event owner or a superuser may edit; everyone else is
    # bounced back to the read-only view.
    if event.owner != profile and not request.user.is_superuser:
        return HttpResponseRedirect(view_url)
    form = EventForm(
        request.POST or None,
        profile=profile,
        instance=event,
    )
    if form.is_valid():
        event = form.save()
        messages.add_message(
            request,
            messages.SUCCESS,
            MESSAGES['EVENT_UPDATED'].format(event=event.title),
        )
        return HttpResponseRedirect(view_url)
    return render_to_response(
        'edit_event.html',
        {'page_name': page_name, 'event_form': form},
        context_instance=RequestContext(request),
    )
def get_all_tags_with_auth ( image_name , branch = None ) : """Get the tag information using authentication credentials provided by the user . : param image _ name : The image name to query : param branch : The branch to filter by : return : A list of Version instances , latest first"""
logging . debug ( 'Getting %s with authentication' % image_name ) url = '%s/%s/images' % ( API_URL , image_name ) registry_user = os . environ . get ( 'REGISTRY_USER' ) registry_pass = os . environ . get ( 'REGISTRY_PASS' ) if registry_user is None or registry_pass is None : msg = ( 'The docker image seems to be private and no authentication' ' credentials were provided. Please set REGISTRY_USER and' ' REGISTRY_PASS environment variables and try again.' ) raise DockerTagNamingException ( msg ) user_credentials = ( registry_user , registry_pass ) headers = { 'X-Docker-Token' : 'true' , 'Accept' : 'application/json' , 'Content-Type' : 'application/json' } try : response = requests . get ( url , auth = user_credentials , headers = headers ) except RequestException , re : raise DockerTagNamingException ( 'HTTP request exception "%s"' % re ) if response . status_code in ( 404 , 401 ) : msg = ( 'Received unexpected status code %s from the registry' ' REST API after sending authentication credentials, the image' ' might not exist or is private.' ) raise AuthException ( msg % response . status_code ) token = response . headers . get ( 'x-docker-token' ) endpoint = response . headers . get ( 'x-docker-endpoints' ) if token is None : raise DockerTagNamingException ( 'No token returned by the REST API' ) if endpoint is None : raise DockerTagNamingException ( 'No docker endpoint returned by the' ' REST API' ) url = 'https://{0}/v1/repositories/{1}/tags' . format ( endpoint , image_name ) headers [ 'Authorization' ] = 'Token %s' % token try : response = requests . get ( url , headers = headers ) except RequestException , re : raise DockerTagNamingException ( 'HTTP request exception "%s"' % re ) if response . status_code in ( 404 , ) : logging . debug ( 'Received unexpected status code %s from the registry' ' REST API, the image might not exist in the registry' ' but was registered in the hub.' ) return [ ] if response . 
status_code != 200 : msg = ( 'Received unexpected status code %s from the registry' ' REST API. Please report a bug.' ) raise AuthException ( msg % response . status_code ) try : json_data = response . json ( ) except ValueError : msg = 'JSON decode failed! Raw data is: "%s". Please report a bug.' raise DockerTagNamingException ( msg % ( response . content [ : 25 ] ) . strip ( ) ) # Note that the registry returns a different format output = [ ] for tag_name in json_data . keys ( ) : try : version = version_parser ( tag_name ) except Exception , e : msg = 'Ignoring version tag "%s" with incorrect format: "%s"' logging . debug ( msg % ( tag_name , e ) ) continue if branch is not None : if version . branch != branch : continue output . append ( version ) def sort_func ( version_a , version_b ) : return cmp ( version_b . version_number , version_a . version_number ) output . sort ( sort_func ) return output
def neighbours(self, word, size=10):
    """Yield up to *size* (word, score) pairs nearest to *word*.

    Candidates come from a KDTree query; the final ranking uses cosine
    similarity (descending, clamped to [0, 1]).
    """
    word = word.strip()
    vec = self.word_vec(word)
    [dists], [idxs] = self.kdt.query(
        array([vec]), k=size, return_distance=True)
    assert len(dists) == len(idxs), "distances and points should be in same shape."
    candidates = []
    score_of = {}
    for idx, dist in zip(idxs, dists):
        w = self.index2word[idx]
        if w == word:
            # the query word itself is a perfect match
            sim = 1.0
        else:
            sim = cosine(vec, self.syn0[idx])
        if sim < 0:
            sim = abs(sim)
        candidates.append(w)
        score_of[w] = min(sim, 1.0)
    # emit best-scoring words first
    for w in sorted(candidates, key=score_of.get, reverse=True):
        yield w, score_of[w]
def get_firmwares():
    '''Return `dmf_control_board` compiled Arduino hex file paths.

    This function may be used to locate firmware binaries that are
    available for flashing to [Arduino Mega2560][1] boards.

    Maps each board directory name to the list of absolute `*.hex`
    paths found (recursively) beneath it.

    [1]: http://arduino.cc/en/Main/arduinoBoardMega2560
    '''
    firmware_root = package_path().joinpath('firmware')
    pairs = []
    for board_dir in firmware_root.dirs():
        hex_paths = [hex_file.abspath()
                     for hex_file in board_dir.walkfiles('*.hex')]
        pairs.append((board_dir.name, hex_paths))
    return OrderedDict(pairs)
def remove_file(profile, branch, file_path, commit_message=None):
    """Remove a file from a branch.

    Args:
        profile
            A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.
        branch
            The name of a branch.
        file_path
            The path of the file to delete.
        commit_message
            A commit message to give to the commit.

    Returns:
        A dict with data about the branch's new ref (it includes the new
        SHA the branch's HEAD points to, after committing the new file).
    """
    head_sha = get_branch_sha(profile, branch)
    # Build a new tree identical to HEAD's, minus the file.
    current_tree = get_files_in_branch(profile, head_sha)
    pruned_tree = remove_file_from_tree(current_tree, file_path)
    tree_sha = trees.create_tree(profile, pruned_tree).get("sha")
    if not commit_message:
        commit_message = "Deleted " + file_path + "."
    # Commit the pruned tree on top of the old HEAD, then move the ref.
    commit = commits.create_commit(profile, commit_message, tree_sha, [head_sha])
    return refs.update_ref(profile, "heads/" + branch, commit.get("sha"))
def pop(self, count=1):
    """Return a new deque with the *count* rightmost elements removed.

    Popping the empty queue returns the empty queue.  A negative count
    delegates to ``popleft``.  Runs in amortized O(k) where k is the
    number of elements popped.

    For example, ``pdeque([1, 2]).pop()`` gives ``pdeque([1])``,
    ``pdeque([1, 2]).pop(2)`` gives ``pdeque([])`` and
    ``pdeque([1, 2]).pop(-1)`` gives ``pdeque([2])``.
    """
    if count < 0:
        return self.popleft(-count)
    # _pop_lists returns the updated (right, left) internal lists.
    right, left = PDeque._pop_lists(self._right_list, self._left_list, count)
    remaining = max(self._length - count, 0)
    return PDeque(left, right, remaining, self._maxlen)
def load(fp, encode_nominal=False, return_type=DENSE):
    '''Load a file-like object containing the ARFF document and convert
    it into a Python object.

    :param fp: a file-like object.
    :param encode_nominal: boolean, if True perform a label encoding
        while reading the .arff file.
    :param return_type: determines the data structure used to store the
        dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
        `arff.DENSE_GEN` or `arff.LOD_GEN`.
        Consult the sections on `working with sparse data`_ and
        `loading progressively`_.
    :return: a dictionary.
    '''
    return ArffDecoder().decode(
        fp, encode_nominal=encode_nominal, return_type=return_type)
def competition_leaderboard_download(self, competition, path, quiet=True):
    """Download competition leaderboards.

    Parameters
    ==========
    competition: the name of the competition
    path: a path to download the file to
    quiet: suppress verbose output (default is True)
    """
    raw = self.competition_download_leaderboard_with_http_info(
        competition, _preload_content=False)
    response = self.process_response(raw)
    # Fall back to the per-competition default directory when no path given.
    if path is None:
        target_dir = self.get_default_download_dir('competitions', competition)
    else:
        target_dir = path
    outfile = os.path.join(target_dir, competition + '.zip')
    self.download_file(response, outfile, quiet)
def _GetAPFSVolumeIdentifiers(self, scan_node):
    """Determines the APFS volume identifiers.

    Args:
      scan_node (dfvfs.SourceScanNode): scan node.

    Returns:
      list[str]: APFS volume identifiers.

    Raises:
      SourceScannerError: if the format of or within the source is not
          supported or the the scan node is invalid.
      UserAbort: if the user requested to abort.
    """
    if not scan_node or not scan_node.path_spec:
        raise errors.SourceScannerError('Invalid scan node.')

    volume_system = apfs_volume_system.APFSVolumeSystem()
    volume_system.Open(scan_node.path_spec)

    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
        return []

    # TODO: refactor self._volumes to use scan options.
    if self._volumes:
        if self._volumes == 'all':
            requested = range(1, volume_system.number_of_volumes + 1)
        else:
            requested = self._volumes

        selected = self._NormalizedVolumeIdentifiers(
            volume_system, requested, prefix='apfs')
        # Only honour the explicit selection when every selected
        # identifier actually exists on the volume system.
        if not set(selected).difference(volume_identifiers):
            return selected

    if len(volume_identifiers) > 1:
        try:
            volume_identifiers = self._PromptUserForAPFSVolumeIdentifiers(
                volume_system, volume_identifiers)
        except KeyboardInterrupt:
            raise errors.UserAbort('File system scan aborted.')

    return self._NormalizedVolumeIdentifiers(
        volume_system, volume_identifiers, prefix='apfs')
def write_basic_mesh(Verts, E2V=None, mesh_type='tri', pdata=None,
                     pvdata=None, cdata=None, cvdata=None,
                     fname='output.vtk'):
    """Write a .vtu mesh file for basic element types (thin wrapper
    around write_vtu).

    Parameters
    ----------
    Verts : {array}
        coordinate array (N x D)
    E2V : {array}
        element index array (Nel x Nelnodes); None means point cloud
    mesh_type : {string}
        type of elements: tri, quad, tet, hex (all 3d), or vertex
    pdata, pvdata : {array}
        scalar / vector data on vertices
    cdata, cvdata : {array}
        scalar / vector data on cells
    fname : {string}
        file to be written, e.g. 'mymesh.vtu'

    See Also
    --------
    write_vtu
    """
    # without connectivity the mesh degenerates to a point cloud
    if E2V is None:
        mesh_type = 'vertex'

    # VTK cell-type codes for the supported element kinds
    type_to_key = {'vertex': 1, 'tri': 5, 'quad': 9, 'tet': 10, 'hex': 12}
    key = type_to_key.get(mesh_type)
    if key is None:
        raise ValueError('unknown mesh_type=%s' % mesh_type)

    if mesh_type == 'vertex':
        uidx = np.arange(0, Verts.shape[0]).reshape((Verts.shape[0], 1))
        E2V = {key: uidx}
    else:
        E2V = {key: E2V}

    # write_vtu expects cell data keyed by cell type
    if cdata is not None:
        cdata = {key: cdata}
    if cvdata is not None:
        cvdata = {key: cvdata}

    write_vtu(Verts=Verts, Cells=E2V, pdata=pdata, pvdata=pvdata,
              cdata=cdata, cvdata=cvdata, fname=fname)
def field(self, name):
    '''Get the gdb.Value for the given field within the PyObject, coping
    with some python 2 versus python 3 differences.

    Various libpython types are defined using the "PyObject_HEAD" and
    "PyObject_VAR_HEAD" macros.  In Python 2 "ob_type" and (for a var
    object) "ob_size" are direct fields of the type in question.  In
    Python 3 these live inside an embedded PyVarObject "ob_base" field,
    and "ob_type" is most easily accessed by casting back to a
    (PyObject*).
    '''
    if self.is_null():
        raise NullPyObjectPtr(self)

    if name == 'ob_type':
        # Cast back to (PyObject*) so the lookup works on both versions.
        as_pyobject = self._gdbval.cast(PyObjectPtr.get_gdb_type())
        return as_pyobject.dereference()[name]

    if name == 'ob_size':
        try:
            # Python 2: direct field of the struct
            return self._gdbval.dereference()[name]
        except RuntimeError:
            # Python 3: nested inside the ob_base PyVarObject
            return self._gdbval.dereference()['ob_base'][name]

    # General case: look it up inside the object
    return self._gdbval.dereference()[name]
def get_stp_mst_detail_output_cist_port_port_hello_time(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF XML payload for get-stp-mst-detail with the
    cist/port/port-hello-time leaf set from kwargs, then invokes the
    callback (``kwargs['callback']`` or ``self._callback``) with it.
    """
    config = ET.Element("config")
    rpc = ET.Element("get_stp_mst_detail")
    # NOTE: mirrors the generated original, where the rpc element
    # replaces the initial "config" element entirely.
    config = rpc
    output_el = ET.SubElement(rpc, "output")
    cist_el = ET.SubElement(output_el, "cist")
    port_el = ET.SubElement(cist_el, "port")
    hello_el = ET.SubElement(port_el, "port-hello-time")
    hello_el.text = kwargs.pop('port_hello_time')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
async def deactivate(cls, access_key: str) -> dict:
    '''Deactivates this keypair.

    Deactivated keypairs cannot make any API requests unless activated
    again by an administrator.  You need an admin privilege for this
    operation.
    '''
    query = ('mutation($access_key: String!, $input: ModifyKeyPairInput!) {'
             ' modify_keypair(access_key: $access_key, props: $input) {'
             ' ok msg'
             ' }'
             '}')
    # Only is_active is changed; the remaining props are left untouched
    # by passing None.
    variables = {
        'access_key': access_key,
        'input': {
            'is_active': False,
            'is_admin': None,
            'resource_policy': None,
            'rate_limit': None,
        },
    }
    request = Request(cls.session, 'POST', '/admin/graphql')
    request.set_json({
        'query': query,
        'variables': variables,
    })
    async with request.fetch() as resp:
        payload = await resp.json()
    return payload['modify_keypair']
def _system_parameters ( ** kwargs ) : """Returns system keyword arguments removing Nones . Args : kwargs : system keyword arguments . Returns : dict : system keyword arguments ."""
return { key : value for key , value in kwargs . items ( ) if ( value is not None or value == { } ) }
def _compute_needed_metrics(self, instance, available_metrics):
    """Compare the available metrics for one MOR we have computed and
    intersect them with the set of metrics we want to report"""
    i_key = self._instance_key(instance)

    if self.in_compatibility_mode(instance):
        if instance.get('all_metrics', False):
            return available_metrics

        # Keep only the basic metrics.
        wanted_metrics = []
        for counter_id in available_metrics:
            if not self.metadata_cache.contains(i_key, counter_id):
                # No cache yet, skip it for now
                self.log.debug(
                    "No metadata found for counter {}, will not collect it".format(
                        ensure_unicode(counter_id)))
                continue
            metadata = self.metadata_cache.get_metadata(i_key, counter_id)
            if metadata.get('name') in BASIC_METRICS:
                wanted_metrics.append(
                    vim.PerformanceManager.MetricId(
                        counterId=counter_id, instance="*"))
        return wanted_metrics

    # The metadata cache contains only metrics of the desired level,
    # so use it to filter the metrics to keep.
    return [
        vim.PerformanceManager.MetricId(counterId=counter_id, instance="*")
        for counter_id in available_metrics
        if self.metadata_cache.contains(i_key, counter_id)
    ]
def store_net_db(self, tenant_id, net, net_dict, result):
    """Store service network in DB."""
    # Map the incoming net_dict onto the DB row layout; note that the
    # 'vlan' column is fed from the 'vlan_id' key.
    db_row = dict(
        name=net_dict.get('name'),
        config_profile=net_dict.get('config_profile'),
        segmentation_id=net_dict.get('segmentation_id'),
        tenant_id=tenant_id,
        fwd_mode=net_dict.get('fwd_mode'),
        vlan=net_dict.get('vlan_id'),
    )
    self.add_network_db(net, db_row, fw_const.FW_CONST, result)
def split ( self ) : """Split a Path2D into multiple Path2D objects where each one has exactly one root curve . Parameters self : trimesh . path . Path2D Input geometry Returns split : list of trimesh . path . Path2D Original geometry as separate paths"""
# avoid a circular import by referencing class of self Path2D = type ( self ) # save the results of the split to an array split = [ ] # get objects from cache to avoid a bajillion # cache checks inside the tight loop paths = self . paths discrete = self . discrete polygons_closed = self . polygons_closed enclosure_directed = self . enclosure_directed for root_index , root in enumerate ( self . root ) : # get a list of the root curve ' s children connected = list ( enclosure_directed [ root ] . keys ( ) ) # add the root node to the list connected . append ( root ) # store new paths and entities new_paths = [ ] new_entities = [ ] for index in connected : path = paths [ index ] # add a path which is just sequential indexes new_paths . append ( np . arange ( len ( path ) ) + len ( new_entities ) ) # save the entity indexes new_entities . extend ( path ) # store the root index from the original drawing metadata = copy . deepcopy ( self . metadata ) metadata [ 'split_2D' ] = root_index # we made the root path the last index of connected new_root = np . array ( [ len ( new_paths ) - 1 ] ) # prevents the copying from nuking our cache with self . _cache : # create the Path2D split . append ( Path2D ( entities = copy . deepcopy ( self . entities [ new_entities ] ) , vertices = copy . deepcopy ( self . vertices ) , metadata = metadata ) ) # add back expensive things to the cache split [ - 1 ] . _cache . update ( { 'paths' : new_paths , 'polygons_closed' : polygons_closed [ connected ] , 'discrete' : discrete [ connected ] , 'root' : new_root } ) # set the cache ID split [ - 1 ] . _cache . id_set ( ) return np . array ( split )
def create_app_multi_region(regional_options, app_name, src_dir, publish=False, set_default=False, billTo=None, try_versions=None, try_update=True, confirm=True):
    """Create a new app object from the specified applet(s).

    :param regional_options: Region-specific options for the app. See
        https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method:-/app/new
        for details; this should contain keys for each region the app is to
        be enabled in, and for the values, a dict containing (at minimum) a
        key "applet" whose value is an applet ID for that region.
    :type regional_options: dict
    """
    app_spec = dict(regionalOptions=regional_options)
    return _create_app(app_spec, app_name, src_dir,
                       publish=publish, set_default=set_default,
                       billTo=billTo, try_versions=try_versions,
                       try_update=try_update, confirm=confirm)
def composition(mol):
    """Return the molecular composition as a Counter mapping element
    symbol to atom count (e.g. glucose -> {'C': 6, 'H': 12, 'O': 6}).
    """
    mol.require("Valence")
    # Sum up the per-atom element counts across every atom in the molecule.
    return sum((atom.composition() for _, atom in mol.atoms_iter()),
               Counter())
def category_filter(self, category_filter):
    """Sets the category_filter of this AzureActivityLogConfiguration.

    A list of Azure ActivityLog categories to pull events for. Allowable
    values are ADMINISTRATIVE, SERVICEHEALTH, ALERT, AUTOSCALE, SECURITY  # noqa: E501

    :param category_filter: The category_filter of this AzureActivityLogConfiguration.  # noqa: E501
    :type: list[str]
    """
    allowed_values = ["ADMINISTRATIVE", "SERVICEHEALTH", "ALERT", "AUTOSCALE", "SECURITY"]  # noqa: E501
    # Any value outside the allowed set is rejected up front.
    invalid = set(category_filter) - set(allowed_values)
    if invalid:
        raise ValueError(
            "Invalid values for `category_filter` [{0}], must be a subset of [{1}]"  # noqa: E501
            .format(", ".join(map(str, invalid)),  # noqa: E501
                    ", ".join(map(str, allowed_values))))
    self._category_filter = category_filter
def __parse(value):
    """Parse the string date.

    Supports the subset of ISO8601 used by xsd:date, but is lenient with
    what is accepted, handling most reasonable syntax. Any timezone is
    parsed but ignored because a) it is meaningless without a time and
    b) B{datetime}.I{date} does not support timezone information.

    @param value: A date string.
    @type value: str
    @return: A date object.
    @rtype: B{datetime}.I{date}
    """
    match = _RE_DATE.match(value)
    if not match:
        raise ValueError("date data has invalid format '%s'" % (value,))
    return _date_from_match(match)
def create(self, name, data):
    """Create a Job Binary Internal.

    :param str name: name of the job binary internal (URL-quoted into
        the request path)
    :param str data: raw data of script text
    """
    quoted_name = urlparse.quote(name.encode('utf-8'))
    return self._update('/job-binary-internals/%s' % quoted_name,
                        data, 'job_binary_internal', dump_json=False)
def begin(self):
    """Start taking temperature measurements.

    Returns True if the device is initialized, False otherwise.
    """
    # Verify the chip answers with the expected MCP9808 manufacturer
    # and device IDs before trusting any readings.
    manufacturer_id = self._device.readU16BE(MCP9808_REG_MANUF_ID)
    device_id = self._device.readU16BE(MCP9808_REG_DEVICE_ID)
    self._logger.debug('Read manufacturer ID: {0:04X}'.format(manufacturer_id))
    self._logger.debug('Read device ID: {0:04X}'.format(device_id))
    return manufacturer_id == 0x0054 and device_id == 0x0400
def DeleteHuntObject(self, hunt_id, cursor=None):
    """Deletes a given hunt object."""
    hunt_id_int = db_utils.HuntIDToInt(hunt_id)
    # Delete the hunt row itself; zero rows means the hunt never existed.
    deleted = cursor.execute("DELETE FROM hunts WHERE hunt_id = %s",
                             [hunt_id_int])
    if deleted == 0:
        raise db.UnknownHuntError(hunt_id)
    # Clean up the dependent output-plugin state rows.
    cursor.execute("DELETE FROM hunt_output_plugins_states WHERE hunt_id = %s",
                   [hunt_id_int])
def orthogonalization_matrix(lengths, angles):
    """Return orthogonalization matrix for crystallographic cell coordinates.

    Angles are expected in degrees.  The de-orthogonalization matrix is
    the inverse.

    >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
    >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
    True
    >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
    >>> numpy.allclose(numpy.sum(O), 43.063229)
    True
    """
    a, b, c = lengths
    rad = numpy.radians(angles)
    sin_a, sin_b, _ = numpy.sin(rad)
    cos_a, cos_b, cos_g = numpy.cos(rad)
    co = (cos_a * cos_b - cos_g) / (sin_a * sin_b)
    # 4x4 homogeneous matrix; last row/column are the identity part.
    return numpy.array((
        (a * sin_b * math.sqrt(1.0 - co * co), 0.0, 0.0, 0.0),
        (-a * sin_b * co, b * sin_a, 0.0, 0.0),
        (a * cos_b, b * cos_a, c, 0.0),
        (0.0, 0.0, 0.0, 1.0)))
def _subs_tree(cls, tvars=None, args=None):
    """An internal helper function: calculate substitution tree
    for generic cls after replacing its type parameters with
    substitutions in tvars -> args (if any).
    Repeat the same following __origin__'s.

    Return a list of arguments with all possible substitutions
    performed. Arguments that are generic classes themselves are represented
    as tuples (so that no new classes are created by this function).
    For example: _subs_tree(List[Tuple[int, T]][str]) == [(Tuple, int, str)]
    """
    # A non-subscripted generic has no substitutions to perform.
    if cls.__origin__ is None:
        return cls
    # Make of chain of origins (i.e. cls -> cls.__origin__)
    current = cls.__origin__
    orig_chain = []
    while current.__origin__ is not None:
        orig_chain.append(current)
        current = current.__origin__
    # Replace type variables in __args__ if asked...
    tree_args = []
    for arg in cls.__args__:
        tree_args.append(_replace_arg(arg, tvars, args))
    # ...then continue replacing down the origin chain, substituting each
    # origin's parameters with the tree computed so far.
    for ocls in orig_chain:
        new_tree_args = []
        for arg in ocls.__args__:
            new_tree_args.append(_replace_arg(arg, ocls.__parameters__, tree_args))
        tree_args = new_tree_args
    return tree_args
def _parse_features(features, new_names):
    """Takes a collection of features structured in a various ways and
    parses them into one way.

    If input format is not recognized it raises an error.

    :return: A collection of features
    :rtype: collections.OrderedDict(FeatureType: collections.OrderedDict(str: str or Ellipsis) or Ellipsis)
    :raises: ValueError
    """
    # Dispatch container inputs to the matching specialized parser.
    if isinstance(features, dict):
        return FeatureParser._parse_dict(features, new_names)
    if isinstance(features, (list, tuple)):
        parser = (FeatureParser._parse_list if isinstance(features, list)
                  else FeatureParser._parse_tuple)
        return parser(features, new_names)
    # Ellipsis means "everything" for every feature type.
    if features is ...:
        return OrderedDict((feature_type, ...) for feature_type in FeatureType)
    # A bare FeatureType means "everything" for that one type.
    if isinstance(features, FeatureType):
        return OrderedDict([(features, ...)])
    # A bare string is a single feature name with an unspecified type.
    if isinstance(features, str):
        return OrderedDict([(None, OrderedDict([(features, ...)]))])
    raise ValueError('Unknown format of input features: {}'.format(features))
def get_errors(audit_results):
    """Summarize axe-core audit results.

    Args:
        audit_results: results of `AxeCoreAudit.do_audit()`.

    Returns:
        A dictionary with keys "errors" (the raw results) and "total"
        (the number of offending nodes across all results).
    """
    if not audit_results:
        return {"errors": [], "total": 0}
    node_count = sum(len(result["nodes"]) for result in audit_results)
    return {"errors": list(audit_results), "total": node_count}
def initialize(cls):
    """Initialize Axes logging and show version information.

    Re-entrant: may be called multiple times, but the banner is emitted
    exactly once at application startup, and only when AXES_VERBOSE is on.
    """
    if cls.logging_initialized:
        return
    cls.logging_initialized = True
    if not settings.AXES_VERBOSE:
        return
    log.info('AXES: BEGIN LOG')
    log.info('AXES: Using django-axes %s', get_version())
    # Describe the configured lockout strategy.
    if settings.AXES_ONLY_USER_FAILURES:
        strategy = 'AXES: blocking by username only.'
    elif settings.AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP:
        strategy = 'AXES: blocking by combination of username and IP.'
    else:
        strategy = 'AXES: blocking by IP only.'
    log.info(strategy)
def add_provider(self, cls: Type[BaseProvider]) -> None:
    """Add a custom provider to Generic() object.

    :param cls: Custom provider.
    :return: None
    :raises TypeError: if cls is not a class or not a BaseProvider subclass.
    """
    # Guard clauses: reject non-classes and non-provider classes early.
    if not inspect.isclass(cls):
        raise TypeError('The provider must be a class')
    if not issubclass(cls, BaseProvider):
        raise TypeError('The provider must be a '
                        'subclass of BaseProvider')
    # Prefer the Meta.name attribute, falling back to the class name.
    try:
        name = cls.Meta.name
    except AttributeError:
        name = cls.__name__.lower()
    setattr(self, name, cls(seed=self.seed))
def verify_exif(filename):
    '''Check that image file has the required EXIF fields.

    Incompatible files will be ignored server side.
    '''
    # Required tags in IFD name convention.
    required = required_fields()
    exif_data = ExifRead(filename)
    return exif_data.fields_exist(required)
def get_reduced_symbols(symbols):
    """Reduces expanded list of symbols.

    Args:
        symbols: list containing chemical symbols, one entry per atom in
            the structure (so symbols may repeat).

    Returns:
        reduced_symbols: list with duplicates removed; each symbol appears
        only once, in order of first appearance.
    """
    # dict preserves insertion order (Python 3.7+), so dict.fromkeys
    # de-duplicates in one pass while keeping first-occurrence order,
    # replacing the hand-rolled O(n^2) membership loop.
    return list(dict.fromkeys(symbols))
def to_posix_path(code_path):
    """Change the code_path to be of unix-style if running on windows when
    supplied with an absolute windows path.

    Parameters
    ----------
    code_path : str
        Directory in the host operating system that should be mounted
        within the container.

    Returns
    -------
    str
        Posix equivalent of absolute windows style path.

    Examples
    --------
    >>> to_posix_path('/Users/UserName/sam-app')
    /Users/UserName/sam-app
    >>> to_posix_path('C:\\\\Users\\\\UserName\\\\AppData\\\\Local\\\\Temp\\\\mydir')
    /c/Users/UserName/AppData/Local/Temp/mydir
    """
    # Non-Windows hosts already use posix paths; nothing to translate.
    if os.name != "nt":
        return code_path

    def _drive_to_posix(match):
        # 'C:' -> '/c'
        return posixpath.sep + match.group().replace(":", "").lower()

    return re.sub("^([A-Za-z])+:", _drive_to_posix,
                  pathlib.PureWindowsPath(code_path).as_posix())
def get_next_step(self):
    """Find the proper step when user clicks the Next button.

    :returns: The step to be switched to
    :rtype: WizardStep instance or None
    """
    # Guard-clause dispatch on whichever radio button is selected.
    if self.rbAggLayerFromCanvas.isChecked():
        return self.parent.step_fc_agglayer_from_canvas
    if self.rbAggLayerFromBrowser.isChecked():
        return self.parent.step_fc_agglayer_from_browser
    return self.parent.step_fc_summary
def to_json(self):
    """Writes the complete Morse-Smale merge hierarchy to a string object.

    @ Out, a string object storing the entire merge hierarchy of all
    minima and maxima.
    """
    # One record per merge event in the persistence hierarchy.
    hierarchy = [
        {"Dying": dying,
         "Persistence": persistence,
         "Surviving": surviving,
         "Saddle": saddle}
        for dying, (persistence, surviving, saddle)
        in self.merge_sequence.items()
    ]
    # One (min, max) pair per sample, filled in from the base partitions.
    partition_table = np.array([None, None] * len(self.Y)).reshape(-1, 2)
    for (min_index, max_index), items in self.base_partitions.items():
        partition_table[items, :] = [min_index, max_index]
    return json.dumps({"Hierarchy": hierarchy,
                       "Partitions": partition_table.tolist()})
def build_all(self, verbose=False, hide_base_schemas=True, hide_implicit_types=True, hide_implicit_preds=True):
    """Extract all ontology entities from an RDF graph and construct
    Python representations of them.

    Builds ontologies, classes, properties, SKOS concepts and SHACL
    shapes in sequence, printing per-step counts when `verbose` is True.
    """
    if verbose:
        printDebug("Scanning entities...", "green")
        printDebug("----------", "comment")
    self.build_ontologies()
    if verbose:
        printDebug("Ontologies.........: %d" % len(self.all_ontologies), "comment")
    self.build_classes(hide_base_schemas, hide_implicit_types)
    if verbose:
        printDebug("Classes............: %d" % len(self.all_classes), "comment")
    self.build_properties(hide_implicit_preds)
    if verbose:
        printDebug("Properties.........: %d" % len(self.all_properties), "comment")
        printDebug("..annotation.......: %d" % len(self.all_properties_annotation), "comment")
        printDebug("..datatype.........: %d" % len(self.all_properties_datatype), "comment")
        printDebug("..object...........: %d" % len(self.all_properties_object), "comment")
    self.build_skos_concepts()
    if verbose:
        printDebug("Concepts (SKOS)....: %d" % len(self.all_skos_concepts), "comment")
    self.build_shapes()
    if verbose:
        printDebug("Shapes (SHACL).....: %d" % len(self.all_shapes), "comment")
    # self.__computeTopLayer()
    self.__computeInferredProperties()
    if verbose:
        printDebug("----------", "comment")
def __analizar_errores(self, ret):
    "Check for errors in the XML response and extract them if present."
    if 'arrayErrores' not in ret:
        return
    error_entries = ret['arrayErrores'] or []
    self.Errores = [entry['error'] for entry in error_entries]
    # Flatten the messages into the legacy code/message attributes.
    self.ErrCode = ' '.join(self.Errores)
    self.ErrMsg = '\n'.join(self.Errores)
def delete(self):
    """Delete this NIC.

    Authorization requirements:

    * Object-access permission to the Partition containing this HBA.
    * Task permission to the "Partition Details" task.

    Raises:
      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    self.manager.session.delete(self._uri)
    # Drop the cached name -> URI mapping for the deleted object.
    cached_name = self.properties.get(self.manager._name_prop, None)
    self.manager._name_uri_cache.delete(cached_name)
def calculate_sleep_time(attempt, delay_factor=5.0, randomization_factor=.5, max_delay=120):
    """Calculate the sleep time between retries, in seconds.

    Based off of `taskcluster.utils.calculateSleepTime`, but with kwargs
    instead of constant `delay_factor`/`randomization_factor`/`max_delay`.
    The taskcluster function generally slept for less than a second, which
    didn't always get past server issues.

    Args:
        attempt (int): the retry attempt number
        delay_factor (float, optional): a multiplier for the delay time.
            Defaults to 5.
        randomization_factor (float, optional): a randomization multiplier
            for the delay time. Defaults to .5.
        max_delay (float, optional): the max delay to sleep. Defaults to
            120 (seconds).

    Returns:
        float: the time to sleep, in seconds.
    """
    if attempt <= 0:
        return 0
    # Exponential backoff: delay_factor, 2*delay_factor, 4*delay_factor, ...
    base_delay = float(2 ** (attempt - 1)) * float(delay_factor)
    # Random jitter only ever increases the delay (factor in [1, 1+rf)).
    jittered = base_delay * (randomization_factor * random.random() + 1)
    # Clamp to the configured ceiling.
    return min(jittered, max_delay)
def start(self):
    '''Starts (Subscribes) the client.'''
    # Keep a handle on the subscriber so it can be unregistered later.
    subscriber = rospy.Subscriber(self.topic, BumperEvent, self.__callback)
    self.sub = subscriber
def siblings_after(self):
    """
    :return: a list of this node's siblings that occur *after* this node
        in the DOM.
    """
    children = self.adapter.get_node_children(self.parent.impl_node)
    following = []
    seen_self = False
    # Single pass over the parent's children: start collecting once we
    # have walked past this node itself.
    for child in children:
        if seen_self:
            following.append(child)
        elif child == self.impl_node:
            seen_self = True
    return self._convert_nodelist(following)
def GET(self):
    """Checks if user is authenticated and calls POST_AUTH or performs
    login and calls GET_AUTH. Otherwise, returns the login template.
    """
    # LTI session data must already have been established by the launch.
    data = self.user_manager.session_lti_info()
    if data is None:
        raise web.notfound()
    try:
        course = self.course_factory.get_course(data["task"][0])
        # Raise to fall into the common "invalid data" handler below
        # when the consumer key is not registered for this course.
        if data["consumer_key"] not in course.lti_keys().keys():
            raise Exception()
    except:
        # NOTE(review): bare except deliberately treats any failure
        # (unknown course, missing keys, ...) as invalid LTI data, but it
        # also swallows unexpected errors -- consider `except Exception`.
        return self.template_helper.get_renderer().lti_bind(False, "", None, "Invalid LTI data")
    # Look up an existing binding of this LTI (task, consumer) pair to a
    # local user account.
    user_profile = self.database.users.find_one(
        {"ltibindings." + data["task"][0] + "." + data["consumer_key"]: data["username"]})
    if user_profile:
        self.user_manager.connect_user(user_profile["username"], user_profile["realname"],
                                       user_profile["email"], user_profile["language"])
    if self.user_manager.session_logged_in():
        # Already bound and logged in: redirect straight to the task.
        raise web.seeother(self.app.get_homepath() + "/lti/task")
    # No binding yet: show the LTI login page.
    return self.template_helper.get_renderer().lti_login(False)
def print_row(self, row, rstrip=True):
    """Format and print the pre-rendered data to the output device."""
    rendered = ''.join(str(cell) for cell in row)
    if rstrip:
        rendered = rendered.rstrip()
    print(rendered, file=self.table.file)
def del_hparam(self, name):
    """Removes the hyperparameter with key 'name'.

    Does nothing if it isn't present.

    Args:
        name: Name of the hyperparameter.
    """
    # Guard clause: unknown names are silently ignored.
    if not hasattr(self, name):
        return
    delattr(self, name)
    del self._hparam_types[name]
def brunt_vaisala_period(heights, potential_temperature, axis=0):
    r"""Calculate the Brunt-Vaisala period.

    This function is a helper function for `brunt_vaisala_frequency` that
    calculates the period of oscillation as in Exercise 3.13 of
    [Hobbs2006]_:

    .. math:: \tau = \frac{2\pi}{N}

    Returns `NaN` when :math:`N^2 \le 0` (the docstring previously stated
    the opposite condition; the code masks statically neutral/unstable
    layers, where the period is undefined).

    Parameters
    ----------
    heights : array-like
        One-dimensional profile of atmospheric height
    potential_temperature : array-like
        Atmospheric potential temperature
    axis : int, optional
        The axis corresponding to vertical in the potential temperature
        array, defaults to 0.

    Returns
    -------
    array-like
        Brunt-Vaisala period.

    See Also
    --------
    brunt_vaisala_frequency, brunt_vaisala_frequency_squared,
    potential_temperature
    """
    bv_freq_squared = brunt_vaisala_frequency_squared(heights, potential_temperature,
                                                      axis=axis)
    # Mask out non-positive N^2 before the square root so we return NaN
    # instead of raising / producing complex values.
    bv_freq_squared[bv_freq_squared.magnitude <= 0] = np.nan
    return 2 * np.pi / np.sqrt(bv_freq_squared)
def attachmethod(target):
    '''Attach the decorated function to `target` as a method (open class).

    If `target` is a class, the function is attached unmodified and will
    receive the instance as its usual first argument.  If `target` is an
    instance, the function is attached with the instance pre-bound via
    `functools.partial`.

    Reference:
    https://blog.tonyseek.com/post/open-class-in-python/

        class Spam(object):
            pass

        @attachmethod(Spam)
        def egg1(self, name):
            print((self, name))

        spam1 = Spam()
        spam1.egg1("Test1")
    '''
    if isinstance(target, type):
        def decorator(func):
            setattr(target, func.__name__, func)
            # Return the function so the decorated module-level name
            # stays usable (previously the decorator returned None,
            # clobbering the name).
            return func
    else:
        def decorator(func):
            setattr(target, func.__name__, partial(func, target))
            return func
    return decorator
def add_dataset(data_type, val, unit_id=None, metadata=None, name="", user_id=None, flush=False):
    """Add a single dataset outside of any scenario.

    Data can exist without scenarios. This is the mechanism whereby single
    pieces of data can be added without doing it through a scenario.
    A typical use of this would be for setting default values on types.

    Args:
        data_type: type string stored on the Dataset.
        val: raw value of the dataset.
        unit_id: optional unit identifier.
        metadata: optional dict of metadata. A fresh dict is created when
            omitted (avoids the shared mutable-default-argument pitfall;
            previously the default was a module-level ``{}``).
        name: display name for the dataset.
        user_id: id of the creating user; also used for the access check
            when an identical (same hash) dataset already exists.
        flush: when truthy, flush the DB session before returning.

    Returns:
        The new Dataset, or the existing one when an identical dataset is
        already present and readable by `user_id`.
    """
    if metadata is None:
        metadata = {}
    d = Dataset()
    d.type = data_type
    d.value = val
    d.set_metadata(metadata)
    d.unit_id = unit_id
    d.name = name
    d.created_by = user_id
    d.hash = d.set_hash()
    try:
        # Re-use an identical dataset when one exists and is accessible.
        existing_dataset = db.DBSession.query(Dataset).filter(Dataset.hash == d.hash).one()
        if existing_dataset.check_user(user_id):
            d = existing_dataset
        else:
            # Same hash but not accessible to this user: perturb the
            # metadata so the new row gets a distinct hash.
            d.set_metadata({'created_at': datetime.datetime.now()})
            d.set_hash()
            db.DBSession.add(d)
    except NoResultFound:
        db.DBSession.add(d)
    if flush:
        db.DBSession.flush()
    return d
def _augment ( self ) : """Finds a minimum cost path and adds it to the matching"""
# build a minimum cost tree _pred , _ready , istar , j , mu = self . _build_tree ( ) # update prices self . _v [ _ready ] += self . _d [ _ready ] - mu # augment the solution with the minimum cost path from the # tree . Follows an alternating path along matched , unmatched # edges from X to Y while True : i = _pred [ j ] self . _y [ j ] = i k = j j = self . _x [ i ] self . _x [ i ] = k if i == istar : break self . _update_cred ( )
def seed(self):
    """Seed new initial values for the stochastics.

    Walks every stochastic in every generation and, when it has an
    `rseed`, draws a fresh value via its `random()` method with its
    parents' current values.  Individual failures are ignored so a single
    unseedable stochastic does not abort the whole pass (best effort).
    """
    for generation in self.generations:
        for stochastic in generation:
            try:
                if stochastic.rseed is not None:
                    # random() resamples the stochastic in place; the
                    # return value is not needed (the previous code bound
                    # it to an unused local).
                    stochastic.random(**stochastic.parents.value)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed.
                pass
def replace_store_profile_by_id(cls, store_profile_id, store_profile, **kwargs):
    """Replace StoreProfile

    Replace all attributes of StoreProfile.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.replace_store_profile_by_id(store_profile_id, store_profile, async=True)
    >>> result = thread.get()

    :param async bool
    :param str store_profile_id: ID of storeProfile to replace (required)
    :param StoreProfile store_profile: Attributes of storeProfile to replace (required)
    :return: StoreProfile
             If the method is called asynchronously, returns the request
             thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the generated async/sync conditional returned the
    # identical call expression, so the redundant branching is collapsed:
    # the helper itself returns a thread when async is requested and the
    # unwrapped data otherwise.
    return cls._replace_store_profile_by_id_with_http_info(
        store_profile_id, store_profile, **kwargs)
def get_device_by_ain(self, ain):
    """Returns the device with the given AIN, or None if no device
    matches."""
    matches = (device for device in self.get_devices()
               if device.ain == ain)
    # next() with a default mirrors the original implicit-None fallthrough.
    return next(matches, None)
def crpix(self):
    """The location of the reference coordinate in the pixel frame.

    First simply respond with the header values; if they don't exist try
    using the DETSEC values.

    @rtype: float, float
    """
    try:
        # Preferred source: the WCS reference pixel straight from the header.
        return self.wcs.crpix1, self.wcs.crpix2
    except Exception as ex:
        logging.debug("Couldn't get CRPIX from WCS: {}".format(ex))
        logging.debug("Switching to use DATASEC for CRPIX value computation.")
        try:
            # Detector-section bounds and image dimensions needed for the
            # fallback computation.
            (x1, x2), (y1, y2) = util.get_pixel_bounds_from_datasec_keyword(self['DETSEC'])
            dx = float(self['NAXIS1'])
            dy = float(self['NAXIS2'])
        except KeyError as ke:
            raise KeyError("Header missing keyword: {}, required for CRPIX[12] computation".format(ke.args[0]))
        # NOTE(review): appears to place the reference pixel at the
        # detector center (_DET_X_CEN/_DET_Y_CEN) expressed in this
        # extension's pixel coordinates -- confirm the mosaic geometry.
        crpix1 = self._DET_X_CEN - (x1 + x2) / 2. + dx / 2.
        crpix2 = self._DET_Y_CEN - (y1 + y2) / 2. + dy / 2.
        return crpix1, crpix2
def get_job_log_url(self, project, **params):
    """Gets job log url, filtered by parameters.

    :param project: project (repository name) to query data for
    :param params: keyword arguments to filter results
    """
    endpoint = self.JOB_LOG_URL_ENDPOINT
    return self._get_json(endpoint, project, **params)
def _Pcn_zm_crp(x, dsz, Nv, dimN=2, dimC=1):
    """Projection onto dictionary update constraint set: support
    projection, mean subtraction, and normalisation. The result is
    cropped to the support of the largest filter in the dictionary.

    Parameters
    ----------
    x : array_like
      Input array
    dsz : tuple
      Filter support size(s), specified using the same format as the
      `dsz` parameter of :func:`bcrop`.
    Nv : tuple
      Sizes of problem spatial indices
    dimN : int, optional (default 2)
      Number of problem spatial indices
    dimC : int, optional (default 1)
      Number of problem channel indices

    Returns
    -------
    y : ndarray
      Projection of input onto constraint set
    """
    # Crop to the filter support, remove the mean, then normalise over
    # the combined spatial + channel axes.
    cropped = bcrop(x, dsz, dimN)
    centered = zeromean(cropped, dsz, dimN)
    return normalise(centered, dimN + dimC)
def parse_numbering(document, xmlcontent):
    """Parse numbering document.

    Numbering is defined in file 'numbering.xml'.
    """
    numbering = etree.fromstring(xmlcontent)
    # note: 'abstruct' (sic) is the attribute name used by the rest of
    # the codebase, so it is kept as-is.
    document.abstruct_numbering = {}
    document.numbering = {}
    # First pass: collect the abstract numbering definitions, recording
    # the number format (numFmt) for each indentation level (ilvl).
    for abstruct_num in numbering.xpath('.//w:abstractNum', namespaces=NAMESPACES):
        numb = {}
        for lvl in abstruct_num.xpath('./w:lvl', namespaces=NAMESPACES):
            ilvl = int(lvl.attrib[_name('{{{w}}}ilvl')])
            fmt = lvl.find(_name('{{{w}}}numFmt'))
            numb[ilvl] = {'numFmt': fmt.attrib[_name('{{{w}}}val')]}
        document.abstruct_numbering[abstruct_num.attrib[_name('{{{w}}}abstractNumId')]] = numb
    # Second pass: map concrete numId entries to their abstract definition.
    for num in numbering.xpath('.//w:num', namespaces=NAMESPACES):
        num_id = num.attrib[_name('{{{w}}}numId')]
        abs_num = num.find(_name('{{{w}}}abstractNumId'))
        if abs_num is not None:
            number_id = abs_num.attrib[_name('{{{w}}}val')]
            document.numbering[int(num_id)] = number_id
def com_daltonmaag_check_required_fields(ufo_font):
    """Check that required fields are present in the UFO fontinfo.

    ufo2ft requires these info fields to compile a font binary:
    unitsPerEm, ascender, descender, xHeight, capHeight and familyName.
    """
    required = ("unitsPerEm", "ascender", "descender",
                "xHeight", "capHeight", "familyName")
    # fontinfo stores values under private "_<field>" attribute names.
    missing = [field for field in required
               if ufo_font.info.__dict__.get("_" + field) is None]
    if missing:
        yield FAIL, f"Required field(s) missing: {missing}"
    else:
        yield PASS, "Required fields present."
def cmd_cm(self, nm=None, ch=None):
    """cm nm=color_map_name ch=chname

    Set a color map (name `nm`) for the given channel. If no value is
    given, reports the current color map.
    """
    viewer = self.get_viewer(ch)
    if viewer is None:
        self.log("No current viewer/channel.")
        return
    if nm is not None:
        viewer.set_color_map(nm)
        return
    # No name supplied: report the currently active color map.
    current_cmap = viewer.get_rgbmap().get_cmap()
    self.log(current_cmap.name)
def tenant_present(name, description=None, enabled=True, profile=None, **connection_args):
    '''
    Ensures that the keystone tenant exists

    name
        The name of the tenant to manage

    description
        The description to use for this tenant

    enabled
        Availability state for this tenant
    '''
    # Default to "no changes needed" and amend below.
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Tenant / project "{0}" already exists'.format(name)}
    # Populates the module-level _OS_IDENTITY_API_VERSION used below.
    _api_version(profile=profile, **connection_args)
    # Check if tenant is already present
    tenant = __salt__['keystone.tenant_get'](name=name,
                                             profile=profile,
                                             **connection_args)
    if 'Error' not in tenant:
        # Tenant exists: reconcile description and enabled flag separately.
        if tenant[name].get('description', None) != description:
            if __opts__.get('test'):
                # Dry-run: report the pending change without applying it.
                ret['result'] = None
                ret['comment'] = 'Tenant / project "{0}" will be updated'.format(name)
                ret['changes']['Description'] = 'Will be updated'
                return ret
            __salt__['keystone.tenant_update'](name=name,
                                               description=description,
                                               enabled=enabled,
                                               profile=profile,
                                               **connection_args)
            ret['comment'] = 'Tenant / project "{0}" has been updated'.format(name)
            ret['changes']['Description'] = 'Updated'
        if tenant[name].get('enabled', None) != enabled:
            if __opts__.get('test'):
                ret['result'] = None
                ret['comment'] = 'Tenant / project "{0}" will be updated'.format(name)
                ret['changes']['Enabled'] = 'Will be {0}'.format(enabled)
                return ret
            __salt__['keystone.tenant_update'](name=name,
                                               description=description,
                                               enabled=enabled,
                                               profile=profile,
                                               **connection_args)
            ret['comment'] = 'Tenant / project "{0}" has been updated'.format(name)
            ret['changes']['Enabled'] = 'Now {0}'.format(enabled)
    else:
        # Tenant does not exist yet.
        if __opts__.get('test'):
            ret['result'] = None
            ret['comment'] = 'Tenant / project "{0}" will be added'.format(name)
            ret['changes']['Tenant'] = 'Will be created'
            return ret
        # Create tenant
        if _OS_IDENTITY_API_VERSION > 2:
            # v3 uses projects instead of tenants.
            created = __salt__['keystone.project_create'](name=name,
                                                          domain='default',
                                                          description=description,
                                                          enabled=enabled,
                                                          profile=profile,
                                                          **connection_args)
        else:
            created = __salt__['keystone.tenant_create'](name=name,
                                                         description=description,
                                                         enabled=enabled,
                                                         profile=profile,
                                                         **connection_args)
        ret['changes']['Tenant'] = 'Created' if created is True else 'Failed'
        ret['result'] = created
        ret['comment'] = 'Tenant / project "{0}" has been added'.format(name)
    return ret
def type(self, name: str):
    """Return the first complete definition of type 'name', or None if
    no matching typedef exists in the body."""
    for entry in self.body:
        # Skip entries that don't carry C-type information at all.
        if not hasattr(entry, '_ctype'):
            continue
        ctype = entry._ctype
        if ctype._storage == Storages.TYPEDEF and entry._name == name:
            return entry