idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
244,200 | def mean_by_window ( self , indices , window ) : masks = self . _makewindows ( indices , window ) newindex = arange ( 0 , len ( masks [ 0 ] ) ) return self . map ( lambda x : mean ( [ x [ m ] for m in masks ] , axis = 0 ) , index = newindex ) | Average series across multiple windows specified by their centers . | 73 | 10 |
244,201 | def subsample ( self , sample_factor = 2 ) : if sample_factor < 0 : raise Exception ( 'Factor for subsampling must be postive, got %g' % sample_factor ) s = slice ( 0 , len ( self . index ) , sample_factor ) newindex = self . index [ s ] return self . map ( lambda v : v [ s ] , index = newindex ) | Subsample series by an integer factor . | 87 | 8 |
244,202 | def downsample ( self , sample_factor = 2 ) : if sample_factor < 0 : raise Exception ( 'Factor for subsampling must be postive, got %g' % sample_factor ) newlength = floor ( len ( self . index ) / sample_factor ) func = lambda v : v [ 0 : int ( newlength * sample_factor ) ] . reshape ( - 1 , sample_factor ) . mean ( axis = 1 ) newindex = arange ( newlength ) return self . map ( func , index = newindex ) | Downsample series by an integer factor by averaging . | 117 | 10 |
244,203 | def fourier ( self , freq = None ) : def get ( y , freq ) : y = y - mean ( y ) nframes = len ( y ) ft = fft . fft ( y ) ft = ft [ 0 : int ( fix ( nframes / 2 ) ) ] ampFt = 2 * abs ( ft ) / nframes amp = ampFt [ freq ] ampSum = sqrt ( sum ( ampFt ** 2 ) ) co = amp / ampSum ph = - ( pi / 2 ) - angle ( ft [ freq ] ) if ph < 0 : ph += pi * 2 return array ( [ co , ph ] ) if freq >= int ( fix ( size ( self . index ) / 2 ) ) : raise Exception ( 'Requested frequency, %g, is too high, ' 'must be less than half the series duration' % freq ) index = [ 'coherence' , 'phase' ] return self . map ( lambda x : get ( x , freq ) , index = index ) | Compute statistics of a Fourier decomposition on series data . | 223 | 13 |
244,204 | def convolve ( self , signal , mode = 'full' ) : from numpy import convolve s = asarray ( signal ) n = size ( self . index ) m = size ( s ) # use expected lengths to make a new index if mode == 'same' : newmax = max ( n , m ) elif mode == 'valid' : newmax = max ( m , n ) - min ( m , n ) + 1 else : newmax = n + m - 1 newindex = arange ( 0 , newmax ) return self . map ( lambda x : convolve ( x , signal , mode ) , index = newindex ) | Convolve series data against another signal . | 137 | 9 |
244,205 | def crosscorr ( self , signal , lag = 0 ) : from scipy . linalg import norm s = asarray ( signal ) s = s - mean ( s ) s = s / norm ( s ) if size ( s ) != size ( self . index ) : raise Exception ( 'Size of signal to cross correlate with, %g, ' 'does not match size of series' % size ( s ) ) # created a matrix with lagged signals if lag is not 0 : shifts = range ( - lag , lag + 1 ) d = len ( s ) m = len ( shifts ) sshifted = zeros ( ( m , d ) ) for i in range ( 0 , len ( shifts ) ) : tmp = roll ( s , shifts [ i ] ) if shifts [ i ] < 0 : tmp [ ( d + shifts [ i ] ) : ] = 0 if shifts [ i ] > 0 : tmp [ : shifts [ i ] ] = 0 sshifted [ i , : ] = tmp s = sshifted else : shifts = [ 0 ] def get ( y , s ) : y = y - mean ( y ) n = norm ( y ) if n == 0 : b = zeros ( ( s . shape [ 0 ] , ) ) else : y /= n b = dot ( s , y ) return b return self . map ( lambda x : get ( x , s ) , index = shifts ) | Cross correlate series data against another signal . | 300 | 8 |
244,206 | def detrend ( self , method = 'linear' , order = 5 ) : check_options ( method , [ 'linear' , 'nonlinear' ] ) if method == 'linear' : order = 1 def func ( y ) : x = arange ( len ( y ) ) p = polyfit ( x , y , order ) p [ - 1 ] = 0 yy = polyval ( p , x ) return y - yy return self . map ( func ) | Detrend series data with linear or nonlinear detrending . | 100 | 13 |
244,207 | def normalize ( self , method = 'percentile' , window = None , perc = 20 , offset = 0.1 ) : check_options ( method , [ 'mean' , 'percentile' , 'window' , 'window-exact' ] ) from warnings import warn if not ( method == 'window' or method == 'window-exact' ) and window is not None : warn ( 'Setting window without using method "window" has no effect' ) if method == 'mean' : baseFunc = mean if method == 'percentile' : baseFunc = lambda x : percentile ( x , perc ) if method == 'window' : from scipy . ndimage . filters import percentile_filter baseFunc = lambda x : percentile_filter ( x . astype ( float64 ) , perc , window , mode = 'nearest' ) if method == 'window-exact' : if window & 0x1 : left , right = ( ceil ( window / 2 ) , ceil ( window / 2 ) + 1 ) else : left , right = ( window / 2 , window / 2 ) n = len ( self . index ) baseFunc = lambda x : asarray ( [ percentile ( x [ max ( ix - left , 0 ) : min ( ix + right + 1 , n ) ] , perc ) for ix in arange ( 0 , n ) ] ) def get ( y ) : b = baseFunc ( y ) return ( y - b ) / ( b + offset ) return self . map ( get ) | Normalize by subtracting and dividing by a baseline . | 338 | 11 |
244,208 | def toimages ( self , chunk_size = 'auto' ) : from thunder . images . images import Images if chunk_size is 'auto' : chunk_size = str ( max ( [ int ( 1e5 / prod ( self . baseshape ) ) , 1 ] ) ) n = len ( self . shape ) - 1 if self . mode == 'spark' : return Images ( self . values . swap ( tuple ( range ( n ) ) , ( 0 , ) , size = chunk_size ) ) if self . mode == 'local' : return Images ( self . values . transpose ( ( n , ) + tuple ( range ( 0 , n ) ) ) ) | Converts to images data . | 145 | 6 |
244,209 | def tobinary ( self , path , prefix = 'series' , overwrite = False , credentials = None ) : from thunder . series . writers import tobinary tobinary ( self , path , prefix = prefix , overwrite = overwrite , credentials = credentials ) | Write data to binary files . | 51 | 6 |
244,210 | def addextension ( path , ext = None ) : if ext : if '*' in path : return path elif os . path . splitext ( path ) [ 1 ] : return path else : if not ext . startswith ( '.' ) : ext = '.' + ext if not path . endswith ( ext ) : if not path . endswith ( os . path . sep ) : path += os . path . sep return path + '*' + ext else : return path else : return path | Helper function for handling of paths given separately passed file extensions . | 111 | 12 |
244,211 | def select ( files , start , stop ) : if start or stop : if start is None : start = 0 if stop is None : stop = len ( files ) files = files [ start : stop ] return files | Helper function for handling start and stop indices | 44 | 8 |
244,212 | def listrecursive ( path , ext = None ) : filenames = set ( ) for root , dirs , files in os . walk ( path ) : if ext : if ext == 'tif' or ext == 'tiff' : tmp = fnmatch . filter ( files , '*.' + 'tiff' ) files = tmp + fnmatch . filter ( files , '*.' + 'tif' ) else : files = fnmatch . filter ( files , '*.' + ext ) for filename in files : filenames . add ( os . path . join ( root , filename ) ) filenames = list ( filenames ) filenames . sort ( ) return sorted ( filenames ) | List files recurisvely | 151 | 7 |
244,213 | def listflat ( path , ext = None ) : if os . path . isdir ( path ) : if ext : if ext == 'tif' or ext == 'tiff' : files = glob . glob ( os . path . join ( path , '*.tif' ) ) files = files + glob . glob ( os . path . join ( path , '*.tiff' ) ) else : files = glob . glob ( os . path . join ( path , '*.' + ext ) ) else : files = [ os . path . join ( path , fname ) for fname in os . listdir ( path ) ] else : files = glob . glob ( path ) # filter out directories files = [ fpath for fpath in files if not isinstance ( fpath , list ) and not os . path . isdir ( fpath ) ] return sorted ( files ) | List files without recursion | 185 | 5 |
244,214 | def normalize_scheme ( path , ext ) : path = addextension ( path , ext ) parsed = urlparse ( path ) if parsed . scheme : # this appears to already be a fully-qualified URI return path else : # this looks like a local path spec import os dirname , filename = os . path . split ( path ) if not os . path . isabs ( dirname ) : # need to make relative local paths absolute dirname = os . path . abspath ( dirname ) path = os . path . join ( dirname , filename ) return "file://" + path | Normalize scheme for paths related to hdfs | 126 | 10 |
244,215 | def list ( path , ext = None , start = None , stop = None , recursive = False ) : files = listflat ( path , ext ) if not recursive else listrecursive ( path , ext ) if len ( files ) < 1 : raise FileNotFoundError ( 'Cannot find files of type "%s" in %s' % ( ext if ext else '*' , path ) ) files = select ( files , start , stop ) return files | Get sorted list of file paths matching path and extension | 96 | 10 |
244,216 | def read ( self , path , ext = None , start = None , stop = None , recursive = False , npartitions = None ) : path = uri_to_path ( path ) files = self . list ( path , ext = ext , start = start , stop = stop , recursive = recursive ) nfiles = len ( files ) self . nfiles = nfiles if spark and isinstance ( self . engine , spark ) : npartitions = min ( npartitions , nfiles ) if npartitions else nfiles rdd = self . engine . parallelize ( enumerate ( files ) , npartitions ) return rdd . map ( lambda kv : ( kv [ 0 ] , readlocal ( kv [ 1 ] ) , kv [ 1 ] ) ) else : return [ ( k , readlocal ( v ) , v ) for k , v in enumerate ( files ) ] | Sets up Spark RDD across files specified by dataPath on local filesystem . | 192 | 16 |
244,217 | def list ( path , filename = None , start = None , stop = None , recursive = False , directories = False ) : path = uri_to_path ( path ) if not filename and recursive : return listrecursive ( path ) if filename : if os . path . isdir ( path ) : path = os . path . join ( path , filename ) else : path = os . path . join ( os . path . dirname ( path ) , filename ) else : if os . path . isdir ( path ) and not directories : path = os . path . join ( path , "*" ) files = glob . glob ( path ) if not directories : files = [ fpath for fpath in files if not os . path . isdir ( fpath ) ] files . sort ( ) files = select ( files , start , stop ) return files | List files specified by dataPath . | 180 | 7 |
244,218 | def parse_query ( query , delim = '/' ) : key = '' prefix = '' postfix = '' parsed = urlparse ( query ) query = parsed . path . lstrip ( delim ) bucket = parsed . netloc if not parsed . scheme . lower ( ) in ( '' , "gs" , "s3" , "s3n" ) : raise ValueError ( "Query scheme must be one of '', 'gs', 's3', or 's3n'; " "got: '%s'" % parsed . scheme ) storage = parsed . scheme . lower ( ) if not bucket . strip ( ) and query : toks = query . split ( delim , 1 ) bucket = toks [ 0 ] if len ( toks ) == 2 : key = toks [ 1 ] else : key = '' if not bucket . strip ( ) : raise ValueError ( "Could not parse bucket name from query string '%s'" % query ) tokens = query . split ( "*" ) n = len ( tokens ) if n == 0 : pass elif n == 1 : key = tokens [ 0 ] elif n == 2 : index = tokens [ 0 ] . rfind ( delim ) if index >= 0 : key = tokens [ 0 ] [ : ( index + 1 ) ] prefix = tokens [ 0 ] [ ( index + 1 ) : ] if len ( tokens [ 0 ] ) > ( index + 1 ) else '' else : prefix = tokens [ 0 ] postfix = tokens [ 1 ] else : raise ValueError ( "Only one wildcard ('*') allowed in query string, got: '%s'" % query ) return storage , bucket , key , prefix , postfix | Parse a boto query | 356 | 6 |
244,219 | def retrieve_keys ( bucket , key , prefix = '' , postfix = '' , delim = '/' , directories = False , recursive = False ) : if key and prefix : assert key . endswith ( delim ) key += prefix # check whether key is a directory if not key . endswith ( delim ) and key : # check for matching prefix if BotoClient . check_prefix ( bucket , key + delim , delim = delim ) : # found a directory key += delim listdelim = delim if not recursive else None results = bucket . list ( prefix = key , delimiter = listdelim ) if postfix : func = lambda k_ : BotoClient . filter_predicate ( k_ , postfix , inclusive = True ) return filter ( func , results ) elif not directories : func = lambda k_ : BotoClient . filter_predicate ( k_ , delim , inclusive = False ) return filter ( func , results ) else : return results | Retrieve keys from a bucket | 204 | 6 |
244,220 | def getfiles ( self , path , ext = None , start = None , stop = None , recursive = False ) : from . utils import connection_with_anon , connection_with_gs parse = BotoClient . parse_query ( path ) scheme = parse [ 0 ] bucket_name = parse [ 1 ] if scheme == 's3' or scheme == 's3n' : conn = connection_with_anon ( self . credentials ) bucket = conn . get_bucket ( parse [ 1 ] ) elif scheme == 'gs' : conn = connection_with_gs ( bucket_name ) bucket = conn . get_bucket ( ) else : raise NotImplementedError ( "No file reader implementation for URL scheme " + scheme ) keys = BotoClient . retrieve_keys ( bucket , parse [ 2 ] , prefix = parse [ 3 ] , postfix = parse [ 4 ] , recursive = recursive ) keylist = [ key . name for key in keys ] if ext : if ext == 'tif' or ext == 'tiff' : keylist = [ keyname for keyname in keylist if keyname . endswith ( 'tif' ) ] keylist . append ( [ keyname for keyname in keylist if keyname . endswith ( 'tiff' ) ] ) else : keylist = [ keyname for keyname in keylist if keyname . endswith ( ext ) ] keylist . sort ( ) keylist = select ( keylist , start , stop ) return scheme , bucket . name , keylist | Get scheme bucket and keys for a set of files | 331 | 10 |
244,221 | def list ( self , dataPath , ext = None , start = None , stop = None , recursive = False ) : scheme , bucket_name , keylist = self . getfiles ( dataPath , ext = ext , start = start , stop = stop , recursive = recursive ) return [ "%s:///%s/%s" % ( scheme , bucket_name , key ) for key in keylist ] | List files from remote storage | 86 | 5 |
244,222 | def read ( self , path , ext = None , start = None , stop = None , recursive = False , npartitions = None ) : from . utils import connection_with_anon , connection_with_gs path = addextension ( path , ext ) scheme , bucket_name , keylist = self . getfiles ( path , start = start , stop = stop , recursive = recursive ) if not keylist : raise FileNotFoundError ( "No objects found for '%s'" % path ) credentials = self . credentials self . nfiles = len ( keylist ) if spark and isinstance ( self . engine , spark ) : def getsplit ( kvIter ) : if scheme == 's3' or scheme == 's3n' : conn = connection_with_anon ( credentials ) bucket = conn . get_bucket ( bucket_name ) elif scheme == 'gs' : conn = boto . storage_uri ( bucket_name , 'gs' ) bucket = conn . get_bucket ( ) else : raise NotImplementedError ( "No file reader implementation for URL scheme " + scheme ) for kv in kvIter : idx , keyname = kv key = bucket . get_key ( keyname ) buf = key . get_contents_as_string ( ) yield idx , buf , keyname npartitions = min ( npartitions , self . nfiles ) if npartitions else self . nfiles rdd = self . engine . parallelize ( enumerate ( keylist ) , npartitions ) return rdd . mapPartitions ( getsplit ) else : if scheme == 's3' or scheme == 's3n' : conn = connection_with_anon ( credentials ) bucket = conn . get_bucket ( bucket_name ) elif scheme == 'gs' : conn = connection_with_gs ( bucket_name ) bucket = conn . get_bucket ( ) else : raise NotImplementedError ( "No file reader implementation for URL scheme " + scheme ) def getsplit ( kv ) : idx , keyName = kv key = bucket . get_key ( keyName ) buf = key . get_contents_as_string ( ) return idx , buf , keyName return [ getsplit ( kv ) for kv in enumerate ( keylist ) ] | Sets up Spark RDD across S3 or GS objects specified by dataPath . | 508 | 17 |
244,223 | def getkeys ( self , path , filename = None , directories = False , recursive = False ) : from . utils import connection_with_anon , connection_with_gs parse = BotoClient . parse_query ( path ) scheme = parse [ 0 ] bucket_name = parse [ 1 ] key = parse [ 2 ] if scheme == 's3' or scheme == 's3n' : conn = connection_with_anon ( self . credentials ) bucket = conn . get_bucket ( bucket_name ) elif scheme == 'gs' : conn = connection_with_gs ( bucket_name ) bucket = conn . get_bucket ( ) else : raise NotImplementedError ( "No file reader implementation for URL scheme " + scheme ) if filename : if not key . endswith ( "/" ) : if self . check_prefix ( bucket , key + "/" ) : key += "/" else : index = key . rfind ( "/" ) if index >= 0 : key = key [ : ( index + 1 ) ] else : key = "" key += filename keylist = BotoClient . retrieve_keys ( bucket , key , prefix = parse [ 3 ] , postfix = parse [ 4 ] , directories = directories , recursive = recursive ) return scheme , keylist | Get matching keys for a path | 274 | 6 |
244,224 | def getkey ( self , path , filename = None ) : scheme , keys = self . getkeys ( path , filename = filename ) try : key = next ( keys ) except StopIteration : raise FileNotFoundError ( "Could not find object for: '%s'" % path ) # we expect to only have a single key returned nextKey = None try : nextKey = next ( keys ) except StopIteration : pass if nextKey : raise ValueError ( "Found multiple keys for: '%s'" % path ) return scheme , key | Get single matching key for a path | 115 | 7 |
244,225 | def list ( self , path , filename = None , start = None , stop = None , recursive = False , directories = False ) : storageScheme , keys = self . getkeys ( path , filename = filename , directories = directories , recursive = recursive ) keys = [ storageScheme + ":///" + key . bucket . name + "/" + key . name for key in keys ] keys . sort ( ) keys = select ( keys , start , stop ) return keys | List objects specified by path . | 98 | 6 |
244,226 | def read ( self , path , filename = None , offset = None , size = - 1 ) : storageScheme , key = self . getkey ( path , filename = filename ) if offset or ( size > - 1 ) : if not offset : offset = 0 if size > - 1 : sizeStr = offset + size - 1 # range header is inclusive else : sizeStr = "" headers = { "Range" : "bytes=%d-%s" % ( offset , sizeStr ) } return key . get_contents_as_string ( headers = headers ) else : return key . get_contents_as_string ( ) | Read a file specified by path . | 135 | 7 |
244,227 | def open ( self , path , filename = None ) : scheme , key = self . getkey ( path , filename = filename ) return BotoReadFileHandle ( scheme , key ) | Open a file specified by path . | 38 | 7 |
244,228 | def check_path ( path , credentials = None ) : from thunder . readers import get_file_reader reader = get_file_reader ( path ) ( credentials = credentials ) existing = reader . list ( path , directories = True ) if existing : raise ValueError ( 'Path %s appears to already exist. Specify a new directory, ' 'or call with overwrite=True to overwrite.' % path ) | Check that specified output path does not already exist | 85 | 9 |
244,229 | def connection_with_anon ( credentials , anon = True ) : from boto . s3 . connection import S3Connection from boto . exception import NoAuthHandlerFound try : conn = S3Connection ( aws_access_key_id = credentials [ 'access' ] , aws_secret_access_key = credentials [ 'secret' ] ) return conn except NoAuthHandlerFound : if anon : conn = S3Connection ( anon = True ) return conn else : raise | Connect to S3 with automatic handling for anonymous access . | 105 | 11 |
244,230 | def activate ( self , path , isdirectory ) : from . utils import connection_with_anon , connection_with_gs parsed = BotoClient . parse_query ( path ) scheme = parsed [ 0 ] bucket_name = parsed [ 1 ] key = parsed [ 2 ] if scheme == 's3' or scheme == 's3n' : conn = connection_with_anon ( self . credentials ) bucket = conn . get_bucket ( bucket_name ) elif scheme == 'gs' : conn = connection_with_gs ( bucket_name ) bucket = conn . get_bucket ( ) else : raise NotImplementedError ( "No file reader implementation for URL scheme " + scheme ) if isdirectory and ( not key . endswith ( "/" ) ) : key += "/" self . _scheme = scheme self . _conn = conn self . _key = key self . _bucket = bucket self . _active = True | Set up a boto connection . | 204 | 7 |
244,231 | def topng ( images , path , prefix = "image" , overwrite = False , credentials = None ) : value_shape = images . value_shape if not len ( value_shape ) in [ 2 , 3 ] : raise ValueError ( "Only 2D or 3D images can be exported to png, " "images are %d-dimensional." % len ( value_shape ) ) from scipy . misc import imsave from io import BytesIO from thunder . writers import get_parallel_writer def tobuffer ( kv ) : key , img = kv fname = prefix + "-" + "%05d.png" % int ( key ) bytebuf = BytesIO ( ) imsave ( bytebuf , img , format = 'PNG' ) return fname , bytebuf . getvalue ( ) writer = get_parallel_writer ( path ) ( path , overwrite = overwrite , credentials = credentials ) images . foreach ( lambda x : writer . write ( tobuffer ( x ) ) ) | Write out PNG files for 2d image data . | 217 | 10 |
244,232 | def tobinary ( images , path , prefix = "image" , overwrite = False , credentials = None ) : from thunder . writers import get_parallel_writer def tobuffer ( kv ) : key , img = kv fname = prefix + "-" + "%05d.bin" % int ( key ) return fname , img . copy ( ) writer = get_parallel_writer ( path ) ( path , overwrite = overwrite , credentials = credentials ) images . foreach ( lambda x : writer . write ( tobuffer ( x ) ) ) config ( path , list ( images . value_shape ) , images . dtype , overwrite = overwrite ) | Write out images as binary files . | 139 | 7 |
244,233 | def yearInfo2yearDay ( yearInfo ) : yearInfo = int ( yearInfo ) res = 29 * 12 leap = False if yearInfo % 16 != 0 : leap = True res += 29 yearInfo //= 16 for i in range ( 12 + leap ) : if yearInfo % 2 == 1 : res += 1 yearInfo //= 2 return res | calculate the days in a lunar year from the lunar year s info | 74 | 15 |
244,234 | def cleanupFilename ( self , name ) : context = self . context id = '' name = name . replace ( '\\' , '/' ) # Fixup Windows filenames name = name . split ( '/' ) [ - 1 ] # Throw away any path part. for c in name : if c . isalnum ( ) or c in '._' : id += c # Raise condition here, but not a lot we can do about that if context . check_id ( id ) is None and getattr ( context , id , None ) is None : return id # Now make the id unique count = 1 while 1 : if count == 1 : sc = '' else : sc = str ( count ) newid = "copy{0:s}_of_{1:s}" . format ( sc , id ) if context . check_id ( newid ) is None and getattr ( context , newid , None ) is None : return newid count += 1 | Generate a unique id which doesn t match the system generated ids | 204 | 14 |
244,235 | def parse_data_slots ( value ) : value = unquote ( value ) if '>' in value : wrappers , children = value . split ( '>' , 1 ) else : wrappers = value children = '' if '*' in children : prepends , appends = children . split ( '*' , 1 ) else : prepends = children appends = '' wrappers = list ( filter ( bool , list ( map ( str . strip , wrappers . split ( ) ) ) ) ) prepends = list ( filter ( bool , list ( map ( str . strip , prepends . split ( ) ) ) ) ) appends = list ( filter ( bool , list ( map ( str . strip , appends . split ( ) ) ) ) ) return wrappers , prepends , appends | Parse data - slots value into slots used to wrap node prepend to node or append to node . | 172 | 21 |
244,236 | def cook_layout ( layout , ajax ) : # Fix XHTML layouts with CR[+LF] line endings layout = re . sub ( '\r' , '\n' , re . sub ( '\r\n' , '\n' , layout ) ) # Parse layout if isinstance ( layout , six . text_type ) : result = getHTMLSerializer ( [ layout . encode ( 'utf-8' ) ] , encoding = 'utf-8' ) else : result = getHTMLSerializer ( [ layout ] , encoding = 'utf-8' ) # Fix XHTML layouts with inline js (etree.tostring breaks all <![CDATA[) if '<![CDATA[' in layout : result . serializer = html . tostring # Wrap all panels with a metal:fill-slot -tag: all_slots = [ ] for layoutPanelNode in slotsXPath ( result . tree ) : data_slots = layoutPanelNode . attrib [ 'data-slots' ] all_slots += wrap_append_prepend_slots ( layoutPanelNode , data_slots ) del layoutPanelNode . attrib [ 'data-slots' ] # When no slots are explicitly defined, try to inject the very default # slots if len ( all_slots ) == 0 : for node in result . tree . xpath ( '//*[@data-panel="content"]' ) : wrap_append_prepend_slots ( node , 'content > body header main * content-core' ) # Append implicit slots head = result . tree . getroot ( ) . find ( 'head' ) if not ajax and head is not None : for name in [ 'top_slot' , 'head_slot' , 'style_slot' , 'javascript_head_slot' ] : slot = etree . Element ( '{{{0:s}}}{1:s}' . format ( NSMAP [ 'metal' ] , name ) , nsmap = NSMAP ) slot . attrib [ 'define-slot' ] = name head . append ( slot ) template = TEMPLATE metal = 'xmlns:metal="http://namespaces.zope.org/metal"' return ( template % '' . join ( result ) ) . replace ( metal , '' ) | Return main_template compatible layout | 507 | 6 |
244,237 | def existing ( self ) : catalog = api . portal . get_tool ( 'portal_catalog' ) results = [ ] layout_path = self . _get_layout_path ( self . request . form . get ( 'layout' , '' ) ) for brain in catalog ( layout = layout_path ) : results . append ( { 'title' : brain . Title , 'url' : brain . getURL ( ) } ) return json . dumps ( { 'total' : len ( results ) , 'data' : results } ) | find existing content assigned to this layout | 115 | 7 |
244,238 | def load_reader_options ( ) : options = os . environ [ 'PANDOC_READER_OPTIONS' ] options = json . loads ( options , object_pairs_hook = OrderedDict ) return options | Retrieve Pandoc Reader options from the environment | 51 | 9 |
244,239 | def yaml_filter ( element , doc , tag = None , function = None , tags = None , strict_yaml = False ) : # Allow for either tag+function or a dict {tag: function} assert ( tag is None ) + ( tags is None ) == 1 # XOR if tags is None : tags = { tag : function } if type ( element ) == CodeBlock : for tag in tags : if tag in element . classes : function = tags [ tag ] if not strict_yaml : # Split YAML and data parts (separated by ... or ---) raw = re . split ( "^([.]{3,}|[-]{3,})$" , element . text , 1 , re . MULTILINE ) data = raw [ 2 ] if len ( raw ) > 2 else '' data = data . lstrip ( '\n' ) raw = raw [ 0 ] try : options = yaml . safe_load ( raw ) except yaml . scanner . ScannerError : debug ( "panflute: malformed YAML block" ) return if options is None : options = { } else : options = { } data = [ ] raw = re . split ( "^([.]{3,}|[-]{3,})$" , element . text , 0 , re . MULTILINE ) rawmode = True for chunk in raw : chunk = chunk . strip ( '\n' ) if not chunk : continue if rawmode : if chunk . startswith ( '---' ) : rawmode = False else : data . append ( chunk ) else : if chunk . startswith ( '---' ) or chunk . startswith ( '...' ) : rawmode = True else : try : options . update ( yaml . safe_load ( chunk ) ) except yaml . scanner . ScannerError : debug ( "panflute: malformed YAML block" ) return data = '\n' . join ( data ) return function ( options = options , data = data , element = element , doc = doc ) | Convenience function for parsing code blocks with YAML options | 447 | 13 |
244,240 | def _set_content ( self , value , oktypes ) : if value is None : value = [ ] self . _content = ListContainer ( * value , oktypes = oktypes , parent = self ) | Similar to content . setter but when there are no existing oktypes | 44 | 14 |
244,241 | def offset ( self , n ) : idx = self . index if idx is not None : sibling = idx + n container = self . container if 0 <= sibling < len ( container ) : return container [ sibling ] | Return a sibling element offset by n | 47 | 7 |
244,242 | def search ( self , term : str , case_sensitive : bool = False ) -> 'PrettyDir' : if case_sensitive : return PrettyDir ( self . obj , [ pattr for pattr in self . pattrs if term in pattr . name ] ) else : term = term . lower ( ) return PrettyDir ( self . obj , [ pattr for pattr in self . pattrs if term in pattr . name . lower ( ) ] ) | Searches for names that match some pattern . | 99 | 10 |
244,243 | def properties ( self ) -> 'PrettyDir' : return PrettyDir ( self . obj , [ pattr for pattr in self . pattrs if category_match ( pattr . category , AttrCategory . PROPERTY ) ] , ) | Returns all properties of the inspected object . | 52 | 8 |
244,244 | def methods ( self ) -> 'PrettyDir' : return PrettyDir ( self . obj , [ pattr for pattr in self . pattrs if category_match ( pattr . category , AttrCategory . FUNCTION ) ] , ) | Returns all methods of the inspected object . | 52 | 8 |
244,245 | def public ( self ) -> 'PrettyDir' : return PrettyDir ( self . obj , [ pattr for pattr in self . pattrs if not pattr . name . startswith ( '_' ) ] ) | Returns public attributes of the inspected object . | 48 | 8 |
244,246 | def own ( self ) -> 'PrettyDir' : return PrettyDir ( self . obj , [ pattr for pattr in self . pattrs if pattr . name in type ( self . obj ) . __dict__ or pattr . name in self . obj . __dict__ ] , ) | Returns attributes that are not inhterited from parent classes . | 63 | 12 |
244,247 | def get_oneline_doc ( self ) -> str : attr = self . attr_obj if self . display_group == AttrCategory . DESCRIPTOR : if isinstance ( attr , property ) : doc_list = [ '@property with getter' ] if attr . fset : doc_list . append ( SETTER ) if attr . fdel : doc_list . append ( DELETER ) else : doc_list = [ 'class %s' % attr . __class__ . __name__ ] if hasattr ( attr , '__get__' ) : doc_list . append ( GETTER ) if hasattr ( attr , '__set__' ) : doc_list . append ( SETTER ) if hasattr ( attr , '__delete__' ) : doc_list . append ( DELETER ) doc_list [ 0 ] = ' ' . join ( [ doc_list [ 0 ] , 'with' , doc_list . pop ( 1 ) ] ) if attr . __doc__ is not None : doc_list . append ( inspect . getdoc ( attr ) . split ( '\n' , 1 ) [ 0 ] ) return ', ' . join ( doc_list ) if hasattr ( attr , '__doc__' ) : doc = inspect . getdoc ( attr ) return doc . split ( '\n' , 1 ) [ 0 ] if doc else '' # default doc is None return '' | Doc doesn t necessarily mean doctring . It could be anything that should be put after the attr s name as an explanation . | 320 | 26 |
244,248 | def format_pattrs ( pattrs : List [ 'api.PrettyAttribute' ] ) -> str : output = [ ] pattrs . sort ( key = lambda x : ( _FORMATTER [ x . display_group ] . display_index , x . display_group , x . name , ) ) for display_group , grouped_pattrs in groupby ( pattrs , lambda x : x . display_group ) : output . append ( _FORMATTER [ display_group ] . formatter ( display_group , grouped_pattrs ) ) return '\n' . join ( output ) | Generates repr string given a list of pattrs . | 134 | 12 |
244,249 | def get_attr_from_dict ( inspected_obj : Any , attr_name : str ) -> Any : if inspect . isclass ( inspected_obj ) : obj_list = [ inspected_obj ] + list ( inspected_obj . __mro__ ) else : obj_list = [ inspected_obj ] + list ( inspected_obj . __class__ . __mro__ ) for obj in obj_list : if hasattr ( obj , '__dict__' ) and attr_name in obj . __dict__ : return obj . __dict__ [ attr_name ] # This happens when user-defined __dir__ returns something that's not # in any __dict__. See test_override_dir. # Returns attr_name so that it's treated as a normal property. return attr_name | Ensures we get descriptor object instead of its return value . | 178 | 13 |
244,250 | def attr_category_postprocess ( get_attr_category_func ) : @ functools . wraps ( get_attr_category_func ) def wrapped ( name : str , attr : Any , obj : Any ) -> Tuple [ AttrCategory , ... ] : category = get_attr_category_func ( name , attr , obj ) category = list ( category ) if isinstance ( category , tuple ) else [ category ] if is_slotted_attr ( obj , name ) : # Refactoring all tuples to lists is not easy # and pleasant. Maybe do this in future if necessary category . append ( AttrCategory . SLOT ) return tuple ( category ) return wrapped | Unifies attr_category to a tuple add AttrCategory . SLOT if needed . | 149 | 19 |
244,251 | def get_peak_mem ( ) : import resource rusage_denom = 1024. if sys . platform == 'darwin' : # ... it seems that in OSX the output is different units ... rusage_denom = rusage_denom * rusage_denom mem = resource . getrusage ( resource . RUSAGE_SELF ) . ru_maxrss / rusage_denom return mem | this returns peak memory use since process starts till the moment its called | 90 | 13 |
244,252 | def dfs_do_func_on_graph ( node , func , * args , * * kwargs ) : for _node in node . tree_iterator ( ) : func ( _node , * args , * * kwargs ) | invoke func on each node of the dr graph | 52 | 9 |
244,253 | def sparse_is_desireable ( lhs , rhs ) : return False if len ( lhs . shape ) == 1 : return False else : lhs_rows , lhs_cols = lhs . shape if len ( rhs . shape ) == 1 : rhs_rows = 1 rhs_cols = rhs . size else : rhs_rows , rhs_cols = rhs . shape result_size = lhs_rows * rhs_cols if sp . issparse ( lhs ) and sp . issparse ( rhs ) : return True elif sp . issparse ( lhs ) : lhs_zero_rows = lhs_rows - np . unique ( lhs . nonzero ( ) [ 0 ] ) . size rhs_zero_cols = np . all ( rhs == 0 , axis = 0 ) . sum ( ) elif sp . issparse ( rhs ) : lhs_zero_rows = np . all ( lhs == 0 , axis = 1 ) . sum ( ) rhs_zero_cols = rhs_cols - np . unique ( rhs . nonzero ( ) [ 1 ] ) . size else : lhs_zero_rows = np . all ( lhs == 0 , axis = 1 ) . sum ( ) rhs_zero_cols = np . all ( rhs == 0 , axis = 0 ) . sum ( ) num_zeros = lhs_zero_rows * rhs_cols + rhs_zero_cols * lhs_rows - lhs_zero_rows * rhs_zero_cols # A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst cases cutoff. return ( float ( num_zeros ) / float ( size ) ) >= 0.5 | Examines a pair of matrices and determines if the result of their multiplication should be sparse or not . | 463 | 21 |
244,254 | def convert_inputs_to_sparse_if_necessary ( lhs , rhs ) : if not sp . issparse ( lhs ) or not sp . issparse ( rhs ) : if sparse_is_desireable ( lhs , rhs ) : if not sp . issparse ( lhs ) : lhs = sp . csc_matrix ( lhs ) #print "converting lhs into sparse matrix" if not sp . issparse ( rhs ) : rhs = sp . csc_matrix ( rhs ) #print "converting rhs into sparse matrix" return lhs , rhs | This function checks to see if a sparse output is desireable given the inputs and if so casts the inputs to sparse in order to make it so . | 137 | 30 |
244,255 | def dr_wrt ( self , wrt , profiler = None ) : if wrt is self . x : jacs = [ ] for fvi , freevar in enumerate ( self . free_variables ) : tm = timer ( ) if isinstance ( freevar , ch . Select ) : new_jac = self . obj . dr_wrt ( freevar . a , profiler = profiler ) try : new_jac = new_jac [ : , freevar . idxs ] except : # non-csc sparse matrices may not support column-wise indexing new_jac = new_jac . tocsc ( ) [ : , freevar . idxs ] else : new_jac = self . obj . dr_wrt ( freevar , profiler = profiler ) pif ( 'dx wrt {} in {}sec, sparse: {}' . format ( freevar . short_name , tm ( ) , sp . issparse ( new_jac ) ) ) if self . _make_dense and sp . issparse ( new_jac ) : new_jac = new_jac . todense ( ) if self . _make_sparse and not sp . issparse ( new_jac ) : new_jac = sp . csc_matrix ( new_jac ) if new_jac is None : raise Exception ( 'Objective has no derivative wrt free variable {}. ' 'You should likely remove it.' . format ( fvi ) ) jacs . append ( new_jac ) tm = timer ( ) utils . dfs_do_func_on_graph ( self . obj , clear_cache_single ) pif ( 'dfs_do_func_on_graph in {}sec' . format ( tm ( ) ) ) tm = timer ( ) J = hstack ( jacs ) pif ( 'hstack in {}sec' . format ( tm ( ) ) ) return J | Loop over free variables and delete cache for the whole tree after finished each one | 420 | 15 |
244,256 | def J ( self ) : result = self . dr_wrt ( self . x , profiler = self . profiler ) . copy ( ) if self . profiler : self . profiler . harvest ( ) return np . atleast_2d ( result ) if not sp . issparse ( result ) else result | Compute Jacobian . Analyze dr graph first to disable unnecessary caching | 68 | 14 |
244,257 | def sid ( self ) : pnames = list ( self . terms ) + list ( self . dterms ) pnames . sort ( ) return ( self . __class__ , tuple ( [ ( k , id ( self . __dict__ [ k ] ) ) for k in pnames if k in self . __dict__ ] ) ) | Semantic id . | 71 | 4 |
244,258 | def compute_dr_wrt ( self , wrt ) : if wrt is self : # special base case return sp . eye ( self . x . size , self . x . size ) #return np.array([[1]]) return None | Default method for objects that just contain a number or ndarray | 53 | 13 |
244,259 | def get_ubuntu_release_from_sentry ( self , sentry_unit ) : msg = None cmd = 'lsb_release -cs' release , code = sentry_unit . run ( cmd ) if code == 0 : self . log . debug ( '{} lsb_release: {}' . format ( sentry_unit . info [ 'unit_name' ] , release ) ) else : msg = ( '{} `{}` returned {} ' '{}' . format ( sentry_unit . info [ 'unit_name' ] , cmd , release , code ) ) if release not in self . ubuntu_releases : msg = ( "Release ({}) not found in Ubuntu releases " "({})" . format ( release , self . ubuntu_releases ) ) return release , msg | Get Ubuntu release codename from sentry unit . | 177 | 10 |
244,260 | def validate_services ( self , commands ) : self . log . debug ( 'Checking status of system services...' ) # /!\ DEPRECATION WARNING (beisner): # New and existing tests should be rewritten to use # validate_services_by_name() as it is aware of init systems. self . log . warn ( 'DEPRECATION WARNING: use ' 'validate_services_by_name instead of validate_services ' 'due to init system differences.' ) for k , v in six . iteritems ( commands ) : for cmd in v : output , code = k . run ( cmd ) self . log . debug ( '{} `{}` returned ' '{}' . format ( k . info [ 'unit_name' ] , cmd , code ) ) if code != 0 : return "command `{}` returned {}" . format ( cmd , str ( code ) ) return None | Validate that lists of commands succeed on service units . Can be used to verify system services are running on the corresponding service units . | 196 | 26 |
244,261 | def validate_services_by_name ( self , sentry_services ) : self . log . debug ( 'Checking status of system services...' ) # Point at which systemd became a thing systemd_switch = self . ubuntu_releases . index ( 'vivid' ) for sentry_unit , services_list in six . iteritems ( sentry_services ) : # Get lsb_release codename from unit release , ret = self . get_ubuntu_release_from_sentry ( sentry_unit ) if ret : return ret for service_name in services_list : if ( self . ubuntu_releases . index ( release ) >= systemd_switch or service_name in [ 'rabbitmq-server' , 'apache2' , 'memcached' ] ) : # init is systemd (or regular sysv) cmd = 'sudo service {} status' . format ( service_name ) output , code = sentry_unit . run ( cmd ) service_running = code == 0 elif self . ubuntu_releases . index ( release ) < systemd_switch : # init is upstart cmd = 'sudo status {}' . format ( service_name ) output , code = sentry_unit . run ( cmd ) service_running = code == 0 and "start/running" in output self . log . debug ( '{} `{}` returned ' '{}' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code ) ) if not service_running : return u"command `{}` returned {} {}" . format ( cmd , output , str ( code ) ) return None | Validate system service status by service name automatically detecting init system based on Ubuntu release codename . | 357 | 19 |
244,262 | def _get_config ( self , unit , filename ) : file_contents = unit . file_contents ( filename ) # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. # https://bugs.python.org/issue7005 config = configparser . ConfigParser ( allow_no_value = True ) config . readfp ( io . StringIO ( file_contents ) ) return config | Get a ConfigParser object for parsing a unit s config file . | 107 | 13 |
244,263 | def validate_config_data ( self , sentry_unit , config_file , section , expected ) : self . log . debug ( 'Validating config file data ({} in {} on {})' '...' . format ( section , config_file , sentry_unit . info [ 'unit_name' ] ) ) config = self . _get_config ( sentry_unit , config_file ) if section != 'DEFAULT' and not config . has_section ( section ) : return "section [{}] does not exist" . format ( section ) for k in expected . keys ( ) : if not config . has_option ( section , k ) : return "section [{}] is missing option {}" . format ( section , k ) actual = config . get ( section , k ) v = expected [ k ] if ( isinstance ( v , six . string_types ) or isinstance ( v , bool ) or isinstance ( v , six . integer_types ) ) : # handle explicit values if actual != v : return "section [{}] {}:{} != expected {}:{}" . format ( section , k , actual , k , expected [ k ] ) # handle function pointers, such as not_null or valid_ip elif not v ( actual ) : return "section [{}] {}:{} != expected {}:{}" . format ( section , k , actual , k , expected [ k ] ) return None | Validate config file data . | 308 | 6 |
244,264 | def _validate_dict_data ( self , expected , actual ) : self . log . debug ( 'actual: {}' . format ( repr ( actual ) ) ) self . log . debug ( 'expected: {}' . format ( repr ( expected ) ) ) for k , v in six . iteritems ( expected ) : if k in actual : if ( isinstance ( v , six . string_types ) or isinstance ( v , bool ) or isinstance ( v , six . integer_types ) ) : # handle explicit values if v != actual [ k ] : return "{}:{}" . format ( k , actual [ k ] ) # handle function pointers, such as not_null or valid_ip elif not v ( actual [ k ] ) : return "{}:{}" . format ( k , actual [ k ] ) else : return "key '{}' does not exist" . format ( k ) return None | Validate dictionary data . | 196 | 5 |
244,265 | def validate_relation_data ( self , sentry_unit , relation , expected ) : actual = sentry_unit . relation ( relation [ 0 ] , relation [ 1 ] ) return self . _validate_dict_data ( expected , actual ) | Validate actual relation data based on expected relation data . | 53 | 11 |
244,266 | def _validate_list_data ( self , expected , actual ) : for e in expected : if e not in actual : return "expected item {} not found in actual list" . format ( e ) return None | Compare expected list vs actual list data . | 45 | 8 |
244,267 | def service_restarted ( self , sentry_unit , service , filename , pgrep_full = None , sleep_time = 20 ) : # /!\ DEPRECATION WARNING (beisner): # This method is prone to races in that no before-time is known. # Use validate_service_config_changed instead. # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now # used instead of pgrep. pgrep_full is still passed through to ensure # deprecation WARNS. lp1474030 self . log . warn ( 'DEPRECATION WARNING: use ' 'validate_service_config_changed instead of ' 'service_restarted due to known races.' ) time . sleep ( sleep_time ) if ( self . _get_proc_start_time ( sentry_unit , service , pgrep_full ) >= self . _get_file_mtime ( sentry_unit , filename ) ) : return True else : return False | Check if service was restarted . | 216 | 7 |
244,268 | def service_restarted_since ( self , sentry_unit , mtime , service , pgrep_full = None , sleep_time = 20 , retry_count = 30 , retry_sleep_time = 10 ) : # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now # used instead of pgrep. pgrep_full is still passed through to ensure # deprecation WARNS. lp1474030 unit_name = sentry_unit . info [ 'unit_name' ] self . log . debug ( 'Checking that %s service restarted since %s on ' '%s' % ( service , mtime , unit_name ) ) time . sleep ( sleep_time ) proc_start_time = None tries = 0 while tries <= retry_count and not proc_start_time : try : proc_start_time = self . _get_proc_start_time ( sentry_unit , service , pgrep_full ) self . log . debug ( 'Attempt {} to get {} proc start time on {} ' 'OK' . format ( tries , service , unit_name ) ) except IOError as e : # NOTE(beisner) - race avoidance, proc may not exist yet. # https://bugs.launchpad.net/charm-helpers/+bug/1474030 self . log . debug ( 'Attempt {} to get {} proc start time on {} ' 'failed\n{}' . format ( tries , service , unit_name , e ) ) time . sleep ( retry_sleep_time ) tries += 1 if not proc_start_time : self . log . warn ( 'No proc start time found, assuming service did ' 'not start' ) return False if proc_start_time >= mtime : self . log . debug ( 'Proc start time is newer than provided mtime' '(%s >= %s) on %s (OK)' % ( proc_start_time , mtime , unit_name ) ) return True else : self . log . warn ( 'Proc start time (%s) is older than provided mtime ' '(%s) on %s, service did not ' 'restart' % ( proc_start_time , mtime , unit_name ) ) return False | Check if service was been started after a given time . | 493 | 11 |
244,269 | def config_updated_since ( self , sentry_unit , filename , mtime , sleep_time = 20 , retry_count = 30 , retry_sleep_time = 10 ) : unit_name = sentry_unit . info [ 'unit_name' ] self . log . debug ( 'Checking that %s updated since %s on ' '%s' % ( filename , mtime , unit_name ) ) time . sleep ( sleep_time ) file_mtime = None tries = 0 while tries <= retry_count and not file_mtime : try : file_mtime = self . _get_file_mtime ( sentry_unit , filename ) self . log . debug ( 'Attempt {} to get {} file mtime on {} ' 'OK' . format ( tries , filename , unit_name ) ) except IOError as e : # NOTE(beisner) - race avoidance, file may not exist yet. # https://bugs.launchpad.net/charm-helpers/+bug/1474030 self . log . debug ( 'Attempt {} to get {} file mtime on {} ' 'failed\n{}' . format ( tries , filename , unit_name , e ) ) time . sleep ( retry_sleep_time ) tries += 1 if not file_mtime : self . log . warn ( 'Could not determine file mtime, assuming ' 'file does not exist' ) return False if file_mtime >= mtime : self . log . debug ( 'File mtime is newer than provided mtime ' '(%s >= %s) on %s (OK)' % ( file_mtime , mtime , unit_name ) ) return True else : self . log . warn ( 'File mtime is older than provided mtime' '(%s < on %s) on %s' % ( file_mtime , mtime , unit_name ) ) return False | Check if file was modified after a given time . | 413 | 10 |
244,270 | def validate_service_config_changed ( self , sentry_unit , mtime , service , filename , pgrep_full = None , sleep_time = 20 , retry_count = 30 , retry_sleep_time = 10 ) : # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now # used instead of pgrep. pgrep_full is still passed through to ensure # deprecation WARNS. lp1474030 service_restart = self . service_restarted_since ( sentry_unit , mtime , service , pgrep_full = pgrep_full , sleep_time = sleep_time , retry_count = retry_count , retry_sleep_time = retry_sleep_time ) config_update = self . config_updated_since ( sentry_unit , filename , mtime , sleep_time = sleep_time , retry_count = retry_count , retry_sleep_time = retry_sleep_time ) return service_restart and config_update | Check service and file were updated after mtime | 231 | 9 |
244,271 | def file_to_url ( self , file_rel_path ) : _abs_path = os . path . abspath ( file_rel_path ) return urlparse . urlparse ( _abs_path , scheme = 'file' ) . geturl ( ) | Convert a relative file path to a file URL . | 57 | 11 |
244,272 | def check_commands_on_units ( self , commands , sentry_units ) : self . log . debug ( 'Checking exit codes for {} commands on {} ' 'sentry units...' . format ( len ( commands ) , len ( sentry_units ) ) ) for sentry_unit in sentry_units : for cmd in commands : output , code = sentry_unit . run ( cmd ) if code == 0 : self . log . debug ( '{} `{}` returned {} ' '(OK)' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code ) ) else : return ( '{} `{}` returned {} ' '{}' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code , output ) ) return None | Check that all commands in a list exit zero on all sentry units in a list . | 179 | 18 |
244,273 | def get_unit_process_ids ( self , unit_processes , expect_success = True , pgrep_full = False ) : pid_dict = { } for sentry_unit , process_list in six . iteritems ( unit_processes ) : pid_dict [ sentry_unit ] = { } for process in process_list : pids = self . get_process_id_list ( sentry_unit , process , expect_success = expect_success , pgrep_full = pgrep_full ) pid_dict [ sentry_unit ] . update ( { process : pids } ) return pid_dict | Construct a dict containing unit sentries process names and process IDs . | 137 | 13 |
244,274 | def validate_unit_process_ids ( self , expected , actual ) : self . log . debug ( 'Checking units for running processes...' ) self . log . debug ( 'Expected PIDs: {}' . format ( expected ) ) self . log . debug ( 'Actual PIDs: {}' . format ( actual ) ) if len ( actual ) != len ( expected ) : return ( 'Unit count mismatch. expected, actual: {}, ' '{} ' . format ( len ( expected ) , len ( actual ) ) ) for ( e_sentry , e_proc_names ) in six . iteritems ( expected ) : e_sentry_name = e_sentry . info [ 'unit_name' ] if e_sentry in actual . keys ( ) : a_proc_names = actual [ e_sentry ] else : return ( 'Expected sentry ({}) not found in actual dict data.' '{}' . format ( e_sentry_name , e_sentry ) ) if len ( e_proc_names . keys ( ) ) != len ( a_proc_names . keys ( ) ) : return ( 'Process name count mismatch. expected, actual: {}, ' '{}' . format ( len ( expected ) , len ( actual ) ) ) for ( e_proc_name , e_pids ) , ( a_proc_name , a_pids ) in zip ( e_proc_names . items ( ) , a_proc_names . items ( ) ) : if e_proc_name != a_proc_name : return ( 'Process name mismatch. expected, actual: {}, ' '{}' . format ( e_proc_name , a_proc_name ) ) a_pids_length = len ( a_pids ) fail_msg = ( 'PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})' . format ( e_sentry_name , e_proc_name , e_pids , a_pids_length , a_pids ) ) # If expected is a list, ensure at least one PID quantity match if isinstance ( e_pids , list ) and a_pids_length not in e_pids : return fail_msg # If expected is not bool and not list, # ensure PID quantities match elif not isinstance ( e_pids , bool ) and not isinstance ( e_pids , list ) and a_pids_length != e_pids : return fail_msg # If expected is bool True, ensure 1 or more PIDs exist elif isinstance ( e_pids , bool ) and e_pids is True and a_pids_length < 1 : return fail_msg # If expected is bool False, ensure 0 PIDs exist elif isinstance ( e_pids , bool ) and e_pids is False and a_pids_length != 0 : return fail_msg else : self . log . 
debug ( 'PID check OK: {} {} {}: ' '{}' . format ( e_sentry_name , e_proc_name , e_pids , a_pids ) ) return None | Validate process id quantities for services on units . | 695 | 10 |
244,275 | def validate_list_of_identical_dicts ( self , list_of_dicts ) : hashes = [ ] for _dict in list_of_dicts : hashes . append ( hash ( frozenset ( _dict . items ( ) ) ) ) self . log . debug ( 'Hashes: {}' . format ( hashes ) ) if len ( set ( hashes ) ) == 1 : self . log . debug ( 'Dicts within list are identical' ) else : return 'Dicts within list are not identical' return None | Check that all dicts within a list are identical . | 115 | 11 |
244,276 | def get_unit_hostnames ( self , units ) : host_names = { } for unit in units : host_names [ unit . info [ 'unit_name' ] ] = str ( unit . file_contents ( '/etc/hostname' ) . strip ( ) ) self . log . debug ( 'Unit host names: {}' . format ( host_names ) ) return host_names | Return a dict of juju unit names to hostnames . | 86 | 12 |
244,277 | def run_cmd_unit ( self , sentry_unit , cmd ) : output , code = sentry_unit . run ( cmd ) if code == 0 : self . log . debug ( '{} `{}` command returned {} ' '(OK)' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code ) ) else : msg = ( '{} `{}` command returned {} ' '{}' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code , output ) ) amulet . raise_status ( amulet . FAIL , msg = msg ) return str ( output ) , code | Run a command on a unit return the output and exit code . | 143 | 13 |
244,278 | def file_exists_on_unit ( self , sentry_unit , file_name ) : try : sentry_unit . file_stat ( file_name ) return True except IOError : return False except Exception as e : msg = 'Error checking file {}: {}' . format ( file_name , e ) amulet . raise_status ( amulet . FAIL , msg = msg ) | Check if a file exists on a unit . | 84 | 9 |
244,279 | def file_contents_safe ( self , sentry_unit , file_name , max_wait = 60 , fatal = False ) : unit_name = sentry_unit . info [ 'unit_name' ] file_contents = False tries = 0 while not file_contents and tries < ( max_wait / 4 ) : try : file_contents = sentry_unit . file_contents ( file_name ) except IOError : self . log . debug ( 'Attempt {} to open file {} from {} ' 'failed' . format ( tries , file_name , unit_name ) ) time . sleep ( 4 ) tries += 1 if file_contents : return file_contents elif not fatal : return None elif fatal : msg = 'Failed to get file contents from unit.' amulet . raise_status ( amulet . FAIL , msg ) | Get file contents from a sentry unit . Wrap amulet file_contents with retry logic to address races where a file checks as existing but no longer exists by the time file_contents is called . Return None if file not found . Optionally raise if fatal is True . | 187 | 57 |
244,280 | def port_knock_tcp ( self , host = "localhost" , port = 22 , timeout = 15 ) : # Resolve host name if possible try : connect_host = socket . gethostbyname ( host ) host_human = "{} ({})" . format ( connect_host , host ) except socket . error as e : self . log . warn ( 'Unable to resolve address: ' '{} ({}) Trying anyway!' . format ( host , e ) ) connect_host = host host_human = connect_host # Attempt socket connection try : knock = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) knock . settimeout ( timeout ) knock . connect ( ( connect_host , port ) ) knock . close ( ) self . log . debug ( 'Socket connect OK for host ' '{} on port {}.' . format ( host_human , port ) ) return True except socket . error as e : self . log . debug ( 'Socket connect FAIL for' ' {} port {} ({})' . format ( host_human , port , e ) ) return False | Open a TCP socket to check for a listening sevice on a host . | 239 | 15 |
244,281 | def port_knock_units ( self , sentry_units , port = 22 , timeout = 15 , expect_success = True ) : for unit in sentry_units : host = unit . info [ 'public-address' ] connected = self . port_knock_tcp ( host , port , timeout ) if not connected and expect_success : return 'Socket connect failed.' elif connected and not expect_success : return 'Socket connected unexpectedly.' | Open a TCP socket to check for a listening sevice on each listed juju unit . | 97 | 18 |
244,282 | def wait_on_action ( self , action_id , _check_output = subprocess . check_output ) : data = amulet . actions . get_action_output ( action_id , full_output = True ) return data . get ( u"status" ) == "completed" | Wait for a given action returning if it completed or not . | 63 | 12 |
244,283 | def status_get ( self , unit ) : raw_status , return_code = unit . run ( "status-get --format=json --include-data" ) if return_code != 0 : return ( "unknown" , "" ) status = json . loads ( raw_status ) return ( status [ "status" ] , status [ "message" ] ) | Return the current service status of this unit . | 77 | 9 |
244,284 | def execute ( self , sql ) : cursor = self . connection . cursor ( ) try : cursor . execute ( sql ) finally : cursor . close ( ) | Execute arbitary SQL against the database . | 32 | 9 |
244,285 | def select ( self , sql ) : cursor = self . connection . cursor ( ) try : cursor . execute ( sql ) results = [ list ( i ) for i in cursor . fetchall ( ) ] finally : cursor . close ( ) return results | Execute arbitrary SQL select query against the database and return the results . | 51 | 14 |
244,286 | def migrate_passwords_to_leader_storage ( self , excludes = None ) : if not is_leader ( ) : log ( "Skipping password migration as not the lead unit" , level = DEBUG ) return dirname = os . path . dirname ( self . root_passwd_file_template ) path = os . path . join ( dirname , '*.passwd' ) for f in glob . glob ( path ) : if excludes and f in excludes : log ( "Excluding %s from leader storage migration" % ( f ) , level = DEBUG ) continue key = os . path . basename ( f ) with open ( f , 'r' ) as passwd : _value = passwd . read ( ) . strip ( ) try : leader_set ( settings = { key : _value } ) if self . delete_ondisk_passwd_file : os . unlink ( f ) except ValueError : # NOTE cluster relation not yet ready - skip for now pass | Migrate any passwords storage on disk to leader storage . | 212 | 11 |
244,287 | def get_mysql_password_on_disk ( self , username = None , password = None ) : if username : template = self . user_passwd_file_template passwd_file = template . format ( username ) else : passwd_file = self . root_passwd_file_template _password = None if os . path . exists ( passwd_file ) : log ( "Using existing password file '%s'" % passwd_file , level = DEBUG ) with open ( passwd_file , 'r' ) as passwd : _password = passwd . read ( ) . strip ( ) else : log ( "Generating new password file '%s'" % passwd_file , level = DEBUG ) if not os . path . isdir ( os . path . dirname ( passwd_file ) ) : # NOTE: need to ensure this is not mysql root dir (which needs # to be mysql readable) mkdir ( os . path . dirname ( passwd_file ) , owner = 'root' , group = 'root' , perms = 0o770 ) # Force permissions - for some reason the chmod in makedirs # fails os . chmod ( os . path . dirname ( passwd_file ) , 0o770 ) _password = password or pwgen ( length = 32 ) write_file ( passwd_file , _password , owner = 'root' , group = 'root' , perms = 0o660 ) return _password | Retrieve generate or store a mysql password for the provided username on disk . | 321 | 15 |
244,288 | def passwd_keys ( self , username ) : keys = [ ] if username == 'mysql' : log ( "Bad username '%s'" % ( username ) , level = WARNING ) if username : # IMPORTANT: *newer* format must be returned first keys . append ( 'mysql-%s.passwd' % ( username ) ) keys . append ( '%s.passwd' % ( username ) ) else : keys . append ( 'mysql.passwd' ) for key in keys : yield key | Generator to return keys used to store passwords in peer store . | 114 | 13 |
244,289 | def get_mysql_password ( self , username = None , password = None ) : excludes = [ ] # First check peer relation. try : for key in self . passwd_keys ( username ) : _password = leader_get ( key ) if _password : break # If root password available don't update peer relation from local if _password and not username : excludes . append ( self . root_passwd_file_template ) except ValueError : # cluster relation is not yet started; use on-disk _password = None # If none available, generate new one if not _password : _password = self . get_mysql_password_on_disk ( username , password ) # Put on wire if required if self . migrate_passwd_to_leader_storage : self . migrate_passwords_to_leader_storage ( excludes = excludes ) return _password | Retrieve generate or store a mysql password for the provided username using peer relation cluster . | 185 | 17 |
244,290 | def set_mysql_password ( self , username , password ) : if username is None : username = 'root' # get root password via leader-get, it may be that in the past (when # changes to root-password were not supported) the user changed the # password, so leader-get is more reliable source than # config.previous('root-password'). rel_username = None if username == 'root' else username cur_passwd = self . get_mysql_password ( rel_username ) # password that needs to be set new_passwd = password # update password for all users (e.g. root@localhost, root@::1, etc) try : self . connect ( user = username , password = cur_passwd ) cursor = self . connection . cursor ( ) except MySQLdb . OperationalError as ex : raise MySQLSetPasswordError ( ( 'Cannot connect using password in ' 'leader settings (%s)' ) % ex , ex ) try : # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account # fails when using SET PASSWORD so using UPDATE against the # mysql.user table is needed, but changes to this table are not # replicated across the cluster, so this update needs to run in # all the nodes. More info at # http://galeracluster.com/documentation-webpages/userchanges.html release = CompareHostReleases ( lsb_release ( ) [ 'DISTRIB_CODENAME' ] ) if release < 'bionic' : SQL_UPDATE_PASSWD = ( "UPDATE mysql.user SET password = " "PASSWORD( %s ) WHERE user = %s;" ) else : # PXC 5.7 (introduced in Bionic) uses authentication_string SQL_UPDATE_PASSWD = ( "UPDATE mysql.user SET " "authentication_string = " "PASSWORD( %s ) WHERE user = %s;" ) cursor . execute ( SQL_UPDATE_PASSWD , ( new_passwd , username ) ) cursor . execute ( 'FLUSH PRIVILEGES;' ) self . connection . commit ( ) except MySQLdb . OperationalError as ex : raise MySQLSetPasswordError ( 'Cannot update password: %s' % str ( ex ) , ex ) finally : cursor . close ( ) # check the password was changed try : self . connect ( user = username , password = new_passwd ) self . execute ( 'select 1;' ) except MySQLdb . 
OperationalError as ex : raise MySQLSetPasswordError ( ( 'Cannot connect using new password: ' '%s' ) % str ( ex ) , ex ) if not is_leader ( ) : log ( 'Only the leader can set a new password in the relation' , level = DEBUG ) return for key in self . passwd_keys ( rel_username ) : _password = leader_get ( key ) if _password : log ( 'Updating password for %s (%s)' % ( key , rel_username ) , level = DEBUG ) leader_set ( settings = { key : new_passwd } ) | Update a mysql password for the provided username changing the leader settings | 682 | 12 |
244,291 | def get_allowed_units ( self , database , username , relation_id = None ) : self . connect ( password = self . get_mysql_root_password ( ) ) allowed_units = set ( ) for unit in related_units ( relation_id ) : settings = relation_get ( rid = relation_id , unit = unit ) # First check for setting with prefix, then without for attr in [ "%s_hostname" % ( database ) , 'hostname' ] : hosts = settings . get ( attr , None ) if hosts : break if hosts : # hostname can be json-encoded list of hostnames try : hosts = json . loads ( hosts ) except ValueError : hosts = [ hosts ] else : hosts = [ settings [ 'private-address' ] ] if hosts : for host in hosts : host = self . normalize_address ( host ) if self . grant_exists ( database , username , host ) : log ( "Grant exists for host '%s' on db '%s'" % ( host , database ) , level = DEBUG ) if unit not in allowed_units : allowed_units . add ( unit ) else : log ( "Grant does NOT exist for host '%s' on db '%s'" % ( host , database ) , level = DEBUG ) else : log ( "No hosts found for grant check" , level = INFO ) return allowed_units | Get list of units with access grants for database with username . | 301 | 12 |
244,292 | def configure_db ( self , hostname , database , username , admin = False ) : self . connect ( password = self . get_mysql_root_password ( ) ) if not self . database_exists ( database ) : self . create_database ( database ) remote_ip = self . normalize_address ( hostname ) password = self . get_mysql_password ( username ) if not self . grant_exists ( database , username , remote_ip ) : if not admin : self . create_grant ( database , username , remote_ip , password ) else : self . create_admin_grant ( username , remote_ip , password ) self . flush_priviledges ( ) return password | Configure access to database for username from hostname . | 155 | 11 |
244,293 | def human_to_bytes ( self , human ) : num_re = re . compile ( '^[0-9]+$' ) if num_re . match ( human ) : return human factors = { 'K' : 1024 , 'M' : 1048576 , 'G' : 1073741824 , 'T' : 1099511627776 } modifier = human [ - 1 ] if modifier in factors : return int ( human [ : - 1 ] ) * factors [ modifier ] if modifier == '%' : total_ram = self . human_to_bytes ( self . get_mem_total ( ) ) if self . is_32bit_system ( ) and total_ram > self . sys_mem_limit ( ) : total_ram = self . sys_mem_limit ( ) factor = int ( human [ : - 1 ] ) * 0.01 pctram = total_ram * factor return int ( pctram - ( pctram % self . DEFAULT_PAGE_SIZE ) ) raise ValueError ( "Can only convert K,M,G, or T" ) | Convert human readable configuration options to bytes . | 238 | 9 |
244,294 | def sys_mem_limit ( self ) : if platform . machine ( ) in [ 'armv7l' ] : _mem_limit = self . human_to_bytes ( '2700M' ) # experimentally determined else : # Limit for x86 based 32bit systems _mem_limit = self . human_to_bytes ( '4G' ) return _mem_limit | Determine the default memory limit for the current service unit . | 83 | 13 |
def get_mem_total(self):
    """Return total system memory as a string like '16384516K'.

    Parses /proc/meminfo for the MemTotal line and returns the value
    joined with a single upper-cased unit letter.  Returns None if no
    MemTotal line is found (not expected on Linux).
    """
    with open('/proc/meminfo') as meminfo_file:
        for line in meminfo_file:
            # maxsplit=1 so a value containing ':' can never yield a
            # third field and break the two-way unpack (the original
            # maxsplit=2 allowed that).
            key, mem = line.split(':', 1)
            if key == 'MemTotal':
                # split() with no argument tolerates any amount of
                # whitespace between the value and its unit.
                mtot, modifier = mem.strip().split()[:2]
                return '%s%s' % (mtot, modifier[0].upper())
def parse_config(self):
    """Parse charm configuration and calculate values for config files."""
    config = config_get()
    mysql_config = {}

    # Straight pass-throughs from charm config to mysql option names.
    if 'max-connections' in config:
        mysql_config['max_connections'] = config['max-connections']

    if 'wait-timeout' in config:
        mysql_config['wait_timeout'] = config['wait-timeout']

    # An explicit flush setting wins over the tuning-level derived one.
    if 'innodb-flush-log-at-trx-commit' in config:
        mysql_config['innodb_flush_log_at_trx_commit'] = \
            config['innodb-flush-log-at-trx-commit']
    elif 'tuning-level' in config:
        mysql_config['innodb_flush_log_at_trx_commit'] = \
            self.INNODB_FLUSH_CONFIG_VALUES.get(config['tuning-level'], 1)

    if ('innodb-change-buffering' in config and
            config['innodb-change-buffering'] in
            self.INNODB_VALID_BUFFERING_VALUES):
        mysql_config['innodb_change_buffering'] = \
            config['innodb-change-buffering']

    if 'innodb-io-capacity' in config:
        mysql_config['innodb_io_capacity'] = config['innodb-io-capacity']

    # Set a sane default key_buffer size.
    mysql_config['key_buffer'] = self.human_to_bytes('32M')
    total_memory = self.human_to_bytes(self.get_mem_total())

    dataset_bytes = config.get('dataset-size', None)
    innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)

    if innodb_buffer_pool_size:
        innodb_buffer_pool_size = self.human_to_bytes(innodb_buffer_pool_size)
    elif dataset_bytes:
        log("Option 'dataset-size' has been deprecated, please use"
            "innodb_buffer_pool_size option instead", level="WARN")
        innodb_buffer_pool_size = self.human_to_bytes(dataset_bytes)
    else:
        # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB
        #                  to ensure that deployments in containers
        #                  without constraints don't try to consume
        #                  silly amounts of memory.
        innodb_buffer_pool_size = min(
            int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR),
            self.DEFAULT_INNODB_BUFFER_SIZE_MAX)

    if innodb_buffer_pool_size > total_memory:
        log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
            innodb_buffer_pool_size, total_memory), level='WARN')

    mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
    return mysql_config
def create_loopback(file_path):
    """Create a loopback device for a given backing file.

    :returns: the device mapped to the file (e.g. '/dev/loop0'), or
              None if no mapping is found after creation.
    """
    file_path = os.path.abspath(file_path)
    check_call(['losetup', '--find', file_path])
    # losetup's output is discarded, so scan the table of active loopback
    # devices to discover which device was assigned to our backing file.
    for device, backing_file in six.iteritems(loopback_devices()):
        if backing_file == file_path:
            return device
def ensure_loopback_device(path, size):
    """Ensure a loopback device exists for a given backing file path and size. If
    it a loopback device is not mapped to file a new one will be created.

    :returns: the loopback device (e.g. '/dev/loop0').
    """
    # Reuse an existing mapping when one is already present.
    for device, backing_file in six.iteritems(loopback_devices()):
        if backing_file == path:
            return device

    if not os.path.exists(path):
        # Allocate the backing file at the requested size.
        check_call(['truncate', '--size', size, path])

    return create_loopback(path)
def leader_get(attribute=None, rid=None):
    """Wrapper to ensure that settings are migrated from the peer relation.

    On the leader, any value present in this unit's peer relation data
    but absent from the leader db is copied into the leader db and
    recorded under a private migration key so it is migrated only once.
    Non-leaders simply read from the leader db.
    """
    migration_key = '__leader_get_migrated_settings__'
    if not is_leader():
        return _leader_get(attribute=attribute)

    settings_migrated = False
    leader_settings = _leader_get(attribute=attribute)
    previously_migrated = _leader_get(attribute=migration_key)

    # The record of already-migrated keys is stored json-encoded.
    if previously_migrated:
        migrated = set(json.loads(previously_migrated))
    else:
        migrated = set([])

    # Hide the private bookkeeping key from callers.  When a single
    # attribute was requested leader_settings may be None, in which case
    # the 'in' test raises TypeError.
    try:
        if migration_key in leader_settings:
            del leader_settings[migration_key]
    except TypeError:
        pass

    if attribute:
        if attribute in migrated:
            return leader_settings

        # If attribute not present in leader db, check if this unit has set
        # the attribute in the peer relation
        if not leader_settings:
            peer_setting = _relation_get(attribute=attribute,
                                         unit=local_unit(), rid=rid)
            if peer_setting:
                leader_set(settings={attribute: peer_setting})
                leader_settings = peer_setting

            if leader_settings:
                settings_migrated = True
                migrated.add(attribute)
    else:
        r_settings = _relation_get(unit=local_unit(), rid=rid)
        if r_settings:
            for key in set(r_settings.keys()).difference(migrated):
                # Leader setting wins
                if not leader_settings.get(key):
                    leader_settings[key] = r_settings[key]

                settings_migrated = True
                migrated.add(key)

    if settings_migrated:
        # NOTE(review): in the single-attribute path leader_settings is
        # the raw peer value (not a dict) and was already written via
        # leader_set above -- confirm **-unpacking it here is intended.
        leader_set(**leader_settings)

    if migrated and settings_migrated:
        migrated = json.dumps(list(migrated))
        leader_set(settings={migration_key: migrated})

    return leader_settings
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.