query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Checks if the venue is open at the time of day given a venue id .
def is_open(self, id, time, day):
    """Check whether the venue with ``id`` is open at ``time`` on ``day``.

    Returns True when some opening interval contains ``time``, False when
    opening-hours data exists for that day but no interval matches, and
    None when no data is available at all.
    """
    details = self.get_details(id)
    found_hours = False
    for venue in details["objects"]:
        day_hours = venue["open_hours"][day]
        if not day_hours:
            continue
        found_hours = True
        for span in day_hours:
            # intervals look like "HH:MM - HH:MM"; zero-padded strings
            # compare correctly lexicographically
            start, end = span.replace(' ', '').split('-')
            if start < time < end:
                return True
    return False if found_hours else None
11,600
https://github.com/Locu-Unofficial/locu-python/blob/fcdf136b68333ab7055e623591801dd35df3bc45/locu/api.py#L316-L356
[ "def", "_simulate_unitary", "(", "self", ",", "op", ":", "ops", ".", "Operation", ",", "data", ":", "_StateAndBuffer", ",", "indices", ":", "List", "[", "int", "]", ")", "->", "None", ":", "result", "=", "protocols", ".", "apply_unitary", "(", "op", ",", "args", "=", "protocols", ".", "ApplyUnitaryArgs", "(", "data", ".", "state", ",", "data", ".", "buffer", ",", "indices", ")", ")", "if", "result", "is", "data", ".", "buffer", ":", "data", ".", "buffer", "=", "data", ".", "state", "data", ".", "state", "=", "result" ]
Locu Menu Item Search API Call Wrapper
def search(self, name=None, category=None, description=None, price=None,
           price__gt=None, price__gte=None, price__lt=None, price__lte=None,
           location=(None, None), radius=None, tl_coord=(None, None),
           br_coord=(None, None), country=None, locality=None, region=None,
           postal_code=None, street_address=None, website_url=None):
    """Locu Menu Item Search API call wrapper.

    Builds the request parameter dict from every provided filter and issues
    a 'search' query via self._create_query.
    """
    # BUG FIX: `category` was accepted but never forwarded to the parameter
    # builder, so category filtering silently did nothing. Pass it through
    # like every other filter.
    params = self._get_params(
        name=name,
        category=category,
        description=description,
        price=price,
        price__gt=price__gt,
        price__gte=price__gte,
        price__lt=price__lt,
        price__lte=price__lte,
        location=location,
        radius=radius,
        tl_coord=tl_coord,
        br_coord=br_coord,
        country=country,
        locality=locality,
        region=region,
        postal_code=postal_code,
        street_address=street_address,
        website_url=website_url,
    )
    return self._create_query('search', params)
11,601
https://github.com/Locu-Unofficial/locu-python/blob/fcdf136b68333ab7055e623591801dd35df3bc45/locu/api.py#L366-L428
[ "def", "clear_cache", "(", "dataset_name", "=", "None", ")", ":", "dr", "=", "data_resources", "[", "dataset_name", "]", "if", "'dirs'", "in", "dr", ":", "for", "dirs", ",", "files", "in", "zip", "(", "dr", "[", "'dirs'", "]", ",", "dr", "[", "'files'", "]", ")", ":", "for", "dir", ",", "file", "in", "zip", "(", "dirs", ",", "files", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "dataset_name", ",", "dir", ",", "file", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "logging", ".", "info", "(", "\"clear_cache: removing \"", "+", "path", ")", "os", ".", "unlink", "(", "path", ")", "for", "dir", "in", "dirs", ":", "path", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "dataset_name", ",", "dir", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "logging", ".", "info", "(", "\"clear_cache: remove directory \"", "+", "path", ")", "os", ".", "rmdir", "(", "path", ")", "else", ":", "for", "file_list", "in", "dr", "[", "'files'", "]", ":", "for", "file", "in", "file_list", ":", "path", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "dataset_name", ",", "file", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "logging", ".", "info", "(", "\"clear_cache: remove \"", "+", "path", ")", "os", ".", "unlink", "(", "path", ")" ]
stack - docs is a CLI for building LSST Stack documentation such as pipelines . lsst . io .
def main(ctx, root_project_dir, verbose):
    """stack-docs entry point: resolve the project root and set up logging."""
    root_project_dir = discover_conf_py_directory(root_project_dir)

    # Shared state for subcommands; they retrieve it with the
    # click.pass_obj decorator as their first argument.
    ctx.obj = {'root_project_dir': root_project_dir, 'verbose': verbose}

    # Activate only documenteer's own logger. Other applications' loggers
    # can be added here later if necessary.
    log_level = logging.DEBUG if verbose else logging.INFO
    logger = logging.getLogger('documenteer')
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(log_level)
11,602
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/stackcli.py#L38-L77
[ "def", "GetMemShares", "(", "self", ")", ":", "counter", "=", "c_uint", "(", ")", "ret", "=", "vmGuestLib", ".", "VMGuestLib_GetMemShares", "(", "self", ".", "handle", ".", "value", ",", "byref", "(", "counter", ")", ")", "if", "ret", "!=", "VMGUESTLIB_ERROR_SUCCESS", ":", "raise", "VMGuestLibException", "(", "ret", ")", "return", "counter", ".", "value" ]
Show help for any command .
def help(ctx, topic, **kw):
    """Show help for any command.

    Implementation taken from
    https://www.burgundywall.com/post/having-click-help-subcommand
    """
    if topic is None:
        click.echo(ctx.parent.get_help())
        return
    click.echo(main.commands[topic].get_help(ctx))
11,603
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/stackcli.py#L83-L91
[ "def", "set_value_BC", "(", "self", ",", "pores", ",", "values", ")", ":", "self", ".", "_set_BC", "(", "pores", "=", "pores", ",", "bctype", "=", "'value'", ",", "bcvalues", "=", "values", ",", "mode", "=", "'merge'", ")" ]
Clean Sphinx build products .
def clean(ctx):
    """Clean Sphinx build products.

    Removes the generated 'py-api', '_build', 'modules' and 'packages'
    directories under the project root, logging each removal (or absence).
    """
    logger = logging.getLogger(__name__)
    root = ctx.obj['root_project_dir']
    for name in ('py-api', '_build', 'modules', 'packages'):
        target = os.path.join(root, name)
        if not os.path.isdir(target):
            logger.debug('Did not clean up %r (missing)', target)
            continue
        shutil.rmtree(target)
        logger.debug('Cleaned up %r', target)
11,604
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/stackcli.py#L134-L158
[ "def", "_check_available_data", "(", "archive", ",", "arc_type", ",", "day", ")", ":", "available_stations", "=", "[", "]", "if", "arc_type", ".", "lower", "(", ")", "==", "'day_vols'", ":", "wavefiles", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "archive", ",", "day", ".", "strftime", "(", "'Y%Y'", ")", ",", "day", ".", "strftime", "(", "'R%j.01'", ")", ",", "'*'", ")", ")", "for", "wavefile", "in", "wavefiles", ":", "header", "=", "read", "(", "wavefile", ",", "headonly", "=", "True", ")", "available_stations", ".", "append", "(", "(", "header", "[", "0", "]", ".", "stats", ".", "station", ",", "header", "[", "0", "]", ".", "stats", ".", "channel", ")", ")", "elif", "arc_type", ".", "lower", "(", ")", "==", "'seishub'", ":", "client", "=", "SeishubClient", "(", "archive", ")", "st", "=", "client", ".", "get_previews", "(", "starttime", "=", "UTCDateTime", "(", "day", ")", ",", "endtime", "=", "UTCDateTime", "(", "day", ")", "+", "86400", ")", "for", "tr", "in", "st", ":", "available_stations", ".", "append", "(", "(", "tr", ".", "stats", ".", "station", ",", "tr", ".", "stats", ".", "channel", ")", ")", "elif", "arc_type", ".", "lower", "(", ")", "==", "'fdsn'", ":", "client", "=", "FDSNClient", "(", "archive", ")", "inventory", "=", "client", ".", "get_stations", "(", "starttime", "=", "UTCDateTime", "(", "day", ")", ",", "endtime", "=", "UTCDateTime", "(", "day", ")", "+", "86400", ",", "level", "=", "'channel'", ")", "for", "network", "in", "inventory", ":", "for", "station", "in", "network", ":", "for", "channel", "in", "station", ":", "available_stations", ".", "append", "(", "(", "station", ".", "code", ",", "channel", ".", "code", ")", ")", "return", "available_stations" ]
Query all entities of a specific type with their attributes
def query_with_attributes(type_to_query, client):
    """Query all entities of a specific type with their attributes.

    Returns a DataFrame with one row per entity id and one column per
    attribute name.
    """
    session = client.create_session()

    # fetch (attribute name, attribute value, entity id) triples for the type
    query = session.query(Attribute.name, Attribute.value, Entity.id) \
        .join(Entity) \
        .filter(Entity.type == type_to_query)
    df = client.df_query(query)
    session.close()

    # NaN values are not stored
    df = df.dropna(how='any')

    # pivot attribute names into columns; 'unstack' produces multi-level
    # column labels, flatten them back to a single level afterwards
    df = df.set_index(['id', 'name']).unstack().reset_index()
    # noinspection PyUnresolvedReferences
    df.columns = ['id'] + list(df.columns.get_level_values(1)[1:])

    return df
11,605
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/leonardo/leonardo/kvstore/models.py#L18-L50
[ "def", "write_networking_file", "(", "version", ",", "pairs", ")", ":", "vmnets", "=", "OrderedDict", "(", "sorted", "(", "pairs", ".", "items", "(", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", ")", "try", ":", "with", "open", "(", "VMWARE_NETWORKING_FILE", ",", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "f", ":", "f", ".", "write", "(", "version", ")", "for", "key", ",", "value", "in", "vmnets", ".", "items", "(", ")", ":", "f", ".", "write", "(", "\"answer {} {}\\n\"", ".", "format", "(", "key", ",", "value", ")", ")", "except", "OSError", "as", "e", ":", "raise", "SystemExit", "(", "\"Cannot open {}: {}\"", ".", "format", "(", "VMWARE_NETWORKING_FILE", ",", "e", ")", ")", "# restart VMware networking service", "if", "sys", ".", "platform", ".", "startswith", "(", "\"darwin\"", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "\"/Applications/VMware Fusion.app/Contents/Library/vmnet-cli\"", ")", ":", "raise", "SystemExit", "(", "\"VMware Fusion is not installed in Applications\"", ")", "os", ".", "system", "(", "r\"/Applications/VMware\\ Fusion.app/Contents/Library/vmnet-cli --configure\"", ")", "os", ".", "system", "(", "r\"/Applications/VMware\\ Fusion.app/Contents/Library/vmnet-cli --stop\"", ")", "os", ".", "system", "(", "r\"/Applications/VMware\\ Fusion.app/Contents/Library/vmnet-cli --start\"", ")", "else", ":", "os", ".", "system", "(", "\"vmware-networks --stop\"", ")", "os", ".", "system", "(", "\"vmware-networks --start\"", ")" ]
Reset all fields of this object to class defaults
def reset(self):
    """Reset all public fields of this object to their class defaults.

    Underscore-prefixed attributes are left untouched. Falsy values
    (None, 0, "", []) are kept as-is; anything else is replaced with a
    freshly constructed default instance of its own type.
    """
    for name in self.__dict__:
        if name.startswith("_"):
            continue
        current = getattr(self, name)
        setattr(self, name, current and current.__class__())
11,606
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/serialize.py#L88-L97
[ "def", "sendCommand", "(", "self", ",", "command", ")", ":", "data", "=", "{", "'rapi'", ":", "command", "}", "full_url", "=", "self", ".", "url", "+", "urllib", ".", "parse", ".", "urlencode", "(", "data", ")", "data", "=", "urllib", ".", "request", ".", "urlopen", "(", "full_url", ")", "response", "=", "re", ".", "search", "(", "'\\<p>&gt;\\$(.+)\\<script'", ",", "data", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "if", "response", "==", "None", ":", "#If we are using version 1 - https://github.com/OpenEVSE/ESP8266_WiFi_v1.x/blob/master/OpenEVSE_RAPI_WiFi_ESP8266.ino#L357", "response", "=", "re", ".", "search", "(", "'\\>\\>\\$(.+)\\<p>'", ",", "data", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "return", "response", ".", "group", "(", "1", ")", ".", "split", "(", ")" ]
Convert a GeoJSON polygon feature to a numpy array
def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
    """Convert a GeoJSON polygon feature to a numpy array (raster mask).

    Renders the (Multi)Polygon into an off-screen matplotlib figure of the
    requested ``shape`` and reads it back as a float array: 1.0 inside the
    polygon, 0.0 outside; interior 'holes' come out as 0.0 again.

    :param feature: GeoJSON feature of type 'Polygon' or 'MultiPolygon'
    :param shape: (rows, cols) of the target mask
    :param lat_idx: interpolator mapping latitudes to row indices
    :param lon_idx: interpolator mapping longitudes to column indices
    :raises ValueError: for any other geometry type
    """
    import matplotlib
    # specify 'agg' renderer, Mac renderer does not support what we want to do below
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    from matplotlib import patches
    import numpy as np

    # we can only do polygons right now
    if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
        raise ValueError("Cannot handle feature of type " + feature.geometry.type)

    # fictional dpi - don't matter in the end
    dpi = 100

    # -- start documentation include: poly-setup
    # make a new figure with no frame, no axes, with the correct size, black background
    fig = plt.figure(frameon=False, dpi=dpi)
    fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    # noinspection PyTypeChecker
    ax.set_xlim([0, shape[1]])
    # noinspection PyTypeChecker
    ax.set_ylim([0, shape[0]])
    fig.add_axes(ax)
    # -- end documentation include: poly-setup

    # for normal polygons make coordinates iterable
    if feature.geometry.type == 'Polygon':
        coords = [feature.geometry.coordinates]
    else:
        coords = feature.geometry.coordinates

    for poly_coords in coords:
        # the polygon may contain multiple outlines; the first is
        # always the outer one, the others are 'holes'
        for i, outline in enumerate(poly_coords):
            # inside/outside fill value: figure background is white by
            # default, draw inverted polygon and invert again later
            value = 0. if i == 0 else 1.

            # convert lats/lons to row/column indices in the array
            outline = np.array(outline)
            xs = lon_idx(outline[:, 0])
            ys = lat_idx(outline[:, 1])

            # draw the polygon
            poly = patches.Polygon(list(zip(xs, ys)),
                                   facecolor=(value, value, value),
                                   edgecolor='none',
                                   antialiased=True)
            ax.add_patch(poly)

    # -- start documentation include: poly-extract
    # extract the figure to a numpy array
    fig.canvas.draw()
    # BUG FIX: np.fromstring on binary data is deprecated and was removed in
    # modern NumPy; np.frombuffer is the documented replacement.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)

    # reshape to a proper numpy array, keep one channel only
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
    # -- end documentation include: poly-extract

    # make sure we get the right shape back
    assert data.shape[0] == shape[0]
    assert data.shape[1] == shape[1]

    # convert from uints back to floats and invert to get black background
    data = 1. - data.astype(float) / 255.  # type: np.array

    # image is flipped horizontally w.r.t. map
    data = data[::-1, :]

    # done, clean up
    plt.close('all')

    return data
11,607
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L549-L637
[ "def", "deserialize_non_framed_values", "(", "stream", ",", "header", ",", "verifier", "=", "None", ")", ":", "_LOGGER", ".", "debug", "(", "\"Starting non-framed body iv/tag deserialization\"", ")", "(", "data_iv", ",", "data_length", ")", "=", "unpack_values", "(", "\">{}sQ\"", ".", "format", "(", "header", ".", "algorithm", ".", "iv_len", ")", ",", "stream", ",", "verifier", ")", "return", "data_iv", ",", "data_length" ]
Load data from default location
def load(self):
    """Load the NUTS code list from the default location.

    Returns a DataFrame with columns 'key' (4-character NUTS2 code) and
    'name' (region description). Non-NUTS2 codes and 'Extra Regio' codes
    (ending in 'ZZ') are dropped.
    """
    # read the file, keeping every value as a string
    df = pd.read_csv(self.input_file,
                     sep=',',
                     quotechar='"',
                     encoding='utf-8',
                     dtype=object)

    # only the NUTS code and the description are of interest; rename them too
    df = df[['NUTS-Code', 'Description']]
    df.columns = ['key', 'name']

    # keep NUTS2 regions only (4-digit codes) ...
    df = df[df['key'].str.len() == 4]
    # ... and drop 'Extra Regio' codes ending in 'ZZ'
    df = df[df['key'].str[2:] != 'ZZ']

    return df
11,608
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L50-L73
[ "def", "_create_update_tracking_related_event", "(", "instance", ")", ":", "events", "=", "{", "}", "# Create a dict mapping related model field to modified fields", "for", "field", ",", "related_fields", "in", "instance", ".", "_tracked_related_fields", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "instance", ".", "_meta", ".", "get_field", "(", "field", ")", ",", "ManyToManyField", ")", ":", "if", "isinstance", "(", "instance", ".", "_meta", ".", "get_field", "(", "field", ")", ",", "ForeignKey", ")", ":", "# Compare pk", "value", "=", "getattr", "(", "instance", ",", "'{0}_id'", ".", "format", "(", "field", ")", ")", "else", ":", "value", "=", "getattr", "(", "instance", ",", "field", ")", "if", "instance", ".", "_original_fields", "[", "field", "]", "!=", "value", ":", "for", "related_field", "in", "related_fields", ":", "events", ".", "setdefault", "(", "related_field", ",", "[", "]", ")", ".", "append", "(", "field", ")", "# Create the events from the events dict", "for", "related_field", ",", "fields", "in", "events", ".", "items", "(", ")", ":", "try", ":", "related_instances", "=", "getattr", "(", "instance", ",", "related_field", "[", "1", "]", ")", "except", "ObjectDoesNotExist", ":", "continue", "# FIXME: isinstance(related_instances, RelatedManager ?)", "if", "hasattr", "(", "related_instances", ",", "'all'", ")", ":", "related_instances", "=", "related_instances", ".", "all", "(", ")", "else", ":", "related_instances", "=", "[", "related_instances", "]", "for", "related_instance", "in", "related_instances", ":", "event", "=", "_create_event", "(", "related_instance", ",", "UPDATE", ")", "for", "field", "in", "fields", ":", "fieldname", "=", "'{0}__{1}'", ".", "format", "(", "related_field", "[", "0", "]", ",", "field", ")", "_create_tracked_field", "(", "event", ",", "instance", ",", "field", ",", "fieldname", "=", "fieldname", ")" ]
Returns the input file name with a default relative path
def input_file(self):
    """Return the input file name with a default relative path
    (the 'data' directory next to this module)."""
    filename = 'tgs{:s}.tsv'.format(self.number)
    return path.join(path.dirname(__file__), 'data', filename)
11,609
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L133-L136
[ "def", "OneHot0", "(", "*", "xs", ",", "simplify", "=", "True", ",", "conj", "=", "True", ")", ":", "xs", "=", "[", "Expression", ".", "box", "(", "x", ")", ".", "node", "for", "x", "in", "xs", "]", "terms", "=", "list", "(", ")", "if", "conj", ":", "for", "x0", ",", "x1", "in", "itertools", ".", "combinations", "(", "xs", ",", "2", ")", ":", "terms", ".", "append", "(", "exprnode", ".", "or_", "(", "exprnode", ".", "not_", "(", "x0", ")", ",", "exprnode", ".", "not_", "(", "x1", ")", ")", ")", "y", "=", "exprnode", ".", "and_", "(", "*", "terms", ")", "else", ":", "for", "_xs", "in", "itertools", ".", "combinations", "(", "xs", ",", "len", "(", "xs", ")", "-", "1", ")", ":", "terms", ".", "append", "(", "exprnode", ".", "and_", "(", "*", "[", "exprnode", ".", "not_", "(", "x", ")", "for", "x", "in", "_xs", "]", ")", ")", "y", "=", "exprnode", ".", "or_", "(", "*", "terms", ")", "if", "simplify", ":", "y", "=", "y", ".", "simplify", "(", ")", "return", "_expr", "(", "y", ")" ]
Load data table from tsv file from default location
def load(self, key_filter=None, header_preproc=None):
    """Load a data table from a tsv file at the default location.

    :param key_filter: optional regex; rows whose first-column value does
        not match are dropped
    :param header_preproc: optional callable applied to each data column
        header before conversion to int
    :return: DataFrame with a 'key' column (NUTS2 region code) and one
        numeric column per year (int column labels)
    """
    # read file, keep all values as strings
    df = pd.read_csv(self.input_file, sep='\t', dtype=object)

    if key_filter is not None:
        # filter on key column (first column)
        df = df[df[df.columns[0]].str.match(key_filter)]

    # first column contains metadata, with NUTS2 region key as last (comma-separated) value
    meta_col = df.columns[0]
    df[meta_col] = df[meta_col].str.split(',').str[-1]

    # convert columns to numbers, skip first column (containing metadata)
    for col_name in df.columns[1:]:
        # some values have lower-case characters indicating footnotes, strip them;
        # BUG FIX: pandas >= 2.0 treats str.replace patterns literally by
        # default, so the character class must be marked as a regex explicitly
        stripped = df[col_name].str.replace(r'[a-z]', '', regex=True)

        # convert to numbers, convert any remaining empty values (indicated by ':' in the input table) to NaN
        df[col_name] = pd.to_numeric(stripped, errors='coerce')

    # preprocess headers
    if header_preproc is not None:
        df.columns = list(df.columns[:1]) + [header_preproc(c) for c in df.columns[1:]]

    # rename columns, convert years to integers
    # noinspection PyTypeChecker
    df.columns = ['key'] + [int(y) for y in df.columns[1:]]

    return df
11,610
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L138-L179
[ "def", "_objective_function", "(", "X", ",", "W", ",", "R", ",", "S", ",", "gamma", ")", ":", "subjs", "=", "len", "(", "X", ")", "func", "=", ".0", "for", "i", "in", "range", "(", "subjs", ")", ":", "func", "+=", "0.5", "*", "np", ".", "sum", "(", "(", "X", "[", "i", "]", "-", "W", "[", "i", "]", ".", "dot", "(", "R", ")", "-", "S", "[", "i", "]", ")", "**", "2", ")", "+", "gamma", "*", "np", ".", "sum", "(", "np", ".", "abs", "(", "S", "[", "i", "]", ")", ")", "return", "func" ]
Load the climate data as a map
def load(self):
    """Load the climate data as a map.

    Returns a dict with:
      * 'data': masked array (time, lat, lon), rolled so Europe sits in
        the map center and with the invalid-data flag converted to a mask
      * 'lat_idx' / 'lon_idx': interpolators from latitude/longitude to
        row/column indices
    """
    from scipy.io import netcdf_file
    from scipy import interpolate
    import numpy as np

    # load file
    nc = netcdf_file(self.input_file)

    out = dict()

    # extract coordinate data, making explicit copies
    lats = nc.variables['lat'][:].copy()
    lons = nc.variables['lon'][:].copy()

    # lons start at 0, which is bad for working with data in Europe because
    # the map border runs right through; roll the array by half its width
    # to get Europe into the map center
    half = len(lons) // 2
    out['data'] = np.roll(nc.variables[self.variable_name][:, :, :].copy(),
                          shift=half, axis=2)
    lons = np.roll(lons, shift=half)

    # avoid wraparound problems around zero by setting lon range to
    # -180...180; this is also the format used in the GeoJSON NUTS2 polygons
    lons[lons > 180] -= 360

    # data contains some very negative value (~ -9e36) as 'invalid data'
    # flag, convert this to a masked array
    out['data'] = np.ma.array(out['data'])
    out['data'][out['data'] < -1.e6] = np.ma.masked

    # -- start documentation include: climate-input-interp
    # build interpolators to convert lats/lons to row/column indices
    out['lat_idx'] = interpolate.interp1d(x=lats, y=np.arange(len(lats)))
    out['lon_idx'] = interpolate.interp1d(x=lons, y=np.arange(len(lons)))
    # -- end documentation include: climate-input-interp

    # clean up
    nc.close()

    return out
11,611
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L659-L701
[ "def", "neural_gpu_body", "(", "inputs", ",", "hparams", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "\"neural_gpu\"", ")", ":", "def", "step", "(", "state", ",", "inp", ")", ":", "# pylint: disable=missing-docstring", "x", "=", "tf", ".", "nn", ".", "dropout", "(", "state", ",", "1.0", "-", "hparams", ".", "dropout", ")", "for", "layer", "in", "range", "(", "hparams", ".", "num_hidden_layers", ")", ":", "x", "=", "common_layers", ".", "conv_gru", "(", "x", ",", "(", "hparams", ".", "kernel_height", ",", "hparams", ".", "kernel_width", ")", ",", "hparams", ".", "hidden_size", ",", "name", "=", "\"cgru_%d\"", "%", "layer", ")", "# Padding input is zeroed-out in the modality, we check this by summing.", "padding_inp", "=", "tf", ".", "less", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "abs", "(", "inp", ")", ",", "axis", "=", "[", "1", ",", "2", "]", ")", ",", "0.00001", ")", "new_state", "=", "tf", ".", "where", "(", "padding_inp", ",", "state", ",", "x", ")", "# No-op where inp is padding.", "return", "new_state", "return", "tf", ".", "foldl", "(", "step", ",", "tf", ".", "transpose", "(", "inputs", ",", "[", "1", ",", "0", ",", "2", ",", "3", "]", ")", ",", "initializer", "=", "inputs", ",", "parallel_iterations", "=", "1", ",", "swap_memory", "=", "True", ")" ]
Clear output of one climate variable
def clear(self):
    """Clear output of one climate variable.

    Marks this task incomplete, deletes the ClimateIndicator for every
    season suffix (associated values are removed by DB cascade), then
    closes the session.
    """
    # mark this task as incomplete
    self.mark_incomplete()

    # Delete the indicator metadata, this also deletes values by cascading.
    for suffix in list(CLIMATE_SEASON_SUFFIXES.values()):
        try:
            # look up the single indicator for this variable+season
            # noinspection PyUnresolvedReferences
            indicator = self.session.query(models.ClimateIndicator) \
                .filter(models.ClimateIndicator.description == self.description + suffix) \
                .one()
            self.session.delete(indicator)
        except NoResultFound:
            # Data didn't exist yet, no problem
            pass

    self.close_session()
11,612
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L731-L749
[ "def", "batch_write_spans", "(", "self", ",", "name", ",", "spans", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"batch_write_spans\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"batch_write_spans\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "batch_write_spans", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"BatchWriteSpans\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"BatchWriteSpans\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "tracing_pb2", ".", "BatchWriteSpansRequest", "(", "name", "=", "name", ",", "spans", "=", "spans", ")", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "]", "metadata", "=", "list", "(", "metadata", ")", "try", ":", "routing_header", "=", "[", "(", "\"name\"", ",", "name", ")", "]", "except", "AttributeError", ":", "pass", "else", ":", "routing_metadata", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "routing_header", ")", "metadata", ".", "append", "(", "routing_metadata", ")", "self", ".", "_inner_api_calls", "[", "\"batch_write_spans\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Load climate data and convert to indicator objects
def run(self):
    """Load climate data and convert to indicator objects.

    For each season a ClimateIndicator is created and the climate variable
    is averaged over the season's months; then, for each NUTS2 region, the
    seasonal field is averaged over a rasterized region mask and stored as
    a ClimateValue (bulk-saved at the end).
    """
    import numpy as np

    # get all NUTS region IDs, for linking values to region objects
    query = self.session.query(models.NUTS2Region.key, models.NUTS2Region.id)
    region_ids = self.client.df_query(query).set_index('key')['id'].to_dict()

    # load climate data and NUTS2 polygons
    data = next(self.requires()).load()
    nuts = NUTS2GeoJSONInputFile().load()

    # generated indicator IDs, keyed by season
    indicator_ids = dict()
    # climate data by season
    t_data = dict()

    # create new indicator objects for summer and winter, create averaged climate data
    for season, suffix in CLIMATE_SEASON_SUFFIXES.items():
        # noinspection PyUnresolvedReferences
        indicator = models.ClimateIndicator(description=self.description + suffix)
        self.session.add(indicator)
        # commit, to get indicator ID filled
        self.session.commit()
        indicator_ids[season] = indicator.id

        # select winter or summer data by month index, average over time range
        if season == 'summer':
            t_data[season] = np.ma.average(data['data'][3:9, :, :], axis=0)
        else:
            # winter wraps around the year boundary: average Jan-Mar and Oct-Dec
            # noinspection PyTypeChecker
            t_data[season] = np.ma.average(0.5 * (data['data'][0:3, :, :] + data['data'][9:12, :, :]), axis=0)

    # container for output objects, for bulk saving
    objects = []

    # start value for manual object id generation
    current_value_id = models.ClimateValue.get_max_id(self.session)

    # for each region, get a mask, average climate variable over the mask and store the indicator value;
    # loop over features first, then over seasons, because mask generation is expensive
    for feature in nuts:

        # draw region mask (doesn't matter for which season we take the map shape)
        mask = geojson_polygon_to_mask(feature=feature,
                                       shape=t_data['summer'].shape,
                                       lat_idx=data['lat_idx'],
                                       lon_idx=data['lon_idx'])

        # create indicator values for summer and winter
        for season in list(CLIMATE_SEASON_SUFFIXES.keys()):

            # weighted average from region mask
            value = np.ma.average(t_data[season], weights=mask)

            # region ID must be cast to int (DBs don't like numpy dtypes from pandas)
            region_id = region_ids.get(feature.properties['NUTS_ID'], None)
            if region_id is not None:
                region_id = int(region_id)

            # append an indicator value, manually generate object IDs for bulk saving
            current_value_id += 1
            objects.append(models.ClimateValue(id=current_value_id,
                                               value=value,
                                               region_id=region_id,
                                               indicator_id=indicator_ids[season]))

            # # print some debugging output
            # print self.variable_name + ' ' + season, feature.properties['NUTS_ID'], value

            # # generate some plots for debugging
            # from matplotlib import pyplot as plt
            # plt.subplot(211)
            # plt.imshow(0.02 * t_data + mask * t_data, interpolation='none')
            # plt.subplot(212)
            # plt.imshow(t_data, interpolation='none')
            # plt.savefig('/tmp/' + feature.properties['NUTS_ID'] + '.png')

    # bulk-save all objects
    self.session.bulk_save_objects(objects)
    self.session.commit()

    self.done()
11,613
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L751-L836
[ "def", "delete_vnet", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "name", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourceGroups/'", ",", "resource_group", ",", "'/providers/Microsoft.Network/virtualNetworks/'", ",", "name", ",", "'?api-version='", ",", "NETWORK_API", "]", ")", "return", "do_delete", "(", "endpoint", ",", "access_token", ")" ]
It is used to close TCP connection and unregister the Spin instance from untwisted reactor .
def lose(spin):
    """Close a TCP connection and unregister the Spin instance from the
    untwisted reactor.

    A failure while closing is reported through the CLOSE_ERR event; in
    every case the spin is destroyed and the LOST event is driven.
    """
    try:
        spin.close()
    except Exception as excpt:
        spin.drive(CLOSE_ERR, excpt.args[0])
    finally:
        spin.destroy()
        spin.drive(LOST)
11,614
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/iostd.py#L10-L27
[ "def", "mkdir", "(", "self", ",", "path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")" ]
Set up a TCP server and installs the basic handles Stdin Stdout in the clients .
def create_server(addr, port, backlog):
    """Set up a TCP server and install the basic Stdin/Stdout handles on
    each accepted client.

    Returns the listening Spin instance.
    """
    listener = Spin()
    listener.bind((addr, port))
    listener.listen(backlog)
    Server(listener)

    def on_accept(server, client):
        # give every new connection the standard read/write handles
        install_basic_handles(client)

    listener.add_map(ACCEPT, on_accept)
    return listener
11,615
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/iostd.py#L37-L57
[ "def", "load_image", "(", "name", ")", ":", "image", "=", "pyglet", ".", "image", ".", "load", "(", "name", ")", ".", "texture", "verify_dimensions", "(", "image", ")", "return", "image" ]
Set up a TCP client and installs the basic handles Stdin Stdout .
def create_client(addr, port):
    """Set up a TCP client and install the basic Stdin/Stdout handles once
    connected.

    Returns the client Spin instance.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    # Attempt the connection before wrapping the socket, otherwise an
    # unconnected spin instance would be left behind in the reactor.
    sock.connect_ex((addr, port))

    client = Spin(sock)
    Client(client)

    client.add_map(CONNECT, install_basic_handles)
    client.add_map(CONNECT_ERR, lambda con, err: lose(con))
    return client
11,616
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/iostd.py#L67-L88
[ "def", "evict", "(", "self", ",", "key", ")", ":", "check_not_none", "(", "key", ",", "\"key can't be None\"", ")", "key_data", "=", "self", ".", "_to_data", "(", "key", ")", "return", "self", ".", "_evict_internal", "(", "key_data", ")" ]
Execute the main bit of the application .
def main(argv=None):
    # type: (Union[NoneType, List[str]]) -> NoneType
    """Execute the main bit of the application: run it on argv and exit."""
    flake8_app = application.Application()
    flake8_app.run(argv)
    flake8_app.exit()
11,617
https://github.com/kataev/flake8-rst/blob/ca6d41c7a309b9e8cd4fa6f428b82db96b6a986f/flake8_rst/cli.py#L5-L17
[ "def", "drop_temporary", "(", "pr", ":", "PullRequestDetails", ",", "problem", ":", "Optional", "[", "CannotAutomergeError", "]", ",", "prev_seen_times", ":", "Dict", "[", "int", ",", "datetime", ".", "datetime", "]", ",", "next_seen_times", ":", "Dict", "[", "int", ",", "datetime", ".", "datetime", "]", ",", ")", "->", "Optional", "[", "CannotAutomergeError", "]", ":", "if", "problem", "is", "not", "None", "and", "problem", ".", "may_be_temporary", ":", "since", "=", "prev_seen_times", ".", "get", "(", "pr", ".", "pull_id", ",", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ")", "if", "is_recent_date", "(", "since", ")", ":", "next_seen_times", "[", "pr", ".", "pull_id", "]", "=", "since", "return", "None", "return", "problem" ]
Calculate Indigo fingerprint similarity
def fingerprint_similarity ( mol1 , mol2 ) : idmol1 = to_real_mol ( mol1 ) idmol2 = to_real_mol ( mol2 ) fp1 = idmol1 . fingerprint ( "sim" ) fp2 = idmol2 . fingerprint ( "sim" ) return round ( idg . similarity ( fp1 , fp2 , "tanimoto" ) , 2 )
11,618
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/indigo.py#L64-L71
[ "def", "remove_server", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "server", "=", "self", ".", "_get_server", "(", "server_id", ")", "# Delete any instances we recorded to be cleaned up", "if", "server_id", "in", "self", ".", "_owned_subscriptions", ":", "inst_list", "=", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_filters", ":", "inst_list", "=", "self", ".", "_owned_filters", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_filters", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_destinations", ":", "inst_list", "=", "self", ".", "_owned_destinations", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_destinations", "[", "server_id", "]", "# Remove server from this listener", "del", "self", ".", "_servers", "[", "server_id", "]" ]
a standardized method of turning a dev_model object into training and testing arrays
def devmodel_to_array ( model_name , train_fraction = 1 ) : model_outputs = - 6 + model_name . Data_summary . shape [ 0 ] devmodel = model_name rawdf = devmodel . Data rawdf = rawdf . sample ( frac = 1 ) datadf = rawdf . select_dtypes ( include = [ np . number ] ) data = np . array ( datadf ) n = data . shape [ 0 ] d = data . shape [ 1 ] d -= model_outputs n_train = int ( n * train_fraction ) # set fraction for training n_test = n - n_train X_train = np . zeros ( ( n_train , d ) ) # prepare train/test arrays X_test = np . zeros ( ( n_test , d ) ) Y_train = np . zeros ( ( n_train , model_outputs ) ) Y_test = np . zeros ( ( n_test , model_outputs ) ) X_train [ : ] = data [ : n_train , : - model_outputs ] Y_train [ : ] = ( data [ : n_train , - model_outputs : ] . astype ( float ) ) X_test [ : ] = data [ n_train : , : - model_outputs ] Y_test [ : ] = ( data [ n_train : , - model_outputs : ] . astype ( float ) ) return X_train , Y_train , X_test , Y_test
11,619
https://github.com/wesleybeckner/salty/blob/ef17a97aea3e4f81fcd0359ce85b3438c0e6499b/salty/core.py#L162-L207
[ "def", "KillOldFlows", "(", "self", ")", ":", "if", "not", "self", ".", "IsRunning", "(", ")", ":", "return", "False", "start_time", "=", "self", ".", "Get", "(", "self", ".", "Schema", ".", "LAST_RUN_TIME", ")", "lifetime", "=", "self", ".", "Get", "(", "self", ".", "Schema", ".", "CRON_ARGS", ")", ".", "lifetime", "elapsed", "=", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "-", "start_time", "if", "lifetime", "and", "elapsed", ">", "lifetime", ":", "self", ".", "StopCurrentRun", "(", ")", "stats_collector_instance", ".", "Get", "(", ")", ".", "IncrementCounter", "(", "\"cron_job_timeout\"", ",", "fields", "=", "[", "self", ".", "urn", ".", "Basename", "(", ")", "]", ")", "stats_collector_instance", ".", "Get", "(", ")", ".", "RecordEvent", "(", "\"cron_job_latency\"", ",", "elapsed", ".", "seconds", ",", "fields", "=", "[", "self", ".", "urn", ".", "Basename", "(", ")", "]", ")", "return", "True", "return", "False" ]
Apply function to each step object in the index
def dapply ( self , fn , pairwise = False , symmetric = True , diagonal = False , block = None , * * kwargs ) : search_keys = [ k for k , v in kwargs . items ( ) if isinstance ( v , list ) and len ( v ) > 1 ] functions = util . make_list ( fn ) search = list ( product ( functions , util . dict_product ( kwargs ) ) ) results = [ ] for fn , kw in search : if not pairwise : r = self . index . to_series ( ) . apply ( lambda step : fn ( step , * * kw ) ) else : r = apply_pairwise ( self , fn , symmetric = symmetric , diagonal = diagonal , block = block , * * kw ) name = [ ] if len ( functions ) == 1 else [ fn . __name__ ] name += util . dict_subset ( kw , search_keys ) . values ( ) if isinstance ( r , pd . DataFrame ) : columns = pd . MultiIndex . from_tuples ( [ tuple ( name + util . make_list ( c ) ) for c in r . columns ] ) r . columns = columns else : r . name = tuple ( name ) results . append ( r ) if len ( results ) > 1 : result = pd . concat ( results , axis = 1 ) # get subset of parameters that were searched over column_names = [ ] if len ( functions ) == 1 else [ None ] column_names += search_keys column_names += [ None ] * ( len ( result . columns . names ) - len ( column_names ) ) result . columns . names = column_names return StepFrame ( result ) else : result = results [ 0 ] if isinstance ( result , pd . DataFrame ) : return StepFrame ( result ) else : result . name = functions [ 0 ] . __name__ return StepSeries ( result )
11,620
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/exploration.py#L125-L177
[ "def", "kube_limitrange", "(", "self", ",", "metric", ",", "scraper_config", ")", ":", "# type's cardinality's low: https://github.com/kubernetes/kubernetes/blob/v1.6.1/pkg/api/v1/types.go#L3872-L3879", "# idem for resource: https://github.com/kubernetes/kubernetes/blob/v1.6.1/pkg/api/v1/types.go#L3342-L3352", "# idem for constraint: https://github.com/kubernetes/kubernetes/blob/v1.6.1/pkg/api/v1/types.go#L3882-L3901", "metric_base_name", "=", "scraper_config", "[", "'namespace'", "]", "+", "'.limitrange.{}.{}'", "constraints", "=", "{", "'min'", ":", "'min'", ",", "'max'", ":", "'max'", ",", "'default'", ":", "'default'", ",", "'defaultRequest'", ":", "'default_request'", ",", "'maxLimitRequestRatio'", ":", "'max_limit_request_ratio'", ",", "}", "if", "metric", ".", "type", "in", "METRIC_TYPES", ":", "for", "sample", "in", "metric", ".", "samples", ":", "constraint", "=", "sample", "[", "self", ".", "SAMPLE_LABELS", "]", ".", "get", "(", "\"constraint\"", ")", "if", "constraint", "in", "constraints", ":", "constraint", "=", "constraints", "[", "constraint", "]", "else", ":", "self", ".", "error", "(", "\"Constraint %s unsupported for metric %s\"", "%", "(", "constraint", ",", "metric", ".", "name", ")", ")", "continue", "resource", "=", "sample", "[", "self", ".", "SAMPLE_LABELS", "]", ".", "get", "(", "\"resource\"", ")", "tags", "=", "[", "self", ".", "_label_to_tag", "(", "\"namespace\"", ",", "sample", "[", "self", ".", "SAMPLE_LABELS", "]", ",", "scraper_config", ")", ",", "self", ".", "_label_to_tag", "(", "\"limitrange\"", ",", "sample", "[", "self", ".", "SAMPLE_LABELS", "]", ",", "scraper_config", ")", ",", "self", ".", "_label_to_tag", "(", "\"type\"", ",", "sample", "[", "self", ".", "SAMPLE_LABELS", "]", ",", "scraper_config", ",", "tag_name", "=", "\"consumer_type\"", ")", ",", "]", "+", "scraper_config", "[", "'custom_tags'", "]", "self", ".", "gauge", "(", "metric_base_name", ".", "format", "(", "resource", ",", "constraint", ")", ",", "sample", 
"[", "self", ".", "SAMPLE_VALUE", "]", ",", "tags", ")", "else", ":", "self", ".", "log", ".", "error", "(", "\"Metric type %s unsupported for metric %s\"", "%", "(", "metric", ".", "type", ",", "metric", ".", "name", ")", ")" ]
A helper function for determining all of the branches in the tree . This should be called after the tree has been fully constructed and its nodes and edges are populated .
def _identifyBranches ( self ) : if self . debug : sys . stdout . write ( "Identifying branches: " ) start = time . clock ( ) seen = set ( ) self . branches = set ( ) # Find all of the branching nodes in the tree, degree > 1 # That is, they appear in more than one edge for e1 , e2 in self . edges : if e1 not in seen : seen . add ( e1 ) else : self . branches . add ( e1 ) if e2 not in seen : seen . add ( e2 ) else : self . branches . add ( e2 ) if self . debug : end = time . clock ( ) sys . stdout . write ( "%f s\n" % ( end - start ) )
11,621
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L144-L172
[ "def", "_openResources", "(", "self", ")", ":", "try", ":", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "True", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "warning", "(", "ex", ")", "logger", ".", "warning", "(", "\"Unable to read wav with memmory mapping. Trying without now.\"", ")", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "False", ")", "self", ".", "_array", "=", "data", "self", ".", "attributes", "[", "'rate'", "]", "=", "rate" ]
A helper function for determining the condensed representation of the tree . That is one that does not hold all of the internal nodes of the graph . The results will be stored in ContourTree . superNodes and ContourTree . superArcs . These two can be used to potentially speed up queries by limiting the searching on the graph to only nodes on these super arcs .
def _identifySuperGraph ( self ) : if self . debug : sys . stdout . write ( "Condensing Graph: " ) start = time . clock ( ) G = nx . DiGraph ( ) G . add_edges_from ( self . edges ) if self . short_circuit : self . superNodes = G . nodes ( ) self . superArcs = G . edges ( ) # There should be a way to populate this from the data we # have... return self . augmentedEdges = { } N = len ( self . Y ) processed = np . zeros ( N ) for node in range ( N ) : # We can short circuit this here, since some of the nodes # will be handled within the while loops below. if processed [ node ] : continue # Loop through each internal node (see if below for # determining what is internal), trace up and down to a # node's first non-internal node in either direction # removing all of the internal nodes and pushing them into a # list. This list (removedNodes) will be put into a # dictionary keyed on the endpoints of the final super arc. if G . in_degree ( node ) == 1 and G . out_degree ( node ) == 1 : # The sorted list of nodes that will be condensed by # this super arc removedNodes = [ ] # Trace down to a non-internal node lower_link = list ( G . in_edges ( node ) ) [ 0 ] [ 0 ] while ( G . in_degree ( lower_link ) == 1 and G . out_degree ( lower_link ) == 1 ) : new_lower_link = list ( G . in_edges ( lower_link ) ) [ 0 ] [ 0 ] G . add_edge ( new_lower_link , node ) G . remove_node ( lower_link ) removedNodes . append ( lower_link ) lower_link = new_lower_link removedNodes . reverse ( ) removedNodes . append ( node ) # Trace up to a non-internal node upper_link = list ( G . out_edges ( node ) ) [ 0 ] [ 1 ] while ( G . in_degree ( upper_link ) == 1 and G . out_degree ( upper_link ) == 1 ) : new_upper_link = list ( G . out_edges ( upper_link ) ) [ 0 ] [ 1 ] G . add_edge ( node , new_upper_link ) G . remove_node ( upper_link ) removedNodes . append ( upper_link ) upper_link = new_upper_link G . add_edge ( lower_link , upper_link ) G . remove_node ( node ) self . 
augmentedEdges [ ( lower_link , upper_link ) ] = removedNodes # This is to help speed up the process by skipping nodes # we have already condensed, and to prevent us from not # being able to find nodes that have already been # removed. processed [ removedNodes ] = 1 self . superNodes = G . nodes ( ) self . superArcs = G . edges ( ) if self . debug : end = time . clock ( ) sys . stdout . write ( "%f s\n" % ( end - start ) )
11,622
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L174-L263
[ "def", "set_offset_and_sequence_number", "(", "self", ",", "event_data", ")", ":", "if", "not", "event_data", ":", "raise", "Exception", "(", "event_data", ")", "self", ".", "offset", "=", "event_data", ".", "offset", ".", "value", "self", ".", "sequence_number", "=", "event_data", ".", "sequence_number" ]
Returns a list of seed points for isosurface extraction given a threshold value
def get_seeds ( self , threshold ) : seeds = [ ] for e1 , e2 in self . superArcs : # Because we did some extra work in _process_tree, we can # safely assume e1 is lower than e2 if self . Y [ e1 ] <= threshold <= self . Y [ e2 ] : if ( e1 , e2 ) in self . augmentedEdges : # These should be sorted edgeList = self . augmentedEdges [ ( e1 , e2 ) ] elif ( e2 , e1 ) in self . augmentedEdges : e1 , e2 = e2 , e1 # These should be reverse sorted edgeList = list ( reversed ( self . augmentedEdges [ ( e1 , e2 ) ] ) ) else : continue startNode = e1 for endNode in edgeList + [ e2 ] : if self . Y [ endNode ] >= threshold : # Stop when you find the first point above the # threshold break startNode = endNode seeds . append ( startNode ) seeds . append ( endNode ) return seeds
11,623
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L265-L296
[ "def", "channel_view", "(", "x", ":", "Tensor", ")", "->", "Tensor", ":", "return", "x", ".", "transpose", "(", "0", ",", "1", ")", ".", "contiguous", "(", ")", ".", "view", "(", "x", ".", "shape", "[", "1", "]", ",", "-", "1", ")" ]
A function for creating networkx instances that can be used more efficiently for graph manipulation than the MergeTree class .
def _construct_nx_tree ( self , thisTree , thatTree = None ) : if self . debug : sys . stdout . write ( "Networkx Tree construction: " ) start = time . clock ( ) nxTree = nx . DiGraph ( ) nxTree . add_edges_from ( thisTree . edges ) nodesOfThatTree = [ ] if thatTree is not None : nodesOfThatTree = thatTree . nodes . keys ( ) # Fully or partially augment the join tree for ( superNode , _ ) , nodes in thisTree . augmentedEdges . items ( ) : superNodeEdge = list ( nxTree . out_edges ( superNode ) ) if len ( superNodeEdge ) > 1 : warnings . warn ( "The supernode {} should have only a single " "emanating edge. Merge tree is invalidly " "structured" . format ( superNode ) ) endNode = superNodeEdge [ 0 ] [ 1 ] startNode = superNode nxTree . remove_edge ( startNode , endNode ) for node in nodes : if thatTree is None or node in nodesOfThatTree : nxTree . add_edge ( startNode , node ) startNode = node # Make sure this is not the root node trying to connect to # itself if startNode != endNode : nxTree . add_edge ( startNode , endNode ) if self . debug : end = time . clock ( ) sys . stdout . write ( "%f s\n" % ( end - start ) ) return nxTree
11,624
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L298-L348
[ "def", "seek_to_frame", "(", "self", ",", "index", ")", ":", "pointer_position", "=", "self", ".", "frame_positions", "[", "index", "]", "self", ".", "blob_file", ".", "seek", "(", "pointer_position", ",", "0", ")" ]
A function that will process either a split or join tree with reference to the other tree and store it as part of this CT instance .
def _process_tree ( self , thisTree , thatTree ) : if self . debug : sys . stdout . write ( "Processing Tree: " ) start = time . clock ( ) # Get all of the leaf nodes that are not branches in the other # tree if len ( thisTree . nodes ( ) ) > 1 : leaves = set ( [ v for v in thisTree . nodes ( ) if thisTree . in_degree ( v ) == 0 and thatTree . in_degree ( v ) < 2 ] ) else : leaves = set ( ) while len ( leaves ) > 0 : v = leaves . pop ( ) # if self.debug: # sys.stdout.write('\tProcessing {} -> {}\n' # .format(v, thisTree.edges(v)[0][1])) # Take the leaf and edge out of the input tree and place it # on the CT edges = list ( thisTree . out_edges ( v ) ) if len ( edges ) != 1 : warnings . warn ( "The node {} should have a single emanating " "edge.\n" . format ( v ) ) e1 = edges [ 0 ] [ 0 ] e2 = edges [ 0 ] [ 1 ] # This may be a bit beside the point, but if we want all of # our edges pointing 'up,' we can verify that the edges we # add have the lower vertex pointing to the upper vertex. # This is useful only for nicely plotting with some graph # tools (graphviz/networkx), and I guess for consistency # sake. if self . Y [ e1 ] < self . Y [ e2 ] : self . edges . append ( ( e1 , e2 ) ) else : self . edges . append ( ( e2 , e1 ) ) # Removing the node will remove its constituent edges from # thisTree thisTree . remove_node ( v ) # This is the root node of the other tree if thatTree . out_degree ( v ) == 0 : thatTree . remove_node ( v ) # if self.debug: # sys.stdout.write('\t\tRemoving root {} from other tree\n' # .format(v)) # This is a "regular" node in the other tree, suppress it # there, but be sure to glue the upper and lower portions # together else : # The other ends of the node being removed are added to # "that" tree if len ( thatTree . in_edges ( v ) ) > 0 : startNode = list ( thatTree . 
in_edges ( v ) ) [ 0 ] [ 0 ] else : # This means we are at the root of the other tree, # we can safely remove this node without connecting # its predecessor with its descendant startNode = None if len ( thatTree . out_edges ( v ) ) > 0 : endNode = list ( thatTree . out_edges ( v ) ) [ 0 ] [ 1 ] else : # This means we are at a leaf of the other tree, # we can safely remove this node without connecting # its predecessor with its descendant endNode = None if startNode is not None and endNode is not None : thatTree . add_edge ( startNode , endNode ) thatTree . remove_node ( v ) # if self.debug: # sys.stdout.write('\t\tSuppressing {} in other tree and ' # 'gluing {} to {}\n' # .format(v, startNode, endNode)) if len ( thisTree . nodes ( ) ) > 1 : leaves = set ( [ v for v in thisTree . nodes ( ) if thisTree . in_degree ( v ) == 0 and thatTree . in_degree ( v ) < 2 ] ) else : leaves = set ( ) # if self.debug: # myMessage = '\t\tValid leaves: ' # sep = '' # for leaf in leaves: # myMessage += sep + str(leaf) # sep = ',' # sys.stdout.write(myMessage+'\n') if self . debug : end = time . clock ( ) sys . stdout . write ( "%f s\n" % ( end - start ) )
11,625
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L350-L472
[ "def", "_get", "(", "url", ":", "str", ",", "headers", ":", "dict", ")", "->", "dict", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "data", "=", "response", ".", "json", "(", ")", "if", "response", ".", "status_code", "!=", "200", ":", "raise", "GoogleApiError", "(", "{", "\"status_code\"", ":", "response", ".", "status_code", ",", "\"error\"", ":", "data", ".", "get", "(", "\"error\"", ",", "\"\"", ")", "}", ")", "return", "data" ]
Obtain the current branch name from the Git repository . If on Travis CI use the TRAVIS_BRANCH environment variable .
def read_git_branch ( ) : if os . getenv ( 'TRAVIS' ) : return os . getenv ( 'TRAVIS_BRANCH' ) else : try : repo = git . repo . base . Repo ( search_parent_directories = True ) return repo . active_branch . name except Exception : return ''
11,626
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L17-L28
[ "def", "_convert_strls", "(", "self", ",", "data", ")", ":", "convert_cols", "=", "[", "col", "for", "i", ",", "col", "in", "enumerate", "(", "data", ")", "if", "self", ".", "typlist", "[", "i", "]", "==", "32768", "or", "col", "in", "self", ".", "_convert_strl", "]", "if", "convert_cols", ":", "ssw", "=", "StataStrLWriter", "(", "data", ",", "convert_cols", ")", "tab", ",", "new_data", "=", "ssw", ".", "generate_table", "(", ")", "data", "=", "new_data", "self", ".", "_strl_blob", "=", "ssw", ".", "generate_blob", "(", "tab", ")", "return", "data" ]
Obtain the timestamp from the current head commit of a Git repository .
def read_git_commit_timestamp ( repo_path = None ) : repo = git . repo . base . Repo ( path = repo_path , search_parent_directories = True ) head_commit = repo . head . commit return head_commit . committed_datetime
11,627
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L31-L47
[ "def", "_init_libcrypto", "(", ")", ":", "libcrypto", "=", "_load_libcrypto", "(", ")", "try", ":", "libcrypto", ".", "OPENSSL_init_crypto", "(", ")", "except", "AttributeError", ":", "# Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)", "libcrypto", ".", "OPENSSL_no_config", "(", ")", "libcrypto", ".", "OPENSSL_add_all_algorithms_noconf", "(", ")", "libcrypto", ".", "RSA_new", ".", "argtypes", "=", "(", ")", "libcrypto", ".", "RSA_new", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "RSA_size", ".", "argtype", "=", "(", "c_void_p", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "argtypes", "=", "(", "c_char_p", ",", "c_int", ")", "libcrypto", ".", "BIO_new_mem_buf", ".", "restype", "=", "c_void_p", "libcrypto", ".", "BIO_free", ".", "argtypes", "=", "(", "c_void_p", ",", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSAPrivateKey", ".", "restype", "=", "c_void_p", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "argtypes", "=", "(", "c_void_p", ",", "c_void_p", ",", "c_void_p", ",", "c_void_p", ")", "libcrypto", ".", "PEM_read_bio_RSA_PUBKEY", ".", "restype", "=", "c_void_p", "libcrypto", ".", "RSA_private_encrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "libcrypto", ".", "RSA_public_decrypt", ".", "argtypes", "=", "(", "c_int", ",", "c_char_p", ",", "c_char_p", ",", "c_void_p", ",", "c_int", ")", "return", "libcrypto" ]
Obtain the timestamp for the most recent commit to a given file in a Git repository .
def read_git_commit_timestamp_for_file ( filepath , repo_path = None ) : repo = git . repo . base . Repo ( path = repo_path , search_parent_directories = True ) head_commit = repo . head . commit # most recent commit datetime of the given file for commit in head_commit . iter_parents ( filepath ) : return commit . committed_datetime # Only get here if git could not find the file path in the history raise IOError ( 'File {} not found' . format ( filepath ) )
11,628
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L50-L80
[ "def", "_decayPoolingActivation", "(", "self", ")", ":", "if", "self", ".", "_decayFunctionType", "==", "'NoDecay'", ":", "self", ".", "_poolingActivation", "=", "self", ".", "_decayFunction", ".", "decay", "(", "self", ".", "_poolingActivation", ")", "elif", "self", ".", "_decayFunctionType", "==", "'Exponential'", ":", "self", ".", "_poolingActivation", "=", "self", ".", "_decayFunction", ".", "decay", "(", "self", ".", "_poolingActivationInitLevel", ",", "self", ".", "_poolingTimer", ")", "return", "self", ".", "_poolingActivation" ]
Get relative filepaths of files in a directory and sub - directories with the given extension .
def get_filepaths_with_extension ( extname , root_dir = '.' ) : # needed for comparison with os.path.splitext if not extname . startswith ( '.' ) : extname = '.' + extname # for case-insensitivity extname = extname . lower ( ) root_dir = os . path . abspath ( root_dir ) selected_filenames = [ ] for dirname , sub_dirnames , filenames in os . walk ( root_dir ) : for filename in filenames : if os . path . splitext ( filename ) [ - 1 ] . lower ( ) == extname : full_filename = os . path . join ( dirname , filename ) selected_filenames . append ( os . path . relpath ( full_filename , start = root_dir ) ) return selected_filenames
11,629
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L83-L116
[ "def", "retry_on_bad_auth", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "retry_version", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "while", "True", ":", "try", ":", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "trolly", ".", "ResourceUnavailable", ":", "sys", ".", "stderr", ".", "write", "(", "'bad request (refresh board id)\\n'", ")", "self", ".", "_board_id", "=", "None", "self", ".", "save_key", "(", "'board_id'", ",", "None", ")", "except", "trolly", ".", "Unauthorised", ":", "sys", ".", "stderr", ".", "write", "(", "'bad permissions (refresh token)\\n'", ")", "self", ".", "_client", "=", "None", "self", ".", "_token", "=", "None", "self", ".", "save_key", "(", "'token'", ",", "None", ")", "return", "retry_version" ]
Get the datetime for the most recent commit to a project that affected Sphinx content .
def get_project_content_commit_date ( root_dir = '.' , exclusions = None ) : logger = logging . getLogger ( __name__ ) # Supported 'content' extensions extensions = ( 'rst' , 'ipynb' , 'png' , 'jpeg' , 'jpg' , 'svg' , 'gif' ) content_paths = [ ] for extname in extensions : content_paths += get_filepaths_with_extension ( extname , root_dir = root_dir ) # Known files that should be excluded; lower case for comparison exclude = Matcher ( exclusions if exclusions else [ 'readme.rst' , 'license.rst' ] ) # filter out excluded files content_paths = [ p for p in content_paths if not ( exclude ( p ) or exclude ( p . split ( os . path . sep ) [ 0 ] ) ) ] logger . debug ( 'Found content paths: {}' . format ( ', ' . join ( content_paths ) ) ) if not content_paths : raise RuntimeError ( 'No content files found in {}' . format ( root_dir ) ) commit_datetimes = [ ] for filepath in content_paths : try : datetime = read_git_commit_timestamp_for_file ( filepath , repo_path = root_dir ) commit_datetimes . append ( datetime ) except IOError : logger . warning ( 'Could not get commit for {}, skipping' . format ( filepath ) ) if not commit_datetimes : raise RuntimeError ( 'No content commits could be found' ) latest_datetime = max ( commit_datetimes ) return latest_datetime
11,630
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L119-L192
[ "def", "delete_group", "(", "self", ",", "group_id", ",", "force", "=", "False", ")", ":", "params", "=", "{", "'force'", ":", "force", "}", "response", "=", "self", ".", "_do_request", "(", "'DELETE'", ",", "'/v2/groups/{group_id}'", ".", "format", "(", "group_id", "=", "group_id", ")", ",", "params", "=", "params", ")", "return", "response", ".", "json", "(", ")" ]
Form the LSST the Docs edition name for this branch using the same logic as LTD Keeper does for transforming branch names into edition names .
def form_ltd_edition_name ( git_ref_name = None ) : if git_ref_name is None : name = read_git_branch ( ) else : name = git_ref_name # First, try to use the JIRA ticket number m = TICKET_BRANCH_PATTERN . match ( name ) if m is not None : return m . group ( 1 ) # Or use a tagged version m = TAG_PATTERN . match ( name ) if m is not None : return name if name == 'master' : # using this terminology for LTD Dasher name = 'Current' # Otherwise, reproduce the LTD slug name = name . replace ( '/' , '-' ) name = name . replace ( '_' , '-' ) name = name . replace ( '.' , '-' ) return name
11,631
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L195-L235
[ "def", "imsave", "(", "filename", ",", "data", ",", "maxval", "=", "None", ",", "pam", "=", "False", ")", ":", "try", ":", "netpbm", "=", "NetpbmFile", "(", "data", ",", "maxval", "=", "maxval", ")", "netpbm", ".", "write", "(", "filename", ",", "pam", "=", "pam", ")", "finally", ":", "netpbm", ".", "close", "(", ")" ]
Iterates over the worksheets in the book and sets the active worksheet as the current one before yielding .
def itersheets ( self ) : for ws in self . worksheets : # Expression with no explicit table specified will use None # when calling get_table, which should return the current worksheet/table prev_ws = self . active_worksheet self . active_worksheet = ws try : yield ws finally : self . active_worksheet = prev_ws
11,632
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/workbook.py#L47-L60
[ "def", "_strip_ctype", "(", "name", ",", "ctype", ",", "protocol", "=", "2", ")", ":", "# parse channel type from name (e.g. 'L1:GDS-CALIB_STRAIN,reduced')", "try", ":", "name", ",", "ctypestr", "=", "name", ".", "rsplit", "(", "','", ",", "1", ")", "except", "ValueError", ":", "pass", "else", ":", "ctype", "=", "Nds2ChannelType", ".", "find", "(", "ctypestr", ")", ".", "value", "# NDS1 stores channels with trend suffix, so we put it back:", "if", "protocol", "==", "1", "and", "ctype", "in", "(", "Nds2ChannelType", ".", "STREND", ".", "value", ",", "Nds2ChannelType", ".", "MTREND", ".", "value", ")", ":", "name", "+=", "',{0}'", ".", "format", "(", "ctypestr", ")", "return", "name", ",", "ctype" ]
Write workbook to a . xlsx file using xlsxwriter . Return a xlsxwriter . workbook . Workbook .
def to_xlsx ( self , * * kwargs ) : from xlsxwriter . workbook import Workbook as _Workbook self . workbook_obj = _Workbook ( * * kwargs ) self . workbook_obj . set_calc_mode ( self . calc_mode ) for worksheet in self . itersheets ( ) : worksheet . to_xlsx ( workbook = self ) self . workbook_obj . filename = self . filename if self . filename : self . workbook_obj . close ( ) return self . workbook_obj
11,633
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/workbook.py#L62-L80
[ "def", "get_possible_app_ids", "(", ")", ":", "try", ":", "req", "=", "requests", ".", "get", "(", "\"https://clients3.google.com/cast/chromecast/device/baseconfig\"", ")", "data", "=", "json", ".", "loads", "(", "req", ".", "text", "[", "4", ":", "]", ")", "return", "[", "app", "[", "'app_id'", "]", "for", "app", "in", "data", "[", "'applications'", "]", "]", "+", "data", "[", "\"enabled_app_ids\"", "]", "except", "ValueError", ":", "# If json fails to parse", "return", "[", "]" ]
Return a table worksheet pair for the named table
def get_table ( self , name ) : if name is None : assert self . active_table , "Can't get table without name unless an active table is set" name = self . active_table . name if self . active_worksheet : table = self . active_worksheet . get_table ( name ) assert table is self . active_table , "Active table is not from the active sheet" return table , self . active_worksheet for ws in self . worksheets : try : table = ws . get_table ( name ) if table is self . active_table : return table , ws except KeyError : pass raise RuntimeError ( "Active table not found in any sheet" ) # if the tablename explicitly uses the sheetname find the right sheet if "!" in name : ws_name , table_name = map ( lambda x : x . strip ( "'" ) , name . split ( "!" , 1 ) ) for ws in self . worksheets : if ws . name == ws_name : table = ws . get_table ( table_name ) return table , ws raise KeyError ( name ) # otherwise look in the current table if self . active_worksheet : table = self . active_worksheet . get_table ( name ) return table , self . active_worksheet # or fallback to the first matching name in any table for ws in self . worksheets : try : table = ws . get_table ( name ) return table , ws except KeyError : pass raise KeyError ( name )
11,634
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/workbook.py#L141-L186
[ "def", "beacon", "(", "config", ")", ":", "parts", "=", "psutil", ".", "disk_partitions", "(", "all", "=", "True", ")", "ret", "=", "[", "]", "for", "mounts", "in", "config", ":", "mount", "=", "next", "(", "iter", "(", "mounts", ")", ")", "# Because we're using regular expressions", "# if our mount doesn't end with a $, insert one.", "mount_re", "=", "mount", "if", "not", "mount", ".", "endswith", "(", "'$'", ")", ":", "mount_re", "=", "'{0}$'", ".", "format", "(", "mount", ")", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "# mount_re comes in formatted with a $ at the end", "# can be `C:\\\\$` or `C:\\\\\\\\$`", "# re string must be like `C:\\\\\\\\` regardless of \\\\ or \\\\\\\\", "# also, psutil returns uppercase", "mount_re", "=", "re", ".", "sub", "(", "r':\\\\\\$'", ",", "r':\\\\\\\\'", ",", "mount_re", ")", "mount_re", "=", "re", ".", "sub", "(", "r':\\\\\\\\\\$'", ",", "r':\\\\\\\\'", ",", "mount_re", ")", "mount_re", "=", "mount_re", ".", "upper", "(", ")", "for", "part", "in", "parts", ":", "if", "re", ".", "match", "(", "mount_re", ",", "part", ".", "mountpoint", ")", ":", "_mount", "=", "part", ".", "mountpoint", "try", ":", "_current_usage", "=", "psutil", ".", "disk_usage", "(", "_mount", ")", "except", "OSError", ":", "log", ".", "warning", "(", "'%s is not a valid mount point.'", ",", "_mount", ")", "continue", "current_usage", "=", "_current_usage", ".", "percent", "monitor_usage", "=", "mounts", "[", "mount", "]", "if", "'%'", "in", "monitor_usage", ":", "monitor_usage", "=", "re", ".", "sub", "(", "'%'", ",", "''", ",", "monitor_usage", ")", "monitor_usage", "=", "float", "(", "monitor_usage", ")", "if", "current_usage", ">=", "monitor_usage", ":", "ret", ".", "append", "(", "{", "'diskusage'", ":", "current_usage", ",", "'mount'", ":", "_mount", "}", ")", "return", "ret" ]
Send a message to the socket
def send_message ( self , output ) : file_system_event = None if self . my_action_input : file_system_event = self . my_action_input . file_system_event or None output_action = ActionInput ( file_system_event , output , self . name , "*" ) Global . MESSAGE_DISPATCHER . send_message ( output_action )
11,635
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/Action.py#L107-L121
[ "def", "generate_citation_counter", "(", "self", ")", ":", "cite_counter", "=", "dict", "(", ")", "filename", "=", "'%s.aux'", "%", "self", ".", "project_name", "with", "open", "(", "filename", ")", "as", "fobj", ":", "main_aux", "=", "fobj", ".", "read", "(", ")", "cite_counter", "[", "filename", "]", "=", "_count_citations", "(", "filename", ")", "for", "match", "in", "re", ".", "finditer", "(", "r'\\\\@input\\{(.*.aux)\\}'", ",", "main_aux", ")", ":", "filename", "=", "match", ".", "groups", "(", ")", "[", "0", "]", "try", ":", "counter", "=", "_count_citations", "(", "filename", ")", "except", "IOError", ":", "pass", "else", ":", "cite_counter", "[", "filename", "]", "=", "counter", "return", "cite_counter" ]
Stop the current action
def stop ( self ) : Global . LOGGER . debug ( f"action {self.name} stopped" ) self . is_running = False self . on_stop ( )
11,636
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/Action.py#L123-L127
[ "def", "close", "(", "self", ")", ":", "if", "not", "(", "yield", "from", "super", "(", ")", ".", "close", "(", ")", ")", ":", "return", "False", "for", "nio", "in", "self", ".", "_nios", ".", "values", "(", ")", ":", "if", "nio", "and", "isinstance", "(", "nio", ",", "NIOUDP", ")", ":", "self", ".", "manager", ".", "port_manager", ".", "release_udp_port", "(", "nio", ".", "lport", ",", "self", ".", "_project", ")", "yield", "from", "self", ".", "_stop_ubridge", "(", ")", "log", ".", "info", "(", "'Cloud \"{name}\" [{id}] has been closed'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "id", "=", "self", ".", "_id", ")", ")" ]
Start the action
def run ( self ) : Global . LOGGER . debug ( f"action {self.name} is running" ) for tmp_monitored_input in self . monitored_input : sender = "*" + tmp_monitored_input + "*" Global . LOGGER . debug ( f"action {self.name} is monitoring {sender}" ) while self . is_running : try : time . sleep ( Global . CONFIG_MANAGER . sleep_interval ) self . on_cycle ( ) except Exception as exc : Global . LOGGER . error ( f"error while running the action {self.name}: {str(exc)}" )
11,637
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/Action.py#L129-L145
[ "def", "untrack", "(", "context", ",", "file_names", ")", ":", "context", ".", "obj", ".", "find_repo_type", "(", ")", "for", "fn", "in", "file_names", ":", "if", "context", ".", "obj", ".", "vc_name", "==", "'git'", ":", "context", ".", "obj", ".", "call", "(", "[", "'git'", ",", "'rm'", ",", "'--cached'", ",", "fn", "]", ")", "elif", "context", ".", "obj", ".", "vc_name", "==", "'hg'", ":", "context", ".", "obj", ".", "call", "(", "[", "'hg'", ",", "'forget'", ",", "fn", "]", ")" ]
Factory method to create an instance of an Action from an input code
def create_action_for_code ( cls , action_code , name , configuration , managed_input ) : Global . LOGGER . debug ( f"creating action {name} for code {action_code}" ) Global . LOGGER . debug ( f"configuration length: {len(configuration)}" ) Global . LOGGER . debug ( f"input: {managed_input}" ) # get the actions catalog my_actions_file = Action . search_actions ( ) # load custom actions to find the right one for filename in my_actions_file : module_name = os . path . basename ( os . path . normpath ( filename ) ) [ : - 3 ] # garbage collect all the modules you load if they are not necessary context = { } Action . load_module ( module_name , filename ) for subclass in Action . __subclasses__ ( ) : if subclass . type == action_code : action_class = subclass action = action_class ( name , configuration , managed_input ) return action subclass = None gc . collect ( )
11,638
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/Action.py#L214-L238
[ "def", "read_footer", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "file_obj", ":", "if", "not", "_check_header_magic_bytes", "(", "file_obj", ")", "or", "not", "_check_footer_magic_bytes", "(", "file_obj", ")", ":", "raise", "ParquetFormatException", "(", "\"{0} is not a valid parquet file \"", "\"(missing magic bytes)\"", ".", "format", "(", "filename", ")", ")", "return", "_read_footer", "(", "file_obj", ")" ]
Extracts a LinkableClass from a jar .
def extract_class ( jar , name ) : with jar . open ( name ) as entry : return LinkableClass ( javatools . unpack_class ( entry ) )
11,639
https://github.com/bluekeyes/sphinx-javalink/blob/490e37506efa53e95ad88a665e347536e75b6254/javalink/loader.py#L10-L22
[ "def", "_create_storage_directories", "(", ")", ":", "# Create configuration directory", "if", "not", "os", ".", "path", ".", "exists", "(", "common", ".", "CONFIG_DIR", ")", ":", "os", ".", "makedirs", "(", "common", ".", "CONFIG_DIR", ")", "# Create data directory (for log file)", "if", "not", "os", ".", "path", ".", "exists", "(", "common", ".", "DATA_DIR", ")", ":", "os", ".", "makedirs", "(", "common", ".", "DATA_DIR", ")", "# Create run directory (for lock file)", "if", "not", "os", ".", "path", ".", "exists", "(", "common", ".", "RUN_DIR", ")", ":", "os", ".", "makedirs", "(", "common", ".", "RUN_DIR", ")" ]
Format a section node containg a summary of a Task class s key APIs .
def _format_summary_node ( self , task_class ) : modulename = task_class . __module__ classname = task_class . __name__ nodes = [ ] nodes . append ( self . _format_class_nodes ( task_class ) ) nodes . append ( self . _format_config_nodes ( modulename , classname ) ) methods = ( 'run' , 'runDataRef' ) for method in methods : if hasattr ( task_class , method ) : method_obj = getattr ( task_class , method ) nodes . append ( self . _format_method_nodes ( method_obj , modulename , classname ) ) return nodes
11,640
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L62-L84
[ "def", "match", "(", "self", ",", "filename", ",", "line", ",", "codes", ")", ":", "if", "self", ".", "regex_match_any", "(", "line", ",", "codes", ")", ":", "if", "self", ".", "_vary_codes", ":", "self", ".", "codes", "=", "tuple", "(", "[", "codes", "[", "-", "1", "]", "]", ")", "return", "True" ]
Create a desc node summarizing the class docstring .
def _format_class_nodes ( self , task_class ) : # Patterned after PyObject.handle_signature in Sphinx. # https://github.com/sphinx-doc/sphinx/blob/3e57ea0a5253ac198c1bff16c40abe71951bb586/sphinx/domains/python.py#L246 modulename = task_class . __module__ classname = task_class . __name__ fullname = '.' . join ( ( modulename , classname ) ) # The signature term signature = Signature ( task_class , bound_method = False ) desc_sig_node = self . _format_signature ( signature , modulename , classname , fullname , 'py:class' ) # The content is the one-sentence summary. content_node = desc_content ( ) content_node += self . _create_doc_summary ( task_class , fullname , 'py:class' ) desc_node = desc ( ) desc_node [ 'noindex' ] = True desc_node [ 'domain' ] = 'py' desc_node [ 'objtype' ] = 'class' desc_node += desc_sig_node desc_node += content_node return desc_node
11,641
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L86-L112
[ "def", "_write_vmx_file", "(", "self", ")", ":", "try", ":", "self", ".", "manager", ".", "write_vmx_file", "(", "self", ".", "_vmx_path", ",", "self", ".", "_vmx_pairs", ")", "except", "OSError", "as", "e", ":", "raise", "VMwareError", "(", "'Could not write VMware VMX file \"{}\": {}'", ".", "format", "(", "self", ".", "_vmx_path", ",", "e", ")", ")" ]
Create a desc node summarizing a method docstring .
def _format_method_nodes ( self , task_method , modulename , classname ) : methodname = task_method . __name__ fullname = '.' . join ( ( modulename , classname , methodname ) ) # The signature term signature = Signature ( task_method , bound_method = True ) desc_sig_node = self . _format_signature ( signature , modulename , classname , fullname , 'py:meth' ) # The content is the one-sentence summary. content_node = desc_content ( ) content_node += self . _create_doc_summary ( task_method , fullname , 'py:meth' ) desc_node = desc ( ) desc_node [ 'noindex' ] = True desc_node [ 'domain' ] = 'py' desc_node [ 'objtype' ] = 'method' desc_node += desc_sig_node desc_node += content_node return desc_node
11,642
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L114-L136
[ "def", "ideal_gas", "(", "target", ",", "pressure", "=", "'pore.pressure'", ",", "temperature", "=", "'pore.temperature'", ")", ":", "R", "=", "8.31447", "P", "=", "target", "[", "pressure", "]", "T", "=", "target", "[", "temperature", "]", "value", "=", "P", "/", "(", "R", "*", "T", ")", "return", "value" ]
Create a paragraph containing the object s one - sentence docstring summary with a link to further documentation .
def _create_doc_summary ( self , obj , fullname , refrole ) : summary_text = extract_docstring_summary ( get_docstring ( obj ) ) summary_text = summary_text . strip ( ) # Strip the last "." because the linked ellipses take its place if summary_text . endswith ( '.' ) : summary_text = summary_text . rstrip ( '.' ) content_node_p = nodes . paragraph ( text = summary_text ) content_node_p += self . _create_api_details_link ( fullname , refrole ) return content_node_p
11,643
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L175-L189
[ "def", "_write_value_failed", "(", "self", ",", "dbus_error", ")", ":", "error", "=", "_error_from_dbus_error", "(", "dbus_error", ")", "self", ".", "service", ".", "device", ".", "characteristic_write_value_failed", "(", "characteristic", "=", "self", ",", "error", "=", "error", ")" ]
Appends a link to the API docs labelled as ... that is appended to the content paragraph of an API description .
def _create_api_details_link ( self , fullname , refrole ) : ref_text = '... <{}>' . format ( fullname ) xref = PyXRefRole ( ) xref_nodes , _ = xref ( refrole , ref_text , ref_text , self . lineno , self . state . inliner ) return xref_nodes
11,644
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L191-L204
[ "def", "setGroups", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "requests", "=", "0", "groups", "=", "[", "]", "try", ":", "for", "gk", "in", "self", "[", "'groupKeys'", "]", ":", "try", ":", "g", "=", "self", ".", "mambugroupclass", "(", "entid", "=", "gk", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "AttributeError", "as", "ae", ":", "from", ".", "mambugroup", "import", "MambuGroup", "self", ".", "mambugroupclass", "=", "MambuGroup", "g", "=", "self", ".", "mambugroupclass", "(", "entid", "=", "gk", ",", "*", "args", ",", "*", "*", "kwargs", ")", "requests", "+=", "1", "groups", ".", "append", "(", "g", ")", "except", "KeyError", ":", "pass", "self", "[", "'groups'", "]", "=", "groups", "return", "requests" ]
Create a desc node summarizing the config attribute
def _format_config_nodes ( self , modulename , classname ) : fullname = '{0}.{1}.config' . format ( modulename , classname ) # The signature term desc_sig_node = desc_signature ( ) desc_sig_node [ 'module' ] = modulename desc_sig_node [ 'class' ] = classname desc_sig_node [ 'fullname' ] = fullname prefix = 'attribute' desc_sig_node += desc_annotation ( prefix , prefix ) desc_sig_name_node = desc_addname ( 'config' , 'config' ) # Fakes the look of a cross reference. desc_sig_name_node [ 'classes' ] . extend ( [ 'xref' , 'py' ] ) desc_sig_node += desc_sig_name_node # The content is the one-sentence summary. summary_text = ( 'Access configuration fields and retargetable subtasks.' ) content_node_p = nodes . paragraph ( text = summary_text ) content_node = desc_content ( ) content_node += content_node_p desc_node = desc ( ) desc_node [ 'noindex' ] = True desc_node [ 'domain' ] = 'py' desc_node [ 'objtype' ] = 'attribute' desc_node += desc_sig_node desc_node += content_node return desc_node
11,645
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L206-L242
[ "def", "serve", "(", "content", ")", ":", "temp_folder", "=", "tempfile", ".", "gettempdir", "(", ")", "temp_file_name", "=", "tempfile", ".", "gettempprefix", "(", ")", "+", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "+", "\".html\"", "# Generate a file path with a random name in temporary dir", "temp_file_path", "=", "os", ".", "path", ".", "join", "(", "temp_folder", ",", "temp_file_name", ")", "# save content to temp file", "save", "(", "temp_file_path", ",", "content", ")", "# Open templfile in a browser", "webbrowser", ".", "open", "(", "\"file://{}\"", ".", "format", "(", "temp_file_path", ")", ")", "# Block the thread while content is served", "try", ":", "while", "True", ":", "time", ".", "sleep", "(", "1", ")", "except", "KeyboardInterrupt", ":", "# cleanup the temp file", "os", ".", "remove", "(", "temp_file_path", ")" ]
Generate nodes that show a code sample demonstrating how to import the task class .
def _format_import_example ( self , task_class ) : code = 'from {0.__module__} import {0.__name__}' . format ( task_class ) # This is a bare-bones version of what Sphinx's code-block directive # does. The 'language' attr triggers the pygments treatment. literal_node = nodes . literal_block ( code , code ) literal_node [ 'language' ] = 'py' return [ literal_node ]
11,646
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L244-L265
[ "def", "build_vrt", "(", "source_file", ",", "destination_file", ",", "*", "*", "kwargs", ")", ":", "with", "rasterio", ".", "open", "(", "source_file", ")", "as", "src", ":", "vrt_doc", "=", "boundless_vrt_doc", "(", "src", ",", "*", "*", "kwargs", ")", ".", "tostring", "(", ")", "with", "open", "(", "destination_file", ",", "'wb'", ")", "as", "dst", ":", "dst", ".", "write", "(", "vrt_doc", ")", "return", "destination_file" ]
Format a message referring the reader to the full API docs .
def _format_api_docs_link_message ( self , task_class ) : fullname = '{0.__module__}.{0.__name__}' . format ( task_class ) p_node = nodes . paragraph ( ) _ = 'See the ' p_node += nodes . Text ( _ , _ ) xref = PyXRefRole ( ) xref_nodes , _ = xref ( 'py:class' , '~' + fullname , '~' + fullname , self . lineno , self . state . inliner ) p_node += xref_nodes _ = ' API reference for complete details.' p_node += nodes . Text ( _ , _ ) seealso_node = seealso ( ) seealso_node += p_node return [ seealso_node ]
11,647
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L267-L303
[ "def", "disable_pow_check", "(", "chain_class", ":", "Type", "[", "BaseChain", "]", ")", "->", "Type", "[", "BaseChain", "]", ":", "if", "not", "chain_class", ".", "vm_configuration", ":", "raise", "ValidationError", "(", "\"Chain class has no vm_configuration\"", ")", "if", "issubclass", "(", "chain_class", ",", "NoChainSealValidationMixin", ")", ":", "# Seal validation already disabled, hence nothing to change", "chain_class_without_seal_validation", "=", "chain_class", "else", ":", "chain_class_without_seal_validation", "=", "type", "(", "chain_class", ".", "__name__", ",", "(", "chain_class", ",", "NoChainSealValidationMixin", ")", ",", "{", "}", ",", ")", "return", "chain_class_without_seal_validation", ".", "configure", "(", "# type: ignore", "vm_configuration", "=", "_mix_in_disable_seal_validation", "(", "chain_class_without_seal_validation", ".", "vm_configuration", "# type: ignore", ")", ",", ")" ]
When an exception has occurred write the traceback to the user .
def send_exception ( self ) : self . compiler . reset ( ) exc = traceback . format_exc ( ) self . writer . write ( exc . encode ( 'utf8' ) ) yield from self . writer . drain ( )
11,648
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L66-L73
[ "async", "def", "services", "(", "self", ",", "*", ",", "dc", "=", "None", ",", "watch", "=", "None", ",", "consistency", "=", "None", ")", ":", "params", "=", "{", "\"dc\"", ":", "dc", "}", "response", "=", "await", "self", ".", "_api", ".", "get", "(", "\"/v1/catalog/services\"", ",", "params", "=", "params", ",", "watch", "=", "watch", ",", "consistency", "=", "consistency", ")", "return", "consul", "(", "response", ")" ]
Process a single command . May have many lines .
def handle_one_command ( self ) : while True : yield from self . write_prompt ( ) codeobj = yield from self . read_command ( ) if codeobj is not None : yield from self . run_command ( codeobj )
11,649
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L88-L96
[ "def", "edit", "(", "self", ",", "data_src", ",", "value", ")", ":", "# check if opening file", "if", "'filename'", "in", "value", ":", "items", "=", "[", "k", "for", "k", ",", "v", "in", "self", ".", "reg", ".", "data_source", ".", "iteritems", "(", ")", "if", "v", "==", "data_src", "]", "self", ".", "reg", ".", "unregister", "(", "items", ")", "# remove items from Registry", "# open file and register new data", "self", ".", "open", "(", "data_src", ",", "value", "[", "'filename'", "]", ",", "value", ".", "get", "(", "'path'", ")", ")", "self", ".", "layer", "[", "data_src", "]", ".", "update", "(", "value", ")" ]
Execute a compiled code object and write the output back to the client .
def run_command ( self , codeobj ) : try : value , stdout = yield from self . attempt_exec ( codeobj , self . namespace ) except Exception : yield from self . send_exception ( ) return else : yield from self . send_output ( value , stdout )
11,650
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L99-L107
[ "def", "unmount", "(", "self", ")", ":", "self", ".", "unmount_bindmounts", "(", ")", "self", ".", "unmount_mounts", "(", ")", "self", ".", "unmount_volume_groups", "(", ")", "self", ".", "unmount_loopbacks", "(", ")", "self", ".", "unmount_base_images", "(", ")", "self", ".", "clean_dirs", "(", ")" ]
Read a command from the user line by line .
def read_command ( self ) : reader = self . reader line = yield from reader . readline ( ) if line == b'' : # lost connection raise ConnectionResetError ( ) try : # skip the newline to make CommandCompiler work as advertised codeobj = self . attempt_compile ( line . rstrip ( b'\n' ) ) except SyntaxError : yield from self . send_exception ( ) return return codeobj
11,651
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L121-L140
[ "def", "create_index_if_missing", "(", "self", ",", "index", ",", "settings", "=", "None", ")", ":", "try", ":", "return", "self", ".", "create_index", "(", "index", ",", "settings", ")", "except", "IndexAlreadyExistsException", "as", "e", ":", "return", "e", ".", "result" ]
Write the output or value of the expression back to user .
def send_output ( self , value , stdout ) : writer = self . writer if value is not None : writer . write ( '{!r}\n' . format ( value ) . encode ( 'utf8' ) ) if stdout : writer . write ( stdout . encode ( 'utf8' ) ) yield from writer . drain ( )
11,652
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L143-L160
[ "def", "similarity", "(", "self", ",", "other", ")", ":", "sim", "=", "self", ".", "Similarity", "(", ")", "total", "=", "0.0", "# Calculate similarity ratio for each attribute", "cname", "=", "self", ".", "__class__", ".", "__name__", "for", "aname", ",", "weight", "in", "self", ".", "attributes", ".", "items", "(", ")", ":", "attr1", "=", "getattr", "(", "self", ",", "aname", ",", "None", ")", "attr2", "=", "getattr", "(", "other", ",", "aname", ",", "None", ")", "self", ".", "log", "(", "attr1", ",", "attr2", ",", "'%'", ",", "cname", "=", "cname", ",", "aname", "=", "aname", ")", "# Similarity is ignored if None on both objects", "if", "attr1", "is", "None", "and", "attr2", "is", "None", ":", "self", ".", "log", "(", "attr1", ",", "attr2", ",", "'%'", ",", "cname", "=", "cname", ",", "aname", "=", "aname", ",", "result", "=", "\"attributes are both None\"", ")", "continue", "# Similarity is 0 if either attribute is non-Comparable", "if", "not", "all", "(", "(", "isinstance", "(", "attr1", ",", "Comparable", ")", ",", "isinstance", "(", "attr2", ",", "Comparable", ")", ")", ")", ":", "self", ".", "log", "(", "attr1", ",", "attr2", ",", "'%'", ",", "cname", "=", "cname", ",", "aname", "=", "aname", ",", "result", "=", "\"attributes not Comparable\"", ")", "total", "+=", "weight", "continue", "# Calculate similarity between the attributes", "attr_sim", "=", "(", "attr1", "%", "attr2", ")", "self", ".", "log", "(", "attr1", ",", "attr2", ",", "'%'", ",", "cname", "=", "cname", ",", "aname", "=", "aname", ",", "result", "=", "attr_sim", ")", "# Add the similarity to the total", "sim", "+=", "attr_sim", "*", "weight", "total", "+=", "weight", "# Scale the similarity so the total is 1.0", "if", "total", ":", "sim", "*=", "(", "1.0", "/", "total", ")", "return", "sim" ]
Calls the service method defined with the arguments provided
def call ( self , method , * args ) : try : response = getattr ( self . client . service , method ) ( * args ) except ( URLError , SSLError ) as e : log . exception ( 'Failed to connect to responsys service' ) raise ConnectError ( "Request to service timed out" ) except WebFault as web_fault : fault_name = getattr ( web_fault . fault , 'faultstring' , None ) error = str ( web_fault . fault . detail ) if fault_name == 'TableFault' : raise TableFault ( error ) if fault_name == 'ListFault' : raise ListFault ( error ) if fault_name == 'API_LIMIT_EXCEEDED' : raise ApiLimitError ( error ) if fault_name == 'AccountFault' : raise AccountFault ( error ) raise ServiceError ( web_fault . fault , web_fault . document ) return response
11,653
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L110-L131
[ "def", "get_booking", "(", "request", ")", ":", "booking", "=", "None", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "try", ":", "booking", "=", "Booking", ".", "objects", ".", "get", "(", "user", "=", "request", ".", "user", ",", "booking_status__slug", "=", "'inprogress'", ")", "except", "Booking", ".", "DoesNotExist", ":", "# The user does not have any open bookings", "pass", "else", ":", "session", "=", "Session", ".", "objects", ".", "get", "(", "session_key", "=", "request", ".", "session", ".", "session_key", ")", "try", ":", "booking", "=", "Booking", ".", "objects", ".", "get", "(", "session", "=", "session", ")", "except", "Booking", ".", "DoesNotExist", ":", "# The user does not have any bookings in his session", "pass", "return", "booking" ]
Connects to the Responsys soap service
def connect ( self ) : if self . session and self . session . is_expired : # Close the session to avoid max concurrent session errors self . disconnect ( abandon_session = True ) if not self . session : try : login_result = self . login ( self . username , self . password ) except AccountFault : log . error ( 'Login failed, invalid username or password' ) raise else : self . session = login_result . session_id self . connected = time ( ) return self . connected
11,654
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L133-L155
[ "def", "parse_string", "(", "self", ",", "string", ")", ":", "self", ".", "log", ".", "info", "(", "\"Parsing ASCII data\"", ")", "if", "not", "string", ":", "self", ".", "log", ".", "warning", "(", "\"Empty metadata\"", ")", "return", "lines", "=", "string", ".", "splitlines", "(", ")", "application_data", "=", "[", "]", "application", "=", "lines", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", "self", ".", "log", ".", "debug", "(", "\"Reading meta information for '%s'\"", "%", "application", ")", "for", "line", "in", "lines", ":", "if", "application", "is", "None", ":", "self", ".", "log", ".", "debug", "(", "\"Reading meta information for '%s'\"", "%", "application", ")", "application", "=", "line", ".", "split", "(", ")", "[", "0", "]", "application_data", ".", "append", "(", "line", ")", "if", "line", ".", "startswith", "(", "application", "+", "b' Linux'", ")", ":", "self", ".", "_record_app_data", "(", "application_data", ")", "application_data", "=", "[", "]", "application", "=", "None" ]
Disconnects from the Responsys soap service
def disconnect ( self , abandon_session = False ) : self . connected = False if ( self . session and self . session . is_expired ) or abandon_session : try : self . logout ( ) except : log . warning ( 'Logout call to responsys failed, session may have not been terminated' , exc_info = True ) del self . session return True
11,655
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L157-L173
[ "def", "get_file", "(", "self", ",", "cache_id_obj", ",", "section", "=", "None", ")", ":", "section", "=", "\"default\"", "if", "section", "is", "None", "else", "section", "if", "\"/\"", "in", "section", ":", "raise", "ValueError", "(", "\"invalid section '{0}'\"", ".", "format", "(", "section", ")", ")", "cache_id", "=", "\"{:08x}\"", ".", "format", "(", "zlib", ".", "crc32", "(", "b\"&\"", ".", "join", "(", "sorted", "(", "[", "str", "(", "k", ")", ".", "encode", "(", "'utf8'", ")", "+", "b\"=\"", "+", "str", "(", "v", ")", ".", "encode", "(", "'utf8'", ")", "for", "k", ",", "v", "in", "cache_id_obj", ".", "items", "(", ")", "]", ")", ")", ")", "&", "0xffffffff", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "_full_base", ",", "os", ".", "path", ".", "join", "(", "section", ",", "os", ".", "path", ".", "join", "(", "\"{0}\"", ".", "format", "(", "cache_id", "[", ":", "2", "]", ")", ",", "\"{0}.tmp\"", ".", "format", "(", "cache_id", "[", "2", ":", "]", ")", ")", ")", ")" ]
Responsys . mergeListMembers call
def merge_list_members ( self , list_ , record_data , merge_rule ) : list_ = list_ . get_soap_object ( self . client ) record_data = record_data . get_soap_object ( self . client ) merge_rule = merge_rule . get_soap_object ( self . client ) return MergeResult ( self . call ( 'mergeListMembers' , list_ , record_data , merge_rule ) )
11,656
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L205-L218
[ "def", "pressure", "(", "self", ")", ":", "self", ".", "_read_temperature", "(", ")", "# Algorithm from the BME280 driver", "# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c", "adc", "=", "self", ".", "_read24", "(", "_BME280_REGISTER_PRESSUREDATA", ")", "/", "16", "# lowest 4 bits get dropped", "var1", "=", "float", "(", "self", ".", "_t_fine", ")", "/", "2.0", "-", "64000.0", "var2", "=", "var1", "*", "var1", "*", "self", ".", "_pressure_calib", "[", "5", "]", "/", "32768.0", "var2", "=", "var2", "+", "var1", "*", "self", ".", "_pressure_calib", "[", "4", "]", "*", "2.0", "var2", "=", "var2", "/", "4.0", "+", "self", ".", "_pressure_calib", "[", "3", "]", "*", "65536.0", "var3", "=", "self", ".", "_pressure_calib", "[", "2", "]", "*", "var1", "*", "var1", "/", "524288.0", "var1", "=", "(", "var3", "+", "self", ".", "_pressure_calib", "[", "1", "]", "*", "var1", ")", "/", "524288.0", "var1", "=", "(", "1.0", "+", "var1", "/", "32768.0", ")", "*", "self", ".", "_pressure_calib", "[", "0", "]", "if", "var1", "==", "0", ":", "return", "0", "if", "var1", ":", "pressure", "=", "1048576.0", "-", "adc", "pressure", "=", "(", "(", "pressure", "-", "var2", "/", "4096.0", ")", "*", "6250.0", ")", "/", "var1", "var1", "=", "self", ".", "_pressure_calib", "[", "8", "]", "*", "pressure", "*", "pressure", "/", "2147483648.0", "var2", "=", "pressure", "*", "self", ".", "_pressure_calib", "[", "7", "]", "/", "32768.0", "pressure", "=", "pressure", "+", "(", "var1", "+", "var2", "+", "self", ".", "_pressure_calib", "[", "6", "]", ")", "/", "16.0", "pressure", "/=", "100", "if", "pressure", "<", "_BME280_PRESSURE_MIN_HPA", ":", "return", "_BME280_PRESSURE_MIN_HPA", "if", "pressure", ">", "_BME280_PRESSURE_MAX_HPA", ":", "return", "_BME280_PRESSURE_MAX_HPA", "return", "pressure", "else", ":", "return", "_BME280_PRESSURE_MIN_HPA" ]
Responsys . mergeListMembersRIID call
def merge_list_members_RIID ( self , list_ , record_data , merge_rule ) : list_ = list_ . get_soap_object ( self . client ) result = self . call ( 'mergeListMembersRIID' , list_ , record_data , merge_rule ) return RecipientResult ( result . recipientResult )
11,657
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L220-L232
[ "def", "hermetic_environment_as", "(", "*", "*", "kwargs", ")", ":", "old_environment", "=", "os", ".", "environ", ".", "copy", "(", ")", "if", "PY3", "else", "_copy_and_decode_env", "(", "os", ".", "environ", ")", "_purge_env", "(", ")", "try", ":", "with", "environment_as", "(", "*", "*", "kwargs", ")", ":", "yield", "finally", ":", "_purge_env", "(", ")", "_restore_env", "(", "old_environment", ")" ]
Responsys . deleteListMembers call
def delete_list_members ( self , list_ , query_column , ids_to_delete ) : list_ = list_ . get_soap_object ( self . client ) result = self . call ( 'deleteListMembers' , list_ , query_column , ids_to_delete ) if hasattr ( result , '__iter__' ) : return [ DeleteResult ( delete_result ) for delete_result in result ] return [ DeleteResult ( result ) ]
11,658
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L234-L249
[ "def", "pressure", "(", "self", ")", ":", "self", ".", "_read_temperature", "(", ")", "# Algorithm from the BME280 driver", "# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c", "adc", "=", "self", ".", "_read24", "(", "_BME280_REGISTER_PRESSUREDATA", ")", "/", "16", "# lowest 4 bits get dropped", "var1", "=", "float", "(", "self", ".", "_t_fine", ")", "/", "2.0", "-", "64000.0", "var2", "=", "var1", "*", "var1", "*", "self", ".", "_pressure_calib", "[", "5", "]", "/", "32768.0", "var2", "=", "var2", "+", "var1", "*", "self", ".", "_pressure_calib", "[", "4", "]", "*", "2.0", "var2", "=", "var2", "/", "4.0", "+", "self", ".", "_pressure_calib", "[", "3", "]", "*", "65536.0", "var3", "=", "self", ".", "_pressure_calib", "[", "2", "]", "*", "var1", "*", "var1", "/", "524288.0", "var1", "=", "(", "var3", "+", "self", ".", "_pressure_calib", "[", "1", "]", "*", "var1", ")", "/", "524288.0", "var1", "=", "(", "1.0", "+", "var1", "/", "32768.0", ")", "*", "self", ".", "_pressure_calib", "[", "0", "]", "if", "var1", "==", "0", ":", "return", "0", "if", "var1", ":", "pressure", "=", "1048576.0", "-", "adc", "pressure", "=", "(", "(", "pressure", "-", "var2", "/", "4096.0", ")", "*", "6250.0", ")", "/", "var1", "var1", "=", "self", ".", "_pressure_calib", "[", "8", "]", "*", "pressure", "*", "pressure", "/", "2147483648.0", "var2", "=", "pressure", "*", "self", ".", "_pressure_calib", "[", "7", "]", "/", "32768.0", "pressure", "=", "pressure", "+", "(", "var1", "+", "var2", "+", "self", ".", "_pressure_calib", "[", "6", "]", ")", "/", "16.0", "pressure", "/=", "100", "if", "pressure", "<", "_BME280_PRESSURE_MIN_HPA", ":", "return", "_BME280_PRESSURE_MIN_HPA", "if", "pressure", ">", "_BME280_PRESSURE_MAX_HPA", ":", "return", "_BME280_PRESSURE_MAX_HPA", "return", "pressure", "else", ":", "return", "_BME280_PRESSURE_MIN_HPA" ]
Responsys . retrieveListMembers call
def retrieve_list_members ( self , list_ , query_column , field_list , ids_to_retrieve ) : list_ = list_ . get_soap_object ( self . client ) result = self . call ( 'retrieveListMembers' , list_ , query_column , field_list , ids_to_retrieve ) return RecordData . from_soap_type ( result . recordData )
11,659
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L251-L265
[ "def", "swo_set_emu_buffer_size", "(", "self", ",", "buf_size", ")", ":", "buf", "=", "ctypes", ".", "c_uint32", "(", "buf_size", ")", "res", "=", "self", ".", "_dll", ".", "JLINKARM_SWO_Control", "(", "enums", ".", "JLinkSWOCommands", ".", "SET_BUFFERSIZE_EMU", ",", "ctypes", ".", "byref", "(", "buf", ")", ")", "if", "res", "<", "0", ":", "raise", "errors", ".", "JLinkException", "(", "res", ")", "return", "None" ]
Responsys . createTable call
def create_table ( self , table , fields ) : table = table . get_soap_object ( self . client ) return self . call ( 'createTable' , table , fields )
11,660
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L268-L278
[ "def", "get_license_assignment_manager", "(", "service_instance", ")", ":", "log", ".", "debug", "(", "'Retrieving license assignment manager'", ")", "try", ":", "lic_assignment_manager", "=", "service_instance", ".", "content", ".", "licenseManager", ".", "licenseAssignmentManager", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{0}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "if", "not", "lic_assignment_manager", ":", "raise", "salt", ".", "exceptions", ".", "VMwareObjectRetrievalError", "(", "'License assignment manager was not retrieved'", ")", "return", "lic_assignment_manager" ]
Responsys . createTableWithPK call
def create_table_with_pk ( self , table , fields , primary_keys ) : table = table . get_soap_object ( self . client ) return self . call ( 'createTableWithPK' , table , fields , primary_keys )
11,661
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L280-L291
[ "def", "remove_all_callbacks", "(", "self", ")", ":", "for", "cb_id", "in", "list", "(", "self", ".", "_next_tick_callback_removers", ".", "keys", "(", ")", ")", ":", "self", ".", "remove_next_tick_callback", "(", "cb_id", ")", "for", "cb_id", "in", "list", "(", "self", ".", "_timeout_callback_removers", ".", "keys", "(", ")", ")", ":", "self", ".", "remove_timeout_callback", "(", "cb_id", ")", "for", "cb_id", "in", "list", "(", "self", ".", "_periodic_callback_removers", ".", "keys", "(", ")", ")", ":", "self", ".", "remove_periodic_callback", "(", "cb_id", ")" ]
Responsys . deleteTable call
def delete_table ( self , table ) : table = table . get_soap_object ( self . client ) return self . call ( 'deleteTable' , table )
11,662
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L293-L302
[ "async", "def", "get_lease_async", "(", "self", ",", "partition_id", ")", ":", "try", ":", "blob", "=", "await", "self", ".", "host", ".", "loop", ".", "run_in_executor", "(", "self", ".", "executor", ",", "functools", ".", "partial", "(", "self", ".", "storage_client", ".", "get_blob_to_text", ",", "self", ".", "lease_container_name", ",", "partition_id", ")", ")", "lease", "=", "AzureBlobLease", "(", ")", "lease", ".", "with_blob", "(", "blob", ")", "async", "def", "state", "(", ")", ":", "\"\"\"\n Allow lease to curry storage_client to get state\n \"\"\"", "try", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "res", "=", "await", "loop", ".", "run_in_executor", "(", "self", ".", "executor", ",", "functools", ".", "partial", "(", "self", ".", "storage_client", ".", "get_blob_properties", ",", "self", ".", "lease_container_name", ",", "partition_id", ")", ")", "return", "res", ".", "properties", ".", "lease", ".", "state", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "_logger", ".", "error", "(", "\"Failed to get lease state %r %r\"", ",", "err", ",", "partition_id", ")", "lease", ".", "state", "=", "state", "return", "lease", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "_logger", ".", "error", "(", "\"Failed to get lease %r %r\"", ",", "err", ",", "partition_id", ")" ]
Responsys . deleteProfileExtensionRecords call
def delete_profile_extension_members ( self , profile_extension , query_column , ids_to_delete ) : profile_extension = profile_extension . get_soap_object ( self . client ) result = self . call ( 'deleteProfileExtensionMembers' , profile_extension , query_column , ids_to_delete ) if hasattr ( result , '__iter__' ) : return [ DeleteResult ( delete_result ) for delete_result in result ] return [ DeleteResult ( result ) ]
11,663
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L304-L321
[ "def", "_CreateSanitizedDestination", "(", "self", ",", "source_file_entry", ",", "source_path_spec", ",", "source_data_stream_name", ",", "destination_path", ")", ":", "file_system", "=", "source_file_entry", ".", "GetFileSystem", "(", ")", "path", "=", "getattr", "(", "source_path_spec", ",", "'location'", ",", "None", ")", "path_segments", "=", "file_system", ".", "SplitPath", "(", "path", ")", "# Sanitize each path segment.", "for", "index", ",", "path_segment", "in", "enumerate", "(", "path_segments", ")", ":", "path_segments", "[", "index", "]", "=", "''", ".", "join", "(", "[", "character", "if", "character", "not", "in", "self", ".", "_DIRTY_CHARACTERS", "else", "'_'", "for", "character", "in", "path_segment", "]", ")", "target_filename", "=", "path_segments", ".", "pop", "(", ")", "parent_path_spec", "=", "getattr", "(", "source_file_entry", ".", "path_spec", ",", "'parent'", ",", "None", ")", "while", "parent_path_spec", ":", "if", "parent_path_spec", ".", "type_indicator", "==", "(", "dfvfs_definitions", ".", "TYPE_INDICATOR_TSK_PARTITION", ")", ":", "path_segments", ".", "insert", "(", "0", ",", "parent_path_spec", ".", "location", "[", "1", ":", "]", ")", "break", "elif", "parent_path_spec", ".", "type_indicator", "==", "(", "dfvfs_definitions", ".", "TYPE_INDICATOR_VSHADOW", ")", ":", "path_segments", ".", "insert", "(", "0", ",", "parent_path_spec", ".", "location", "[", "1", ":", "]", ")", "parent_path_spec", "=", "getattr", "(", "parent_path_spec", ",", "'parent'", ",", "None", ")", "target_directory", "=", "os", ".", "path", ".", "join", "(", "destination_path", ",", "*", "path_segments", ")", "if", "source_data_stream_name", ":", "target_filename", "=", "'{0:s}_{1:s}'", ".", "format", "(", "target_filename", ",", "source_data_stream_name", ")", "return", "target_directory", ",", "target_filename" ]
Responsys . retrieveProfileExtensionRecords call
def retrieve_profile_extension_records ( self , profile_extension , field_list , ids_to_retrieve , query_column = 'RIID' ) : profile_extension = profile_extension . get_soap_object ( self . client ) return RecordData . from_soap_type ( self . call ( 'retrieveProfileExtensionRecords' , profile_extension , query_column , field_list , ids_to_retrieve ) )
11,664
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L323-L339
[ "def", "install_js", "(", ")", ":", "target_jsdir", "=", "join", "(", "SERVER", ",", "'static'", ",", "'js'", ")", "target_cssdir", "=", "join", "(", "SERVER", ",", "'static'", ",", "'css'", ")", "target_tslibdir", "=", "join", "(", "SERVER", ",", "'static'", ",", "'lib'", ")", "STATIC_ASSETS", "=", "[", "join", "(", "JS", ",", "'bokeh.js'", ")", ",", "join", "(", "JS", ",", "'bokeh.min.js'", ")", ",", "join", "(", "CSS", ",", "'bokeh.css'", ")", ",", "join", "(", "CSS", ",", "'bokeh.min.css'", ")", ",", "]", "if", "not", "all", "(", "exists", "(", "a", ")", "for", "a", "in", "STATIC_ASSETS", ")", ":", "print", "(", "BOKEHJS_INSTALL_FAIL", ")", "sys", ".", "exit", "(", "1", ")", "if", "exists", "(", "target_jsdir", ")", ":", "shutil", ".", "rmtree", "(", "target_jsdir", ")", "shutil", ".", "copytree", "(", "JS", ",", "target_jsdir", ")", "if", "exists", "(", "target_cssdir", ")", ":", "shutil", ".", "rmtree", "(", "target_cssdir", ")", "shutil", ".", "copytree", "(", "CSS", ",", "target_cssdir", ")", "if", "exists", "(", "target_tslibdir", ")", ":", "shutil", ".", "rmtree", "(", "target_tslibdir", ")", "if", "exists", "(", "TSLIB", ")", ":", "# keep in sync with bokehjs/src/compiler/compile.ts", "lib", "=", "{", "\"lib.es5.d.ts\"", ",", "\"lib.dom.d.ts\"", ",", "\"lib.es2015.core.d.ts\"", ",", "\"lib.es2015.promise.d.ts\"", ",", "\"lib.es2015.symbol.d.ts\"", ",", "\"lib.es2015.iterable.d.ts\"", ",", "}", "shutil", ".", "copytree", "(", "TSLIB", ",", "target_tslibdir", ",", "ignore", "=", "lambda", "_", ",", "files", ":", "[", "f", "for", "f", "in", "files", "if", "f", "not", "in", "lib", "]", ")" ]
Responsys . truncateTable call
def truncate_table ( self , table ) : table = table . get_soap_object ( self . client ) return self . call ( 'truncateTable' , table )
11,665
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L341-L350
[ "def", "_ProcessRegistryKeySource", "(", "self", ",", "source", ")", ":", "keys", "=", "source", ".", "base_source", ".", "attributes", ".", "get", "(", "\"keys\"", ",", "[", "]", ")", "if", "not", "keys", ":", "return", "interpolated_paths", "=", "artifact_utils", ".", "InterpolateListKbAttributes", "(", "input_list", "=", "keys", ",", "knowledge_base", "=", "self", ".", "knowledge_base", ",", "ignore_errors", "=", "self", ".", "ignore_interpolation_errors", ")", "glob_expressions", "=", "map", "(", "rdf_paths", ".", "GlobExpression", ",", "interpolated_paths", ")", "patterns", "=", "[", "]", "for", "pattern", "in", "glob_expressions", ":", "patterns", ".", "extend", "(", "pattern", ".", "Interpolate", "(", "knowledge_base", "=", "self", ".", "knowledge_base", ")", ")", "patterns", ".", "sort", "(", "key", "=", "len", ",", "reverse", "=", "True", ")", "file_finder_action", "=", "rdf_file_finder", ".", "FileFinderAction", ".", "Stat", "(", ")", "request", "=", "rdf_file_finder", ".", "FileFinderArgs", "(", "paths", "=", "patterns", ",", "action", "=", "file_finder_action", ",", "follow_links", "=", "True", ",", "pathtype", "=", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", "action", "=", "vfs_file_finder", ".", "RegistryKeyFromClient", "yield", "action", ",", "request" ]
Responsys . deleteTableRecords call
def delete_table_records ( self , table , query_column , ids_to_delete ) : table = table . get_soap_object ( self . client ) result = self . call ( 'deleteTableRecords' , table , query_column , ids_to_delete ) if hasattr ( result , '__iter__' ) : return [ DeleteResult ( delete_result ) for delete_result in result ] return [ DeleteResult ( result ) ]
11,666
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L352-L367
[ "async", "def", "get_lease_async", "(", "self", ",", "partition_id", ")", ":", "try", ":", "blob", "=", "await", "self", ".", "host", ".", "loop", ".", "run_in_executor", "(", "self", ".", "executor", ",", "functools", ".", "partial", "(", "self", ".", "storage_client", ".", "get_blob_to_text", ",", "self", ".", "lease_container_name", ",", "partition_id", ")", ")", "lease", "=", "AzureBlobLease", "(", ")", "lease", ".", "with_blob", "(", "blob", ")", "async", "def", "state", "(", ")", ":", "\"\"\"\n Allow lease to curry storage_client to get state\n \"\"\"", "try", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "res", "=", "await", "loop", ".", "run_in_executor", "(", "self", ".", "executor", ",", "functools", ".", "partial", "(", "self", ".", "storage_client", ".", "get_blob_properties", ",", "self", ".", "lease_container_name", ",", "partition_id", ")", ")", "return", "res", ".", "properties", ".", "lease", ".", "state", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "_logger", ".", "error", "(", "\"Failed to get lease state %r %r\"", ",", "err", ",", "partition_id", ")", "lease", ".", "state", "=", "state", "return", "lease", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "_logger", ".", "error", "(", "\"Failed to get lease %r %r\"", ",", "err", ",", "partition_id", ")" ]
Responsys . mergeTableRecords call
def merge_table_records ( self , table , record_data , match_column_names ) : table = table . get_soap_object ( self . client ) record_data = record_data . get_soap_object ( self . client ) return MergeResult ( self . call ( 'mergeTableRecords' , table , record_data , match_column_names ) )
11,667
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L369-L382
[ "def", "members", "(", "self", ",", "is_manager", "=", "None", ")", ":", "if", "not", "is_manager", ":", "return", "[", "member", "for", "member", "in", "self", ".", "_json_data", "[", "'members'", "]", "if", "member", "[", "'is_active'", "]", "]", "else", ":", "return", "[", "member", "for", "member", "in", "self", ".", "_json_data", "[", "'members'", "]", "if", "member", ".", "get", "(", "'is_active'", ",", "False", ")", "and", "member", ".", "get", "(", "'is_manager'", ",", "False", ")", "]" ]
Responsys . mergeTableRecordsWithPK call
def merge_table_records_with_pk ( self , table , record_data , insert_on_no_match , update_on_match ) : table = table . get_soap_object ( self . client ) record_data = record_data . get_soap_object ( self . client ) return MergeResult ( self . call ( 'mergeTableRecordsWithPK' , table , record_data , insert_on_no_match , update_on_match ) )
11,668
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L384-L398
[ "def", "members", "(", "self", ",", "is_manager", "=", "None", ")", ":", "if", "not", "is_manager", ":", "return", "[", "member", "for", "member", "in", "self", ".", "_json_data", "[", "'members'", "]", "if", "member", "[", "'is_active'", "]", "]", "else", ":", "return", "[", "member", "for", "member", "in", "self", ".", "_json_data", "[", "'members'", "]", "if", "member", ".", "get", "(", "'is_active'", ",", "False", ")", "and", "member", ".", "get", "(", "'is_manager'", ",", "False", ")", "]" ]
Responsys . mergeIntoProfileExtension call
def merge_into_profile_extension ( self , profile_extension , record_data , match_column , insert_on_no_match , update_on_match ) : profile_extension = profile_extension . get_soap_object ( self . client ) record_data = record_data . get_soap_object ( self . client ) results = self . call ( 'mergeIntoProfileExtension' , profile_extension , record_data , match_column , insert_on_no_match , update_on_match ) return [ RecipientResult ( result ) for result in results ]
11,669
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L400-L418
[ "def", "destroy_page", "(", "self", ",", "tab_dict", ")", ":", "# logger.info(\"destroy page %s\" % tab_dict['controller'].model.state.get_path())", "if", "tab_dict", "[", "'source_code_changed_handler_id'", "]", "is", "not", "None", ":", "handler_id", "=", "tab_dict", "[", "'source_code_changed_handler_id'", "]", "if", "tab_dict", "[", "'controller'", "]", ".", "view", ".", "source_view", ".", "get_buffer", "(", ")", ".", "handler_is_connected", "(", "handler_id", ")", ":", "tab_dict", "[", "'controller'", "]", ".", "view", ".", "source_view", ".", "get_buffer", "(", ")", ".", "disconnect", "(", "handler_id", ")", "else", ":", "logger", ".", "warning", "(", "\"Source code changed handler of state {0} was already removed.\"", ".", "format", "(", "tab_dict", "[", "'state_m'", "]", ")", ")", "self", ".", "remove_controller", "(", "tab_dict", "[", "'controller'", "]", ")" ]
Responsys . retrieveTableRecords call
def retrieve_table_records ( self , table , query_column , field_list , ids_to_retrieve ) : table = table . get_soap_object ( self . client ) return RecordData . from_soap_type ( self . call ( 'retrieveTableRecords' , table , query_column , field_list , ids_to_retrieve ) )
11,670
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L420-L434
[ "def", "write_lockfile", "(", "self", ",", "content", ")", ":", "s", "=", "self", ".", "_lockfile_encoder", ".", "encode", "(", "content", ")", "open_kwargs", "=", "{", "\"newline\"", ":", "self", ".", "_lockfile_newlines", ",", "\"encoding\"", ":", "\"utf-8\"", "}", "with", "vistir", ".", "contextmanagers", ".", "atomic_open_for_write", "(", "self", ".", "lockfile_location", ",", "*", "*", "open_kwargs", ")", "as", "f", ":", "f", ".", "write", "(", "s", ")", "# Write newline at end of document. GH-319.", "# Only need '\\n' here; the file object handles the rest.", "if", "not", "s", ".", "endswith", "(", "u\"\\n\"", ")", ":", "f", ".", "write", "(", "u\"\\n\"", ")" ]
Creates a package - list URL and a link base from a docroot element .
def normalize_docroot ( app , root ) : srcdir = app . env . srcdir default_version = app . config . javalink_default_version if isinstance ( root , basestring ) : ( url , base ) = _parse_docroot_str ( srcdir , root ) return { 'root' : url , 'base' : base , 'version' : default_version } else : normalized = { } normalized [ 'root' ] = _parse_docroot_str ( srcdir , root [ 'root' ] ) [ 0 ] if 'base' in root : normalized [ 'base' ] = _parse_docroot_str ( srcdir , root [ 'base' ] ) [ 1 ] else : normalized [ 'base' ] = _parse_docroot_str ( srcdir , root [ 'root' ] ) [ 1 ] if 'version' in root : normalized [ 'version' ] = root [ 'version' ] else : normalized [ 'version' ] = default_version return normalized
11,671
https://github.com/bluekeyes/sphinx-javalink/blob/490e37506efa53e95ad88a665e347536e75b6254/javalink/ref.py#L296-L324
[ "def", "same_types", "(", "self", ",", "index1", ",", "index2", ")", ":", "try", ":", "same", "=", "self", ".", "table", "[", "index1", "]", ".", "type", "==", "self", ".", "table", "[", "index2", "]", ".", "type", "!=", "SharedData", ".", "TYPES", ".", "NO_TYPE", "except", "Exception", ":", "self", ".", "error", "(", ")", "return", "same" ]
Assign pi electron and hydrogens
def assign_valence ( mol ) : for u , v , bond in mol . bonds_iter ( ) : if bond . order == 2 : mol . atom ( u ) . pi = 1 mol . atom ( v ) . pi = 1 if mol . atom ( u ) . symbol == "O" and not mol . atom ( u ) . charge : mol . atom ( v ) . carbonyl_C = 1 if mol . atom ( v ) . symbol == "O" and not mol . atom ( v ) . charge : mol . atom ( u ) . carbonyl_C = 1 elif bond . order == 3 : mol . atom ( u ) . pi = mol . atom ( v ) . pi = 2 max_nbr = { "C" : 4 , "Si" : 4 , "N" : 3 , "P" : 3 , "As" : 3 , "O" : 2 , "S" : 2 , "Se" : 2 , "F" : 1 , "Cl" : 1 , "Br" : 1 , "I" : 1 } for i , nbrs in mol . neighbors_iter ( ) : atom = mol . atom ( i ) if len ( nbrs ) == 2 and all ( bond . order == 2 for bond in nbrs . values ( ) ) : atom . pi = 2 # sp (allene, ketene) if atom . symbol in max_nbr : h_cnt = max_nbr [ atom . symbol ] - len ( nbrs ) - atom . pi + atom . charge if h_cnt > 0 : mol . atom ( i ) . add_hydrogen ( h_cnt ) mol . descriptors . add ( "Valence" )
11,672
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/descriptor.py#L10-L32
[ "def", "read_data", "(", "self", ",", "blocksize", "=", "4096", ")", ":", "frames", "=", "ctypes", ".", "c_uint", "(", "blocksize", "//", "self", ".", "_client_fmt", ".", "mBytesPerFrame", ")", "buf", "=", "ctypes", ".", "create_string_buffer", "(", "blocksize", ")", "buflist", "=", "AudioBufferList", "(", ")", "buflist", ".", "mNumberBuffers", "=", "1", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mNumberChannels", "=", "self", ".", "_client_fmt", ".", "mChannelsPerFrame", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "=", "blocksize", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", "=", "ctypes", ".", "cast", "(", "buf", ",", "ctypes", ".", "c_void_p", ")", "while", "True", ":", "check", "(", "_coreaudio", ".", "ExtAudioFileRead", "(", "self", ".", "_obj", ",", "ctypes", ".", "byref", "(", "frames", ")", ",", "ctypes", ".", "byref", "(", "buflist", ")", ")", ")", "assert", "buflist", ".", "mNumberBuffers", "==", "1", "size", "=", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "if", "not", "size", ":", "break", "data", "=", "ctypes", ".", "cast", "(", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char", ")", ")", "blob", "=", "data", "[", ":", "size", "]", "yield", "blob" ]
Assign charges in physiological condition
def assign_charge ( mol , force_recalc = False ) : # TODO: not implemented yet mol . require ( "Aromatic" ) for i , nbrs in mol . neighbors_iter ( ) : atom = mol . atom ( i ) nbrcnt = len ( nbrs ) if atom . symbol == "N" : if not atom . pi : # non-conjugated amines are anion mol . atom ( i ) . charge_phys = 1 elif nbrcnt == 1 and atom . pi == 2 : # amidine, guanidine are conjugated cation ni = list ( nbrs . keys ( ) ) [ 0 ] conj = False sp2n = None for nni , nnb in mol . neighbors ( ni ) . items ( ) : if mol . atom ( nni ) . symbol == "N" and nnb . order == 2 and not mol . atom ( nni ) . aromatic : mol . atom ( nni ) . charge_conj = 1 conj = True elif mol . atom ( nni ) . symbol == "N" and nni != i : sp2n = nni if conj : mol . atom ( i ) . charge_phys = 1 if sp2n is not None : mol . atom ( sp2n ) . charge_conj = 1 elif atom . symbol == "O" and nbrcnt == 1 and atom . pi == 2 : # oxoacid are conjugated anion ni = list ( nbrs . keys ( ) ) [ 0 ] conj = False if mol . atom ( ni ) . symbol == "N" : mol . atom ( i ) . n_oxide = True mol . atom ( ni ) . n_oxide = True for nni , nnb in mol . neighbors ( ni ) . items ( ) : if mol . atom ( nni ) . symbol in ( "O" , "S" ) and nnb . order == 2 and not mol . atom ( ni ) . n_oxide : mol . atom ( nni ) . charge_conj = - 1 conj = True if conj : mol . atom ( i ) . charge_phys = - 1 elif atom . symbol == "S" and nbrcnt == 1 : # thiophenols are anion ni = list ( nbrs . keys ( ) ) [ 0 ] if mol . atom ( ni ) . aromatic : mol . atom ( i ) . charge_phys = - 1 mol . charge_assigned = True mol . descriptors . add ( "Phys_charge" )
11,673
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/descriptor.py#L99-L146
[ "def", "parse", "(", "cls", ",", "ss", ")", ":", "up", "=", "urlparse", "(", "ss", ")", "path", "=", "up", ".", "path", "query", "=", "up", ".", "query", "if", "'?'", "in", "path", ":", "path", ",", "_", "=", "up", ".", "path", ".", "split", "(", "'?'", ")", "if", "path", ".", "startswith", "(", "'/'", ")", ":", "path", "=", "path", "[", "1", ":", "]", "bucket", "=", "path", "options", "=", "parse_qs", "(", "query", ")", "scheme", "=", "up", ".", "scheme", "hosts", "=", "up", ".", "netloc", ".", "split", "(", "','", ")", "return", "cls", "(", "bucket", "=", "bucket", ",", "options", "=", "options", ",", "hosts", "=", "hosts", ",", "scheme", "=", "scheme", ")" ]
Get a type given its importable name .
def get_type ( type_name ) : parts = type_name . split ( '.' ) if len ( parts ) < 2 : raise SphinxError ( 'Type must be fully-qualified, ' 'of the form ``module.MyClass``. Got: {}' . format ( type_name ) ) module_name = "." . join ( parts [ 0 : - 1 ] ) name = parts [ - 1 ] return getattr ( import_module ( module_name ) , name )
11,674
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L35-L56
[ "def", "run_tornado", "(", "self", ",", "args", ")", ":", "server", "=", "self", "import", "tornado", ".", "ioloop", "import", "tornado", ".", "web", "import", "tornado", ".", "websocket", "ioloop", "=", "tornado", ".", "ioloop", ".", "IOLoop", ".", "current", "(", ")", "class", "DevWebSocketHandler", "(", "tornado", ".", "websocket", ".", "WebSocketHandler", ")", ":", "def", "open", "(", "self", ")", ":", "super", "(", "DevWebSocketHandler", ",", "self", ")", ".", "open", "(", ")", "server", ".", "on_open", "(", "self", ")", "def", "on_message", "(", "self", ",", "message", ")", ":", "server", ".", "on_message", "(", "self", ",", "message", ")", "def", "on_close", "(", "self", ")", ":", "super", "(", "DevWebSocketHandler", ",", "self", ")", ".", "on_close", "(", ")", "server", ".", "on_close", "(", "self", ")", "class", "MainHandler", "(", "tornado", ".", "web", ".", "RequestHandler", ")", ":", "def", "get", "(", "self", ")", ":", "self", ".", "write", "(", "server", ".", "index_page", ")", "#: Set the call later method", "server", ".", "call_later", "=", "ioloop", ".", "call_later", "server", ".", "add_callback", "=", "ioloop", ".", "add_callback", "app", "=", "tornado", ".", "web", ".", "Application", "(", "[", "(", "r\"/\"", ",", "MainHandler", ")", ",", "(", "r\"/dev\"", ",", "DevWebSocketHandler", ")", ",", "]", ")", "app", ".", "listen", "(", "self", ".", "port", ")", "print", "(", "\"Tornado Dev server started on {}\"", ".", "format", "(", "self", ".", "port", ")", ")", "ioloop", ".", "start", "(", ")" ]
Get all configuration Fields from a Config class .
def get_task_config_fields ( config_class ) : from lsst . pex . config import Field def is_config_field ( obj ) : return isinstance ( obj , Field ) return _get_alphabetical_members ( config_class , is_config_field )
11,675
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L59-L79
[ "def", "hotstart", "(", "self", ")", ":", "if", "self", ".", "write_hotstart", ":", "hotstart_time_str", "=", "self", ".", "event_manager", ".", "simulation_end", ".", "strftime", "(", "\"%Y%m%d_%H%M\"", ")", "try", ":", "os", ".", "mkdir", "(", "'hotstart'", ")", "except", "OSError", ":", "pass", "ov_hotstart_path", "=", "os", ".", "path", ".", "join", "(", "'..'", ",", "'hotstart'", ",", "'{0}_ov_hotstart_{1}.ovh'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "self", ".", "_update_card", "(", "\"WRITE_OV_HOTSTART\"", ",", "ov_hotstart_path", ",", "True", ")", "chan_hotstart_path", "=", "os", ".", "path", ".", "join", "(", "'..'", ",", "'hotstart'", ",", "'{0}_chan_hotstart_{1}'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "self", ".", "_update_card", "(", "\"WRITE_CHAN_HOTSTART\"", ",", "chan_hotstart_path", ",", "True", ")", "sm_hotstart_path", "=", "os", ".", "path", ".", "join", "(", "'..'", ",", "'hotstart'", ",", "'{0}_sm_hotstart_{1}.smh'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "self", ".", "_update_card", "(", "\"WRITE_SM_HOTSTART\"", ",", "sm_hotstart_path", ",", "True", ")", "else", ":", "self", ".", "_delete_card", "(", "\"WRITE_OV_HOTSTART\"", ")", "self", ".", "_delete_card", "(", "\"WRITE_CHAN_HOTSTART\"", ")", "self", ".", "_delete_card", "(", "\"WRITE_SM_HOTSTART\"", ")", "if", "self", ".", "read_hotstart", ":", "hotstart_time_str", "=", "self", ".", "event_manager", ".", "simulation_start", ".", "strftime", "(", "\"%Y%m%d_%H%M\"", ")", "# OVERLAND", "expected_ov_hotstart", "=", "os", ".", "path", ".", "join", "(", "'hotstart'", ",", "'{0}_ov_hotstart_{1}.ovh'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "expected_ov_hotstart", ")", ":", "self", ".", "_update_card", "(", 
"\"READ_OV_HOTSTART\"", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "expected_ov_hotstart", ")", ",", "True", ")", "else", ":", "self", ".", "_delete_card", "(", "\"READ_OV_HOTSTART\"", ")", "log", ".", "warning", "(", "\"READ_OV_HOTSTART not included as \"", "\"{0} does not exist ...\"", ".", "format", "(", "expected_ov_hotstart", ")", ")", "# CHANNEL", "expected_chan_hotstart", "=", "os", ".", "path", ".", "join", "(", "'hotstart'", ",", "'{0}_chan_hotstart_{1}'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "\"{0}.qht\"", ".", "format", "(", "expected_chan_hotstart", ")", ")", "and", "os", ".", "path", ".", "exists", "(", "\"{0}.dht\"", ".", "format", "(", "expected_chan_hotstart", ")", ")", ":", "self", ".", "_update_card", "(", "\"READ_CHAN_HOTSTART\"", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "expected_chan_hotstart", ")", ",", "True", ")", "else", ":", "self", ".", "_delete_card", "(", "\"READ_CHAN_HOTSTART\"", ")", "log", ".", "warning", "(", "\"READ_CHAN_HOTSTART not included as \"", "\"{0}.qht and/or {0}.dht does not exist ...\"", ".", "format", "(", "expected_chan_hotstart", ")", ")", "# INFILTRATION", "expected_sm_hotstart", "=", "os", ".", "path", ".", "join", "(", "'hotstart'", ",", "'{0}_sm_hotstart_{1}.smh'", ".", "format", "(", "self", ".", "project_manager", ".", "name", ",", "hotstart_time_str", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "expected_sm_hotstart", ")", ":", "self", ".", "_update_card", "(", "\"READ_SM_HOTSTART\"", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "expected_sm_hotstart", ")", ",", "True", ")", "else", ":", "self", ".", "_delete_card", "(", "\"READ_SM_HOTSTART\"", ")", "log", ".", "warning", "(", "\"READ_SM_HOTSTART not included as\"", "\" {0} does not exist ...\"", ".", "format", "(", "expected_sm_hotstart", ")", ")" ]
Get all configurable subtask fields from a Config class .
def get_subtask_fields ( config_class ) : from lsst . pex . config import ConfigurableField , RegistryField def is_subtask_field ( obj ) : return isinstance ( obj , ( ConfigurableField , RegistryField ) ) return _get_alphabetical_members ( config_class , is_subtask_field )
11,676
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L82-L103
[ "def", "_verify", "(", "vm_", ")", ":", "log", ".", "info", "(", "'Verifying credentials for %s'", ",", "vm_", "[", "'name'", "]", ")", "win_installer", "=", "config", ".", "get_cloud_config_value", "(", "'win_installer'", ",", "vm_", ",", "__opts__", ")", "if", "win_installer", ":", "log", ".", "debug", "(", "'Testing Windows authentication method for %s'", ",", "vm_", "[", "'name'", "]", ")", "if", "not", "HAS_IMPACKET", ":", "log", ".", "error", "(", "'Impacket library not found'", ")", "return", "False", "# Test Windows connection", "kwargs", "=", "{", "'host'", ":", "vm_", "[", "'ssh_host'", "]", ",", "'username'", ":", "config", ".", "get_cloud_config_value", "(", "'win_username'", ",", "vm_", ",", "__opts__", ",", "default", "=", "'Administrator'", ")", ",", "'password'", ":", "config", ".", "get_cloud_config_value", "(", "'win_password'", ",", "vm_", ",", "__opts__", ",", "default", "=", "''", ")", "}", "# Test SMB connection", "try", ":", "log", ".", "debug", "(", "'Testing SMB protocol for %s'", ",", "vm_", "[", "'name'", "]", ")", "if", "__utils__", "[", "'smb.get_conn'", "]", "(", "*", "*", "kwargs", ")", "is", "False", ":", "return", "False", "except", "(", "smbSessionError", ",", "smb3SessionError", ")", "as", "exc", ":", "log", ".", "error", "(", "'Exception: %s'", ",", "exc", ")", "return", "False", "# Test WinRM connection", "use_winrm", "=", "config", ".", "get_cloud_config_value", "(", "'use_winrm'", ",", "vm_", ",", "__opts__", ",", "default", "=", "False", ")", "if", "use_winrm", ":", "log", ".", "debug", "(", "'WinRM protocol requested for %s'", ",", "vm_", "[", "'name'", "]", ")", "if", "not", "HAS_WINRM", ":", "log", ".", "error", "(", "'WinRM library not found'", ")", "return", "False", "kwargs", "[", "'port'", "]", "=", "config", ".", "get_cloud_config_value", "(", "'winrm_port'", ",", "vm_", ",", "__opts__", ",", "default", "=", "5986", ")", "kwargs", "[", "'timeout'", "]", "=", "10", "try", ":", "log", ".", "debug", "(", "'Testing 
WinRM protocol for %s'", ",", "vm_", "[", "'name'", "]", ")", "return", "__utils__", "[", "'cloud.wait_for_winrm'", "]", "(", "*", "*", "kwargs", ")", "is", "not", "None", "except", "(", "ConnectionError", ",", "ConnectTimeout", ",", "ReadTimeout", ",", "SSLError", ",", "ProxyError", ",", "RetryError", ",", "InvalidSchema", ",", "WinRMTransportError", ")", "as", "exc", ":", "log", ".", "error", "(", "'Exception: %s'", ",", "exc", ")", "return", "False", "return", "True", "else", ":", "log", ".", "debug", "(", "'Testing SSH authentication method for %s'", ",", "vm_", "[", "'name'", "]", ")", "# Test SSH connection", "kwargs", "=", "{", "'host'", ":", "vm_", "[", "'ssh_host'", "]", ",", "'port'", ":", "config", ".", "get_cloud_config_value", "(", "'ssh_port'", ",", "vm_", ",", "__opts__", ",", "default", "=", "22", ")", ",", "'username'", ":", "config", ".", "get_cloud_config_value", "(", "'ssh_username'", ",", "vm_", ",", "__opts__", ",", "default", "=", "'root'", ")", ",", "'password'", ":", "config", ".", "get_cloud_config_value", "(", "'password'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ")", ",", "'key_filename'", ":", "config", ".", "get_cloud_config_value", "(", "'key_filename'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ",", "default", "=", "config", ".", "get_cloud_config_value", "(", "'ssh_keyfile'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ",", "default", "=", "None", ")", ")", ",", "'gateway'", ":", "vm_", ".", "get", "(", "'gateway'", ",", "None", ")", ",", "'maxtries'", ":", "1", "}", "log", ".", "debug", "(", "'Testing SSH protocol for %s'", ",", "vm_", "[", "'name'", "]", ")", "try", ":", "return", "__utils__", "[", "'cloud.wait_for_passwd'", "]", "(", "*", "*", "kwargs", ")", "is", "True", "except", "SaltCloudException", "as", "exc", ":", "log", ".", "error", "(", "'Exception: %s'", ",", "exc", ")", "return", "False" ]
Get members of an object sorted alphabetically .
def _get_alphabetical_members ( obj , predicate ) : fields = dict ( inspect . getmembers ( obj , predicate ) ) keys = list ( fields . keys ( ) ) keys . sort ( ) return { k : fields [ k ] for k in keys }
11,677
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L106-L138
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Make a string for the object s type
def typestring ( obj ) : obj_type = type ( obj ) return '.' . join ( ( obj_type . __module__ , obj_type . __name__ ) )
11,678
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L141-L163
[ "def", "createComboContract", "(", "self", ",", "symbol", ",", "legs", ",", "currency", "=", "\"USD\"", ",", "exchange", "=", "None", ")", ":", "exchange", "=", "legs", "[", "0", "]", ".", "m_exchange", "if", "exchange", "is", "None", "else", "exchange", "contract_tuple", "=", "(", "symbol", ",", "\"BAG\"", ",", "exchange", ",", "currency", ",", "\"\"", ",", "0.0", ",", "\"\"", ")", "contract", "=", "self", ".", "createContract", "(", "contract_tuple", ",", "comboLegs", "=", "legs", ")", "return", "contract" ]
Extract the docstring from an object as individual lines .
def get_docstring ( obj ) : docstring = getdoc ( obj , allow_inherited = True ) if docstring is None : logger = getLogger ( __name__ ) logger . warning ( "Object %s doesn't have a docstring." , obj ) docstring = 'Undocumented' # ignore is simply the number of initial lines to ignore when determining # the docstring's baseline indent level. We really want "1" here. return prepare_docstring ( docstring , ignore = 1 )
11,679
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L166-L193
[ "def", "Find", "(", "cls", ",", "setting_matcher", ",", "port_path", "=", "None", ",", "serial", "=", "None", ",", "timeout_ms", "=", "None", ")", ":", "if", "port_path", ":", "device_matcher", "=", "cls", ".", "PortPathMatcher", "(", "port_path", ")", "usb_info", "=", "port_path", "elif", "serial", ":", "device_matcher", "=", "cls", ".", "SerialMatcher", "(", "serial", ")", "usb_info", "=", "serial", "else", ":", "device_matcher", "=", "None", "usb_info", "=", "'first'", "return", "cls", ".", "FindFirst", "(", "setting_matcher", ",", "device_matcher", ",", "usb_info", "=", "usb_info", ",", "timeout_ms", "=", "timeout_ms", ")" ]
Get the first summary sentence from a docstring .
def extract_docstring_summary ( docstring ) : summary_lines = [ ] for line in docstring : if line == '' : break else : summary_lines . append ( line ) return ' ' . join ( summary_lines )
11,680
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L196-L215
[ "def", "bind_objects", "(", "self", ",", "*", "objects", ")", ":", "self", ".", "control", ".", "bind_keys", "(", "objects", ")", "self", ".", "objects", "+=", "objects" ]
Run loading of movie appearances .
def run ( self ) : # make all requests via a cache instance request_cache = cache . get_request_cache ( ) # DB session to operate in session = client . get_client ( ) . create_session ( ) # clear completion flag for this task self . mark_incomplete ( ) # list of universes seen in character appearances universes = [ ] # don't auto-flush the session for queries, this causes issues with the 'id' field of newly # created MovieAppearance instances with session . no_autoflush : # get all movies movies = session . query ( models . Movie ) . all ( ) # iterate over all movies and build appearance objects for movie in movies : # retrieve movie article, keep main article content only, parse article = request_cache . get ( "http://marvel.wikia.com" + movie . url , xpath = "//article[@id='WikiaMainContent']" , rate_limit = 0.5 ) doc = html . fromstring ( article ) # find heading for appearances, this is a span inside an h2; go to the h2 node = doc . xpath ( "//span[@id='Appearances']" ) [ 0 ] node = node . getparent ( ) # Appearance type is given by <p><b>... some text ...</b></p> tags. Sometimes the first # group of appearances carries no such label, assume it's the featured characters. appearance_type = "Featured Characters" # walk along the tree; character lists are in <ul>s, labels in <p>s; # the next h2 ends the character listing node = node . getnext ( ) while node is not None and node . tag != 'h2' : if node . tag == 'ul' and ( 'characters' in appearance_type . lower ( ) or 'villains' in appearance_type . lower ( ) ) : # starts a new list of stuff; only enter here if the previous label was for characters; # use iter() to iterate over all 'li' items (also those of nested lists) for li in node . iter ( 'li' ) : # inside the list element, find all 'a's; iterate over child nodes, don't use iter(), # since we want don't want to find 'a's of sub-elements in a nested list here for a in li : if a . 
tag != 'a' : continue # there are 'a's in the list that wrap imags, don't use these; also don't use # links that lead to somewhere else than the wiki if "image" in a . get ( "class" , "" ) or not a . get ( "href" ) . startswith ( "/wiki/" ) : continue match = re . search ( r'\(.*?\)' , a . get ( 'href' ) ) if match : universes . append ( match . group ( ) [ 1 : - 1 ] ) # accept the first matching href, build a new appearance object, then skip to next li try : character = session . query ( models . Character ) . filter ( models . Character . url == a . get ( "href" ) ) . one ( ) # -- start documentation include: many-to-many-generation appearance = models . MovieAppearance ( movie_id = movie . id , character_id = character . id , appearance_type = appearance_type ) session . add ( appearance ) # -- end documentation include: many-to-many-generation except NoResultFound : # none found, ignore pass # break looping over 'a's once we have found one, go to next 'li' break elif node . tag == 'p' : # new character class (or label for locations, items, ...) appearance_type = " " . join ( node . itertext ( ) ) . strip ( ) . strip ( ':' ) . strip ( ) node = node . getnext ( ) print ( "\nNumber of character appearances per universe: " ) print ( pd . Series ( data = universes ) . value_counts ( ) ) # done, save all data, finalize task session . commit ( ) session . close ( ) self . mark_complete ( )
11,681
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/pipeline.py#L635-L733
[ "def", "_close", "(", "self", ")", ":", "self", ".", "_usb_handle", ".", "releaseInterface", "(", ")", "try", ":", "# If we're using PyUSB >= 1.0 we can re-attach the kernel driver here.", "self", ".", "_usb_handle", ".", "dev", ".", "attach_kernel_driver", "(", "0", ")", "except", ":", "pass", "self", ".", "_usb_int", "=", "None", "self", ".", "_usb_handle", "=", "None", "return", "True" ]
Compute and store inflation - adjusted movie budgets
def run ( self ) : self . mark_incomplete ( ) session = client . get_client ( ) . create_session ( ) # load CPI data cpi = ConsumerPriceIndexFile ( ) . load ( ) # max year we have CPI data for max_cpi_year = cpi [ 'Year' ] . max ( ) # extract annual average only, index by year cpi = cpi . set_index ( 'Year' ) [ 'Annual' ] # process all movies for movie in session . query ( models . Movie ) . all ( ) : # we can only compute an inflation-adjusted budget if we know the year and budget if movie . year is not None and movie . budget is not None : if movie . year > max_cpi_year : # if movie is too new, don't inflation-adjust movie . budget_inflation_adjusted = movie . budget else : movie . budget_inflation_adjusted = movie . budget * cpi . loc [ max_cpi_year ] / cpi . loc [ movie . year ] # done, save all data, finalize task session . commit ( ) session . close ( ) self . mark_complete ( )
11,682
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/pipeline.py#L822-L850
[ "def", "debug", "(", "self", ")", ":", "url", "=", "'{}/debug/status'", ".", "format", "(", "self", ".", "url", ")", "data", "=", "self", ".", "_get", "(", "url", ")", "return", "data", ".", "json", "(", ")" ]
Returns the indexes in descending order of the top k score or all scores if k is None
def _argsort ( y_score , k = None ) : ranks = y_score . argsort ( ) argsort = ranks [ : : - 1 ] if k is not None : argsort = argsort [ 0 : k ] return argsort
11,683
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L16-L26
[ "def", "add_json_mask", "(", "self", ",", "start", ",", "method_str", ",", "json_producer", ")", ":", "def", "send_json", "(", "drh", ",", "rem_path", ")", ":", "obj", "=", "json_producer", "(", "drh", ",", "rem_path", ")", "if", "not", "isinstance", "(", "obj", ",", "Response", ")", ":", "obj", "=", "Response", "(", "obj", ")", "ctype", "=", "obj", ".", "get_ctype", "(", "\"application/json\"", ")", "code", "=", "obj", ".", "code", "obj", "=", "obj", ".", "response", "if", "obj", "is", "None", ":", "drh", ".", "send_error", "(", "404", ",", "\"File not found\"", ")", "return", "None", "f", "=", "BytesIO", "(", ")", "json_str", "=", "json_dumps", "(", "obj", ")", "if", "isinstance", "(", "json_str", ",", "(", "str", ",", "unicode", ")", ")", ":", "try", ":", "json_str", "=", "json_str", ".", "decode", "(", "'utf8'", ")", "except", "AttributeError", ":", "pass", "json_str", "=", "json_str", ".", "encode", "(", "'utf8'", ")", "f", ".", "write", "(", "json_str", ")", "f", ".", "flush", "(", ")", "size", "=", "f", ".", "tell", "(", ")", "f", ".", "seek", "(", "0", ")", "# handle ETag caching", "if", "drh", ".", "request_version", ">=", "\"HTTP/1.1\"", ":", "e_tag", "=", "\"{0:x}\"", ".", "format", "(", "zlib", ".", "crc32", "(", "f", ".", "read", "(", ")", ")", "&", "0xFFFFFFFF", ")", "f", ".", "seek", "(", "0", ")", "match", "=", "_getheader", "(", "drh", ".", "headers", ",", "'if-none-match'", ")", "if", "match", "is", "not", "None", ":", "if", "drh", ".", "check_cache", "(", "e_tag", ",", "match", ")", ":", "f", ".", "close", "(", ")", "return", "None", "drh", ".", "send_header", "(", "\"ETag\"", ",", "e_tag", ",", "end_header", "=", "True", ")", "drh", ".", "send_header", "(", "\"Cache-Control\"", ",", "\"max-age={0}\"", ".", "format", "(", "self", ".", "max_age", ")", ",", "end_header", "=", "True", ")", "drh", ".", "send_response", "(", "code", ")", "drh", ".", "send_header", "(", "\"Content-Type\"", ",", "ctype", ")", "drh", ".", "send_header", "(", 
"\"Content-Length\"", ",", "size", ")", "drh", ".", "end_headers", "(", ")", "return", "f", "self", ".", "_add_file_mask", "(", "start", ",", "method_str", ",", "send_json", ")" ]
Counts the number of examples . If countna is False then only count labeled examples i . e . those with y_true not NaN
def count ( y_true , y_score = None , countna = False ) : if not countna : return ( ~ np . isnan ( to_float ( y_true ) ) ) . sum ( ) else : return len ( y_true )
11,684
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L40-L48
[ "def", "run", "(", "self", ",", "*", "*", "import_params", ")", ":", "import_params", "[", "\"url\"", "]", "=", "self", ".", "url", "import_params", "[", "\"interval\"", "]", "=", "self", ".", "interval", "if", "\"connection\"", "in", "import_params", ":", "self", ".", "fields", ".", "append", "(", "\"connector\"", ")", "import_params", "[", "\"connection\"", "]", "[", "\"interval\"", "]", "=", "self", ".", "interval", "self", ".", "update_from_dict", "(", "import_params", "[", "\"connection\"", "]", ")", "self", ".", "save", "(", "force_create", "=", "True", ")", "else", ":", "return", "super", "(", "SyncTableJob", ",", "self", ")", ".", "run", "(", "params", "=", "import_params", ")" ]
Returns series whose i - th entry is the number of examples in the top i
def count_series ( y_true , y_score , countna = False ) : y_true , y_score = to_float ( y_true , y_score ) top = _argsort ( y_score ) if not countna : a = ( ~ np . isnan ( y_true [ top ] ) ) . cumsum ( ) else : a = range ( 1 , len ( y_true ) + 1 ) return pd . Series ( a , index = range ( 1 , len ( a ) + 1 ) )
11,685
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L51-L63
[ "def", "add_text", "(", "self", ",", "coords", ",", "text", ",", "color", "=", "(", "0", ",", "0", ",", "0", ")", ")", ":", "source", "=", "vtk", ".", "vtkVectorText", "(", ")", "source", ".", "SetText", "(", "text", ")", "mapper", "=", "vtk", ".", "vtkPolyDataMapper", "(", ")", "mapper", ".", "SetInputConnection", "(", "source", ".", "GetOutputPort", "(", ")", ")", "follower", "=", "vtk", ".", "vtkFollower", "(", ")", "follower", ".", "SetMapper", "(", "mapper", ")", "follower", ".", "GetProperty", "(", ")", ".", "SetColor", "(", "color", ")", "follower", ".", "SetPosition", "(", "coords", ")", "follower", ".", "SetScale", "(", "0.5", ")", "self", ".", "ren", ".", "AddActor", "(", "follower", ")", "follower", ".", "SetCamera", "(", "self", ".", "ren", ".", "GetActiveCamera", "(", ")", ")" ]
Number of positive labels divided by number of labels or zero if there are no labels
def baseline ( y_true , y_score = None ) : if len ( y_true ) > 0 : return np . nansum ( y_true ) / count ( y_true , countna = False ) else : return 0.0
11,686
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L66-L74
[ "def", "_ParseLogonApplications", "(", "self", ",", "parser_mediator", ",", "registry_key", ")", ":", "for", "application", "in", "self", ".", "_LOGON_APPLICATIONS", ":", "command_value", "=", "registry_key", ".", "GetValueByName", "(", "application", ")", "if", "not", "command_value", ":", "continue", "values_dict", "=", "{", "'Application'", ":", "application", ",", "'Command'", ":", "command_value", ".", "GetDataAsObject", "(", ")", ",", "'Trigger'", ":", "'Logon'", "}", "event_data", "=", "windows_events", ".", "WindowsRegistryEventData", "(", ")", "event_data", ".", "key_path", "=", "registry_key", ".", "path", "event_data", ".", "offset", "=", "registry_key", ".", "offset", "event_data", ".", "regvalue", "=", "values_dict", "event_data", ".", "source_append", "=", "': Winlogon'", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "registry_key", ".", "last_written_time", ",", "definitions", ".", "TIME_DESCRIPTION_WRITTEN", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")" ]
Returns are under the ROC curve
def roc_auc ( y_true , y_score ) : notnull = ~ np . isnan ( y_true ) fpr , tpr , thresholds = sklearn . metrics . roc_curve ( y_true [ notnull ] , y_score [ notnull ] ) return sklearn . metrics . auc ( fpr , tpr )
11,687
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L77-L83
[ "def", "start_server", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Opens a browser-based \"", "\"client that interfaces with the \"", "\"chemical format converter.\"", ")", "parser", ".", "add_argument", "(", "'--debug'", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Prints all \"", "\"transmitted data streams.\"", ")", "parser", ".", "add_argument", "(", "'--port'", ",", "type", "=", "int", ",", "default", "=", "8000", ",", "help", "=", "\"The port \"", "\"on which to serve the website.\"", ")", "parser", ".", "add_argument", "(", "'--timeout'", ",", "type", "=", "int", ",", "default", "=", "5", ",", "help", "=", "\"The maximum \"", "\"time, in seconds, allowed for a process to run \"", "\"before returning an error.\"", ")", "parser", ".", "add_argument", "(", "'--workers'", ",", "type", "=", "int", ",", "default", "=", "2", ",", "help", "=", "\"The number of \"", "\"worker processes to use with the server.\"", ")", "parser", ".", "add_argument", "(", "'--no-browser'", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Disables \"", "\"opening a browser window on startup.\"", ")", "global", "args", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "debug", ":", "logging", ".", "getLogger", "(", ")", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "handlers", "=", "[", "(", "r'/'", ",", "IndexHandler", ")", ",", "(", "r'/websocket'", ",", "WebSocket", ")", ",", "(", "r'/static/(.*)'", ",", "tornado", ".", "web", ".", "StaticFileHandler", ",", "{", "'path'", ":", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "}", ")", "]", "application", "=", "tornado", ".", "web", ".", "Application", "(", "handlers", ")", "application", ".", "listen", "(", "args", ".", "port", ")", "if", "not", "args", ".", "no_browser", ":", "webbrowser", ".", "open", "(", "'http://localhost:%d/'", "%", "args", ".", "port", ",", "new", "=", "2", 
")", "try", ":", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ".", "start", "(", ")", "except", "KeyboardInterrupt", ":", "sys", ".", "stderr", ".", "write", "(", "\"Received keyboard interrupt. Stopping server.\\n\"", ")", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ".", "stop", "(", ")", "sys", ".", "exit", "(", "1", ")" ]
Returns series of length k whose i - th entry is the recall in the top i
def recall_series ( y_true , y_score , k = None , value = True ) : y_true , y_score = to_float ( y_true , y_score ) top = _argsort ( y_score , k ) if not value : y_true = 1 - y_true a = np . nan_to_num ( y_true [ top ] ) . cumsum ( ) return pd . Series ( a , index = np . arange ( 1 , len ( a ) + 1 ) )
11,688
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L143-L154
[ "def", "generation", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "state", "is", "not", "MemberState", ".", "STABLE", ":", "return", "None", "return", "self", ".", "_generation" ]
Rotate and return an image according to its Exif information .
def autorotate ( image , orientation = None ) : orientation_value = orientation if orientation else image . _getexif ( ) . get ( EXIF_KEYS . get ( 'Orientation' ) ) if orientation_value is None : raise ImDirectException ( "No orientation available in Exif " "tag or given explicitly." ) if orientation_value in ( 1 , 2 ) : i = image elif orientation_value in ( 3 , 4 ) : i = image . transpose ( Image . ROTATE_180 ) elif orientation_value in ( 5 , 6 ) : i = image . transpose ( Image . ROTATE_270 ) elif orientation_value in ( 7 , 8 ) : i = image . transpose ( Image . ROTATE_90 ) else : i = image if orientation_value in ( 2 , 4 , 5 , 7 ) : i = i . transpose ( Image . FLIP_LEFT_RIGHT ) return i
11,689
https://github.com/hbldh/imdirect/blob/12275b9f9faa6952cde2f2fe612bbefab215251a/imdirect/_autorotate.py#L47-L89
[ "def", "_SetHeader", "(", "self", ",", "new_values", ")", ":", "row", "=", "self", ".", "row_class", "(", ")", "row", ".", "row", "=", "0", "for", "v", "in", "new_values", ":", "row", "[", "v", "]", "=", "v", "self", ".", "_table", "[", "0", "]", "=", "row" ]
Opens identifies the given image file and rotates it if it is a JPEG .
def imdirect_open ( fp ) : img = pil_open ( fp , 'r' ) if img . format == 'JPEG' : # Read Exif tag on image. if isinstance ( fp , string_types ) : exif = piexif . load ( text_type_to_use ( fp ) ) else : fp . seek ( 0 ) exif = piexif . load ( fp . read ( ) ) # If orientation field is missing or equal to 1, nothing needs to be done. orientation_value = exif . get ( '0th' , { } ) . get ( piexif . ImageIFD . Orientation ) if orientation_value is None or orientation_value == 1 : return img # Otherwise, rotate the image and update the exif accordingly. img_rot = autorotate ( img ) exif = update_exif_for_rotated_image ( exif ) # Now, lets restore the output image to # PIL.JpegImagePlugin.JpegImageFile class with the correct, # updated Exif information. # Save image as JPEG to get a correct byte representation of # the image and then read it back. with io . BytesIO ( ) as bio : img_rot . save ( bio , format = 'jpeg' , exif = piexif . dump ( exif ) ) bio . seek ( 0 ) img_rot_new = pil_open ( bio , 'r' ) # Since we use a BytesIO we need to avoid the lazy # loading of the PIL image. Therefore, we explicitly # load the data here. img_rot_new . load ( ) img = img_rot_new return img
11,690
https://github.com/hbldh/imdirect/blob/12275b9f9faa6952cde2f2fe612bbefab215251a/imdirect/_autorotate.py#L168-L218
[ "def", "_write_value_failed", "(", "self", ",", "dbus_error", ")", ":", "error", "=", "_error_from_dbus_error", "(", "dbus_error", ")", "self", ".", "service", ".", "device", ".", "characteristic_write_value_failed", "(", "characteristic", "=", "self", ",", "error", "=", "error", ")" ]
Monkey patching PIL . Image . open method
def monkey_patch ( enabled = True ) : if enabled : Image . open = imdirect_open else : Image . open = pil_open
11,691
https://github.com/hbldh/imdirect/blob/12275b9f9faa6952cde2f2fe612bbefab215251a/imdirect/_autorotate.py#L221-L232
[ "def", "_grid_widgets", "(", "self", ")", ":", "scrollbar_column", "=", "0", "if", "self", ".", "__compound", "is", "tk", ".", "LEFT", "else", "2", "self", ".", "listbox", ".", "grid", "(", "row", "=", "0", ",", "column", "=", "1", ",", "sticky", "=", "\"nswe\"", ")", "self", ".", "scrollbar", ".", "grid", "(", "row", "=", "0", ",", "column", "=", "scrollbar_column", ",", "sticky", "=", "\"ns\"", ")" ]
Saves an image using PIL preserving the exif information .
def save_with_exif_info ( img , * args , * * kwargs ) : if 'exif' in kwargs : exif = kwargs . pop ( 'exif' ) else : exif = img . info . get ( 'exif' ) img . save ( * args , exif = exif , * * kwargs )
11,692
https://github.com/hbldh/imdirect/blob/12275b9f9faa6952cde2f2fe612bbefab215251a/imdirect/_autorotate.py#L235-L248
[ "def", "start", "(", "self", ")", ":", "for", "client", "in", "self", ".", "_snippet_clients", ".", "values", "(", ")", ":", "if", "not", "client", ".", "is_alive", ":", "self", ".", "_device", ".", "log", ".", "debug", "(", "'Starting SnippetClient<%s>.'", ",", "client", ".", "package", ")", "client", ".", "start_app_and_connect", "(", ")", "else", ":", "self", ".", "_device", ".", "log", ".", "debug", "(", "'Not startng SnippetClient<%s> because it is already alive.'", ",", "client", ".", "package", ")" ]
Create a resource
def create ( context , resource , * * kwargs ) : data = utils . sanitize_kwargs ( * * kwargs ) uri = '%s/%s' % ( context . dci_cs_api , resource ) r = context . session . post ( uri , timeout = HTTP_TIMEOUT , json = data ) return r
11,693
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L22-L28
[ "def", "_timestamp_regulator", "(", "self", ")", ":", "unified_timestamps", "=", "_PrettyDefaultDict", "(", "list", ")", "staged_files", "=", "self", ".", "_list_audio_files", "(", "sub_dir", "=", "\"staging\"", ")", "for", "timestamp_basename", "in", "self", ".", "__timestamps_unregulated", ":", "if", "len", "(", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", ")", ">", "1", ":", "# File has been splitted", "timestamp_name", "=", "''", ".", "join", "(", "timestamp_basename", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "staged_splitted_files_of_timestamp", "=", "list", "(", "filter", "(", "lambda", "staged_file", ":", "(", "timestamp_name", "==", "staged_file", "[", ":", "-", "3", "]", "and", "all", "(", "[", "(", "x", "in", "set", "(", "map", "(", "str", ",", "range", "(", "10", ")", ")", ")", ")", "for", "x", "in", "staged_file", "[", "-", "3", ":", "]", "]", ")", ")", ",", "staged_files", ")", ")", "if", "len", "(", "staged_splitted_files_of_timestamp", ")", "==", "0", ":", "self", ".", "__errors", "[", "(", "time", "(", ")", ",", "timestamp_basename", ")", "]", "=", "{", "\"reason\"", ":", "\"Missing staged file\"", ",", "\"current_staged_files\"", ":", "staged_files", "}", "continue", "staged_splitted_files_of_timestamp", ".", "sort", "(", ")", "unified_timestamp", "=", "list", "(", ")", "for", "staging_digits", ",", "splitted_file", "in", "enumerate", "(", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", ")", ":", "prev_splits_sec", "=", "0", "if", "int", "(", "staging_digits", ")", "!=", "0", ":", "prev_splits_sec", "=", "self", ".", "_get_audio_duration_seconds", "(", "\"{}/staging/{}{:03d}\"", ".", "format", "(", "self", ".", "src_dir", ",", "timestamp_name", ",", "staging_digits", "-", "1", ")", ")", "for", "word_block", "in", "splitted_file", ":", "unified_timestamp", ".", "append", "(", "_WordBlock", "(", "word", "=", "word_block", ".", "word", ",", "start", "=", "round", "(", 
"word_block", ".", "start", "+", "prev_splits_sec", ",", "2", ")", ",", "end", "=", "round", "(", "word_block", ".", "end", "+", "prev_splits_sec", ",", "2", ")", ")", ")", "unified_timestamps", "[", "str", "(", "timestamp_basename", ")", "]", "+=", "unified_timestamp", "else", ":", "unified_timestamps", "[", "timestamp_basename", "]", "+=", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", "[", "0", "]", "self", ".", "__timestamps", ".", "update", "(", "unified_timestamps", ")", "self", ".", "__timestamps_unregulated", "=", "_PrettyDefaultDict", "(", "list", ")" ]
List a specific resource
def get ( context , resource , * * kwargs ) : uri = '%s/%s/%s' % ( context . dci_cs_api , resource , kwargs . pop ( 'id' ) ) r = context . session . get ( uri , timeout = HTTP_TIMEOUT , params = kwargs ) return r
11,694
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L69-L73
[ "def", "periodic_ping", "(", "self", ")", "->", "None", ":", "if", "self", ".", "is_closing", "(", ")", "and", "self", ".", "ping_callback", "is", "not", "None", ":", "self", ".", "ping_callback", ".", "stop", "(", ")", "return", "# Check for timeout on pong. Make sure that we really have", "# sent a recent ping in case the machine with both server and", "# client has been suspended since the last ping.", "now", "=", "IOLoop", ".", "current", "(", ")", ".", "time", "(", ")", "since_last_pong", "=", "now", "-", "self", ".", "last_pong", "since_last_ping", "=", "now", "-", "self", ".", "last_ping", "assert", "self", ".", "ping_interval", "is", "not", "None", "assert", "self", ".", "ping_timeout", "is", "not", "None", "if", "(", "since_last_ping", "<", "2", "*", "self", ".", "ping_interval", "and", "since_last_pong", ">", "self", ".", "ping_timeout", ")", ":", "self", ".", "close", "(", ")", "return", "self", ".", "write_ping", "(", "b\"\"", ")", "self", ".", "last_ping", "=", "now" ]
Retrieve data field from a resource
def get_data ( context , resource , * * kwargs ) : url_suffix = '' if 'keys' in kwargs and kwargs [ 'keys' ] : url_suffix = '/?keys=%s' % ',' . join ( kwargs . pop ( 'keys' ) ) uri = '%s/%s/%s/data%s' % ( context . dci_cs_api , resource , kwargs . pop ( 'id' ) , url_suffix ) r = context . session . get ( uri , timeout = HTTP_TIMEOUT , params = kwargs ) return r
11,695
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L76-L87
[ "def", "_match_cubes", "(", "ccube_clean", ",", "ccube_dirty", ",", "bexpcube_clean", ",", "bexpcube_dirty", ",", "hpx_order", ")", ":", "if", "hpx_order", "==", "ccube_clean", ".", "hpx", ".", "order", ":", "ccube_clean_at_order", "=", "ccube_clean", "else", ":", "ccube_clean_at_order", "=", "ccube_clean", ".", "ud_grade", "(", "hpx_order", ",", "preserve_counts", "=", "True", ")", "if", "hpx_order", "==", "ccube_dirty", ".", "hpx", ".", "order", ":", "ccube_dirty_at_order", "=", "ccube_dirty", "else", ":", "ccube_dirty_at_order", "=", "ccube_dirty", ".", "ud_grade", "(", "hpx_order", ",", "preserve_counts", "=", "True", ")", "if", "hpx_order", "==", "bexpcube_clean", ".", "hpx", ".", "order", ":", "bexpcube_clean_at_order", "=", "bexpcube_clean", "else", ":", "bexpcube_clean_at_order", "=", "bexpcube_clean", ".", "ud_grade", "(", "hpx_order", ",", "preserve_counts", "=", "True", ")", "if", "hpx_order", "==", "bexpcube_dirty", ".", "hpx", ".", "order", ":", "bexpcube_dirty_at_order", "=", "bexpcube_dirty", "else", ":", "bexpcube_dirty_at_order", "=", "bexpcube_dirty", ".", "ud_grade", "(", "hpx_order", ",", "preserve_counts", "=", "True", ")", "if", "ccube_dirty_at_order", ".", "hpx", ".", "nest", "!=", "ccube_clean", ".", "hpx", ".", "nest", ":", "ccube_dirty_at_order", "=", "ccube_dirty_at_order", ".", "swap_scheme", "(", ")", "if", "bexpcube_clean_at_order", ".", "hpx", ".", "nest", "!=", "ccube_clean", ".", "hpx", ".", "nest", ":", "bexpcube_clean_at_order", "=", "bexpcube_clean_at_order", ".", "swap_scheme", "(", ")", "if", "bexpcube_dirty_at_order", ".", "hpx", ".", "nest", "!=", "ccube_clean", ".", "hpx", ".", "nest", ":", "bexpcube_dirty_at_order", "=", "bexpcube_dirty_at_order", ".", "swap_scheme", "(", ")", "ret_dict", "=", "dict", "(", "ccube_clean", "=", "ccube_clean_at_order", ",", "ccube_dirty", "=", "ccube_dirty_at_order", ",", "bexpcube_clean", "=", "bexpcube_clean_at_order", ",", "bexpcube_dirty", "=", "bexpcube_dirty_at_order", ")", "return", 
"ret_dict" ]
Update a specific resource
def update ( context , resource , * * kwargs ) : etag = kwargs . pop ( 'etag' ) id = kwargs . pop ( 'id' ) data = utils . sanitize_kwargs ( * * kwargs ) uri = '%s/%s/%s' % ( context . dci_cs_api , resource , id ) r = context . session . put ( uri , timeout = HTTP_TIMEOUT , headers = { 'If-match' : etag } , json = data ) return r
11,696
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L90-L99
[ "def", "periodic_ping", "(", "self", ")", "->", "None", ":", "if", "self", ".", "is_closing", "(", ")", "and", "self", ".", "ping_callback", "is", "not", "None", ":", "self", ".", "ping_callback", ".", "stop", "(", ")", "return", "# Check for timeout on pong. Make sure that we really have", "# sent a recent ping in case the machine with both server and", "# client has been suspended since the last ping.", "now", "=", "IOLoop", ".", "current", "(", ")", ".", "time", "(", ")", "since_last_pong", "=", "now", "-", "self", ".", "last_pong", "since_last_ping", "=", "now", "-", "self", ".", "last_ping", "assert", "self", ".", "ping_interval", "is", "not", "None", "assert", "self", ".", "ping_timeout", "is", "not", "None", "if", "(", "since_last_ping", "<", "2", "*", "self", ".", "ping_interval", "and", "since_last_pong", ">", "self", ".", "ping_timeout", ")", ":", "self", ".", "close", "(", ")", "return", "self", ".", "write_ping", "(", "b\"\"", ")", "self", ".", "last_ping", "=", "now" ]
Delete a specific resource
def delete ( context , resource , id , * * kwargs ) : etag = kwargs . pop ( 'etag' , None ) id = id subresource = kwargs . pop ( 'subresource' , None ) subresource_id = kwargs . pop ( 'subresource_id' , None ) uri = '%s/%s/%s' % ( context . dci_cs_api , resource , id ) if subresource : uri = '%s/%s/%s' % ( uri , subresource , subresource_id ) r = context . session . delete ( uri , timeout = HTTP_TIMEOUT , headers = { 'If-match' : etag } ) return r
11,697
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L102-L116
[ "def", "periodic_ping", "(", "self", ")", "->", "None", ":", "if", "self", ".", "is_closing", "(", ")", "and", "self", ".", "ping_callback", "is", "not", "None", ":", "self", ".", "ping_callback", ".", "stop", "(", ")", "return", "# Check for timeout on pong. Make sure that we really have", "# sent a recent ping in case the machine with both server and", "# client has been suspended since the last ping.", "now", "=", "IOLoop", ".", "current", "(", ")", ".", "time", "(", ")", "since_last_pong", "=", "now", "-", "self", ".", "last_pong", "since_last_ping", "=", "now", "-", "self", ".", "last_ping", "assert", "self", ".", "ping_interval", "is", "not", "None", "assert", "self", ".", "ping_timeout", "is", "not", "None", "if", "(", "since_last_ping", "<", "2", "*", "self", ".", "ping_interval", "and", "since_last_pong", ">", "self", ".", "ping_timeout", ")", ":", "self", ".", "close", "(", ")", "return", "self", ".", "write_ping", "(", "b\"\"", ")", "self", ".", "last_ping", "=", "now" ]
Purge resource type .
def purge ( context , resource , * * kwargs ) : uri = '%s/%s/purge' % ( context . dci_cs_api , resource ) if 'force' in kwargs and kwargs [ 'force' ] : r = context . session . post ( uri , timeout = HTTP_TIMEOUT ) else : r = context . session . get ( uri , timeout = HTTP_TIMEOUT ) return r
11,698
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L119-L126
[ "def", "encrypt", "(", "self", ",", "plaintext", ",", "nonce", ",", "encoder", "=", "encoding", ".", "RawEncoder", ")", ":", "if", "len", "(", "nonce", ")", "!=", "self", ".", "NONCE_SIZE", ":", "raise", "ValueError", "(", "\"The nonce must be exactly %s bytes long\"", "%", "self", ".", "NONCE_SIZE", ")", "ciphertext", "=", "libnacl", ".", "crypto_box_afternm", "(", "plaintext", ",", "nonce", ",", "self", ".", "_shared_key", ",", ")", "encoded_nonce", "=", "encoder", ".", "encode", "(", "nonce", ")", "encoded_ciphertext", "=", "encoder", ".", "encode", "(", "ciphertext", ")", "return", "EncryptedMessage", ".", "_from_parts", "(", "encoded_nonce", ",", "encoded_ciphertext", ",", "encoder", ".", "encode", "(", "nonce", "+", "ciphertext", ")", ",", ")" ]
Parse rST - formatted string content into docutils nodes
def parse_rst_content ( content , state ) : # http://www.sphinx-doc.org/en/master/extdev/markupapi.html # #parsing-directive-content-as-rest container_node = nodes . section ( ) container_node . document = state . document viewlist = ViewList ( ) for i , line in enumerate ( content . splitlines ( ) ) : viewlist . append ( line , source = '' , offset = i ) with switch_source_input ( state , viewlist ) : state . nested_parse ( viewlist , 0 , container_node ) return container_node . children
11,699
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L14-L41
[ "def", "on_end_validation", "(", "self", ",", "event", ")", ":", "self", ".", "Enable", "(", ")", "self", ".", "Show", "(", ")", "self", ".", "magic_gui_frame", ".", "Destroy", "(", ")" ]