query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Get the latest representation of the current model .
def refresh(self):
    """Get the latest representation of the current model.

    Builds the resource endpoint from the model's identifiers, fetches the
    current server-side representation and resets the local model state.
    """
    api_client = self._get_client()
    resource_url = self._endpoint.format(
        resource_id=self.resource_id or "",
        parent_id=self.parent_id or "",
        grandparent_id=self.grandparent_id or "",
    )
    fresh_payload = api_client.get_resource(resource_url)
    self._reset_model(fresh_payload)
8,600
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L245-L253
[ "async", "def", "open_session", "(", "self", ",", "request", ":", "BaseRequestWebsocket", ")", "->", "Session", ":", "return", "await", "ensure_coroutine", "(", "self", ".", "session_interface", ".", "open_session", ")", "(", "self", ",", "request", ")" ]
Apply all the changes on the current model .
def commit(self, if_match=None, wait=True, timeout=None):
    """Apply all the changes on the current model.

    Sends the pending changes to the API. When `wait` is truthy, polls the
    resource until it reports ready, raising TimeOut once `timeout` seconds
    (in `CONFIG.HNV.retry_interval` steps) have elapsed. When `wait` is
    falsy, the model is reset from the update response instead.

    Returns a reference to itself for backwards compatibility.
    """
    if not self._changes:
        LOG.debug("No changes available for %s: %s",
                  self.__class__.__name__, self.resource_id)
        return
    LOG.debug("Apply all the changes on the current %s: %s",
              self.__class__.__name__, self.resource_id)
    client = self._get_client()
    endpoint = self._endpoint.format(
        resource_id=self.resource_id or "",
        parent_id=self.parent_id or "",
        grandparent_id=self.grandparent_id or "")
    request_body = self.dump(include_read_only=False)
    response = client.update_resource(endpoint, data=request_body,
                                      if_match=if_match)
    elapsed_time = 0
    while wait:
        # Update the representation of the current model
        self.refresh()
        if self.is_ready():
            break
        elapsed_time += CONFIG.HNV.retry_interval
        if timeout and elapsed_time > timeout:
            raise exception.TimeOut("The request timed out.")
        time.sleep(CONFIG.HNV.retry_interval)
    else:
        # `while/else`: only reached when `wait` was falsy from the start,
        # so the update response itself becomes the new model state.
        self._reset_model(response)
    # NOTE(alexcoman): In order to keep backwards compatibility the
    # `method: commit` will return a reference to itself.
    # An example for that can be the following use case:
    # label = client.Model().commit()
    return self
8,601
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L255-L303
[ "def", "_WaitForStartup", "(", "self", ",", "deadline", ")", ":", "start", "=", "time", ".", "time", "(", ")", "sleep", "=", "0.05", "def", "Elapsed", "(", ")", ":", "return", "time", ".", "time", "(", ")", "-", "start", "while", "True", ":", "try", ":", "response", ",", "_", "=", "self", ".", "_http", ".", "request", "(", "self", ".", "_host", ")", "if", "response", ".", "status", "==", "200", ":", "logging", ".", "info", "(", "'emulator responded after %f seconds'", ",", "Elapsed", "(", ")", ")", "return", "True", "except", "(", "socket", ".", "error", ",", "httplib", ".", "ResponseNotReady", ")", ":", "pass", "if", "Elapsed", "(", ")", ">=", "deadline", ":", "# Out of time; give up.", "return", "False", "else", ":", "time", ".", "sleep", "(", "sleep", ")", "sleep", "*=", "2" ]
Set or update the fields value .
def _set_fields(self, fields):
    """Set or update the fields value.

    After delegating to the base implementation, derives `resource_ref`
    from the formatted endpoint when it was not provided, stripping the
    networking API version prefix.
    """
    super(_BaseHNVModel, self)._set_fields(fields)
    if not self.resource_ref:
        endpoint = self._endpoint.format(
            resource_id=self.resource_id,
            parent_id=self.parent_id,
            grandparent_id=self.grandparent_id)
        # Drop the "/networking/vN" prefix to get a relative reference.
        self.resource_ref = re.sub("(/networking/v[0-9]+)", "", endpoint)
8,602
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L322-L329
[ "def", "regularize_hidden", "(", "p0", ",", "P", ",", "reversible", "=", "True", ",", "stationary", "=", "False", ",", "C", "=", "None", ",", "eps", "=", "None", ")", ":", "# input", "n", "=", "P", ".", "shape", "[", "0", "]", "if", "eps", "is", "None", ":", "# default output probability, in order to avoid zero columns", "eps", "=", "0.01", "/", "n", "# REGULARIZE P", "P", "=", "np", ".", "maximum", "(", "P", ",", "eps", ")", "# and renormalize", "P", "/=", "P", ".", "sum", "(", "axis", "=", "1", ")", "[", ":", ",", "None", "]", "# ensure reversibility", "if", "reversible", ":", "P", "=", "_tmatrix_disconnected", ".", "enforce_reversible_on_closed", "(", "P", ")", "# REGULARIZE p0", "if", "stationary", ":", "_tmatrix_disconnected", ".", "stationary_distribution", "(", "P", ",", "C", "=", "C", ")", "else", ":", "p0", "=", "np", ".", "maximum", "(", "p0", ",", "eps", ")", "p0", "/=", "p0", ".", "sum", "(", ")", "return", "p0", ",", "P" ]
Return the associated resource .
def get_resource(self):
    """Return the associated resource.

    Matches `resource_ref` against the registered model regexps and loads
    the first matching model; raises NotFound when nothing matches.
    """
    references = {"resource_id": None, "parent_id": None,
                  "grandparent_id": None}
    for model_cls, regexp in self._regexp.iteritems():
        match = regexp.search(self.resource_ref)
        if match is None:
            continue
        references.update(match.groupdict())
        return model_cls.get(**references)
    raise exception.NotFound("No model available for %(resource_ref)r",
                             resource_ref=self.resource_ref)
8,603
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L364-L375
[ "def", "density_hub", "(", "self", ",", "weather_df", ")", ":", "if", "self", ".", "density_model", "!=", "'interpolation_extrapolation'", ":", "temperature_hub", "=", "self", ".", "temperature_hub", "(", "weather_df", ")", "# Calculation of density in kg/m³ at hub height", "if", "self", ".", "density_model", "==", "'barometric'", ":", "logging", ".", "debug", "(", "'Calculating density using barometric height '", "'equation.'", ")", "closest_height", "=", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "min", "(", "range", "(", "len", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", ")", ")", ",", "key", "=", "lambda", "i", ":", "abs", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "i", "]", "-", "self", ".", "power_plant", ".", "hub_height", ")", ")", "]", "density_hub", "=", "density", ".", "barometric", "(", "weather_df", "[", "'pressure'", "]", "[", "closest_height", "]", ",", "closest_height", ",", "self", ".", "power_plant", ".", "hub_height", ",", "temperature_hub", ")", "elif", "self", ".", "density_model", "==", "'ideal_gas'", ":", "logging", ".", "debug", "(", "'Calculating density using ideal gas equation.'", ")", "closest_height", "=", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "min", "(", "range", "(", "len", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", ")", ")", ",", "key", "=", "lambda", "i", ":", "abs", "(", "weather_df", "[", "'pressure'", "]", ".", "columns", "[", "i", "]", "-", "self", ".", "power_plant", ".", "hub_height", ")", ")", "]", "density_hub", "=", "density", ".", "ideal_gas", "(", "weather_df", "[", "'pressure'", "]", "[", "closest_height", "]", ",", "closest_height", ",", "self", ".", "power_plant", ".", "hub_height", ",", "temperature_hub", ")", "elif", "self", ".", "density_model", "==", "'interpolation_extrapolation'", ":", "logging", ".", "debug", "(", "'Calculating density using linear inter- or '", "'extrapolation.'", ")", "density_hub", "=", "tools", ".", 
"linear_interpolation_extrapolation", "(", "weather_df", "[", "'density'", "]", ",", "self", ".", "power_plant", ".", "hub_height", ")", "else", ":", "raise", "ValueError", "(", "\"'{0}' is an invalid value. \"", ".", "format", "(", "self", ".", "density_model", ")", "+", "\"`density_model` \"", "+", "\"must be 'barometric', 'ideal_gas' or \"", "+", "\"'interpolation_extrapolation'.\"", ")", "return", "density_hub" ]
depending on the number of data points compute a best guess for an optimal number of bins
def _get_nr_bins ( count ) : if count <= 30 : # use the square-root choice, used by Excel and Co k = np . ceil ( np . sqrt ( count ) ) else : # use Sturges' formula k = np . ceil ( np . log2 ( count ) ) + 1 return int ( k )
8,604
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/histograms.py#L14-L26
[ "def", "lock", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "object", "=", "self", ".", "get_object", "(", ")", "success_url", "=", "self", ".", "get_success_url", "(", ")", "self", ".", "object", ".", "status", "=", "Topic", ".", "TOPIC_LOCKED", "self", ".", "object", ".", "save", "(", ")", "messages", ".", "success", "(", "self", ".", "request", ",", "self", ".", "success_message", ")", "return", "HttpResponseRedirect", "(", "success_url", ")" ]
Generate histograms for one or more keys in the given container .
def plot_histograms(ertobj, keys, **kwargs):
    """Generate histograms for one or more keys in the given container.

    For each key a linear histogram and, when positive values exist, a
    log10 histogram are plotted side by side.

    Parameters
    ----------
    ertobj : pandas.DataFrame or ERT container
        Data source; containers are accessed through their ``.data``.
    keys : str or list of str
        Column name(s) to plot.
    **kwargs
        merge : bool, optional
            Plot all histograms into one figure (default: True).

    Returns
    -------
    dict
        Figures keyed by column name, or under 'all' when merged.

    Raises
    ------
    Exception
        If the data container holds no rows.
    """
    # you can either provide a DataFrame or an ERT object
    if isinstance(ertobj, pd.DataFrame):
        df = ertobj
    else:
        df = ertobj.data
    if df.shape[0] == 0:
        raise Exception('No data present, cannot plot')
    if isinstance(keys, str):
        keys = [keys, ]
    figures = {}
    merge_figs = kwargs.get('merge', True)
    if merge_figs:
        nr_x = 2
        nr_y = len(keys)
        size_x = 15 / 2.54
        size_y = 5 * nr_y / 2.54
        fig, axes_all = plt.subplots(nr_y, nr_x, figsize=(size_x, size_y))
        axes_all = np.atleast_2d(axes_all)
    for row_nr, key in enumerate(keys):
        print('Generating histogram plot for key: {0}'.format(key))
        subdata_raw = df[key].values
        subdata = subdata_raw[~np.isnan(subdata_raw)]
        subdata = subdata[np.isfinite(subdata)]
        # log10 is only defined for positive values
        subdata_log10_with_nan = np.log10(subdata[subdata > 0])
        subdata_log10 = subdata_log10_with_nan[
            ~np.isnan(subdata_log10_with_nan)]
        subdata_log10 = subdata_log10[np.isfinite(subdata_log10)]
        if merge_figs:
            axes = axes_all[row_nr].squeeze()
        else:
            fig, axes = plt.subplots(1, 2, figsize=(10 / 2.54, 5 / 2.54))
        ax = axes[0]
        ax.hist(subdata, _get_nr_bins(subdata.size), )
        ax.set_xlabel(units.get_label(key))
        ax.set_ylabel('count')
        ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
        ax.tick_params(axis='both', which='major', labelsize=6)
        ax.tick_params(axis='both', which='minor', labelsize=6)
        if subdata_log10.size > 0:
            ax = axes[1]
            # BUGFIX: the bin count must be derived from the log10-filtered
            # sample (non-positive values were dropped), not the full
            # linear sample.
            ax.hist(subdata_log10, _get_nr_bins(subdata_log10.size), )
            ax.set_xlabel(r'$log_{10}($' + units.get_label(key) + ')')
            ax.set_ylabel('count')
            ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
        else:
            pass
            # del(axes[1])
        fig.tight_layout()
        if not merge_figs:
            figures[key] = fig
    if merge_figs:
        figures['all'] = fig
    return figures
8,605
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/histograms.py#L29-L130
[ "def", "poissonVectorRDD", "(", "sc", ",", "mean", ",", "numRows", ",", "numCols", ",", "numPartitions", "=", "None", ",", "seed", "=", "None", ")", ":", "return", "callMLlibFunc", "(", "\"poissonVectorRDD\"", ",", "sc", ".", "_jsc", ",", "float", "(", "mean", ")", ",", "numRows", ",", "numCols", ",", "numPartitions", ",", "seed", ")" ]
Produce histograms grouped by the extra dimensions .
def plot_histograms_extra_dims(dataobj, keys, **kwargs):
    """Produce histograms grouped by the extra dimensions (timesteps).

    Parameters
    ----------
    dataobj : pandas.DataFrame or container with a ``.data`` attribute
        Data source.
    keys : list of str
        Columns to plot one histogram per timestep (and, optionally, one
        per log10 transform).
    **kwargs
        subquery : str, optional
            pandas query string applied to the data first.
        log10plot : bool, optional
            Also plot log10-transformed histograms (default: False).
        Nx : int, optional
            Maximum number of subplot columns (default: 4).

    Returns
    -------
    matplotlib.figure.Figure
        The assembled figure.
    """
    if isinstance(dataobj, pd.DataFrame):
        df_raw = dataobj
    else:
        df_raw = dataobj.data
    if kwargs.get('subquery', False):
        df = df_raw.query(kwargs.get('subquery'))
    else:
        df = df_raw
    split_timestamps = True
    if split_timestamps:
        group_timestamps = df.groupby('timestep')
        N_ts = len(group_timestamps.groups.keys())
    else:
        # BUGFIX: must be a list of (name, group) pairs so the loop below
        # can unpack it; the old bare tuple ('all', df) raised on unpack.
        group_timestamps = [('all', df)]
        N_ts = 1
    columns = keys
    N_c = len(columns)
    plot_log10 = kwargs.get('log10plot', False)
    if plot_log10:
        transformers = ['lin', 'log10']
        N_log10 = 2
    else:
        transformers = ['lin', ]
        N_log10 = 1
    # determine layout of plots
    Nx_max = kwargs.get('Nx', 4)
    N = N_ts * N_c * N_log10
    Nx = min(Nx_max, N)
    Ny = int(np.ceil(N / Nx))
    size_x = 5 * Nx / 2.54
    size_y = 5 * Ny / 2.54
    fig, axes = plt.subplots(Ny, Nx, figsize=(size_x, size_y),
                             sharex=True, sharey=True)
    axes = np.atleast_2d(axes)
    index = 0
    for ts_name, tgroup in group_timestamps:
        for column in columns:
            for transformer in transformers:
                # print('{0}-{1}-{2}'.format(ts_name, column, transformer))
                subdata_raw = tgroup[column].values
                subdata = subdata_raw[~np.isnan(subdata_raw)]
                subdata = subdata[np.isfinite(subdata)]
                if transformer == 'log10':
                    # log10 is only defined for positive values
                    subdata_log10_with_nan = np.log10(subdata[subdata > 0])
                    subdata_log10 = subdata_log10_with_nan[
                        ~np.isnan(subdata_log10_with_nan)]
                    subdata_log10 = subdata_log10[
                        np.isfinite(subdata_log10)]
                    subdata = subdata_log10
                ax = axes.flat[index]
                ax.hist(subdata, _get_nr_bins(subdata.size), )
                ax.set_xlabel(units.get_label(column))
                ax.set_ylabel('count')
                ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(3))
                ax.tick_params(axis='both', which='major', labelsize=6)
                ax.tick_params(axis='both', which='minor', labelsize=6)
                ax.set_title("timestep: %d" % ts_name)
                index += 1
    # remove some labels
    for ax in axes[:, 1:].flat:
        ax.set_ylabel('')
    for ax in axes[:-1, :].flat:
        ax.set_xlabel('')
    fig.tight_layout()
    return fig
8,606
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/histograms.py#L133-L252
[ "def", "_scrub_generated_timestamps", "(", "self", ",", "target_workdir", ")", ":", "for", "root", ",", "_", ",", "filenames", "in", "safe_walk", "(", "target_workdir", ")", ":", "for", "filename", "in", "filenames", ":", "source", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "with", "open", "(", "source", ",", "'r'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "if", "len", "(", "lines", ")", "<", "1", ":", "return", "with", "open", "(", "source", ",", "'w'", ")", "as", "f", ":", "if", "not", "self", ".", "_COMMENT_WITH_TIMESTAMP_RE", ".", "match", "(", "lines", "[", "0", "]", ")", ":", "f", ".", "write", "(", "lines", "[", "0", "]", ")", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "f", ".", "write", "(", "line", ")" ]
Extract substring of letters for which predicate is True
def parse_substring(allele, pred, max_len=None):
    """Extract the leading substring of characters for which *pred* is True.

    Scans at most ``max_len`` characters (the whole string when None) and
    stops at the first character failing the predicate.

    Returns
    -------
    tuple of (str, str)
        The matched prefix and the remainder of the string.
    """
    limit = len(allele) if max_len is None else min(max_len, len(allele))
    taken = 0
    while taken < limit and pred(allele[taken]):
        taken += 1
    return allele[:taken], allele[taken:]
8,607
https://github.com/openvax/mhcnames/blob/71694b9d620db68ceee44da1b8422ff436f15bd3/mhcnames/parsing_helpers.py#L18-L31
[ "def", "xrdb", "(", "xrdb_files", "=", "None", ")", ":", "xrdb_files", "=", "xrdb_files", "or", "[", "os", ".", "path", ".", "join", "(", "CACHE_DIR", ",", "\"colors.Xresources\"", ")", "]", "if", "shutil", ".", "which", "(", "\"xrdb\"", ")", "and", "OS", "!=", "\"Darwin\"", ":", "for", "file", "in", "xrdb_files", ":", "subprocess", ".", "run", "(", "[", "\"xrdb\"", ",", "\"-merge\"", ",", "\"-quiet\"", ",", "file", "]", ")" ]
just pull files from PG
def fetch(self):
    """Just pull files from PG (Project Gutenberg) into the local path."""
    if not self.local_path:
        # Make sure there is somewhere to put the files first.
        self.make_local_path()
    BookFetcher(self).fetch()
8,608
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/book.py#L174-L180
[ "def", "remove_from_space_size", "(", "self", ",", "removal_bytes", ")", ":", "# type: (int) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'This Volume Descriptor is not yet initialized'", ")", "# The 'removal' parameter is expected to be in bytes, but the space", "# size we track is in extents. Round up to the next extent.", "self", ".", "space_size", "-=", "utils", ".", "ceiling_div", "(", "removal_bytes", ",", "self", ".", "log_block_size", ")" ]
Turn fetched files into a local repo; make auxiliary files.
def make(self):
    """Turn fetched files into a local repo; make auxiliary files.

    Commits the initial import, then adds the generated auxiliary files
    (readme, license, cover, metadata) in a second commit — each commit
    only happens when files were actually added.
    """
    logger.debug("preparing to add all git files")
    if self.local_repo.add_all_files():
        self.local_repo.commit("Initial import from Project Gutenberg")
    NewFilesHandler(self).add_new_files()
    if self.local_repo.add_all_files():
        self.local_repo.commit(
            "Updates Readme, contributing, license files, cover, metadata.")
8,609
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/book.py#L193-L208
[ "def", "get_severity", "(", "self", ",", "alert", ")", ":", "query", "=", "{", "'environment'", ":", "alert", ".", "environment", ",", "'resource'", ":", "alert", ".", "resource", ",", "'$or'", ":", "[", "{", "'event'", ":", "alert", ".", "event", ",", "'severity'", ":", "{", "'$ne'", ":", "alert", ".", "severity", "}", "}", ",", "{", "'event'", ":", "{", "'$ne'", ":", "alert", ".", "event", "}", ",", "'correlate'", ":", "alert", ".", "event", "}", "]", ",", "'customer'", ":", "alert", ".", "customer", "}", "r", "=", "self", ".", "get_db", "(", ")", ".", "alerts", ".", "find_one", "(", "query", ",", "projection", "=", "{", "'severity'", ":", "1", ",", "'_id'", ":", "0", "}", ")", "return", "r", "[", "'severity'", "]", "if", "r", "else", "None" ]
create a github repo and push the local repo into it
def push(self):
    """Create a GitHub repo and push the local repo into it.

    Returns the remote repo object, which is also cached on ``_repo``.
    """
    remote = self.github_repo
    remote.create_and_push()
    self._repo = remote.repo
    return self._repo
8,610
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/book.py#L213-L218
[ "def", "_openResources", "(", "self", ")", ":", "try", ":", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "True", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "warning", "(", "ex", ")", "logger", ".", "warning", "(", "\"Unable to read wav with memmory mapping. Trying without now.\"", ")", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "False", ")", "self", ".", "_array", "=", "data", "self", ".", "attributes", "[", "'rate'", "]", "=", "rate" ]
tag and commit
def tag(self, version='bump', message=''):
    """Tag and commit: clone from GitHub, then tag the remote repo."""
    self.clone_from_github()
    remote = self.github_repo
    remote.tag(version, message=message)
8,611
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/book.py#L225-L229
[ "def", "cache_url_config", "(", "cls", ",", "url", ",", "backend", "=", "None", ")", ":", "url", "=", "urlparse", "(", "url", ")", "if", "not", "isinstance", "(", "url", ",", "cls", ".", "URL_CLASS", ")", "else", "url", "location", "=", "url", ".", "netloc", ".", "split", "(", "','", ")", "if", "len", "(", "location", ")", "==", "1", ":", "location", "=", "location", "[", "0", "]", "config", "=", "{", "'BACKEND'", ":", "cls", ".", "CACHE_SCHEMES", "[", "url", ".", "scheme", "]", ",", "'LOCATION'", ":", "location", ",", "}", "# Add the drive to LOCATION", "if", "url", ".", "scheme", "==", "'filecache'", ":", "config", ".", "update", "(", "{", "'LOCATION'", ":", "url", ".", "netloc", "+", "url", ".", "path", ",", "}", ")", "if", "url", ".", "path", "and", "url", ".", "scheme", "in", "[", "'memcache'", ",", "'pymemcache'", "]", ":", "config", ".", "update", "(", "{", "'LOCATION'", ":", "'unix:'", "+", "url", ".", "path", ",", "}", ")", "elif", "url", ".", "scheme", ".", "startswith", "(", "'redis'", ")", ":", "if", "url", ".", "hostname", ":", "scheme", "=", "url", ".", "scheme", ".", "replace", "(", "'cache'", ",", "''", ")", "else", ":", "scheme", "=", "'unix'", "locations", "=", "[", "scheme", "+", "'://'", "+", "loc", "+", "url", ".", "path", "for", "loc", "in", "url", ".", "netloc", ".", "split", "(", "','", ")", "]", "config", "[", "'LOCATION'", "]", "=", "locations", "[", "0", "]", "if", "len", "(", "locations", ")", "==", "1", "else", "locations", "if", "url", ".", "query", ":", "config_options", "=", "{", "}", "for", "k", ",", "v", "in", "parse_qs", "(", "url", ".", "query", ")", ".", "items", "(", ")", ":", "opt", "=", "{", "k", ".", "upper", "(", ")", ":", "_cast", "(", "v", "[", "0", "]", ")", "}", "if", "k", ".", "upper", "(", ")", "in", "cls", ".", "_CACHE_BASE_OPTIONS", ":", "config", ".", "update", "(", "opt", ")", "else", ":", "config_options", ".", "update", "(", "opt", ")", "config", "[", "'OPTIONS'", "]", "=", "config_options", "if", "backend", 
":", "config", "[", "'BACKEND'", "]", "=", "backend", "return", "config" ]
Takes a string and sanitizes it for GitHub's URL name format.
def format_title(self):
    """Take the book title and sanitize it for GitHub's URL name format.

    Non-ASCII letters are replaced by their hex codepoint; when that
    happens and an alternative title exists, the alternative is preferred
    if it is pure ASCII. The book id is appended, and the result is stored
    under the metadata key '_repo'.
    """
    def asciify(_title):
        _title = unicodedata.normalize('NFD', unicode(_title))
        ascii = True
        out = []
        ok = u"1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM- ',"
        for ch in _title:
            if ch in ok:
                out.append(ch)
            elif unicodedata.category(ch)[0] == ("L"):  # a letter
                # encode non-ASCII letters as their hex codepoint
                out.append(hex(ord(ch)))
                ascii = False
            elif ch in u'\r\n\t':
                out.append(u'-')
        # collapse runs of separators into a single dash
        return (ascii, sub("[ ',-]+", '-', "".join(out)))

    (ascii, _title) = asciify(self.meta.title)
    if not ascii and self.meta.alternative_title:
        (ascii, _title2) = asciify(self.meta.alternative_title)
        if ascii:
            _title = _title2
    # GitHub repo names are limited; reserve room for "_<book_id>"
    title_length = 99 - len(str(self.book_id)) - 1
    if len(_title) > title_length:
        # if the title was shortened, replace the trailing _ with an ellipsis
        repo_title = "{0}__{1}".format(_title[:title_length], self.book_id)
    else:
        repo_title = "{0}_{1}".format(_title[:title_length], self.book_id)
    logger.debug("%s %s" % (len(repo_title), repo_title))
    self.meta.metadata['_repo'] = repo_title
    return repo_title
8,612
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/book.py#L266-L296
[ "def", "merge_cts_records", "(", "file_name", ",", "crypto_idfp", ",", "crypto_idfps", ")", ":", "db", "=", "XonoticDB", ".", "load_path", "(", "file_name", ")", "db", ".", "merge_cts_records", "(", "crypto_idfp", ",", "crypto_idfps", ")", "db", ".", "save", "(", "file_name", ")" ]
Make a request from the API .
def _request(self, path, method, body=None):
    """Make a request from the API.

    Joins the server URL with *path*, issues the HTTP request and decodes
    the payload as JSON when the response declares a JSON content type.

    Returns a (response, content) tuple.
    """
    url = '/'.join([_SERVER, path])
    resp, content = _HTTP.request(url, method,
                                  headers=self._headers, body=body)
    ctype = resp.get('content-type')
    if ctype and ctype.startswith('application/json'):
        content = json.loads(content.decode('UTF-8'))
    return (resp, content)
8,613
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/__init__.py#L32-L42
[ "def", "sql_column_like_drug", "(", "self", ",", "column_name", ":", "str", ")", "->", "str", ":", "clauses", "=", "[", "\"{col} LIKE {fragment}\"", ".", "format", "(", "col", "=", "column_name", ",", "fragment", "=", "sql_string_literal", "(", "f", ")", ")", "for", "f", "in", "self", ".", "sql_like_fragments", "]", "return", "\"({})\"", ".", "format", "(", "\" OR \"", ".", "join", "(", "clauses", ")", ")" ]
Make a PUT request from the API .
def put(self, path, payload):
    """Make a PUT request from the API with *payload* serialized as JSON."""
    return self._request(path, 'PUT', json.dumps(payload))
8,614
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/__init__.py#L52-L55
[ "def", "_find_cgroup_mounts", "(", ")", ":", "try", ":", "with", "open", "(", "'/proc/mounts'", ",", "'rt'", ")", "as", "mountsFile", ":", "for", "mount", "in", "mountsFile", ":", "mount", "=", "mount", ".", "split", "(", "' '", ")", "if", "mount", "[", "2", "]", "==", "'cgroup'", ":", "mountpoint", "=", "mount", "[", "1", "]", "options", "=", "mount", "[", "3", "]", "for", "option", "in", "options", ".", "split", "(", "','", ")", ":", "if", "option", "in", "ALL_KNOWN_SUBSYSTEMS", ":", "yield", "(", "option", ",", "mountpoint", ")", "except", "IOError", ":", "logging", ".", "exception", "(", "'Cannot read /proc/mounts'", ")" ]
Make a POST request from the API .
def post(self, path, payload):
    """Make a POST request from the API with *payload* serialized as JSON."""
    return self._request(path, 'POST', json.dumps(payload))
8,615
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/__init__.py#L57-L60
[ "def", "fetch_related", "(", "self", ",", "ids", ")", ":", "if", "not", "ids", ":", "return", "[", "]", "meta", "=", "'&id='", ".", "join", "(", "id", "[", "'Id'", "]", "for", "id", "in", "ids", ")", "url", "=", "(", "'{url}/meta/any?id={meta}'", "'&include=bundle-metadata&include=stats'", "'&include=supported-series&include=extra-info'", "'&include=bundle-unit-count&include=owner'", ")", ".", "format", "(", "url", "=", "self", ".", "url", ",", "meta", "=", "meta", ")", "data", "=", "self", ".", "_get", "(", "url", ")", "return", "data", ".", "json", "(", ")", ".", "values", "(", ")" ]
Create a new injector that inherits the state from this injector .
def create_child(self, modules):
    """Create a new injector that inherits the state from this injector.

    The child gets its own binder (derived from ours) but shares the
    current stage.
    """
    child_binder = self._binder.create_child()
    return Injector(modules, binder=child_binder, stage=self._stage)
8,616
https://github.com/dstanek/snake-guice/blob/d20b62de3ee31e84119c801756398c35ed803fb3/snakeguice/injector.py#L92-L99
[ "def", "format_raw_data", "(", "self", ",", "tpe", ",", "raw_data", ")", ":", "if", "tpe", "==", "'text'", ":", "formatted_raw_data", "=", "self", ".", "parse_text_to_dict", "(", "raw_data", ")", "elif", "tpe", "==", "'file'", ":", "formatted_raw_data", "=", "self", ".", "parse_file_to_dict", "(", "raw_data", ")", "else", ":", "formatted_raw_data", "=", "{", "'ERROR'", ":", "'unknown data type'", ",", "'data'", ":", "[", "raw_data", "]", "}", "return", "formatted_raw_data" ]
Validate a message given a schema .
def validate(self, message, schema_name):
    """Validate a message given a schema.

    Raises InvalidMessageError when the schema name is unknown or the
    message fails schema validation; the failure is logged first.
    """
    failure = None
    try:
        jsonschema.validate(message, self.schemas[schema_name])
    except KeyError:
        failure = {
            'msg': (f'Schema "{schema_name}" was not found (available: '
                    f'{", ".join(self.schemas.keys())})')
        }
    except jsonschema.ValidationError as e:
        failure = {
            'msg': (f'Given message was not valid against the schema '
                    f'"{schema_name}": {e.message}')
        }
    if failure:
        logging.error(**failure)
        raise exceptions.InvalidMessageError(failure['msg'])
8,617
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/schema/validate.py#L111-L141
[ "def", "register_variable", "(", "self", ",", "v", ",", "key", ",", "eternal", "=", "True", ")", ":", "if", "type", "(", "key", ")", "is", "not", "tuple", ":", "raise", "TypeError", "(", "\"Variable tracking key must be a tuple\"", ")", "if", "eternal", ":", "self", ".", "eternal_tracked_variables", "[", "key", "]", "=", "v", "else", ":", "self", ".", "temporal_tracked_variables", "=", "dict", "(", "self", ".", "temporal_tracked_variables", ")", "ctrkey", "=", "key", "+", "(", "None", ",", ")", "ctrval", "=", "self", ".", "temporal_tracked_variables", ".", "get", "(", "ctrkey", ",", "0", ")", "+", "1", "self", ".", "temporal_tracked_variables", "[", "ctrkey", "]", "=", "ctrval", "tempkey", "=", "key", "+", "(", "ctrval", ",", ")", "self", ".", "temporal_tracked_variables", "[", "tempkey", "]", "=", "v" ]
Evaluates functions from right to left.
def compose(*functions):
    """Compose callables; the result evaluates them from right to left.

    ``compose(f, g)(x)`` is equivalent to ``f(g(x))``.
    """
    def pairwise(outer, current):
        # wrap so that `current` runs first, then `outer`
        return lambda *args, **kwargs: outer(current(*args, **kwargs))
    return functools.reduce(pairwise, functions)
8,618
https://github.com/fabianvf/schema-transformer/blob/1ddce4f7615de71593a1adabee4dbfc4ae086433/schema_transformer/helpers.py#L19-L40
[ "def", "_get_device_template", "(", "disk", ",", "disk_info", ",", "template", "=", "None", ")", ":", "def", "_require_disk_opts", "(", "*", "args", ")", ":", "for", "arg", "in", "args", ":", "if", "arg", "not", "in", "disk_info", ":", "raise", "SaltCloudSystemExit", "(", "'The disk {0} requires a {1}\\\n argument'", ".", "format", "(", "disk", ",", "arg", ")", ")", "_require_disk_opts", "(", "'disk_type'", ",", "'size'", ")", "size", "=", "disk_info", "[", "'size'", "]", "disk_type", "=", "disk_info", "[", "'disk_type'", "]", "if", "disk_type", "==", "'clone'", ":", "if", "'image'", "in", "disk_info", ":", "clone_image", "=", "disk_info", "[", "'image'", "]", "else", ":", "clone_image", "=", "get_template_image", "(", "kwargs", "=", "{", "'name'", ":", "template", "}", ")", "clone_image_id", "=", "get_image_id", "(", "kwargs", "=", "{", "'name'", ":", "clone_image", "}", ")", "temp", "=", "'DISK=[IMAGE={0}, IMAGE_ID={1}, CLONE=YES,\\\n SIZE={2}]'", ".", "format", "(", "clone_image", ",", "clone_image_id", ",", "size", ")", "return", "temp", "if", "disk_type", "==", "'volatile'", ":", "_require_disk_opts", "(", "'type'", ")", "v_type", "=", "disk_info", "[", "'type'", "]", "temp", "=", "'DISK=[TYPE={0}, SIZE={1}]'", ".", "format", "(", "v_type", ",", "size", ")", "if", "v_type", "==", "'fs'", ":", "_require_disk_opts", "(", "'format'", ")", "format", "=", "disk_info", "[", "'format'", "]", "temp", "=", "'DISK=[TYPE={0}, SIZE={1}, FORMAT={2}]'", ".", "format", "(", "v_type", ",", "size", ",", "format", ")", "return", "temp" ]
Validating if the instance should be logged or is excluded
def validate_instance(instance):
    """Validate whether the instance should be logged or is excluded.

    Returns False when the instance's app label, class name or module
    matches one of the configured model excludes; True otherwise.
    """
    excludes = settings.AUTOMATED_LOGGING['exclude']['model']
    app_label = instance._meta.app_label.lower()
    class_name = instance.__class__.__name__.lower()
    module_name = instance.__module__.lower()
    for excluded in excludes:
        if excluded in (app_label, class_name) or module_name.startswith(excluded):
            return False
    return True
8,619
https://github.com/indietyp/django-automated-logging/blob/095dfc6df62dca45f7db4516bc35e52085d0a01c/automated_logging/signals/__init__.py#L23-L33
[ "def", "save_scatter_table", "(", "self", ",", "fn", ",", "description", "=", "\"\"", ")", ":", "data", "=", "{", "\"description\"", ":", "description", ",", "\"time\"", ":", "datetime", ".", "now", "(", ")", ",", "\"psd_scatter\"", ":", "(", "self", ".", "num_points", ",", "self", ".", "D_max", ",", "self", ".", "_psd_D", ",", "self", ".", "_S_table", ",", "self", ".", "_Z_table", ",", "self", ".", "_angular_table", ",", "self", ".", "_m_table", ",", "self", ".", "geometries", ")", ",", "\"version\"", ":", "tmatrix_aux", ".", "VERSION", "}", "pickle", ".", "dump", "(", "data", ",", "file", "(", "fn", ",", "'w'", ")", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")" ]
Get current user object from middleware
def get_current_user():
    """Get the current user object from the middleware thread-local.

    Returns None when no user is stored or when the stored user is the
    AnonymousUser sentinel.
    """
    thread_local = AutomatedLoggingMiddleware.thread_local
    user = getattr(thread_local, 'current_user', None)
    if isinstance(user, AnonymousUser):
        user = None
    return user
8,620
https://github.com/indietyp/django-automated-logging/blob/095dfc6df62dca45f7db4516bc35e52085d0a01c/automated_logging/signals/__init__.py#L36-L46
[ "def", "mangle_volume", "(", "citation_elements", ")", ":", "volume_re", "=", "re", ".", "compile", "(", "ur\"(\\d+)([A-Z])\"", ",", "re", ".", "U", "|", "re", ".", "I", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", ":", "matches", "=", "volume_re", ".", "match", "(", "el", "[", "'volume'", "]", ")", "if", "matches", ":", "el", "[", "'volume'", "]", "=", "matches", ".", "group", "(", "2", ")", "+", "matches", ".", "group", "(", "1", ")", "return", "citation_elements" ]
Get current application and path object from middleware
def get_current_environ():
    """Get current application and path object from middleware.

    Returns a (request_uri, application, method, status) tuple; each
    element is None when not present on the thread-local storage. The
    application name is resolved to (or created as) an Application row.
    """
    thread_local = AutomatedLoggingMiddleware.thread_local
    request_uri = getattr(thread_local, 'request_uri', None)
    method = getattr(thread_local, 'method', None)
    status = getattr(thread_local, 'status', None)
    # hasattr (not getattr default) preserved deliberately: a stored value
    # of None would still be looked up via get_or_create in the original.
    if hasattr(thread_local, 'application'):
        application = Application.objects.get_or_create(
            name=thread_local.application)[0]
    else:
        application = None
    return request_uri, application, method, status
8,621
https://github.com/indietyp/django-automated-logging/blob/095dfc6df62dca45f7db4516bc35e52085d0a01c/automated_logging/signals/__init__.py#L49-L73
[ "def", "checkIfRemoteIsNewer", "(", "self", ",", "localfile", ",", "remote_size", ",", "remote_modify", ")", ":", "is_remote_newer", "=", "False", "status", "=", "os", ".", "stat", "(", "localfile", ")", "LOG", ".", "info", "(", "\"\\nLocal file size: %i\"", "\"\\nLocal Timestamp: %s\"", ",", "status", "[", "ST_SIZE", "]", ",", "datetime", ".", "fromtimestamp", "(", "status", ".", "st_mtime", ")", ")", "remote_dt", "=", "Bgee", ".", "_convert_ftp_time_to_iso", "(", "remote_modify", ")", "if", "remote_dt", "!=", "datetime", ".", "fromtimestamp", "(", "status", ".", "st_mtime", ")", "or", "status", "[", "ST_SIZE", "]", "!=", "int", "(", "remote_size", ")", ":", "is_remote_newer", "=", "True", "LOG", ".", "info", "(", "\"Object on server is has different size %i and/or date %s\"", ",", "remote_size", ",", "remote_dt", ")", "return", "is_remote_newer" ]
This is the standard logging processor .
def processor ( status , sender , instance , updated = None , addition = '' ) : logger = logging . getLogger ( __name__ ) if validate_instance ( instance ) : user = get_current_user ( ) application = instance . _meta . app_label model_name = instance . __class__ . __name__ level = settings . AUTOMATED_LOGGING [ 'loglevel' ] [ 'model' ] if status == 'change' : corrected = 'changed' elif status == 'add' : corrected = 'added' elif status == 'delete' : corrected = 'deleted' logger . log ( level , ( '%s %s %s(%s) in %s%s' % ( user , corrected , instance , model_name , application , addition ) ) . replace ( " " , " " ) , extra = { 'action' : 'model' , 'data' : { 'status' : status , 'user' : user , 'sender' : sender , 'instance' : instance , 'update_fields' : updated } } )
8,622
https://github.com/indietyp/django-automated-logging/blob/095dfc6df62dca45f7db4516bc35e52085d0a01c/automated_logging/signals/__init__.py#L76-L106
[ "def", "find_covalent_bonds", "(", "ampal", ",", "max_range", "=", "2.2", ",", "threshold", "=", "1.1", ",", "tag", "=", "True", ")", ":", "sectors", "=", "gen_sectors", "(", "ampal", ".", "get_atoms", "(", ")", ",", "max_range", "*", "1.1", ")", "bonds", "=", "[", "]", "for", "sector", "in", "sectors", ".", "values", "(", ")", ":", "atoms", "=", "itertools", ".", "combinations", "(", "sector", ",", "2", ")", "bonds", ".", "extend", "(", "covalent_bonds", "(", "atoms", ",", "threshold", "=", "threshold", ")", ")", "bond_set", "=", "list", "(", "set", "(", "bonds", ")", ")", "if", "tag", ":", "for", "bond", "in", "bond_set", ":", "a", ",", "b", "=", "bond", ".", "a", ",", "bond", ".", "b", "if", "'covalent_bonds'", "not", "in", "a", ".", "tags", ":", "a", ".", "tags", "[", "'covalent_bonds'", "]", "=", "[", "b", "]", "else", ":", "a", ".", "tags", "[", "'covalent_bonds'", "]", ".", "append", "(", "b", ")", "if", "'covalent_bonds'", "not", "in", "b", ".", "tags", ":", "b", ".", "tags", "[", "'covalent_bonds'", "]", "=", "[", "a", "]", "else", ":", "b", ".", "tags", "[", "'covalent_bonds'", "]", ".", "append", "(", "a", ")", "return", "bond_set" ]
Returns a list of all the current category s parents .
def parents ( self ) : parents = [ ] if self . parent is None : return [ ] category = self while category . parent is not None : parents . append ( category . parent ) category = category . parent return parents [ : : - 1 ]
8,623
https://github.com/bunchesofdonald/django-hermes/blob/ff5395a7b5debfd0756aab43db61f7a6cfa06aea/hermes/models.py#L72-L84
[ "def", "get_rsa_key", "(", "self", ",", "username", ")", ":", "try", ":", "resp", "=", "self", ".", "session", ".", "post", "(", "'https://steamcommunity.com/login/getrsakey/'", ",", "timeout", "=", "15", ",", "data", "=", "{", "'username'", ":", "username", ",", "'donotchache'", ":", "int", "(", "time", "(", ")", "*", "1000", ")", ",", "}", ",", ")", ".", "json", "(", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "raise", "HTTPError", "(", "str", "(", "e", ")", ")", "return", "resp" ]
Returns the topmost parent of the current category .
def root_parent ( self , category = None ) : return next ( filter ( lambda c : c . is_root , self . hierarchy ( ) ) )
8,624
https://github.com/bunchesofdonald/django-hermes/blob/ff5395a7b5debfd0756aab43db61f7a6cfa06aea/hermes/models.py#L89-L91
[ "def", "send_string", "(", "self", ",", "string", ":", "str", ")", ":", "if", "not", "string", ":", "return", "string", "=", "string", ".", "replace", "(", "'\\n'", ",", "\"<enter>\"", ")", "string", "=", "string", ".", "replace", "(", "'\\t'", ",", "\"<tab>\"", ")", "_logger", ".", "debug", "(", "\"Send via event interface\"", ")", "self", ".", "__clearModifiers", "(", ")", "modifiers", "=", "[", "]", "for", "section", "in", "KEY_SPLIT_RE", ".", "split", "(", "string", ")", ":", "if", "len", "(", "section", ")", ">", "0", ":", "if", "Key", ".", "is_key", "(", "section", "[", ":", "-", "1", "]", ")", "and", "section", "[", "-", "1", "]", "==", "'+'", "and", "section", "[", ":", "-", "1", "]", "in", "MODIFIERS", ":", "# Section is a modifier application (modifier followed by '+')", "modifiers", ".", "append", "(", "section", "[", ":", "-", "1", "]", ")", "else", ":", "if", "len", "(", "modifiers", ")", ">", "0", ":", "# Modifiers ready for application - send modified key", "if", "Key", ".", "is_key", "(", "section", ")", ":", "self", ".", "interface", ".", "send_modified_key", "(", "section", ",", "modifiers", ")", "modifiers", "=", "[", "]", "else", ":", "self", ".", "interface", ".", "send_modified_key", "(", "section", "[", "0", "]", ",", "modifiers", ")", "if", "len", "(", "section", ")", ">", "1", ":", "self", ".", "interface", ".", "send_string", "(", "section", "[", "1", ":", "]", ")", "modifiers", "=", "[", "]", "else", ":", "# Normal string/key operation", "if", "Key", ".", "is_key", "(", "section", ")", ":", "self", ".", "interface", ".", "send_key", "(", "section", ")", "else", ":", "self", ".", "interface", ".", "send_string", "(", "section", ")", "self", ".", "__reapplyModifiers", "(", ")" ]
Indicate if this RunState is currently active .
def active ( self ) -> bool : states = self . _client . get_state ( self . _state_url ) [ 'states' ] for state in states : state = state [ 'State' ] if int ( state [ 'Id' ] ) == self . _state_id : # yes, the ZM API uses the *string* "1" for this... return state [ 'IsActive' ] == "1" return False
8,625
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/run_state.py#L26-L34
[ "def", "create_temp_project_avatar", "(", "self", ",", "project", ",", "filename", ",", "size", ",", "avatar_img", ",", "contentType", "=", "None", ",", "auto_confirm", "=", "False", ")", ":", "size_from_file", "=", "os", ".", "path", ".", "getsize", "(", "filename", ")", "if", "size", "!=", "size_from_file", ":", "size", "=", "size_from_file", "params", "=", "{", "'filename'", ":", "filename", ",", "'size'", ":", "size", "}", "headers", "=", "{", "'X-Atlassian-Token'", ":", "'no-check'", "}", "if", "contentType", "is", "not", "None", ":", "headers", "[", "'content-type'", "]", "=", "contentType", "else", ":", "# try to detect content-type, this may return None", "headers", "[", "'content-type'", "]", "=", "self", ".", "_get_mime_type", "(", "avatar_img", ")", "url", "=", "self", ".", "_get_url", "(", "'project/'", "+", "project", "+", "'/avatar/temporary'", ")", "r", "=", "self", ".", "_session", ".", "post", "(", "url", ",", "params", "=", "params", ",", "headers", "=", "headers", ",", "data", "=", "avatar_img", ")", "cropping_properties", "=", "json_loads", "(", "r", ")", "if", "auto_confirm", ":", "return", "self", ".", "confirm_project_avatar", "(", "project", ",", "cropping_properties", ")", "else", ":", "return", "cropping_properties" ]
Convert a value to the most reasonable unit .
def to_reasonable_unit ( value , units , round_digits = 2 ) : def to_unit ( unit ) : return float ( value ) / unit [ 1 ] exponents = [ abs ( Decimal ( to_unit ( u ) ) . adjusted ( ) - 1 ) for u in units ] best = min ( enumerate ( exponents ) , key = itemgetter ( 1 ) ) [ 0 ] return dict ( val = round ( to_unit ( units [ best ] ) , round_digits ) , label = units [ best ] [ 0 ] , multiplier = units [ best ] [ 1 ] )
8,626
https://github.com/mgk/urwid_timed_progress/blob/b7292e78a58f35f285736988c48e815e71fa2060/urwid_timed_progress/__init__.py#L170-L185
[ "def", "remove_armor", "(", "armored_data", ")", ":", "stream", "=", "io", ".", "BytesIO", "(", "armored_data", ")", "lines", "=", "stream", ".", "readlines", "(", ")", "[", "3", ":", "-", "1", "]", "data", "=", "base64", ".", "b64decode", "(", "b''", ".", "join", "(", "lines", ")", ")", "payload", ",", "checksum", "=", "data", "[", ":", "-", "3", "]", ",", "data", "[", "-", "3", ":", "]", "assert", "util", ".", "crc24", "(", "payload", ")", "==", "checksum", "return", "payload" ]
Return extended progress bar text
def get_text ( self ) : done_units = to_reasonable_unit ( self . done , self . units ) current = round ( self . current / done_units [ 'multiplier' ] , 2 ) percent = int ( self . current * 100 / self . done ) return '{0:.2f} of {1:.2f} {2} ({3}%)' . format ( current , done_units [ 'val' ] , done_units [ 'label' ] , percent )
8,627
https://github.com/mgk/urwid_timed_progress/blob/b7292e78a58f35f285736988c48e815e71fa2060/urwid_timed_progress/__init__.py#L19-L27
[ "def", "MakeRequest", "(", "self", ",", "data", ")", ":", "stats_collector_instance", ".", "Get", "(", ")", ".", "IncrementCounter", "(", "\"grr_client_sent_bytes\"", ",", "len", "(", "data", ")", ")", "# Verify the response is as it should be from the control endpoint.", "response", "=", "self", ".", "http_manager", ".", "OpenServerEndpoint", "(", "path", "=", "\"control?api=%s\"", "%", "config", ".", "CONFIG", "[", "\"Network.api\"", "]", ",", "verify_cb", "=", "self", ".", "VerifyServerControlResponse", ",", "data", "=", "data", ",", "headers", "=", "{", "\"Content-Type\"", ":", "\"binary/octet-stream\"", "}", ")", "if", "response", ".", "code", "==", "406", ":", "self", ".", "InitiateEnrolment", "(", ")", "return", "response", "if", "response", ".", "code", "==", "200", ":", "stats_collector_instance", ".", "Get", "(", ")", ".", "IncrementCounter", "(", "\"grr_client_received_bytes\"", ",", "len", "(", "response", ".", "data", ")", ")", "return", "response", "# An unspecified error occured.", "return", "response" ]
Add to the current progress amount
def add_progress ( self , delta , done = None ) : if done is not None : self . done = done self . bar . current = max ( min ( self . done , self . current + delta ) , 0 ) self . rate_display . set_text ( self . rate_text ) self . remaining_time_display . set_text ( self . remaining_time_text ) return self . current == self . done
8,628
https://github.com/mgk/urwid_timed_progress/blob/b7292e78a58f35f285736988c48e815e71fa2060/urwid_timed_progress/__init__.py#L90-L107
[ "def", "get_model_indexes", "(", "model", ",", "add_reserver_flag", "=", "True", ")", ":", "import", "uliweb", ".", "orm", "as", "orm", "from", "sqlalchemy", ".", "engine", ".", "reflection", "import", "Inspector", "indexes", "=", "[", "]", "engine", "=", "model", ".", "get_engine", "(", ")", ".", "engine", "insp", "=", "Inspector", ".", "from_engine", "(", "engine", ")", "for", "index", "in", "insp", ".", "get_indexes", "(", "model", ".", "tablename", ")", ":", "d", "=", "{", "}", "d", "[", "'name'", "]", "=", "index", "[", "'name'", "]", "d", "[", "'unique'", "]", "=", "index", "[", "'unique'", "]", "d", "[", "'fields'", "]", "=", "index", "[", "'column_names'", "]", "if", "add_reserver_flag", ":", "d", "[", "'_reserved'", "]", "=", "True", "indexes", ".", "append", "(", "d", ")", "return", "indexes" ]
Check for validity of token and refresh if none or expired .
async def valid_token_set ( self ) : is_valid = False if self . _auth_client . token : # Account for a token near expiration now = datetime . datetime . utcnow ( ) skew = datetime . timedelta ( seconds = 60 ) if self . _auth_client . expiry > ( now + skew ) : is_valid = True return is_valid
8,629
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/http.py#L72-L82
[ "def", "init_dispatcher_logger", "(", ")", ":", "logger_file_path", "=", "'dispatcher.log'", "if", "dispatcher_env_vars", ".", "NNI_LOG_DIRECTORY", "is", "not", "None", ":", "logger_file_path", "=", "os", ".", "path", ".", "join", "(", "dispatcher_env_vars", ".", "NNI_LOG_DIRECTORY", ",", "logger_file_path", ")", "init_logger", "(", "logger_file_path", ",", "dispatcher_env_vars", ".", "NNI_LOG_LEVEL", ")" ]
Make an asynchronous HTTP request .
async def request ( self , method , url , params = None , headers = None , data = None , json = None , token_refresh_attempts = 2 , * * kwargs ) : if all ( [ data , json ] ) : msg = ( '"data" and "json" request parameters can not be used ' 'at the same time' ) logging . warn ( msg ) raise exceptions . GCPHTTPError ( msg ) req_headers = headers or { } req_headers . update ( _utils . DEFAULT_REQUEST_HEADERS ) req_kwargs = { 'params' : params , 'headers' : req_headers , } if data : req_kwargs [ 'data' ] = data if json : req_kwargs [ 'json' ] = json if token_refresh_attempts : if not await self . valid_token_set ( ) : await self . _auth_client . refresh_token ( ) token_refresh_attempts -= 1 req_headers . update ( { 'Authorization' : f'Bearer {self._auth_client.token}' } ) request_id = kwargs . get ( 'request_id' , uuid . uuid4 ( ) ) logging . debug ( _utils . REQ_LOG_FMT . format ( request_id = request_id , method = method . upper ( ) , url = url , kwargs = req_kwargs ) ) try : async with self . _session . request ( method , url , * * req_kwargs ) as resp : log_kw = { 'request_id' : request_id , 'method' : method . upper ( ) , 'url' : resp . url , 'status' : resp . status , 'reason' : resp . reason } logging . debug ( _utils . RESP_LOG_FMT . format ( * * log_kw ) ) if resp . status in REFRESH_STATUS_CODES : logging . warning ( f'[{request_id}] HTTP Status Code {resp.status}' f' returned requesting {resp.url}: {resp.reason}' ) if token_refresh_attempts : logging . info ( f'[{request_id}] Attempting request to {resp.url} ' 'again.' ) return await self . request ( method , url , token_refresh_attempts = token_refresh_attempts , request_id = request_id , * * req_kwargs ) logging . warning ( f'[{request_id}] Max attempts refreshing auth token ' f'exhausted while requesting {resp.url}' ) resp . raise_for_status ( ) return await resp . text ( ) except aiohttp . 
ClientResponseError as e : # bad HTTP status; avoid leaky abstractions and wrap HTTP errors # with our own msg = f'[{request_id}] HTTP error response from {resp.url}: {e}' logging . error ( msg , exc_info = e ) raise exceptions . GCPHTTPResponseError ( msg , resp . status ) except exceptions . GCPHTTPResponseError as e : # from recursive call raise e except Exception as e : msg = f'[{request_id}] Request call failed: {e}' logging . error ( msg , exc_info = e ) raise exceptions . GCPHTTPError ( msg )
8,630
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/http.py#L84-L189
[ "def", "_find_cgroup_mounts", "(", ")", ":", "try", ":", "with", "open", "(", "'/proc/mounts'", ",", "'rt'", ")", "as", "mountsFile", ":", "for", "mount", "in", "mountsFile", ":", "mount", "=", "mount", ".", "split", "(", "' '", ")", "if", "mount", "[", "2", "]", "==", "'cgroup'", ":", "mountpoint", "=", "mount", "[", "1", "]", "options", "=", "mount", "[", "3", "]", "for", "option", "in", "options", ".", "split", "(", "','", ")", ":", "if", "option", "in", "ALL_KNOWN_SUBSYSTEMS", ":", "yield", "(", "option", ",", "mountpoint", ")", "except", "IOError", ":", "logging", ".", "exception", "(", "'Cannot read /proc/mounts'", ")" ]
Get a URL and return its JSON response .
async def get_json ( self , url , json_callback = None , * * kwargs ) : if not json_callback : json_callback = json . loads response = await self . request ( method = 'get' , url = url , * * kwargs ) return json_callback ( response )
8,631
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/http.py#L191-L206
[ "def", "quantile_1D", "(", "data", ",", "weights", ",", "quantile", ")", ":", "# Check the data", "if", "not", "isinstance", "(", "data", ",", "np", ".", "matrix", ")", ":", "data", "=", "np", ".", "asarray", "(", "data", ")", "if", "not", "isinstance", "(", "weights", ",", "np", ".", "matrix", ")", ":", "weights", "=", "np", ".", "asarray", "(", "weights", ")", "nd", "=", "data", ".", "ndim", "if", "nd", "!=", "1", ":", "raise", "TypeError", "(", "\"data must be a one dimensional array\"", ")", "ndw", "=", "weights", ".", "ndim", "if", "ndw", "!=", "1", ":", "raise", "TypeError", "(", "\"weights must be a one dimensional array\"", ")", "if", "data", ".", "shape", "!=", "weights", ".", "shape", ":", "raise", "TypeError", "(", "\"the length of data and weights must be the same\"", ")", "if", "(", "(", "quantile", ">", "1.", ")", "or", "(", "quantile", "<", "0.", ")", ")", ":", "raise", "ValueError", "(", "\"quantile must have a value between 0. and 1.\"", ")", "# Sort the data", "ind_sorted", "=", "np", ".", "argsort", "(", "data", ")", "sorted_data", "=", "data", "[", "ind_sorted", "]", "sorted_weights", "=", "weights", "[", "ind_sorted", "]", "# Compute the auxiliary arrays", "Sn", "=", "np", ".", "cumsum", "(", "sorted_weights", ")", "# TODO: Check that the weights do not sum zero", "#assert Sn != 0, \"The sum of the weights must not be zero\"", "Pn", "=", "(", "Sn", "-", "0.5", "*", "sorted_weights", ")", "/", "Sn", "[", "-", "1", "]", "# Get the value of the weighted median", "return", "np", ".", "interp", "(", "quantile", ",", "Pn", ",", "sorted_data", ")" ]
Aggregate data from all pages of an API query .
async def get_all ( self , url , params = None ) : if not params : params = { } items = [ ] next_page_token = None while True : if next_page_token : params [ 'pageToken' ] = next_page_token response = await self . get_json ( url , params = params ) items . append ( response ) next_page_token = response . get ( 'nextPageToken' ) if not next_page_token : break return items
8,632
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/http.py#L208-L232
[ "def", "update_version_descriptor", "(", "self", ",", "task", ",", "releasetype", ",", "descriptor", ",", "verbrowser", ",", "commentbrowser", ")", ":", "if", "task", "is", "None", ":", "null", "=", "treemodel", ".", "TreeItem", "(", "None", ")", "verbrowser", ".", "set_model", "(", "treemodel", ".", "TreeModel", "(", "null", ")", ")", "return", "m", "=", "self", ".", "create_version_model", "(", "task", ",", "releasetype", ",", "descriptor", ")", "verbrowser", ".", "set_model", "(", "m", ")", "commentbrowser", ".", "set_model", "(", "m", ")" ]
Report if there is an existing config file
def check_config ( ) : configfile = ConfigFile ( ) global data if data . keys ( ) > 0 : # FIXME: run a better check of this file print ( "gitberg config file exists" ) print ( "\twould you like to edit your gitberg config file?" ) else : print ( "No config found" ) print ( "\twould you like to create a gitberg config file?" ) answer = input ( "--> [Y/n]" ) # By default, the answer is yes, as denoted by the capital Y if not answer : answer = 'Y' # If yes, generate a new configuration # to be written out as yaml if answer in 'Yy' : print ( "Running gitberg config generator ..." ) # config.exists_or_make() config_gen = ConfigGenerator ( current = data ) config_gen . ask ( ) # print(config_gen.answers) data = config_gen . answers configfile . write ( ) print ( "Config written to {}" . format ( configfile . file_path ) )
8,633
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/config.py#L94-L122
[ "def", "unindex_layers_with_issues", "(", "self", ",", "use_cache", "=", "False", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Issue", ",", "Layer", ",", "Service", "from", "django", ".", "contrib", ".", "contenttypes", ".", "models", "import", "ContentType", "layer_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "Layer", ")", "service_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "Service", ")", "for", "issue", "in", "Issue", ".", "objects", ".", "filter", "(", "content_type__pk", "=", "layer_type", ".", "id", ")", ":", "unindex_layer", "(", "issue", ".", "content_object", ".", "id", ",", "use_cache", ")", "for", "issue", "in", "Issue", ".", "objects", ".", "filter", "(", "content_type__pk", "=", "service_type", ".", "id", ")", ":", "for", "layer", "in", "issue", ".", "content_object", ".", "layer_set", ".", "all", "(", ")", ":", "unindex_layer", "(", "layer", ".", "id", ",", "use_cache", ")" ]
Sample code to retrieve the data .
async def main ( ) : async with aiohttp . ClientSession ( ) as session : data = Luftdaten ( SENSOR_ID , loop , session ) await data . get_data ( ) if not await data . validate_sensor ( ) : print ( "Station is not available:" , data . sensor_id ) return if data . values and data . meta : # Print the sensor values print ( "Sensor values:" , data . values ) # Print the coordinates fo the sensor print ( "Location:" , data . meta [ 'latitude' ] , data . meta [ 'longitude' ] )
8,634
https://github.com/fabaff/python-luftdaten/blob/30be973257fccb19baa8dbd55206da00f62dc81c/example.py#L11-L26
[ "def", "create_doc_jar", "(", "self", ",", "target", ",", "open_jar", ",", "version", ")", ":", "javadoc", "=", "self", ".", "_java_doc", "(", "target", ")", "scaladoc", "=", "self", ".", "_scala_doc", "(", "target", ")", "if", "javadoc", "or", "scaladoc", ":", "jar_path", "=", "self", ".", "artifact_path", "(", "open_jar", ",", "version", ",", "suffix", "=", "'-javadoc'", ")", "with", "self", ".", "open_jar", "(", "jar_path", ",", "overwrite", "=", "True", ",", "compressed", "=", "True", ")", "as", "open_jar", ":", "def", "add_docs", "(", "docs", ")", ":", "if", "docs", ":", "for", "basedir", ",", "doc_files", "in", "docs", ".", "items", "(", ")", ":", "for", "doc_file", "in", "doc_files", ":", "open_jar", ".", "write", "(", "os", ".", "path", ".", "join", "(", "basedir", ",", "doc_file", ")", ",", "doc_file", ")", "add_docs", "(", "javadoc", ")", "add_docs", "(", "scaladoc", ")", "return", "jar_path", "else", ":", "return", "None" ]
Fetch all instances in a GCE project .
async def list_instances ( self , project , page_size = 100 , instance_filter = None ) : url = ( f'{self.BASE_URL}{self.api_version}/projects/{project}' '/aggregated/instances' ) params = { 'maxResults' : page_size } if instance_filter : params [ 'filter' ] = instance_filter responses = await self . list_all ( url , params ) instances = self . _parse_rsps_for_instances ( responses ) return instances
8,635
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/gce.py#L89-L118
[ "def", "compute_path", "(", "self", ")", ":", "self", ".", "_setup_dtw", "(", ")", "if", "self", ".", "dtw", "is", "None", ":", "self", ".", "log", "(", "u\"Inner self.dtw is None => returning None\"", ")", "return", "None", "self", ".", "log", "(", "u\"Computing path...\"", ")", "wave_path", "=", "self", ".", "dtw", ".", "compute_path", "(", ")", "self", ".", "log", "(", "u\"Computing path... done\"", ")", "self", ".", "log", "(", "u\"Translating path to full wave indices...\"", ")", "real_indices", "=", "numpy", ".", "array", "(", "[", "t", "[", "0", "]", "for", "t", "in", "wave_path", "]", ")", "synt_indices", "=", "numpy", ".", "array", "(", "[", "t", "[", "1", "]", "for", "t", "in", "wave_path", "]", ")", "if", "self", ".", "rconf", ".", "mmn", ":", "self", ".", "log", "(", "u\"Translating real indices with masked_middle_map...\"", ")", "real_indices", "=", "self", ".", "real_wave_mfcc", ".", "masked_middle_map", "[", "real_indices", "]", "real_indices", "[", "0", "]", "=", "self", ".", "real_wave_mfcc", ".", "head_length", "self", ".", "log", "(", "u\"Translating real indices with masked_middle_map... done\"", ")", "self", ".", "log", "(", "u\"Translating synt indices with masked_middle_map...\"", ")", "synt_indices", "=", "self", ".", "synt_wave_mfcc", ".", "masked_middle_map", "[", "synt_indices", "]", "self", ".", "log", "(", "u\"Translating synt indices with masked_middle_map... done\"", ")", "else", ":", "self", ".", "log", "(", "u\"Translating real indices by adding head_length...\"", ")", "real_indices", "+=", "self", ".", "real_wave_mfcc", ".", "head_length", "self", ".", "log", "(", "u\"Translating real indices by adding head_length... done\"", ")", "self", ".", "log", "(", "u\"Nothing to do with synt indices\"", ")", "self", ".", "log", "(", "u\"Translating path to full wave indices... done\"", ")", "return", "(", "real_indices", ",", "synt_indices", ")" ]
Given a parsed beta chain of a class II MHC infer the most frequent corresponding alpha chain .
def infer_alpha_chain ( beta ) : if beta . gene . startswith ( "DRB" ) : return AlleleName ( species = "HLA" , gene = "DRA1" , allele_family = "01" , allele_code = "01" ) elif beta . gene . startswith ( "DPB" ) : # Most common alpha chain for DP is DPA*01:03 but we really # need to change this logic to use a lookup table of pairwise # frequencies for inferring the alpha-beta pairing return AlleleName ( species = "HLA" , gene = "DPA1" , allele_family = "01" , allele_code = "03" ) elif beta . gene . startswith ( "DQB" ) : # Most common DQ alpha (according to wikipedia) # DQA1*01:02 return AlleleName ( species = "HLA" , gene = "DQA1" , allele_family = "01" , allele_code = "02" ) return None
8,636
https://github.com/openvax/mhcnames/blob/71694b9d620db68ceee44da1b8422ff436f15bd3/mhcnames/class2.py#L21-L39
[ "def", "wait", "(", "self", ",", "sensor_name", ",", "condition_or_value", ",", "timeout", "=", "5", ")", ":", "sensor_name", "=", "escape_name", "(", "sensor_name", ")", "sensor", "=", "self", ".", "sensor", "[", "sensor_name", "]", "try", ":", "yield", "sensor", ".", "wait", "(", "condition_or_value", ",", "timeout", ")", "except", "tornado", ".", "gen", ".", "TimeoutError", ":", "raise", "tornado", ".", "gen", ".", "Return", "(", "False", ")", "else", ":", "raise", "tornado", ".", "gen", ".", "Return", "(", "True", ")" ]
Create a new Zendesk ticket
def create_ticket ( subject , tags , ticket_body , requester_email = None , custom_fields = [ ] ) : payload = { 'ticket' : { 'subject' : subject , 'comment' : { 'body' : ticket_body } , 'group_id' : settings . ZENDESK_GROUP_ID , 'tags' : tags , 'custom_fields' : custom_fields } } if requester_email : payload [ 'ticket' ] [ 'requester' ] = { 'name' : 'Sender: %s' % requester_email . split ( '@' ) [ 0 ] , 'email' : requester_email , } else : payload [ 'ticket' ] [ 'requester_id' ] = settings . ZENDESK_REQUESTER_ID requests . post ( get_ticket_endpoint ( ) , data = json . dumps ( payload ) , auth = zendesk_auth ( ) , headers = { 'content-type' : 'application/json' } ) . raise_for_status ( )
8,637
https://github.com/ministryofjustice/django-zendesk-tickets/blob/8c1332b5536dc1cf967b612aad5d07e02439d280/zendesk_tickets/client.py#L19-L45
[ "def", "_SeparateTypes", "(", "self", ",", "metadata_value_pairs", ")", ":", "registry_pairs", "=", "[", "]", "file_pairs", "=", "[", "]", "match_pairs", "=", "[", "]", "for", "metadata", ",", "result", "in", "metadata_value_pairs", ":", "if", "(", "result", ".", "stat_entry", ".", "pathspec", ".", "pathtype", "==", "rdf_paths", ".", "PathSpec", ".", "PathType", ".", "REGISTRY", ")", ":", "registry_pairs", ".", "append", "(", "(", "metadata", ",", "result", ".", "stat_entry", ")", ")", "else", ":", "file_pairs", ".", "append", "(", "(", "metadata", ",", "result", ")", ")", "match_pairs", ".", "extend", "(", "[", "(", "metadata", ",", "match", ")", "for", "match", "in", "result", ".", "matches", "]", ")", "return", "registry_pairs", ",", "file_pairs", ",", "match_pairs" ]
Display a message
def message ( message , title = '' ) : return backend_api . opendialog ( "message" , dict ( message = message , title = title ) )
8,638
https://github.com/ponty/psidialogs/blob/e385ab6b48cb43af52b810a1bf76a8135f4585b8/psidialogs/__init__.py#L9-L19
[ "def", "_openResources", "(", "self", ")", ":", "try", ":", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "True", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "warning", "(", "ex", ")", "logger", ".", "warning", "(", "\"Unable to read wav with memmory mapping. Trying without now.\"", ")", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "False", ")", "self", ".", "_array", "=", "data", "self", ".", "attributes", "[", "'rate'", "]", "=", "rate" ]
A dialog to get a file name . The default argument specifies a file path .
def ask_file ( message = 'Select file for open.' , default = '' , title = '' , save = False ) : return backend_api . opendialog ( "ask_file" , dict ( message = message , default = default , title = title , save = save ) )
8,639
https://github.com/ponty/psidialogs/blob/e385ab6b48cb43af52b810a1bf76a8135f4585b8/psidialogs/__init__.py#L82-L98
[ "def", "wncond", "(", "left", ",", "right", ",", "window", ")", ":", "assert", "isinstance", "(", "window", ",", "stypes", ".", "SpiceCell", ")", "assert", "window", ".", "dtype", "==", "1", "left", "=", "ctypes", ".", "c_double", "(", "left", ")", "right", "=", "ctypes", ".", "c_double", "(", "right", ")", "libspice", ".", "wncond_c", "(", "left", ",", "right", ",", "ctypes", ".", "byref", "(", "window", ")", ")", "return", "window" ]
A dialog to get a directory name . Returns the name of a directory or None if user chose to cancel . If the default argument specifies a directory name and that directory exists then the dialog box will start with that directory .
def ask_folder ( message = 'Select folder.' , default = '' , title = '' ) : return backend_api . opendialog ( "ask_folder" , dict ( message = message , default = default , title = title ) )
8,640
https://github.com/ponty/psidialogs/blob/e385ab6b48cb43af52b810a1bf76a8135f4585b8/psidialogs/__init__.py#L101-L113
[ "def", "get_max_bitlen", "(", "self", ")", ":", "payload_max_bitlen", "=", "self", ".", "max_size", "*", "self", ".", "value_type", ".", "get_max_bitlen", "(", ")", "return", "{", "self", ".", "MODE_DYNAMIC", ":", "payload_max_bitlen", "+", "self", ".", "max_size", ".", "bit_length", "(", ")", ",", "self", ".", "MODE_STATIC", ":", "payload_max_bitlen", "}", "[", "self", ".", "mode", "]" ]
Display a message with choices of OK and Cancel .
def ask_ok_cancel ( message = '' , default = 0 , title = '' ) : return backend_api . opendialog ( "ask_ok_cancel" , dict ( message = message , default = default , title = title ) )
8,641
https://github.com/ponty/psidialogs/blob/e385ab6b48cb43af52b810a1bf76a8135f4585b8/psidialogs/__init__.py#L151-L166
[ "def", "full_path", "(", "path", ")", ":", "return", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "expandvars", "(", "path", ")", ")", ")" ]
Display a message with choices of Yes and No .
def ask_yes_no ( message = '' , default = 0 , title = '' ) : return backend_api . opendialog ( "ask_yes_no" , dict ( message = message , default = default , title = title ) )
8,642
https://github.com/ponty/psidialogs/blob/e385ab6b48cb43af52b810a1bf76a8135f4585b8/psidialogs/__init__.py#L169-L184
[ "def", "_GetDelayImportTimestamps", "(", "self", ",", "pefile_object", ")", ":", "delay_import_timestamps", "=", "[", "]", "if", "not", "hasattr", "(", "pefile_object", ",", "'DIRECTORY_ENTRY_DELAY_IMPORT'", ")", ":", "return", "delay_import_timestamps", "for", "importdata", "in", "pefile_object", ".", "DIRECTORY_ENTRY_DELAY_IMPORT", ":", "dll_name", "=", "importdata", ".", "dll", "try", ":", "dll_name", "=", "dll_name", ".", "decode", "(", "'ascii'", ")", "except", "UnicodeDecodeError", ":", "dll_name", "=", "dll_name", ".", "decode", "(", "'ascii'", ",", "errors", "=", "'replace'", ")", "timestamp", "=", "getattr", "(", "importdata", ".", "struct", ",", "'dwTimeStamp'", ",", "0", ")", "delay_import_timestamps", ".", "append", "(", "[", "dll_name", ",", "timestamp", "]", ")", "return", "delay_import_timestamps" ]
Register a receiver .
def register ( self , receiver_id , receiver ) : assert receiver_id not in self . receivers self . receivers [ receiver_id ] = receiver ( receiver_id )
8,643
https://github.com/inveniosoftware/invenio-webhooks/blob/f407cb2245464543ee474a81189fb9d3978bdde5/invenio_webhooks/ext.py#L45-L48
[ "def", "coordination_geometry_symmetry_measures_fallback_random", "(", "self", ",", "coordination_geometry", ",", "NRANDOM", "=", "10", ",", "points_perfect", "=", "None", ")", ":", "permutations_symmetry_measures", "=", "[", "None", "]", "*", "NRANDOM", "permutations", "=", "list", "(", ")", "algos", "=", "list", "(", ")", "perfect2local_maps", "=", "list", "(", ")", "local2perfect_maps", "=", "list", "(", ")", "for", "iperm", "in", "range", "(", "NRANDOM", ")", ":", "perm", "=", "np", ".", "random", ".", "permutation", "(", "coordination_geometry", ".", "coordination_number", ")", "permutations", ".", "append", "(", "perm", ")", "p2l", "=", "{", "}", "l2p", "=", "{", "}", "for", "i_p", ",", "pp", "in", "enumerate", "(", "perm", ")", ":", "p2l", "[", "i_p", "]", "=", "pp", "l2p", "[", "pp", "]", "=", "i_p", "perfect2local_maps", ".", "append", "(", "p2l", ")", "local2perfect_maps", ".", "append", "(", "l2p", ")", "points_distorted", "=", "self", ".", "local_geometry", ".", "points_wcs_ctwcc", "(", "permutation", "=", "perm", ")", "sm_info", "=", "symmetry_measure", "(", "points_distorted", "=", "points_distorted", ",", "points_perfect", "=", "points_perfect", ")", "sm_info", "[", "'translation_vector'", "]", "=", "self", ".", "local_geometry", ".", "centroid_with_centre", "permutations_symmetry_measures", "[", "iperm", "]", "=", "sm_info", "algos", ".", "append", "(", "'APPROXIMATE_FALLBACK'", ")", "return", "permutations_symmetry_measures", ",", "permutations", ",", "algos", ",", "local2perfect_maps", ",", "perfect2local_maps" ]
Retrieve the information for a scheduleRule entity .
def get ( self , sched_rule_id ) : path = '/' . join ( [ 'schedulerule' , sched_rule_id ] ) return self . rachio . get ( path )
8,644
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/schedulerule.py#L33-L36
[ "def", "undefine", "(", "self", ")", ":", "if", "lib", ".", "EnvUndefglobal", "(", "self", ".", "_env", ",", "self", ".", "_glb", ")", "!=", "1", ":", "raise", "CLIPSError", "(", "self", ".", "_env", ")", "self", ".", "_env", "=", "None" ]
Parse message according to schema .
def parse ( self , message , schema ) : func = { 'audit-log' : self . _parse_audit_log_msg , 'event' : self . _parse_event_msg , } [ schema ] return func ( message )
8,645
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/schema/parse.py#L69-L85
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
Start a zone .
def start ( self , zone_id , duration ) : path = 'zone/start' payload = { 'id' : zone_id , 'duration' : duration } return self . rachio . put ( path , payload )
8,646
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/zone.py#L11-L15
[ "def", "removeMigrationRequest", "(", "self", ",", "migration_rqst", ")", ":", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "try", ":", "tran", "=", "conn", ".", "begin", "(", ")", "self", ".", "mgrremove", ".", "execute", "(", "conn", ",", "migration_rqst", ")", "tran", ".", "commit", "(", ")", "except", "dbsException", "as", "he", ":", "if", "conn", ":", "conn", ".", "close", "(", ")", "raise", "except", "Exception", "as", "ex", ":", "if", "conn", ":", "conn", ".", "close", "(", ")", "raise", "if", "conn", ":", "conn", ".", "close", "(", ")" ]
Start multiple zones .
def startMultiple ( self , zones ) : path = 'zone/start_multiple' payload = { 'zones' : zones } return self . rachio . put ( path , payload )
8,647
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/zone.py#L17-L21
[ "def", "_wrap_client_error", "(", "e", ")", ":", "error_code", "=", "e", ".", "response", "[", "'Error'", "]", "[", "'Code'", "]", "message", "=", "e", ".", "response", "[", "'Error'", "]", "[", "'Message'", "]", "if", "error_code", "==", "'BadRequestException'", ":", "if", "\"Failed to copy S3 object. Access denied:\"", "in", "message", ":", "match", "=", "re", ".", "search", "(", "'bucket=(.+?), key=(.+?)$'", ",", "message", ")", "if", "match", ":", "return", "S3PermissionsRequired", "(", "bucket", "=", "match", ".", "group", "(", "1", ")", ",", "key", "=", "match", ".", "group", "(", "2", ")", ")", "if", "\"Invalid S3 URI\"", "in", "message", ":", "return", "InvalidS3UriError", "(", "message", "=", "message", ")", "return", "ServerlessRepoClientError", "(", "message", "=", "message", ")" ]
Retrieve the information for a zone entity .
def get ( self , zone_id ) : path = '/' . join ( [ 'zone' , zone_id ] ) return self . rachio . get ( path )
8,648
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/zone.py#L27-L30
[ "def", "preprocess", "(", "msg_body", ",", "delimiter", ",", "content_type", "=", "'text/plain'", ")", ":", "msg_body", "=", "_replace_link_brackets", "(", "msg_body", ")", "msg_body", "=", "_wrap_splitter_with_newline", "(", "msg_body", ",", "delimiter", ",", "content_type", ")", "return", "msg_body" ]
Start the schedule .
def start ( self ) : zones = [ { "id" : data [ 0 ] , "duration" : data [ 1 ] , "sortOrder" : count } for ( count , data ) in enumerate ( self . _zones , 1 ) ] self . _api . startMultiple ( zones )
8,649
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/zone.py#L45-L49
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_closed", ":", "return", "False", "log", ".", "info", "(", "\"{module}: '{name}' [{id}]: is closing\"", ".", "format", "(", "module", "=", "self", ".", "manager", ".", "module_name", ",", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ")", ")", "if", "self", ".", "_console", ":", "self", ".", "_manager", ".", "port_manager", ".", "release_tcp_port", "(", "self", ".", "_console", ",", "self", ".", "_project", ")", "self", ".", "_console", "=", "None", "if", "self", ".", "_wrap_console", ":", "self", ".", "_manager", ".", "port_manager", ".", "release_tcp_port", "(", "self", ".", "_internal_console_port", ",", "self", ".", "_project", ")", "self", ".", "_internal_console_port", "=", "None", "if", "self", ".", "_aux", ":", "self", ".", "_manager", ".", "port_manager", ".", "release_tcp_port", "(", "self", ".", "_aux", ",", "self", ".", "_project", ")", "self", ".", "_aux", "=", "None", "self", ".", "_closed", "=", "True", "return", "True" ]
Do not allow translations longer than the max_lenght of the field to be translated .
def clean_translation ( self ) : translation = self . cleaned_data [ 'translation' ] if self . instance and self . instance . content_object : # do not allow string longer than translatable field obj = self . instance . content_object field = obj . _meta . get_field ( self . instance . field ) max_length = field . max_length if max_length and len ( translation ) > max_length : raise forms . ValidationError ( _ ( 'The entered translation is too long. You entered ' '%(entered)s chars, max length is %(maxlength)s' ) % { 'entered' : len ( translation ) , 'maxlength' : max_length , } ) else : raise forms . ValidationError ( _ ( 'Can not store translation. First create all translation' ' for this object' ) ) return translation
8,650
https://github.com/angvp/django-klingon/blob/6716fcb7e98d7d27d41c72c4036d3593f1cc04c2/klingon/admin.py#L54-L80
[ "def", "connect", "(", "self", ")", ":", "distributed_logger", ".", "info", "(", "'Connecting registry proxy to ZMQ socket %s'", ",", "self", ".", "socket_addr", ")", "self", ".", "zmq_context", "=", "zmq", ".", "Context", "(", ")", "sock", "=", "self", ".", "zmq_context", ".", "socket", "(", "zmq", ".", "PUB", ")", "sock", ".", "set_hwm", "(", "0", ")", "sock", ".", "setsockopt", "(", "zmq", ".", "LINGER", ",", "0", ")", "sock", ".", "connect", "(", "self", ".", "socket_addr", ")", "distributed_logger", ".", "info", "(", "'Connected registry proxy to ZMQ socket %s'", ",", "self", ".", "socket_addr", ")", "def", "_reset_socket", "(", "values", ")", ":", "for", "value", "in", "values", ":", "try", ":", "_reset_socket", "(", "value", ".", "values", "(", ")", ")", "except", "AttributeError", ":", "value", ".", "socket", "=", "sock", "distributed_logger", ".", "debug", "(", "'Resetting socket on metrics proxies'", ")", "_reset_socket", "(", "self", ".", "stats", ".", "values", "(", ")", ")", "self", ".", "socket", "=", "sock", "distributed_logger", ".", "debug", "(", "'Reset socket on metrics proxies'", ")" ]
Yields merge rules as key - value pairs in which the first element is a JSON path as a tuple and the second element is a list of merge properties whose values are true .
def _get_merge_rules ( properties , path = None ) : if path is None : path = ( ) for key , value in properties . items ( ) : new_path = path + ( key , ) types = _get_types ( value ) # `omitWhenMerged` supersedes all other rules. # See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#omit-when-merged if value . get ( 'omitWhenMerged' ) or value . get ( 'mergeStrategy' ) == 'ocdsOmit' : yield ( new_path , { 'omitWhenMerged' } ) # `wholeListMerge` supersedes any nested rules. # See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#whole-list-merge elif 'array' in types and ( value . get ( 'wholeListMerge' ) or value . get ( 'mergeStrategy' ) == 'ocdsVersion' ) : yield ( new_path , { 'wholeListMerge' } ) elif 'object' in types and 'properties' in value : yield from _get_merge_rules ( value [ 'properties' ] , path = new_path ) elif 'array' in types and 'items' in value : item_types = _get_types ( value [ 'items' ] ) # See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#objects if any ( item_type != 'object' for item_type in item_types ) : yield ( new_path , { 'wholeListMerge' } ) elif 'object' in item_types and 'properties' in value [ 'items' ] : # See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#whole-list-merge if 'id' not in value [ 'items' ] [ 'properties' ] : yield ( new_path , { 'wholeListMerge' } ) else : yield from _get_merge_rules ( value [ 'items' ] [ 'properties' ] , path = new_path )
8,651
https://github.com/open-contracting/ocds-merge/blob/09ef170b24f3fd13bdb1e33043d22de5f0448a9d/ocdsmerge/merge.py#L60-L92
[ "def", "to_raw_address", "(", "addr", ",", "section", ")", ":", "return", "addr", "-", "section", ".", "header", ".", "VirtualAddress", "+", "section", ".", "header", ".", "PointerToRawData" ]
Returns merge rules as key - value pairs in which the key is a JSON path as a tuple and the value is a list of merge properties whose values are true .
def get_merge_rules ( schema = None ) : schema = schema or get_release_schema_url ( get_tags ( ) [ - 1 ] ) if isinstance ( schema , dict ) : deref_schema = jsonref . JsonRef . replace_refs ( schema ) else : deref_schema = _get_merge_rules_from_url_or_path ( schema ) return dict ( _get_merge_rules ( deref_schema [ 'properties' ] ) )
8,652
https://github.com/open-contracting/ocds-merge/blob/09ef170b24f3fd13bdb1e33043d22de5f0448a9d/ocdsmerge/merge.py#L106-L116
[ "def", "to_raw_address", "(", "addr", ",", "section", ")", ":", "return", "addr", "-", "section", ".", "header", ".", "VirtualAddress", "+", "section", ".", "header", ".", "PointerToRawData" ]
Unflattens a processed object into a JSON object .
def unflatten ( processed , merge_rules ) : unflattened = OrderedDict ( ) for key in processed : current_node = unflattened for end , part in enumerate ( key , 1 ) : # If this is a path to an item of an array. # See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#identifier-merge if isinstance ( part , IdValue ) : # If the `id` of an object in the array matches, change into it. for node in current_node : if isinstance ( node , IdDict ) and node . identifier == part . identifier : current_node = node break # Otherwise, append a new object, and change into it. else : new_node = IdDict ( ) new_node . identifier = part . identifier # If the original object had an `id` value, set it. if part . original_value is not None : new_node [ 'id' ] = part . original_value current_node . append ( new_node ) current_node = new_node continue # Otherwise, this is a path to a property of an object. node = current_node . get ( part ) # If this is a path to a node we visited before, change into it. If it's an `id` field, it's already been # set to its original value. if node is not None : current_node = node continue # If this is a full path, copy the data. if len ( key ) == end : # Omit null'ed fields. if processed [ key ] is not None : current_node [ part ] = processed [ key ] continue # If the path is to a new array, start a new array, and change into it. if isinstance ( key [ end ] , IdValue ) : new_node = [ ] # If the path is to a new object, start a new object, and change into it. else : new_node = OrderedDict ( ) current_node [ part ] = new_node current_node = new_node return unflattened
8,653
https://github.com/open-contracting/ocds-merge/blob/09ef170b24f3fd13bdb1e33043d22de5f0448a9d/ocdsmerge/merge.py#L180-L234
[ "def", "remover", "(", "self", ",", "id_brand", ")", ":", "if", "not", "is_valid_int_param", "(", "id_brand", ")", ":", "raise", "InvalidParameterError", "(", "u'The identifier of Brand is invalid or was not informed.'", ")", "url", "=", "'brand/'", "+", "str", "(", "id_brand", ")", "+", "'/'", "code", ",", "xml", "=", "self", ".", "submit", "(", "None", ",", "'DELETE'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
Merges a list of releases into a compiledRelease .
def merge ( releases , schema = None , merge_rules = None ) : if not merge_rules : merge_rules = get_merge_rules ( schema ) merged = OrderedDict ( { ( 'tag' , ) : [ 'compiled' ] } ) for release in sorted ( releases , key = lambda release : release [ 'date' ] ) : release = release . copy ( ) ocid = release [ 'ocid' ] date = release [ 'date' ] # Prior to OCDS 1.1.4, `tag` didn't set "omitWhenMerged": true. release . pop ( 'tag' , None ) # becomes ["compiled"] flat = flatten ( release , merge_rules ) processed = process_flattened ( flat ) # Add an `id` and `date`. merged [ ( 'id' , ) ] = '{}-{}' . format ( ocid , date ) merged [ ( 'date' , ) ] = date # In OCDS 1.0, `ocid` incorrectly sets "mergeStrategy": "ocdsOmit". merged [ ( 'ocid' , ) ] = ocid merged . update ( processed ) return unflatten ( merged , merge_rules )
8,654
https://github.com/open-contracting/ocds-merge/blob/09ef170b24f3fd13bdb1e33043d22de5f0448a9d/ocdsmerge/merge.py#L277-L305
[ "def", "_get_partition_info", "(", "storage_system", ",", "device_path", ")", ":", "try", ":", "partition_infos", "=", "storage_system", ".", "RetrieveDiskPartitionInfo", "(", "devicePath", "=", "[", "device_path", "]", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{0}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "log", ".", "trace", "(", "'partition_info = %s'", ",", "partition_infos", "[", "0", "]", ")", "return", "partition_infos", "[", "0", "]" ]
Merges a list of releases into a versionedRelease .
def merge_versioned ( releases , schema = None , merge_rules = None ) : if not merge_rules : merge_rules = get_merge_rules ( schema ) merged = OrderedDict ( ) for release in sorted ( releases , key = lambda release : release [ 'date' ] ) : release = release . copy ( ) # Don't version the OCID. ocid = release . pop ( 'ocid' ) merged [ ( 'ocid' , ) ] = ocid releaseID = release [ 'id' ] date = release [ 'date' ] # Prior to OCDS 1.1.4, `tag` didn't set "omitWhenMerged": true. tag = release . pop ( 'tag' , None ) flat = flatten ( release , merge_rules ) processed = process_flattened ( flat ) for key , value in processed . items ( ) : # If value is unchanged, don't add to history. if key in merged and value == merged [ key ] [ - 1 ] [ 'value' ] : continue if key not in merged : merged [ key ] = [ ] merged [ key ] . append ( OrderedDict ( [ ( 'releaseID' , releaseID ) , ( 'releaseDate' , date ) , ( 'releaseTag' , tag ) , ( 'value' , value ) , ] ) ) return unflatten ( merged , merge_rules )
8,655
https://github.com/open-contracting/ocds-merge/blob/09ef170b24f3fd13bdb1e33043d22de5f0448a9d/ocdsmerge/merge.py#L308-L346
[ "def", "get_experiment_from_key", "(", "self", ",", "experiment_key", ")", ":", "experiment", "=", "self", ".", "experiment_key_map", ".", "get", "(", "experiment_key", ")", "if", "experiment", ":", "return", "experiment", "self", ".", "logger", ".", "error", "(", "'Experiment key \"%s\" is not in datafile.'", "%", "experiment_key", ")", "self", ".", "error_handler", ".", "handle_error", "(", "exceptions", ".", "InvalidExperimentException", "(", "enums", ".", "Errors", ".", "INVALID_EXPERIMENT_KEY_ERROR", ")", ")", "return", "None" ]
Split list into chunks of the given size . Original order is preserved .
def chunks ( items , size ) : return [ items [ i : i + size ] for i in range ( 0 , len ( items ) , size ) ]
8,656
https://github.com/F483/btctxstore/blob/5790ace3a3d4c9bcc759e7c931fc4a57d40b6c25/btctxstore/common.py#L22-L30
[ "def", "authenticate_with_access_token", "(", "access_token", ")", ":", "credentials", "=", "Credentials", "(", "access_token", "=", "access_token", ")", "client", "=", "YamcsClient", "(", "'localhost:8090'", ",", "credentials", "=", "credentials", ")", "for", "link", "in", "client", ".", "list_data_links", "(", "'simulator'", ")", ":", "print", "(", "link", ")" ]
Login to the ZoneMinder API .
def login ( self ) : _LOGGER . debug ( "Attempting to login to ZoneMinder" ) login_post = { 'view' : 'console' , 'action' : 'login' } if self . _username : login_post [ 'username' ] = self . _username if self . _password : login_post [ 'password' ] = self . _password req = requests . post ( urljoin ( self . _server_url , 'index.php' ) , data = login_post , verify = self . _verify_ssl ) self . _cookies = req . cookies # Login calls returns a 200 response on both failure and success. # The only way to tell if you logged in correctly is to issue an api # call. req = requests . get ( urljoin ( self . _server_url , 'api/host/getVersion.json' ) , cookies = self . _cookies , timeout = ZoneMinder . DEFAULT_TIMEOUT , verify = self . _verify_ssl ) if not req . ok : _LOGGER . error ( "Connection error logging into ZoneMinder" ) return False return True
8,657
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L35-L62
[ "def", "_get_query", "(", "self", ",", "callback", ",", "schema", ",", "query", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "query", ":", "query", "=", "Query", "(", ")", "ret", "=", "None", "with", "self", ".", "connection", "(", "*", "*", "kwargs", ")", "as", "connection", ":", "kwargs", "[", "'connection'", "]", "=", "connection", "try", ":", "if", "connection", ".", "in_transaction", "(", ")", ":", "# we wrap SELECT queries in a transaction if we are in a transaction because", "# it could cause data loss if it failed by causing the db to discard", "# anything in the current transaction if the query isn't wrapped,", "# go ahead, ask me how I know this", "with", "self", ".", "transaction", "(", "*", "*", "kwargs", ")", ":", "ret", "=", "callback", "(", "schema", ",", "query", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "ret", "=", "callback", "(", "schema", ",", "query", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "if", "self", ".", "handle_error", "(", "schema", ",", "e", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "callback", "(", "schema", ",", "query", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "self", ".", "raise_error", "(", "e", ",", "exc_info", ")", "return", "ret" ]
Perform a request to the ZoneMinder API .
def _zm_request ( self , method , api_url , data = None , timeout = DEFAULT_TIMEOUT ) -> dict : try : # Since the API uses sessions that expire, sometimes we need to # re-auth if the call fails. for _ in range ( ZoneMinder . LOGIN_RETRIES ) : req = requests . request ( method , urljoin ( self . _server_url , api_url ) , data = data , cookies = self . _cookies , timeout = timeout , verify = self . _verify_ssl ) if not req . ok : self . login ( ) else : break else : _LOGGER . error ( 'Unable to get API response from ZoneMinder' ) try : return req . json ( ) except ValueError : _LOGGER . exception ( 'JSON decode exception caught while' 'attempting to decode "%s"' , req . text ) return { } except requests . exceptions . ConnectionError : _LOGGER . exception ( 'Unable to connect to ZoneMinder' ) return { }
8,658
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L72-L100
[ "def", "sql_column_like_drug", "(", "self", ",", "column_name", ":", "str", ")", "->", "str", ":", "clauses", "=", "[", "\"{col} LIKE {fragment}\"", ".", "format", "(", "col", "=", "column_name", ",", "fragment", "=", "sql_string_literal", "(", "f", ")", ")", "for", "f", "in", "self", ".", "sql_like_fragments", "]", "return", "\"({})\"", ".", "format", "(", "\" OR \"", ".", "join", "(", "clauses", ")", ")" ]
Get a list of Monitors from the ZoneMinder API .
def get_monitors ( self ) -> List [ Monitor ] : raw_monitors = self . _zm_request ( 'get' , ZoneMinder . MONITOR_URL ) if not raw_monitors : _LOGGER . warning ( "Could not fetch monitors from ZoneMinder" ) return [ ] monitors = [ ] for raw_result in raw_monitors [ 'monitors' ] : _LOGGER . debug ( "Initializing camera %s" , raw_result [ 'Monitor' ] [ 'Id' ] ) monitors . append ( Monitor ( self , raw_result ) ) return monitors
8,659
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L102-L115
[ "def", "sql_column_like_drug", "(", "self", ",", "column_name", ":", "str", ")", "->", "str", ":", "clauses", "=", "[", "\"{col} LIKE {fragment}\"", ".", "format", "(", "col", "=", "column_name", ",", "fragment", "=", "sql_string_literal", "(", "f", ")", ")", "for", "f", "in", "self", ".", "sql_like_fragments", "]", "return", "\"({})\"", ".", "format", "(", "\" OR \"", ".", "join", "(", "clauses", ")", ")" ]
Get a list of RunStates from the ZoneMinder API .
def get_run_states ( self ) -> List [ RunState ] : raw_states = self . get_state ( 'api/states.json' ) if not raw_states : _LOGGER . warning ( "Could not fetch runstates from ZoneMinder" ) return [ ] run_states = [ ] for i in raw_states [ 'states' ] : raw_state = i [ 'State' ] _LOGGER . info ( "Initializing runstate %s" , raw_state [ 'Id' ] ) run_states . append ( RunState ( self , raw_state ) ) return run_states
8,660
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L117-L130
[ "def", "sql_column_like_drug", "(", "self", ",", "column_name", ":", "str", ")", "->", "str", ":", "clauses", "=", "[", "\"{col} LIKE {fragment}\"", ".", "format", "(", "col", "=", "column_name", ",", "fragment", "=", "sql_string_literal", "(", "f", ")", ")", "for", "f", "in", "self", ".", "sql_like_fragments", "]", "return", "\"({})\"", ".", "format", "(", "\" OR \"", ".", "join", "(", "clauses", ")", ")" ]
Get the name of the active run state from the ZoneMinder API .
def get_active_state ( self ) -> Optional [ str ] : for state in self . get_run_states ( ) : if state . active : return state . name return None
8,661
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L132-L137
[ "def", "remove", "(", "self", ",", "email", ")", ":", "if", "email", "in", "self", ".", "_collaborators", ":", "if", "self", ".", "_collaborators", "[", "email", "]", "==", "ShareRequestValue", ".", "Add", ":", "del", "self", ".", "_collaborators", "[", "email", "]", "else", ":", "self", ".", "_collaborators", "[", "email", "]", "=", "ShareRequestValue", ".", "Remove", "self", ".", "_dirty", "=", "True" ]
Set the ZoneMinder run state to the given state name via ZM API .
def set_active_state ( self , state_name ) : _LOGGER . info ( 'Setting ZoneMinder run state to state %s' , state_name ) return self . _zm_request ( 'GET' , 'api/states/change/{}.json' . format ( state_name ) , timeout = 120 )
8,662
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L139-L152
[ "def", "fromtarfile", "(", "cls", ",", "tarfile", ")", ":", "buf", "=", "tarfile", ".", "fileobj", ".", "read", "(", "BLOCKSIZE", ")", "obj", "=", "cls", ".", "frombuf", "(", "buf", ",", "tarfile", ".", "encoding", ",", "tarfile", ".", "errors", ")", "obj", ".", "offset", "=", "tarfile", ".", "fileobj", ".", "tell", "(", ")", "-", "BLOCKSIZE", "return", "obj", ".", "_proc_member", "(", "tarfile", ")" ]
Indicate if this ZoneMinder service is currently available .
def is_available ( self ) -> bool : status_response = self . get_state ( 'api/host/daemonCheck.json' ) if not status_response : return False return status_response . get ( 'result' ) == 1
8,663
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L171-L180
[ "def", "extract", "(", "features", ",", "groups", ",", "weight_method", "=", "default_weight_method", ",", "num_bins", "=", "default_num_bins", ",", "edge_range", "=", "default_edge_range", ",", "trim_outliers", "=", "default_trim_behaviour", ",", "trim_percentile", "=", "default_trim_percentile", ",", "use_original_distribution", "=", "False", ",", "relative_to_all", "=", "False", ",", "asymmetric", "=", "False", ",", "return_networkx_graph", "=", "default_return_networkx_graph", ",", "out_weights_path", "=", "default_out_weights_path", ")", ":", "# parameter check", "features", ",", "groups", ",", "num_bins", ",", "edge_range", ",", "group_ids", ",", "num_groups", ",", "num_links", "=", "check_params", "(", "features", ",", "groups", ",", "num_bins", ",", "edge_range", ",", "trim_outliers", ",", "trim_percentile", ")", "weight_func", ",", "use_orig_distr", ",", "non_symmetric", "=", "check_weight_method", "(", "weight_method", ",", "use_original_distribution", ",", "asymmetric", ")", "# using the same bin edges for all nodes/groups to ensure correspondence", "# NOTE: common bin edges is important for the disances to be any meaningful", "edges", "=", "compute_bin_edges", "(", "features", ",", "num_bins", ",", "edge_range", ",", "trim_outliers", ",", "trim_percentile", ",", "use_orig_distr", ")", "# handling special", "if", "relative_to_all", ":", "result", "=", "non_pairwise", ".", "relative_to_all", "(", "features", ",", "groups", ",", "edges", ",", "weight_func", ",", "use_orig_distr", ",", "group_ids", ",", "num_groups", ",", "return_networkx_graph", ",", "out_weights_path", ")", "else", ":", "result", "=", "pairwise_extract", "(", "features", ",", "groups", ",", "edges", ",", "weight_func", ",", "use_orig_distr", ",", "group_ids", ",", "num_groups", ",", "num_links", ",", "non_symmetric", ",", "return_networkx_graph", ",", "out_weights_path", ")", "# this can be a networkx graph or numpy array depending on request", "return", "result" ]
Build the server url making sure it ends in a trailing slash .
def _build_server_url ( server_host , server_path ) -> str : server_url = urljoin ( server_host , server_path ) if server_url [ - 1 ] == '/' : return server_url return '{}/' . format ( server_url )
8,664
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L193-L198
[ "def", "refreshResults", "(", "self", ")", ":", "if", "(", "self", ".", "currentMode", "(", ")", "==", "XOrbBrowserWidget", ".", "Mode", ".", "Detail", ")", ":", "self", ".", "refreshDetails", "(", ")", "elif", "(", "self", ".", "currentMode", "(", ")", "==", "XOrbBrowserWidget", ".", "Mode", ".", "Card", ")", ":", "self", ".", "refreshCards", "(", ")", "else", ":", "self", ".", "refreshThumbnails", "(", ")" ]
Retrieve the information for a flexscheduleRule entity .
def get ( self , flex_sched_rule_id ) : path = '/' . join ( [ 'flexschedulerule' , flex_sched_rule_id ] ) return self . rachio . get ( path )
8,665
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/flexschedulerule.py#L11-L14
[ "def", "upload_document_fileobj", "(", "file_obj", ",", "file_name", ",", "session", ",", "documents_resource", ",", "log", "=", "None", ")", ":", "try", ":", "fields", "=", "documents_resource", ".", "init_multipart_upload", "(", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "raise_api_error", "(", "e", ".", "response", ",", "state", "=", "\"init\"", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "raise_connectivity_error", "(", "file_name", ")", "s3_upload", "=", "_s3_intermediate_upload", "(", "file_obj", ",", "file_name", ",", "fields", ",", "session", ",", "documents_resource", ".", "_client", ".", "_root_url", "+", "fields", "[", "\"callback_url\"", "]", ",", "# full callback url", ")", "document_id", "=", "s3_upload", ".", "get", "(", "\"document_id\"", ",", "\"<UUID not yet assigned>\"", ")", "logging", ".", "info", "(", "\"{}: finished as document {}\"", ".", "format", "(", "file_name", ",", "document_id", ")", ")", "return", "document_id" ]
Uses the fetch make push subcommands to mirror Project Gutenberg to a github3 api
def upload_all_books ( book_id_start , book_id_end , rdf_library = None ) : # TODO refactor appname into variable logger . info ( "starting a gitberg mass upload: {0} -> {1}" . format ( book_id_start , book_id_end ) ) for book_id in range ( int ( book_id_start ) , int ( book_id_end ) + 1 ) : cache = { } errors = 0 try : if int ( book_id ) in missing_pgid : print ( u'missing\t{}' . format ( book_id ) ) continue upload_book ( book_id , rdf_library = rdf_library , cache = cache ) except Exception as e : print ( u'error\t{}' . format ( book_id ) ) logger . error ( u"Error processing: {}\r{}" . format ( book_id , e ) ) errors += 1 if errors > 10 : print ( 'error limit reached!' ) break
8,666
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/workflow.py#L15-L41
[ "def", "add", "(", "self", ",", "crash", ",", "allow_duplicates", "=", "True", ")", ":", "# Filter out duplicated crashes, if requested.", "if", "not", "allow_duplicates", ":", "signature", "=", "pickle", ".", "dumps", "(", "crash", ".", "signature", ",", "protocol", "=", "0", ")", "if", "self", ".", "_session", ".", "query", "(", "CrashDTO", ".", "id", ")", ".", "filter_by", "(", "signature", "=", "signature", ")", ".", "count", "(", ")", ">", "0", ":", "return", "# Fill out a new row for the crashes table.", "crash_id", "=", "self", ".", "__add_crash", "(", "crash", ")", "# Fill out new rows for the memory dump.", "self", ".", "__add_memory", "(", "crash_id", ",", "crash", ".", "memoryMap", ")", "# On success set the row ID for the Crash object.", "# WARNING: In nested calls, make sure to delete", "# this property before a session rollback!", "crash", ".", "_rowid", "=", "crash_id" ]
Uses the fetch make push subcommands to add a list of pg books
def upload_list ( book_id_list , rdf_library = None ) : with open ( book_id_list , 'r' ) as f : cache = { } for book_id in f : book_id = book_id . strip ( ) try : if int ( book_id ) in missing_pgid : print ( u'missing\t{}' . format ( book_id ) ) continue upload_book ( book_id , rdf_library = rdf_library , cache = cache ) except Exception as e : print ( u'error\t{}' . format ( book_id ) ) logger . error ( u"Error processing: {}\r{}" . format ( book_id , e ) )
8,667
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/workflow.py#L43-L57
[ "def", "_restart_session", "(", "self", ",", "session", ")", ":", "# remove old session key, if socket is None, that means the", "# session was closed by user and there is no need to restart.", "if", "session", ".", "socket", "is", "not", "None", ":", "self", ".", "log", ".", "info", "(", "\"Attempting restart session for Monitor Id %s.\"", "%", "session", ".", "monitor_id", ")", "del", "self", ".", "sessions", "[", "session", ".", "socket", ".", "fileno", "(", ")", "]", "session", ".", "stop", "(", ")", "session", ".", "start", "(", ")", "self", ".", "sessions", "[", "session", ".", "socket", ".", "fileno", "(", ")", "]", "=", "session" ]
Create all translations objects for this Translatable instance .
def translate ( self ) : translations = [ ] for lang in settings . LANGUAGES : # do not create an translations for default language. # we will use the original model for this if lang [ 0 ] == self . _get_default_language ( ) : continue # create translations for all fields of each language if self . translatable_slug is not None : if self . translatable_slug not in self . translatable_fields : self . translatable_fields = self . translatable_fields + ( self . translatable_slug , ) for field in self . translatable_fields : trans , created = Translation . objects . get_or_create ( object_id = self . id , content_type = ContentType . objects . get_for_model ( self ) , field = field , lang = lang [ 0 ] , ) translations . append ( trans ) return translations
8,668
https://github.com/angvp/django-klingon/blob/6716fcb7e98d7d27d41c72c4036d3593f1cc04c2/klingon/models.py#L57-L84
[ "def", "_run_cmdfinalization_hooks", "(", "self", ",", "stop", ":", "bool", ",", "statement", ":", "Optional", "[", "Statement", "]", ")", "->", "bool", ":", "with", "self", ".", "sigint_protection", ":", "if", "not", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", "and", "self", ".", "stdout", ".", "isatty", "(", ")", ":", "# Before the next command runs, fix any terminal problems like those", "# caused by certain binary characters having been printed to it.", "import", "subprocess", "proc", "=", "subprocess", ".", "Popen", "(", "[", "'stty'", ",", "'sane'", "]", ")", "proc", ".", "communicate", "(", ")", "try", ":", "data", "=", "plugin", ".", "CommandFinalizationData", "(", "stop", ",", "statement", ")", "for", "func", "in", "self", ".", "_cmdfinalization_hooks", ":", "data", "=", "func", "(", "data", ")", "# retrieve the final value of stop, ignoring any", "# modifications to the statement", "return", "data", ".", "stop", "except", "Exception", "as", "ex", ":", "self", ".", "perror", "(", "ex", ")" ]
Return the complete list of translation objects of a Translatable instance
def translations_objects ( self , lang ) : return Translation . objects . filter ( object_id = self . id , content_type = ContentType . objects . get_for_model ( self ) , lang = lang )
8,669
https://github.com/angvp/django-klingon/blob/6716fcb7e98d7d27d41c72c4036d3593f1cc04c2/klingon/models.py#L86-L101
[ "def", "weighted_hamming", "(", "b1", ",", "b2", ")", ":", "assert", "(", "len", "(", "b1", ")", "==", "len", "(", "b2", ")", ")", "hamming", "=", "0", "for", "i", "in", "range", "(", "len", "(", "b1", ")", ")", ":", "if", "b1", "[", "i", "]", "!=", "b2", "[", "i", "]", ":", "# differences at more significant (leftward) bits", "# are more important", "if", "i", ">", "0", ":", "hamming", "+=", "1", "+", "1.0", "/", "i", "# This weighting is completely arbitrary", "return", "hamming" ]
Return the list of translation strings of a Translatable instance in a dictionary form
def translations ( self , lang ) : key = self . _get_translations_cache_key ( lang ) trans_dict = cache . get ( key , { } ) if self . translatable_slug is not None : if self . translatable_slug not in self . translatable_fields : self . translatable_fields = self . translatable_fields + ( self . translatable_slug , ) if not trans_dict : for field in self . translatable_fields : # we use get_translation method to be sure that it will # fall back and get the default value if needed trans_dict [ field ] = self . get_translation ( lang , field ) cache . set ( key , trans_dict ) return trans_dict
8,670
https://github.com/angvp/django-klingon/blob/6716fcb7e98d7d27d41c72c4036d3593f1cc04c2/klingon/models.py#L103-L128
[ "def", "weighted_hamming", "(", "b1", ",", "b2", ")", ":", "assert", "(", "len", "(", "b1", ")", "==", "len", "(", "b2", ")", ")", "hamming", "=", "0", "for", "i", "in", "range", "(", "len", "(", "b1", ")", ")", ":", "if", "b1", "[", "i", "]", "!=", "b2", "[", "i", "]", ":", "# differences at more significant (leftward) bits", "# are more important", "if", "i", ">", "0", ":", "hamming", "+=", "1", "+", "1.0", "/", "i", "# This weighting is completely arbitrary", "return", "hamming" ]
Return the translation object of an specific field in a Translatable istance
def get_translation_obj ( self , lang , field , create = False ) : trans = None try : trans = Translation . objects . get ( object_id = self . id , content_type = ContentType . objects . get_for_model ( self ) , lang = lang , field = field , ) except Translation . DoesNotExist : if create : trans = Translation . objects . create ( object_id = self . id , content_type = ContentType . objects . get_for_model ( self ) , lang = lang , field = field , ) return trans
8,671
https://github.com/angvp/django-klingon/blob/6716fcb7e98d7d27d41c72c4036d3593f1cc04c2/klingon/models.py#L130-L160
[ "def", "defaults_scope", "(", "*", "*", "kwargs", ")", ":", "_assert_value_not_string", "(", "'summary_collections'", ",", "kwargs", ")", "_assert_value_not_string", "(", "'variable_collections'", ",", "kwargs", ")", "_check_defaults", "(", "kwargs", ")", "global", "_defaults", "old_defaults", "=", "_defaults", "_defaults", "=", "chain_dict", ".", "ChainDict", "(", "_defaults", ")", "_defaults", ".", "update", "(", "kwargs", ")", "# Special logic to support summary_collections.", "# This is added here because introducing more scopes would add more confusion", "# than overloading this one a bit.", "books", "=", "bookkeeper", ".", "for_default_graph", "(", ")", "if", "'summary_collections'", "in", "_defaults", ":", "books", ".", "summary_collections", "=", "_defaults", "[", "'summary_collections'", "]", "else", ":", "books", ".", "reset_summary_collections", "(", ")", "try", ":", "yield", "_defaults", "finally", ":", "_defaults", "=", "old_defaults" ]
Return the translation string of an specific field in a Translatable istance
def get_translation ( self , lang , field ) : # Read from cache key = self . _get_translation_cache_key ( lang , field ) trans = cache . get ( key , '' ) if not trans : trans_obj = self . get_translation_obj ( lang , field ) trans = getattr ( trans_obj , 'translation' , '' ) # if there's no translation text fall back to the model field if not trans : trans = getattr ( self , field , '' ) # update cache cache . set ( key , trans ) return trans
8,672
https://github.com/angvp/django-klingon/blob/6716fcb7e98d7d27d41c72c4036d3593f1cc04c2/klingon/models.py#L162-L188
[ "def", "open", "(", "self", ",", "file_path", ")", ":", "if", "self", ".", "is_opened", "(", ")", "and", "self", ".", "workbook", ".", "file_path", "==", "file_path", ":", "self", ".", "_logger", ".", "logger", ".", "debug", "(", "\"workbook already opened: {}\"", ".", "format", "(", "self", ".", "workbook", ".", "file_path", ")", ")", "return", "self", ".", "close", "(", ")", "self", ".", "_open", "(", "file_path", ")" ]
Store a translation string in the specified field for a Translatable istance
def set_translation ( self , lang , field , text ) : # Do not allow user to set a translations in the default language auto_slug_obj = None if lang == self . _get_default_language ( ) : raise CanNotTranslate ( _ ( 'You are not supposed to translate the default language. ' 'Use the model fields for translations in default language' ) ) # Get translation, if it does not exits create one trans_obj = self . get_translation_obj ( lang , field , create = True ) trans_obj . translation = text trans_obj . save ( ) # check if the field has an autoslugfield and create the translation if INSTALLED_AUTOSLUG : if self . translatable_slug : try : auto_slug_obj = self . _meta . get_field ( self . translatable_slug ) . populate_from except AttributeError : pass if auto_slug_obj : tobj = self . get_translation_obj ( lang , self . translatable_slug , create = True ) translation = self . get_translation ( lang , auto_slug_obj ) tobj . translation = slugify ( translation ) tobj . save ( ) # Update cache for this specif translations key = self . _get_translation_cache_key ( lang , field ) cache . set ( key , text ) # remove cache for translations dict cache . delete ( self . _get_translations_cache_key ( lang ) ) return trans_obj
8,673
https://github.com/angvp/django-klingon/blob/6716fcb7e98d7d27d41c72c4036d3593f1cc04c2/klingon/models.py#L190-L237
[ "def", "open", "(", "self", ",", "file_path", ")", ":", "if", "self", ".", "is_opened", "(", ")", "and", "self", ".", "workbook", ".", "file_path", "==", "file_path", ":", "self", ".", "_logger", ".", "logger", ".", "debug", "(", "\"workbook already opened: {}\"", ".", "format", "(", "self", ".", "workbook", ".", "file_path", ")", ")", "return", "self", ".", "close", "(", ")", "self", ".", "_open", "(", "file_path", ")" ]
Print on admin change list the link to see all translations for this object
def translations_link ( self ) : translation_type = ContentType . objects . get_for_model ( Translation ) link = urlresolvers . reverse ( 'admin:%s_%s_changelist' % ( translation_type . app_label , translation_type . model ) , ) object_type = ContentType . objects . get_for_model ( self ) link += '?content_type__id__exact=%s&object_id=%s' % ( object_type . id , self . id ) return '<a href="%s">translate</a>' % link
8,674
https://github.com/angvp/django-klingon/blob/6716fcb7e98d7d27d41c72c4036d3593f1cc04c2/klingon/models.py#L239-L254
[ "def", "get_mem", "(", "device_handle", ")", ":", "try", ":", "memory_info", "=", "pynvml", ".", "nvmlDeviceGetMemoryInfo", "(", "device_handle", ")", "return", "memory_info", ".", "used", "*", "100.0", "/", "memory_info", ".", "total", "except", "pynvml", ".", "NVMLError", ":", "return", "None" ]
Comparing old and new object to determin which fields changed how
def comparison_callback ( sender , instance , * * kwargs ) : if validate_instance ( instance ) and settings . AUTOMATED_LOGGING [ 'to_database' ] : try : old = sender . objects . get ( pk = instance . pk ) except Exception : return None try : mdl = ContentType . objects . get_for_model ( instance ) cur , ins = old . __dict__ , instance . __dict__ old , new = { } , { } for k in cur . keys ( ) : # _ fields are not real model fields, only state or cache fields # getting filtered if re . match ( '(_)(.*?)' , k ) : continue changed = False if k in ins . keys ( ) : if cur [ k ] != ins [ k ] : changed = True new [ k ] = ModelObject ( ) new [ k ] . value = str ( ins [ k ] ) new [ k ] . save ( ) try : new [ k ] . type = ContentType . objects . get_for_model ( ins [ k ] ) except Exception : logger = logging . getLogger ( __name__ ) logger . debug ( 'Could not dermin the content type of the field' ) new [ k ] . field = Field . objects . get_or_create ( name = k , model = mdl ) [ 0 ] new [ k ] . save ( ) else : changed = True if changed : old [ k ] = ModelObject ( ) old [ k ] . value = str ( cur [ k ] ) old [ k ] . save ( ) try : old [ k ] . type = ContentType . objects . get_for_model ( cur [ k ] ) except Exception : logger = logging . getLogger ( __name__ ) logger . debug ( 'Could not dermin the content type of the field' ) old [ k ] . field = Field . objects . get_or_create ( name = k , model = mdl ) [ 0 ] old [ k ] . save ( ) if old or new : changelog = ModelChangelog ( ) changelog . save ( ) changelog . modification = ModelModification ( ) changelog . modification . save ( ) changelog . modification . previously . add ( * old . values ( ) ) changelog . modification . currently . add ( * new . values ( ) ) changelog . information = ModelObject ( ) changelog . information . save ( ) changelog . information . value = repr ( instance ) changelog . information . type = ContentType . objects . get_for_model ( instance ) changelog . information . save ( ) changelog . 
save ( ) instance . al_chl = changelog return instance except Exception as e : print ( e ) logger = logging . getLogger ( __name__ ) logger . warning ( 'automated_logging recorded an exception that should not have happended' )
8,675
https://github.com/indietyp/django-automated-logging/blob/095dfc6df62dca45f7db4516bc35e52085d0a01c/automated_logging/signals/database.py#L18-L92
[ "def", "mol_supplier", "(", "lines", ",", "no_halt", ",", "assign_descriptors", ")", ":", "def", "sdf_block", "(", "lns", ")", ":", "mol", "=", "[", "]", "opt", "=", "[", "]", "is_mol", "=", "True", "for", "line", "in", "lns", ":", "if", "line", ".", "startswith", "(", "\"$$$$\"", ")", ":", "yield", "mol", "[", ":", "]", ",", "opt", "[", ":", "]", "is_mol", "=", "True", "mol", ".", "clear", "(", ")", "opt", ".", "clear", "(", ")", "elif", "line", ".", "startswith", "(", "\"M END\"", ")", ":", "is_mol", "=", "False", "elif", "is_mol", ":", "mol", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "else", ":", "opt", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "if", "mol", ":", "yield", "mol", ",", "opt", "for", "i", ",", "(", "mol", ",", "opt", ")", "in", "enumerate", "(", "sdf_block", "(", "lines", ")", ")", ":", "try", ":", "c", "=", "molecule", "(", "mol", ")", "if", "assign_descriptors", ":", "molutil", ".", "assign_descriptors", "(", "c", ")", "except", "ValueError", "as", "err", ":", "if", "no_halt", ":", "print", "(", "\"Unsupported symbol: {} (#{} in v2000reader)\"", ".", "format", "(", "err", ",", "i", "+", "1", ")", ")", "c", "=", "molutil", ".", "null_molecule", "(", "assign_descriptors", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported symbol: {}\"", ".", "format", "(", "err", ")", ")", "except", "RuntimeError", "as", "err", ":", "if", "no_halt", ":", "print", "(", "\"Failed to minimize ring: {} (#{} in v2000reader)\"", ".", "format", "(", "err", ",", "i", "+", "1", ")", ")", "else", ":", "raise", "RuntimeError", "(", "\"Failed to minimize ring: {}\"", ".", "format", "(", "err", ")", ")", "except", ":", "if", "no_halt", ":", "print", "(", "\"Unexpected error (#{} in v2000reader)\"", ".", "format", "(", "i", "+", "1", ")", ")", "c", "=", "molutil", ".", "null_molecule", "(", "assign_descriptors", ")", "c", ".", "data", "=", "optional_data", "(", "opt", ")", "yield", "c", "continue", "else", ":", "print", "(", 
"traceback", ".", "format_exc", "(", ")", ")", "raise", "Exception", "(", "\"Unsupported Error\"", ")", "c", ".", "data", "=", "optional_data", "(", "opt", ")", "yield", "c" ]
Save object & link logging entry
def save_callback ( sender , instance , created , update_fields , * * kwargs ) : if validate_instance ( instance ) : status = 'add' if created is True else 'change' change = '' if status == 'change' and 'al_chl' in instance . __dict__ . keys ( ) : changelog = instance . al_chl . modification change = ' to following changed: {}' . format ( changelog ) processor ( status , sender , instance , update_fields , addition = change )
8,676
https://github.com/indietyp/django-automated-logging/blob/095dfc6df62dca45f7db4516bc35e52085d0a01c/automated_logging/signals/database.py#L97-L107
[ "def", "configure", "(", "self", ",", "max_versions", "=", "10", ",", "cas_required", "=", "None", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'max_versions'", ":", "max_versions", ",", "}", "if", "cas_required", "is", "not", "None", ":", "params", "[", "'cas_required'", "]", "=", "cas_required", "api_path", "=", "'/v1/{mount_point}/config'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
The requires_refcount decorator adds a check prior to call func to verify that there is an active handle . if there is no such handle a NoHandleException exception is thrown .
def requires_refcount ( cls , func ) : @ functools . wraps ( func ) def requires_active_handle ( * args , * * kwargs ) : if cls . refcount ( ) == 0 : raise NoHandleException ( ) # You probably want to encase your code in a 'with LibZFSHandle():' block... return func ( * args , * * kwargs ) return requires_active_handle
8,677
https://github.com/Xaroth/libzfs-python/blob/146e5f28de5971bb6eb64fd82b098c5f302f0b33/libzfs/handle.py#L49-L59
[ "def", "__parse", "(", "self", ",", "aliases", ",", "email_to_employer", ",", "domain_to_employer", ")", ":", "self", ".", "__parse_organizations", "(", "domain_to_employer", ")", "self", ".", "__parse_identities", "(", "aliases", ",", "email_to_employer", ")" ]
The auto decorator wraps func in a context manager so that a handle is obtained .
def auto ( cls , func ) : @ functools . wraps ( func ) def auto_claim_handle ( * args , * * kwargs ) : with cls ( ) : return func ( * args , * * kwargs ) return auto_claim_handle
8,678
https://github.com/Xaroth/libzfs-python/blob/146e5f28de5971bb6eb64fd82b098c5f302f0b33/libzfs/handle.py#L62-L75
[ "def", "create_index", "(", "index_name", ",", "index_config", ",", "client", ")", ":", "client", ".", "create", "(", "index", "=", "index_name", ",", "body", "=", "index_config", ")" ]
Get a GPubsubPublisher client .
def get_gpubsub_publisher ( config , metrics , changes_channel , * * kw ) : builder = gpubsub_publisher . GPubsubPublisherBuilder ( config , metrics , changes_channel , * * kw ) return builder . build_publisher ( )
8,679
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/plugins/janitor/__init__.py#L34-L53
[ "def", "cancel", "(", "self", ")", ":", "self", ".", "_transaction", "=", "False", "try", ":", "cancel", "=", "self", ".", "_con", ".", "cancel", "except", "AttributeError", ":", "pass", "else", ":", "cancel", "(", ")" ]
Get a GDNSReconciler client .
def get_reconciler ( config , metrics , rrset_channel , changes_channel , * * kw ) : builder = reconciler . GDNSReconcilerBuilder ( config , metrics , rrset_channel , changes_channel , * * kw ) return builder . build_reconciler ( )
8,680
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/plugins/janitor/__init__.py#L56-L77
[ "def", "chdir", "(", "directory", ")", ":", "cur", "=", "os", ".", "getcwd", "(", ")", "try", ":", "yield", "os", ".", "chdir", "(", "directory", ")", "finally", ":", "os", ".", "chdir", "(", "cur", ")" ]
Get a GCEAuthority client .
def get_authority ( config , metrics , rrset_channel , * * kwargs ) : builder = authority . GCEAuthorityBuilder ( config , metrics , rrset_channel , * * kwargs ) return builder . build_authority ( )
8,681
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/plugins/janitor/__init__.py#L80-L98
[ "def", "read", "(", "self", ",", "filename", ",", "rowprefix", "=", "None", ",", "colprefix", "=", "None", ",", "delim", "=", "\":\"", ")", ":", "self", ".", "matrix", "=", "scipy", ".", "io", ".", "mmread", "(", "filename", ")", "with", "open", "(", "filename", "+", "\".rownames\"", ")", "as", "in_handle", ":", "self", ".", "rownames", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "in_handle", "]", "if", "rowprefix", ":", "self", ".", "rownames", "=", "[", "rowprefix", "+", "delim", "+", "x", "for", "x", "in", "self", ".", "rownames", "]", "with", "open", "(", "filename", "+", "\".colnames\"", ")", "as", "in_handle", ":", "self", ".", "colnames", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "in_handle", "]", "if", "colprefix", ":", "self", ".", "colnames", "=", "[", "colprefix", "+", "delim", "+", "x", "for", "x", "in", "self", ".", "colnames", "]" ]
Refresh oauth access token attached to this HTTP session .
async def refresh_token ( self ) : url , headers , body = self . _setup_token_request ( ) request_id = uuid . uuid4 ( ) logging . debug ( _utils . REQ_LOG_FMT . format ( request_id = request_id , method = 'POST' , url = url , kwargs = None ) ) async with self . _session . post ( url , headers = headers , data = body ) as resp : log_kw = { 'request_id' : request_id , 'method' : 'POST' , 'url' : resp . url , 'status' : resp . status , 'reason' : resp . reason , } logging . debug ( _utils . RESP_LOG_FMT . format ( * * log_kw ) ) # avoid leaky abstractions and wrap http errors with our own try : resp . raise_for_status ( ) except aiohttp . ClientResponseError as e : msg = f'[{request_id}] Issue connecting to {resp.url}: {e}' logging . error ( msg , exc_info = e ) raise exceptions . GCPHTTPResponseError ( msg , resp . status ) response = await resp . json ( ) try : self . token = response [ 'access_token' ] except KeyError : msg = '[{request_id}] No access token in response.' logging . error ( msg ) raise exceptions . GCPAuthError ( msg ) self . expiry = _client . _parse_expiry ( response )
8,682
https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/auth.py#L168-L208
[ "def", "_get_starting_paths", "(", "self", ",", "curdir", ")", ":", "results", "=", "[", "curdir", "]", "if", "not", "self", ".", "_is_parent", "(", "curdir", ")", "and", "not", "self", ".", "_is_this", "(", "curdir", ")", ":", "fullpath", "=", "os", ".", "path", ".", "abspath", "(", "curdir", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "fullpath", ")", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "fullpath", ")", "if", "basename", ":", "matcher", "=", "self", ".", "_get_matcher", "(", "basename", ")", "results", "=", "[", "os", ".", "path", ".", "basename", "(", "name", ")", "for", "name", "in", "self", ".", "_glob_dir", "(", "dirname", ",", "matcher", ",", "self", ")", "]", "return", "results" ]
Retrieve the information for a device entity .
def get ( self , dev_id ) : path = '/' . join ( [ 'device' , dev_id ] ) return self . rachio . get ( path )
8,683
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L11-L14
[ "def", "fill_sampling", "(", "slice_list", ",", "N", ")", ":", "A", "=", "[", "len", "(", "s", ".", "inliers", ")", "for", "s", "in", "slice_list", "]", "N_max", "=", "np", ".", "sum", "(", "A", ")", "if", "N", ">", "N_max", ":", "raise", "ValueError", "(", "\"Tried to draw {:d} samples from a pool of only {:d} items\"", ".", "format", "(", "N", ",", "N_max", ")", ")", "samples_from", "=", "np", ".", "zeros", "(", "(", "len", "(", "A", ")", ",", ")", ",", "dtype", "=", "'int'", ")", "# Number of samples to draw from each group", "remaining", "=", "N", "while", "remaining", ">", "0", ":", "remaining_groups", "=", "np", ".", "flatnonzero", "(", "samples_from", "-", "np", ".", "array", "(", "A", ")", ")", "if", "remaining", "<", "len", "(", "remaining_groups", ")", ":", "np", ".", "random", ".", "shuffle", "(", "remaining_groups", ")", "for", "g", "in", "remaining_groups", "[", ":", "remaining", "]", ":", "samples_from", "[", "g", "]", "+=", "1", "else", ":", "# Give each group the allowed number of samples. Constrain to their max size.", "to_each", "=", "max", "(", "1", ",", "int", "(", "remaining", "/", "len", "(", "remaining_groups", ")", ")", ")", "samples_from", "=", "np", ".", "min", "(", "np", ".", "vstack", "(", "(", "samples_from", "+", "to_each", ",", "A", ")", ")", ",", "axis", "=", "0", ")", "# Update remaining count", "remaining", "=", "int", "(", "N", "-", "np", ".", "sum", "(", "samples_from", ")", ")", "if", "not", "remaining", "==", "0", ":", "raise", "ValueError", "(", "\"Still {:d} samples left! 
This is an error in the selection.\"", ")", "# Construct index list of selected samples", "samples", "=", "[", "]", "for", "s", ",", "a", ",", "n", "in", "zip", "(", "slice_list", ",", "A", ",", "samples_from", ")", ":", "if", "a", "==", "n", ":", "samples", ".", "append", "(", "np", ".", "array", "(", "s", ".", "inliers", ")", ")", "# all", "elif", "a", "==", "0", ":", "samples", ".", "append", "(", "np", ".", "arange", "(", "[", "]", ")", ")", "else", ":", "chosen", "=", "np", ".", "random", ".", "choice", "(", "s", ".", "inliers", ",", "n", ",", "replace", "=", "False", ")", "samples", ".", "append", "(", "np", ".", "array", "(", "chosen", ")", ")", "return", "samples" ]
Retrieve events for a device entity .
def getEvent ( self , dev_id , starttime , endtime ) : path = 'device/%s/event?startTime=%s&endTime=%s' % ( dev_id , starttime , endtime ) return self . rachio . get ( path )
8,684
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L21-L25
[ "def", "load_local_file", "(", "self", ",", "file_path", ",", "namespace", "=", "None", ",", "graph", "=", "None", ",", "*", "*", "kwargs", ")", ":", "time_start", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "url", "=", "self", ".", "_make_url", "(", "namespace", ")", "params", "=", "{", "}", "if", "graph", ":", "params", "[", "'context-uri'", "]", "=", "graph", "new_path", "=", "[", "]", "container_dir", "=", "pick", "(", "kwargs", ".", "get", "(", "'container_dir'", ")", ",", "self", ".", "container_dir", ")", "if", "container_dir", ":", "new_path", ".", "append", "(", "self", ".", "container_dir", ")", "new_path", ".", "append", "(", "file_path", ")", "params", "[", "'uri'", "]", "=", "\"file:///%s\"", "%", "os", ".", "path", ".", "join", "(", "*", "new_path", ")", "log", ".", "debug", "(", "\" loading %s into blazegraph\"", ",", "file_path", ")", "result", "=", "requests", ".", "post", "(", "url", "=", "url", ",", "params", "=", "params", ")", "if", "result", ".", "status_code", ">", "300", ":", "raise", "SyntaxError", "(", "result", ".", "text", ")", "log", ".", "info", "(", "\"loaded '%s' in time: %s blazegraph response: %s\"", ",", "file_path", ",", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "time_start", ",", "self", ".", "format_response", "(", "result", ".", "text", ")", ")", "return", "result" ]
Retrieve current and predicted forecast .
def getForecast ( self , dev_id , units ) : assert units in [ 'US' , 'METRIC' ] , 'units must be either US or METRIC' path = 'device/%s/forecast?units=%s' % ( dev_id , units ) return self . rachio . get ( path )
8,685
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L32-L36
[ "def", "replace_postgres_db", "(", "self", ",", "file_url", ")", ":", "self", ".", "print_message", "(", "\"Replacing postgres database\"", ")", "if", "file_url", ":", "self", ".", "print_message", "(", "\"Sourcing data from online backup file '%s'\"", "%", "file_url", ")", "source_file", "=", "self", ".", "download_file_from_url", "(", "self", ".", "args", ".", "source_app", ",", "file_url", ")", "elif", "self", ".", "databases", "[", "'source'", "]", "[", "'name'", "]", ":", "self", ".", "print_message", "(", "\"Sourcing data from database '%s'\"", "%", "self", ".", "databases", "[", "'source'", "]", "[", "'name'", "]", ")", "source_file", "=", "self", ".", "dump_database", "(", ")", "else", ":", "self", ".", "print_message", "(", "\"Sourcing data from local backup file %s\"", "%", "self", ".", "args", ".", "file", ")", "source_file", "=", "self", ".", "args", ".", "file", "self", ".", "drop_database", "(", ")", "self", ".", "create_database", "(", ")", "source_file", "=", "self", ".", "unzip_file_if_necessary", "(", "source_file", ")", "self", ".", "print_message", "(", "\"Importing '%s' into database '%s'\"", "%", "(", "source_file", ",", "self", ".", "databases", "[", "'destination'", "]", "[", "'name'", "]", ")", ")", "args", "=", "[", "\"pg_restore\"", ",", "\"--no-acl\"", ",", "\"--no-owner\"", ",", "\"--dbname=%s\"", "%", "self", ".", "databases", "[", "'destination'", "]", "[", "'name'", "]", ",", "source_file", ",", "]", "args", ".", "extend", "(", "self", ".", "databases", "[", "'destination'", "]", "[", "'args'", "]", ")", "subprocess", ".", "check_call", "(", "args", ")" ]
Stop all watering on device .
def stopWater ( self , dev_id ) : path = 'device/stop_water' payload = { 'id' : dev_id } return self . rachio . put ( path , payload )
8,686
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L38-L42
[ "def", "interpolate_string", "(", "self", ",", "testString", ",", "section", ")", ":", "# First check if any interpolation is needed and abort if not", "reObj", "=", "re", ".", "search", "(", "r\"\\$\\{.*?\\}\"", ",", "testString", ")", "while", "reObj", ":", "# Not really sure how this works, but this will obtain the first", "# instance of a string contained within ${....}", "repString", "=", "(", "reObj", ")", ".", "group", "(", "0", ")", "[", "2", ":", "-", "1", "]", "# Need to test which of the two formats we have", "splitString", "=", "repString", ".", "split", "(", "'|'", ")", "if", "len", "(", "splitString", ")", "==", "1", ":", "try", ":", "testString", "=", "testString", ".", "replace", "(", "'${'", "+", "repString", "+", "'}'", ",", "self", ".", "get", "(", "section", ",", "splitString", "[", "0", "]", ")", ")", "except", "ConfigParser", ".", "NoOptionError", ":", "print", "(", "\"Substitution failed\"", ")", "raise", "if", "len", "(", "splitString", ")", "==", "2", ":", "try", ":", "testString", "=", "testString", ".", "replace", "(", "'${'", "+", "repString", "+", "'}'", ",", "self", ".", "get", "(", "splitString", "[", "0", "]", ",", "splitString", "[", "1", "]", ")", ")", "except", "ConfigParser", ".", "NoOptionError", ":", "print", "(", "\"Substitution failed\"", ")", "raise", "reObj", "=", "re", ".", "search", "(", "r\"\\$\\{.*?\\}\"", ",", "testString", ")", "return", "testString" ]
Rain delay device .
def rainDelay ( self , dev_id , duration ) : path = 'device/rain_delay' payload = { 'id' : dev_id , 'duration' : duration } return self . rachio . put ( path , payload )
8,687
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L44-L48
[ "def", "updateItem", "(", "self", ",", "itemParameters", ",", "clearEmptyFields", "=", "False", ",", "data", "=", "None", ",", "metadata", "=", "None", ",", "text", "=", "None", ",", "serviceUrl", "=", "None", ",", "multipart", "=", "False", ")", ":", "thumbnail", "=", "None", "largeThumbnail", "=", "None", "files", "=", "{", "}", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "}", "if", "clearEmptyFields", ":", "params", "[", "\"clearEmptyFields\"", "]", "=", "clearEmptyFields", "if", "serviceUrl", "is", "not", "None", ":", "params", "[", "'url'", "]", "=", "serviceUrl", "if", "text", "is", "not", "None", ":", "params", "[", "'text'", "]", "=", "text", "if", "isinstance", "(", "itemParameters", ",", "ItemParameter", ")", "==", "False", ":", "raise", "AttributeError", "(", "\"itemParameters must be of type parameter.ItemParameter\"", ")", "keys_to_delete", "=", "[", "'id'", ",", "'owner'", ",", "'size'", ",", "'numComments'", ",", "'numRatings'", ",", "'avgRating'", ",", "'numViews'", ",", "'overwrite'", "]", "dictItem", "=", "itemParameters", ".", "value", "for", "key", "in", "keys_to_delete", ":", "if", "key", "in", "dictItem", ":", "del", "dictItem", "[", "key", "]", "for", "key", "in", "dictItem", ":", "if", "key", "==", "\"thumbnail\"", ":", "files", "[", "'thumbnail'", "]", "=", "dictItem", "[", "'thumbnail'", "]", "elif", "key", "==", "\"largeThumbnail\"", ":", "files", "[", "'largeThumbnail'", "]", "=", "dictItem", "[", "'largeThumbnail'", "]", "elif", "key", "==", "\"metadata\"", ":", "metadata", "=", "dictItem", "[", "'metadata'", "]", "if", "os", ".", "path", ".", "basename", "(", "metadata", ")", "!=", "'metadata.xml'", ":", "tempxmlfile", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "\"metadata.xml\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "tempxmlfile", ")", "==", "True", ":", "os", ".", "remove", "(", "tempxmlfile", ")", "import", "shutil", "shutil", ".", "copy", "(", "metadata", ",", 
"tempxmlfile", ")", "metadata", "=", "tempxmlfile", "files", "[", "'metadata'", "]", "=", "dictItem", "[", "'metadata'", "]", "else", ":", "params", "[", "key", "]", "=", "dictItem", "[", "key", "]", "if", "data", "is", "not", "None", ":", "files", "[", "'file'", "]", "=", "data", "if", "metadata", "and", "os", ".", "path", ".", "isfile", "(", "metadata", ")", ":", "files", "[", "'metadata'", "]", "=", "metadata", "url", "=", "\"%s/update\"", "%", "self", ".", "root", "if", "multipart", ":", "itemID", "=", "self", ".", "id", "params", "[", "'multipart'", "]", "=", "True", "params", "[", "'fileName'", "]", "=", "os", ".", "path", ".", "basename", "(", "data", ")", "res", "=", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "itemPartJSON", "=", "self", ".", "addByPart", "(", "filePath", "=", "data", ")", "res", "=", "self", ".", "commit", "(", "wait", "=", "True", ",", "additionalParams", "=", "{", "'type'", ":", "self", ".", "type", "}", ")", "else", ":", "res", "=", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "files", "=", "files", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "force_form_post", "=", "True", ")", "self", ".", "__init", "(", ")", "return", "self" ]
Turn ON all features of the device .
def on ( self , dev_id ) : path = 'device/on' payload = { 'id' : dev_id } return self . rachio . put ( path , payload )
8,688
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L50-L57
[ "def", "_attempt_resumable_upload", "(", "self", ",", "key", ",", "fp", ",", "file_length", ",", "headers", ",", "cb", ",", "num_cb", ")", ":", "(", "server_start", ",", "server_end", ")", "=", "self", ".", "SERVER_HAS_NOTHING", "conn", "=", "key", ".", "bucket", ".", "connection", "if", "self", ".", "tracker_uri", ":", "# Try to resume existing resumable upload.", "try", ":", "(", "server_start", ",", "server_end", ")", "=", "(", "self", ".", "_query_server_pos", "(", "conn", ",", "file_length", ")", ")", "self", ".", "server_has_bytes", "=", "server_start", "key", "=", "key", "if", "conn", ".", "debug", ">=", "1", ":", "print", "'Resuming transfer.'", "except", "ResumableUploadException", ",", "e", ":", "if", "conn", ".", "debug", ">=", "1", ":", "print", "'Unable to resume transfer (%s).'", "%", "e", ".", "message", "self", ".", "_start_new_resumable_upload", "(", "key", ",", "headers", ")", "else", ":", "self", ".", "_start_new_resumable_upload", "(", "key", ",", "headers", ")", "# upload_start_point allows the code that instantiated the", "# ResumableUploadHandler to find out the point from which it started", "# uploading (e.g., so it can correctly compute throughput).", "if", "self", ".", "upload_start_point", "is", "None", ":", "self", ".", "upload_start_point", "=", "server_end", "if", "server_end", "==", "file_length", ":", "# Boundary condition: complete file was already uploaded (e.g.,", "# user interrupted a previous upload attempt after the upload", "# completed but before the gsutil tracker file was deleted). 
Set", "# total_bytes_uploaded to server_end so we'll attempt to upload", "# no more bytes but will still make final HTTP request and get", "# back the response (which contains the etag we need to compare", "# at the end).", "total_bytes_uploaded", "=", "server_end", "else", ":", "total_bytes_uploaded", "=", "server_end", "+", "1", "fp", ".", "seek", "(", "total_bytes_uploaded", ")", "conn", "=", "key", ".", "bucket", ".", "connection", "# Get a new HTTP connection (vs conn.get_http_connection(), which reuses", "# pool connections) because httplib requires a new HTTP connection per", "# transaction. (Without this, calling http_conn.getresponse() would get", "# \"ResponseNotReady\".)", "http_conn", "=", "conn", ".", "new_http_connection", "(", "self", ".", "tracker_uri_host", ",", "conn", ".", "is_secure", ")", "http_conn", ".", "set_debuglevel", "(", "conn", ".", "debug", ")", "# Make sure to close http_conn at end so if a local file read", "# failure occurs partway through server will terminate current upload", "# and can report that progress on next attempt.", "try", ":", "return", "self", ".", "_upload_file_bytes", "(", "conn", ",", "http_conn", ",", "fp", ",", "file_length", ",", "total_bytes_uploaded", ",", "cb", ",", "num_cb", ")", "except", "(", "ResumableUploadException", ",", "socket", ".", "error", ")", ":", "resp", "=", "self", ".", "_query_server_state", "(", "conn", ",", "file_length", ")", "if", "resp", ".", "status", "==", "400", ":", "raise", "ResumableUploadException", "(", "'Got 400 response from server '", "'state query after failed resumable upload attempt. This '", "'can happen for various reasons, including specifying an '", "'invalid request (e.g., an invalid canned ACL) or if the '", "'file size changed between upload attempts'", ",", "ResumableTransferDisposition", ".", "ABORT", ")", "else", ":", "raise", "finally", ":", "http_conn", ".", "close", "(", ")" ]
Turn OFF all features of the device .
def off ( self , dev_id ) : path = 'device/off' payload = { 'id' : dev_id } return self . rachio . put ( path , payload )
8,689
https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L59-L66
[ "def", "_attempt_resumable_upload", "(", "self", ",", "key", ",", "fp", ",", "file_length", ",", "headers", ",", "cb", ",", "num_cb", ")", ":", "(", "server_start", ",", "server_end", ")", "=", "self", ".", "SERVER_HAS_NOTHING", "conn", "=", "key", ".", "bucket", ".", "connection", "if", "self", ".", "tracker_uri", ":", "# Try to resume existing resumable upload.", "try", ":", "(", "server_start", ",", "server_end", ")", "=", "(", "self", ".", "_query_server_pos", "(", "conn", ",", "file_length", ")", ")", "self", ".", "server_has_bytes", "=", "server_start", "key", "=", "key", "if", "conn", ".", "debug", ">=", "1", ":", "print", "'Resuming transfer.'", "except", "ResumableUploadException", ",", "e", ":", "if", "conn", ".", "debug", ">=", "1", ":", "print", "'Unable to resume transfer (%s).'", "%", "e", ".", "message", "self", ".", "_start_new_resumable_upload", "(", "key", ",", "headers", ")", "else", ":", "self", ".", "_start_new_resumable_upload", "(", "key", ",", "headers", ")", "# upload_start_point allows the code that instantiated the", "# ResumableUploadHandler to find out the point from which it started", "# uploading (e.g., so it can correctly compute throughput).", "if", "self", ".", "upload_start_point", "is", "None", ":", "self", ".", "upload_start_point", "=", "server_end", "if", "server_end", "==", "file_length", ":", "# Boundary condition: complete file was already uploaded (e.g.,", "# user interrupted a previous upload attempt after the upload", "# completed but before the gsutil tracker file was deleted). 
Set", "# total_bytes_uploaded to server_end so we'll attempt to upload", "# no more bytes but will still make final HTTP request and get", "# back the response (which contains the etag we need to compare", "# at the end).", "total_bytes_uploaded", "=", "server_end", "else", ":", "total_bytes_uploaded", "=", "server_end", "+", "1", "fp", ".", "seek", "(", "total_bytes_uploaded", ")", "conn", "=", "key", ".", "bucket", ".", "connection", "# Get a new HTTP connection (vs conn.get_http_connection(), which reuses", "# pool connections) because httplib requires a new HTTP connection per", "# transaction. (Without this, calling http_conn.getresponse() would get", "# \"ResponseNotReady\".)", "http_conn", "=", "conn", ".", "new_http_connection", "(", "self", ".", "tracker_uri_host", ",", "conn", ".", "is_secure", ")", "http_conn", ".", "set_debuglevel", "(", "conn", ".", "debug", ")", "# Make sure to close http_conn at end so if a local file read", "# failure occurs partway through server will terminate current upload", "# and can report that progress on next attempt.", "try", ":", "return", "self", ".", "_upload_file_bytes", "(", "conn", ",", "http_conn", ",", "fp", ",", "file_length", ",", "total_bytes_uploaded", ",", "cb", ",", "num_cb", ")", "except", "(", "ResumableUploadException", ",", "socket", ".", "error", ")", ":", "resp", "=", "self", ".", "_query_server_state", "(", "conn", ",", "file_length", ")", "if", "resp", ".", "status", "==", "400", ":", "raise", "ResumableUploadException", "(", "'Got 400 response from server '", "'state query after failed resumable upload attempt. This '", "'can happen for various reasons, including specifying an '", "'invalid request (e.g., an invalid canned ACL) or if the '", "'file size changed between upload attempts'", ",", "ResumableTransferDisposition", ".", "ABORT", ")", "else", ":", "raise", "finally", ":", "http_conn", ".", "close", "(", ")" ]
Create a BIP0032 - style hierarchical wallet .
def create_wallet ( self , master_secret = b"" ) : master_secret = deserialize . bytes_str ( master_secret ) bip32node = control . create_wallet ( self . testnet , master_secret = master_secret ) return bip32node . hwif ( as_private = True )
8,690
https://github.com/F483/btctxstore/blob/5790ace3a3d4c9bcc759e7c931fc4a57d40b6c25/btctxstore/api.py#L33-L41
[ "def", "add_quantity_modifier", "(", "self", ",", "quantity", ",", "modifier", ",", "overwrite", "=", "False", ")", ":", "if", "quantity", "in", "self", ".", "_quantity_modifiers", "and", "not", "overwrite", ":", "raise", "ValueError", "(", "'quantity `{}` already exists'", ".", "format", "(", "quantity", ")", ")", "self", ".", "_quantity_modifiers", "[", "quantity", "]", "=", "modifier", "self", ".", "_check_quantities_exist", "(", "[", "quantity", "]", ",", "raise_exception", "=", "False", ")" ]
Create new private key and return in wif format .
def create_key ( self , master_secret = b"" ) : master_secret = deserialize . bytes_str ( master_secret ) bip32node = control . create_wallet ( self . testnet , master_secret = master_secret ) return bip32node . wif ( )
8,691
https://github.com/F483/btctxstore/blob/5790ace3a3d4c9bcc759e7c931fc4a57d40b6c25/btctxstore/api.py#L54-L62
[ "def", "make_random_models_table", "(", "n_sources", ",", "param_ranges", ",", "random_state", "=", "None", ")", ":", "prng", "=", "check_random_state", "(", "random_state", ")", "sources", "=", "Table", "(", ")", "for", "param_name", ",", "(", "lower", ",", "upper", ")", "in", "param_ranges", ".", "items", "(", ")", ":", "# Generate a column for every item in param_ranges, even if it", "# is not in the model (e.g. flux). However, such columns will", "# be ignored when rendering the image.", "sources", "[", "param_name", "]", "=", "prng", ".", "uniform", "(", "lower", ",", "upper", ",", "n_sources", ")", "return", "sources" ]
Returns number of confirms or None if unpublished .
def confirms ( self , txid ) : txid = deserialize . txid ( txid ) return self . service . confirms ( txid )
8,692
https://github.com/F483/btctxstore/blob/5790ace3a3d4c9bcc759e7c931fc4a57d40b6c25/btctxstore/api.py#L328-L331
[ "def", "CanonicalPathToLocalPath", "(", "path", ")", ":", "# Account for raw devices", "path", "=", "path", ".", "replace", "(", "\"/\\\\\"", ",", "\"\\\\\"", ")", "path", "=", "path", ".", "replace", "(", "\"/\"", ",", "\"\\\\\"", ")", "m", "=", "re", ".", "match", "(", "r\"\\\\([a-zA-Z]):(.*)$\"", ",", "path", ")", "if", "m", ":", "path", "=", "\"%s:\\\\%s\"", "%", "(", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "2", ")", ".", "lstrip", "(", "\"\\\\\"", ")", ")", "return", "path" ]
Get the corresponding TimePeriod from the value .
def get_time_period ( value ) : for time_period in TimePeriod : if time_period . period == value : return time_period raise ValueError ( '{} is not a valid TimePeriod' . format ( value ) )
8,693
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L41-L49
[ "def", "validate_categories_equal_entities", "(", "categories", ",", "entities", ",", "message", "=", "None", ",", "exception", "=", "MatrixError", ")", ":", "nb_categories", "=", "len", "(", "categories", ")", "nb_entities", "=", "len", "(", "entities", ")", "if", "message", "is", "None", ":", "message", "=", "'Number of categories: %s != number of entities: %s'", "%", "(", "nb_categories", ",", "nb_entities", ")", "if", "categories", "and", "nb_categories", "!=", "nb_entities", ":", "raise", "exception", "(", "message", ")" ]
Update the monitor and monitor status from the ZM server .
def update_monitor ( self ) : result = self . _client . get_state ( self . _monitor_url ) self . _raw_result = result [ 'monitor' ]
8,694
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L86-L89
[ "async", "def", "issueClaims", "(", "self", ",", "allClaimRequest", ":", "Dict", "[", "ID", ",", "ClaimRequest", "]", ")", "->", "Dict", "[", "ID", ",", "Claims", "]", ":", "res", "=", "{", "}", "for", "schemaId", ",", "claimReq", "in", "allClaimRequest", ".", "items", "(", ")", ":", "res", "[", "schemaId", "]", "=", "await", "self", ".", "issueClaim", "(", "schemaId", ",", "claimReq", ")", "return", "res" ]
Set the MonitorState of this Monitor .
def function ( self , new_function ) : self . _client . change_state ( self . _monitor_url , { 'Monitor[Function]' : new_function . value } )
8,695
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L99-L103
[ "def", "_calculate_unpack_filter", "(", "cls", ",", "includes", "=", "None", ",", "excludes", "=", "None", ",", "spec", "=", "None", ")", ":", "include_patterns", "=", "cls", ".", "compile_patterns", "(", "includes", "or", "[", "]", ",", "field_name", "=", "'include_patterns'", ",", "spec", "=", "spec", ")", "logger", ".", "debug", "(", "'include_patterns: {}'", ".", "format", "(", "list", "(", "p", ".", "pattern", "for", "p", "in", "include_patterns", ")", ")", ")", "exclude_patterns", "=", "cls", ".", "compile_patterns", "(", "excludes", "or", "[", "]", ",", "field_name", "=", "'exclude_patterns'", ",", "spec", "=", "spec", ")", "logger", ".", "debug", "(", "'exclude_patterns: {}'", ".", "format", "(", "list", "(", "p", ".", "pattern", "for", "p", "in", "exclude_patterns", ")", ")", ")", "return", "lambda", "f", ":", "cls", ".", "_file_filter", "(", "f", ",", "include_patterns", ",", "exclude_patterns", ")" ]
Indicate if this Monitor is currently recording .
def is_recording ( self ) -> Optional [ bool ] : status_response = self . _client . get_state ( 'api/monitors/alarm/id:{}/command:status.json' . format ( self . _monitor_id ) ) if not status_response : _LOGGER . warning ( 'Could not get status for monitor {}' . format ( self . _monitor_id ) ) return None status = status_response . get ( 'status' ) # ZoneMinder API returns an empty string to indicate that this monitor # cannot record right now if status == '' : return False return int ( status ) == STATE_ALARM
8,696
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L121-L140
[ "def", "readImages", "(", "self", ",", "path", ",", "recursive", "=", "False", ",", "numPartitions", "=", "-", "1", ",", "dropImageFailures", "=", "False", ",", "sampleRatio", "=", "1.0", ",", "seed", "=", "0", ")", ":", "warnings", ".", "warn", "(", "\"`ImageSchema.readImage` is deprecated. \"", "+", "\"Use `spark.read.format(\\\"image\\\").load(path)` instead.\"", ",", "DeprecationWarning", ")", "spark", "=", "SparkSession", ".", "builder", ".", "getOrCreate", "(", ")", "image_schema", "=", "spark", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "ml", ".", "image", ".", "ImageSchema", "jsession", "=", "spark", ".", "_jsparkSession", "jresult", "=", "image_schema", ".", "readImages", "(", "path", ",", "jsession", ",", "recursive", ",", "numPartitions", ",", "dropImageFailures", ",", "float", "(", "sampleRatio", ")", ",", "seed", ")", "return", "DataFrame", "(", "jresult", ",", "spark", ".", "_wrapped", ")" ]
Indicate if this Monitor is currently available .
def is_available ( self ) -> bool : status_response = self . _client . get_state ( 'api/monitors/daemonStatus/id:{}/daemon:zmc.json' . format ( self . _monitor_id ) ) if not status_response : _LOGGER . warning ( 'Could not get availability for monitor {}' . format ( self . _monitor_id ) ) return False # Monitor_Status was only added in ZM 1.32.3 monitor_status = self . _raw_result . get ( 'Monitor_Status' , None ) capture_fps = monitor_status and monitor_status [ 'CaptureFPS' ] return status_response . get ( 'status' , False ) and capture_fps != "0.00"
8,697
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L143-L161
[ "def", "_calculate_unpack_filter", "(", "cls", ",", "includes", "=", "None", ",", "excludes", "=", "None", ",", "spec", "=", "None", ")", ":", "include_patterns", "=", "cls", ".", "compile_patterns", "(", "includes", "or", "[", "]", ",", "field_name", "=", "'include_patterns'", ",", "spec", "=", "spec", ")", "logger", ".", "debug", "(", "'include_patterns: {}'", ".", "format", "(", "list", "(", "p", ".", "pattern", "for", "p", "in", "include_patterns", ")", ")", ")", "exclude_patterns", "=", "cls", ".", "compile_patterns", "(", "excludes", "or", "[", "]", ",", "field_name", "=", "'exclude_patterns'", ",", "spec", "=", "spec", ")", "logger", ".", "debug", "(", "'exclude_patterns: {}'", ".", "format", "(", "list", "(", "p", ".", "pattern", "for", "p", "in", "exclude_patterns", ")", ")", ")", "return", "lambda", "f", ":", "cls", ".", "_file_filter", "(", "f", ",", "include_patterns", ",", "exclude_patterns", ")" ]
Get the number of events that have occurred on this Monitor .
def get_events ( self , time_period , include_archived = False ) -> Optional [ int ] : date_filter = '1%20{}' . format ( time_period . period ) if time_period == TimePeriod . ALL : # The consoleEvents API uses DATE_SUB, so give it # something large date_filter = '100%20year' archived_filter = '/Archived=:0' if include_archived : archived_filter = '' event = self . _client . get_state ( 'api/events/consoleEvents/{}{}.json' . format ( date_filter , archived_filter ) ) try : events_by_monitor = event [ 'results' ] if isinstance ( events_by_monitor , list ) : return 0 return events_by_monitor . get ( str ( self . _monitor_id ) , 0 ) except ( TypeError , KeyError , AttributeError ) : return None
8,698
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L163-L192
[ "def", "write", "(", "self", ",", "df", ",", "table_name", ",", "temp_dir", "=", "CACHE_DIR", ",", "overwrite", "=", "False", ",", "lnglat", "=", "None", ",", "encode_geom", "=", "False", ",", "geom_col", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# noqa", "tqdm", ".", "write", "(", "'Params: encode_geom, geom_col and everything in kwargs are deprecated and not being used any more'", ")", "dataset", "=", "Dataset", "(", "self", ",", "table_name", ",", "df", "=", "df", ")", "if_exists", "=", "Dataset", ".", "FAIL", "if", "overwrite", ":", "if_exists", "=", "Dataset", ".", "REPLACE", "dataset", "=", "dataset", ".", "upload", "(", "with_lonlat", "=", "lnglat", ",", "if_exists", "=", "if_exists", ")", "tqdm", ".", "write", "(", "'Table successfully written to CARTO: {table_url}'", ".", "format", "(", "table_url", "=", "utils", ".", "join_url", "(", "self", ".", "creds", ".", "base_url", "(", ")", ",", "'dataset'", ",", "dataset", ".", "table_name", ")", ")", ")", "return", "dataset" ]
Build and return a ZoneMinder camera image url .
def _build_image_url ( self , monitor , mode ) -> str : query = urlencode ( { 'mode' : mode , 'buffer' : monitor [ 'StreamReplayBuffer' ] , 'monitor' : monitor [ 'Id' ] , } ) url = '{zms_url}?{query}' . format ( zms_url = self . _client . get_zms_url ( ) , query = query ) _LOGGER . debug ( 'Monitor %s %s URL (without auth): %s' , monitor [ 'Id' ] , mode , url ) return self . _client . get_url_with_auth ( url )
8,699
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L194-L205
[ "def", "register_flags", "(", "self", ",", "flags", ")", ":", "self", ".", "_CONSUMED_FLAGS", ".", "update", "(", "flags", ")", "self", ".", "_flags", ".", "update", "(", "flags", ")" ]