query
stringlengths 5
1.23k
| positive
stringlengths 53
15.2k
| id_
int64 0
252k
| task_name
stringlengths 87
242
| negative
listlengths 20
553
|
|---|---|---|---|---|
Process the dataset header
|
def datasetHeaderChunk(key, lines):
    """Process the dataset header.

    Parameters:
        key: chunk keyword this handler was dispatched for (unused here).
        lines: raw header card lines for one dataset.

    Returns:
        dict with dataset metadata (type, numberData, numberCells, name,
        objectID, objectType, vectorType); entries stay None when the
        corresponding card is absent.
    """
    KEYWORDS = ('DATASET', 'OBJTYPE', 'VECTYPE', 'BEGSCL', 'BEGVEC',
                'OBJID', 'ND', 'NC', 'NAME')
    TYPE_KEYS = ('BEGSCL', 'BEGVEC')

    result = {'type': None,
              'numberData': None,
              'numberCells': None,
              'name': None,
              'objectID': None,
              'objectType': None,
              'vectorType': None}

    # Group the header lines by keyword card
    cardGroups = pt.chunk(KEYWORDS, lines)

    for card, groupList in iteritems(cardGroups):
        for group in groupList:
            tokens = pt.splitLine(group[0])
            if card == 'ND':
                result['numberData'] = int(tokens[1])
            elif card == 'NC':
                result['numberCells'] = int(tokens[1])
            elif card == 'NAME':
                result['name'] = tokens[1]
            elif card == 'OBJID':
                result['objectID'] = int(tokens[1])
            elif card == 'OBJTYPE':
                result['objectType'] = tokens[1]
            elif card == 'VECTYPE':
                result['vectorType'] = tokens[1]
            elif card in TYPE_KEYS:
                # BEGSCL / BEGVEC: the card itself names the dataset type
                result['type'] = tokens[0]

    return result
| 4,700
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/wms_dataset_chunk.py#L14-L66
|
[
"def",
"_watch",
"(",
"self",
")",
":",
"async",
"def",
"_all_watcher",
"(",
")",
":",
"try",
":",
"allwatcher",
"=",
"client",
".",
"AllWatcherFacade",
".",
"from_connection",
"(",
"self",
".",
"connection",
"(",
")",
")",
"while",
"not",
"self",
".",
"_watch_stopping",
".",
"is_set",
"(",
")",
":",
"try",
":",
"results",
"=",
"await",
"utils",
".",
"run_with_interrupt",
"(",
"allwatcher",
".",
"Next",
"(",
")",
",",
"self",
".",
"_watch_stopping",
",",
"loop",
"=",
"self",
".",
"_connector",
".",
"loop",
")",
"except",
"JujuAPIError",
"as",
"e",
":",
"if",
"'watcher was stopped'",
"not",
"in",
"str",
"(",
"e",
")",
":",
"raise",
"if",
"self",
".",
"_watch_stopping",
".",
"is_set",
"(",
")",
":",
"# this shouldn't ever actually happen, because",
"# the event should trigger before the controller",
"# has a chance to tell us the watcher is stopped",
"# but handle it gracefully, just in case",
"break",
"# controller stopped our watcher for some reason",
"# but we're not actually stopping, so just restart it",
"log",
".",
"warning",
"(",
"'Watcher: watcher stopped, restarting'",
")",
"del",
"allwatcher",
".",
"Id",
"continue",
"except",
"websockets",
".",
"ConnectionClosed",
":",
"monitor",
"=",
"self",
".",
"connection",
"(",
")",
".",
"monitor",
"if",
"monitor",
".",
"status",
"==",
"monitor",
".",
"ERROR",
":",
"# closed unexpectedly, try to reopen",
"log",
".",
"warning",
"(",
"'Watcher: connection closed, reopening'",
")",
"await",
"self",
".",
"connection",
"(",
")",
".",
"reconnect",
"(",
")",
"if",
"monitor",
".",
"status",
"!=",
"monitor",
".",
"CONNECTED",
":",
"# reconnect failed; abort and shutdown",
"log",
".",
"error",
"(",
"'Watcher: automatic reconnect '",
"'failed; stopping watcher'",
")",
"break",
"del",
"allwatcher",
".",
"Id",
"continue",
"else",
":",
"# closed on request, go ahead and shutdown",
"break",
"if",
"self",
".",
"_watch_stopping",
".",
"is_set",
"(",
")",
":",
"try",
":",
"await",
"allwatcher",
".",
"Stop",
"(",
")",
"except",
"websockets",
".",
"ConnectionClosed",
":",
"pass",
"# can't stop on a closed conn",
"break",
"for",
"delta",
"in",
"results",
".",
"deltas",
":",
"try",
":",
"delta",
"=",
"get_entity_delta",
"(",
"delta",
")",
"old_obj",
",",
"new_obj",
"=",
"self",
".",
"state",
".",
"apply_delta",
"(",
"delta",
")",
"await",
"self",
".",
"_notify_observers",
"(",
"delta",
",",
"old_obj",
",",
"new_obj",
")",
"except",
"KeyError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"\"unknown delta type: %s\"",
",",
"e",
".",
"args",
"[",
"0",
"]",
")",
"self",
".",
"_watch_received",
".",
"set",
"(",
")",
"except",
"CancelledError",
":",
"pass",
"except",
"Exception",
":",
"log",
".",
"exception",
"(",
"'Error in watcher'",
")",
"raise",
"finally",
":",
"self",
".",
"_watch_stopped",
".",
"set",
"(",
")",
"log",
".",
"debug",
"(",
"'Starting watcher task'",
")",
"self",
".",
"_watch_received",
".",
"clear",
"(",
")",
"self",
".",
"_watch_stopping",
".",
"clear",
"(",
")",
"self",
".",
"_watch_stopped",
".",
"clear",
"(",
")",
"self",
".",
"_connector",
".",
"loop",
".",
"create_task",
"(",
"_all_watcher",
"(",
")",
")"
] |
Process the time step chunks for scalar datasets
|
def datasetScalarTimeStepChunk(lines, numberColumns, numberCells):
    """Process one time step chunk of a scalar dataset.

    Consumes the time-step header line from ``lines`` (the list is mutated:
    the header is popped from the front and a trailing ENDDS tag line is
    popped from the back) and assembles the remaining cell values into a
    nested-list string plus the raw raster text.
    """
    END_DATASET_TAG = 'ENDDS'

    # Time-step header card, e.g. "TS <istat> <timestamp>"
    header = pt.splitLine(lines.pop(0))
    iStatus = int(header[1])

    # When status cells are included (istat != 0), the first numberCells
    # lines are status indicators and are skipped.
    firstValueIndex = 0 if iStatus == 0 else numberCells

    # Strip off the ending dataset tag, if present
    if END_DATASET_TAG in lines[-1]:
        lines.pop(-1)

    cellString = '[['
    rasterText = ''
    lastIndex = len(lines) - 1
    column = 1

    for idx in range(firstValueIndex, len(lines)):
        value = lines[idx].strip()
        # The very last line always closes the outer list; otherwise the
        # column counter decides whether to continue or close a row.
        if idx == lastIndex:
            cellString += value + ']]'
        elif column % numberColumns == 0:
            cellString += value + '], ['
        else:
            cellString += value + ', '
        column += 1
        # Preserve the raw text field for spatial datasets
        rasterText += lines[idx]

    return {'iStatus': iStatus,
            'timestamp': float(header[2]),
            'cellArray': cellString,
            'rasterText': rasterText}
| 4,701
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/wms_dataset_chunk.py#L69-L127
|
[
"def",
"remove_filter",
"(",
"self",
",",
"server_id",
",",
"filter_path",
")",
":",
"# Validate server_id",
"server",
"=",
"self",
".",
"_get_server",
"(",
"server_id",
")",
"conn_id",
"=",
"server",
".",
"conn",
".",
"conn_id",
"if",
"server",
".",
"conn",
"is",
"not",
"None",
"else",
"None",
"# Verify referencing subscriptions.",
"ref_paths",
"=",
"server",
".",
"conn",
".",
"ReferenceNames",
"(",
"filter_path",
",",
"ResultClass",
"=",
"SUBSCRIPTION_CLASSNAME",
")",
"if",
"ref_paths",
":",
"# DSP1054 1.2 defines that this CIM error is raised by the server",
"# in that case, so we simulate that behavior on the client side.",
"raise",
"CIMError",
"(",
"CIM_ERR_FAILED",
",",
"\"The indication filter is referenced by subscriptions.\"",
",",
"conn_id",
"=",
"conn_id",
")",
"server",
".",
"conn",
".",
"DeleteInstance",
"(",
"filter_path",
")",
"inst_list",
"=",
"self",
".",
"_owned_filters",
"[",
"server_id",
"]",
"# We iterate backwards because we change the list",
"for",
"i",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"len",
"(",
"inst_list",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"inst",
"=",
"inst_list",
"[",
"i",
"]",
"if",
"inst",
".",
"path",
"==",
"filter_path",
":",
"del",
"inst_list",
"[",
"i",
"]"
] |
Write Dispatcher object in Python pickle format .
|
def save_dispatcher(dsp, path):
    """Write Dispatcher object in Python pickle format.

    :param dsp: dispatcher object to serialize.
    :param path: destination file path (opened in binary write mode).
    """
    import dill
    # dill handles objects plain pickle cannot (e.g. lambdas)
    with open(path, 'wb') as pickle_file:
        dill.dump(dsp, pickle_file)
| 4,702
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/io.py#L16-L48
|
[
"def",
"jtag_configure",
"(",
"self",
",",
"instr_regs",
"=",
"0",
",",
"data_bits",
"=",
"0",
")",
":",
"if",
"not",
"util",
".",
"is_natural",
"(",
"instr_regs",
")",
":",
"raise",
"ValueError",
"(",
"'IR value is not a natural number.'",
")",
"if",
"not",
"util",
".",
"is_natural",
"(",
"data_bits",
")",
":",
"raise",
"ValueError",
"(",
"'Data bits is not a natural number.'",
")",
"self",
".",
"_dll",
".",
"JLINKARM_ConfigJTAG",
"(",
"instr_regs",
",",
"data_bits",
")",
"return",
"None"
] |
Write Dispatcher default values in Python pickle format .
|
def save_default_values(dsp, path):
    """Write Dispatcher default values in Python pickle format.

    :param dsp: dispatcher whose ``default_values`` are serialized.
    :param path: destination file path (opened in binary write mode).
    """
    import dill
    with open(path, 'wb') as pickle_file:
        dill.dump(dsp.default_values, pickle_file)
| 4,703
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/io.py#L91-L123
|
[
"def",
"revoke",
"(",
"self",
")",
":",
"if",
"self",
".",
"_expired",
":",
"raise",
"Expired",
"(",
")",
"obj",
"=",
"{",
"# ID is the lease ID to revoke. When the ID is revoked, all",
"# associated keys will be deleted.",
"u'ID'",
":",
"self",
".",
"lease_id",
",",
"}",
"data",
"=",
"json",
".",
"dumps",
"(",
"obj",
")",
".",
"encode",
"(",
"'utf8'",
")",
"url",
"=",
"u'{}/v3alpha/kv/lease/revoke'",
".",
"format",
"(",
"self",
".",
"_client",
".",
"_url",
")",
".",
"encode",
"(",
")",
"response",
"=",
"yield",
"treq",
".",
"post",
"(",
"url",
",",
"data",
",",
"headers",
"=",
"self",
".",
"_client",
".",
"_REQ_HEADERS",
")",
"obj",
"=",
"yield",
"treq",
".",
"json_content",
"(",
"response",
")",
"header",
"=",
"Header",
".",
"_parse",
"(",
"obj",
"[",
"u'header'",
"]",
")",
"if",
"u'header'",
"in",
"obj",
"else",
"None",
"self",
".",
"_expired",
"=",
"True",
"returnValue",
"(",
"header",
")"
] |
Load Dispatcher default values in Python pickle format .
|
def load_default_values(dsp, path):
    """Load Dispatcher default values in Python pickle format.

    Re-initializes ``dsp`` in place, keeping its existing dmap but
    replacing its default values with the ones read from ``path``.
    """
    import dill
    # noinspection PyArgumentList
    with open(path, 'rb') as pickle_file:
        dsp.__init__(dmap=dsp.dmap, default_values=dill.load(pickle_file))
| 4,704
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/io.py#L126-L164
|
[
"def",
"revoke",
"(",
"self",
")",
":",
"if",
"self",
".",
"_expired",
":",
"raise",
"Expired",
"(",
")",
"obj",
"=",
"{",
"# ID is the lease ID to revoke. When the ID is revoked, all",
"# associated keys will be deleted.",
"u'ID'",
":",
"self",
".",
"lease_id",
",",
"}",
"data",
"=",
"json",
".",
"dumps",
"(",
"obj",
")",
".",
"encode",
"(",
"'utf8'",
")",
"url",
"=",
"u'{}/v3alpha/kv/lease/revoke'",
".",
"format",
"(",
"self",
".",
"_client",
".",
"_url",
")",
".",
"encode",
"(",
")",
"response",
"=",
"yield",
"treq",
".",
"post",
"(",
"url",
",",
"data",
",",
"headers",
"=",
"self",
".",
"_client",
".",
"_REQ_HEADERS",
")",
"obj",
"=",
"yield",
"treq",
".",
"json_content",
"(",
"response",
")",
"header",
"=",
"Header",
".",
"_parse",
"(",
"obj",
"[",
"u'header'",
"]",
")",
"if",
"u'header'",
"in",
"obj",
"else",
"None",
"self",
".",
"_expired",
"=",
"True",
"returnValue",
"(",
"header",
")"
] |
Write Dispatcher graph object in Python pickle format .
|
def save_map(dsp, path):
    """Write Dispatcher graph object in Python pickle format.

    :param dsp: dispatcher whose ``dmap`` graph is serialized.
    :param path: destination file path (opened in binary write mode).
    """
    import dill
    with open(path, 'wb') as pickle_file:
        dill.dump(dsp.dmap, pickle_file)
| 4,705
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/io.py#L167-L197
|
[
"def",
"get_lateration_parameters",
"(",
"all_points",
",",
"indices",
",",
"index",
",",
"edm",
",",
"W",
"=",
"None",
")",
":",
"if",
"W",
"is",
"None",
":",
"W",
"=",
"np",
".",
"ones",
"(",
"edm",
".",
"shape",
")",
"# delete points that are not considered anchors",
"anchors",
"=",
"np",
".",
"delete",
"(",
"all_points",
",",
"indices",
",",
"axis",
"=",
"0",
")",
"r2",
"=",
"np",
".",
"delete",
"(",
"edm",
"[",
"index",
",",
":",
"]",
",",
"indices",
")",
"w",
"=",
"np",
".",
"delete",
"(",
"W",
"[",
"index",
",",
":",
"]",
",",
"indices",
")",
"# set w to zero where measurements are invalid",
"if",
"np",
".",
"isnan",
"(",
"r2",
")",
".",
"any",
"(",
")",
":",
"nan_measurements",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isnan",
"(",
"r2",
")",
")",
"[",
"0",
"]",
"r2",
"[",
"nan_measurements",
"]",
"=",
"0.0",
"w",
"[",
"nan_measurements",
"]",
"=",
"0.0",
"if",
"np",
".",
"isnan",
"(",
"w",
")",
".",
"any",
"(",
")",
":",
"nan_measurements",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isnan",
"(",
"w",
")",
")",
"[",
"0",
"]",
"r2",
"[",
"nan_measurements",
"]",
"=",
"0.0",
"w",
"[",
"nan_measurements",
"]",
"=",
"0.0",
"# delete anchors where weight is zero to avoid ill-conditioning",
"missing_anchors",
"=",
"np",
".",
"where",
"(",
"w",
"==",
"0.0",
")",
"[",
"0",
"]",
"w",
"=",
"np",
".",
"asarray",
"(",
"np",
".",
"delete",
"(",
"w",
",",
"missing_anchors",
")",
")",
"r2",
"=",
"np",
".",
"asarray",
"(",
"np",
".",
"delete",
"(",
"r2",
",",
"missing_anchors",
")",
")",
"w",
".",
"resize",
"(",
"edm",
".",
"shape",
"[",
"0",
"]",
"-",
"len",
"(",
"indices",
")",
"-",
"len",
"(",
"missing_anchors",
")",
",",
"1",
")",
"r2",
".",
"resize",
"(",
"edm",
".",
"shape",
"[",
"0",
"]",
"-",
"len",
"(",
"indices",
")",
"-",
"len",
"(",
"missing_anchors",
")",
",",
"1",
")",
"anchors",
"=",
"np",
".",
"delete",
"(",
"anchors",
",",
"missing_anchors",
",",
"axis",
"=",
"0",
")",
"assert",
"w",
".",
"shape",
"[",
"0",
"]",
"==",
"anchors",
".",
"shape",
"[",
"0",
"]",
"assert",
"np",
".",
"isnan",
"(",
"w",
")",
".",
"any",
"(",
")",
"==",
"False",
"assert",
"np",
".",
"isnan",
"(",
"r2",
")",
".",
"any",
"(",
")",
"==",
"False",
"return",
"anchors",
",",
"w",
",",
"r2"
] |
Divide a file into chunks between key words in the list
|
def chunk(keywords, lines):
    """Divide a file into chunks between keywords in the list.

    Each line whose first whitespace-separated token is a keyword starts a
    new chunk; following non-keyword lines are appended to that chunk.
    Blank lines are skipped entirely. Lines appearing before the first
    keyword are silently discarded (collected into an orphan list that is
    never stored).

    Returns a dict mapping each keyword to a list of chunks (each chunk a
    list of raw lines, keyword line first).
    """
    groups = {keyword: [] for keyword in keywords}
    current = []  # orphan until the first keyword line is seen
    for line in lines:
        if not line.strip():
            continue  # blank lines are dropped, even inside a chunk
        first_token = line.split()[0]
        if first_token in keywords:
            current = [line]
            groups[first_token].append(current)
        else:
            current.append(line)
    return groups
| 4,706
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/parsetools.py#L46-L69
|
[
"def",
"GenerateGaussianNoise",
"(",
"PSD",
")",
":",
"Noise",
"=",
"np",
".",
"zeros",
"(",
"(",
"N_fd",
")",
",",
"complex",
")",
"# Generate noise from PSD ",
"Real",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"N_fd",
")",
"*",
"np",
".",
"sqrt",
"(",
"PSD",
"/",
"(",
"4.",
"*",
"dF",
")",
")",
"Imag",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"N_fd",
")",
"*",
"np",
".",
"sqrt",
"(",
"PSD",
"/",
"(",
"4.",
"*",
"dF",
")",
")",
"Noise",
"=",
"Real",
"+",
"1j",
"*",
"Imag",
"return",
"Noise"
] |
Apply global pre - processing to values during reading throughout the project .
|
def valueReadPreprocessor(valueString, replaceParamsFile=None):
    """Apply global pre-processing to values during reading.

    When a replace-parameters file is supplied and the value looks like a
    replacement variable (contains '[' or ']'), return the negative id of
    the matching target parameter as a string (or REPLACE_NO_VALUE when no
    parameter matches). All other values pass through unchanged.
    """
    # Booleans cannot be pre-processed; pass them through with a warning.
    if type(valueString) is bool:
        log.warning("Only numerical variable types can be handled by the valueReadPreprocessor function.")
        return valueString

    # Pass-through cases: no replacement file, no value, or not a
    # replacement-variable token.
    if replaceParamsFile is None or valueString is None:
        return valueString
    if '[' not in valueString and ']' not in valueString:
        return valueString

    # Replacement variable: default to the sentinel, then look up the
    # matching target parameter and encode its id as a negative number.
    processedValue = '{0}'.format(REPLACE_NO_VALUE)
    for targetParam in replaceParamsFile.targetParameters:
        if targetParam.targetVariable == valueString:
            processedValue = '{0}'.format(-1 * targetParam.id)
            break
    return processedValue
| 4,707
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/parsetools.py#L72-L103
|
[
"def",
"run",
"(",
"wrapped",
")",
":",
"@",
"wraps",
"(",
"wrapped",
")",
"def",
"_run",
"(",
"self",
",",
"query",
",",
"bindings",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_reconnect_if_missing_connection",
"(",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"try",
":",
"result",
"=",
"wrapped",
"(",
"self",
",",
"query",
",",
"bindings",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"result",
"=",
"self",
".",
"_try_again_if_caused_by_lost_connection",
"(",
"e",
",",
"query",
",",
"bindings",
",",
"wrapped",
")",
"t",
"=",
"self",
".",
"_get_elapsed_time",
"(",
"start",
")",
"self",
".",
"log_query",
"(",
"query",
",",
"bindings",
",",
"t",
")",
"return",
"result",
"return",
"_run"
] |
Look up variable name in replace param file for the negative id given and return it.
|
def valueWritePreprocessor(valueString, replaceParamsFile=None):
    """Look up variable name in replace param file for the negative id given and return it.

    When a replace-parameters file is supplied, a value equal to
    REPLACE_NO_VALUE is written as '[NO_VARIABLE]', and a negative integer
    value is mapped back to the target variable whose id it encodes.
    Any other value passes through unchanged.
    """
    # Booleans cannot be pre-processed; pass them through with a warning.
    if type(valueString) is bool:
        # Fixed: message previously named valueReadPreprocessor (copy-paste).
        log.warning("Only numerical variable types can be handled by the valueWritePreprocessor function.")
        return valueString

    # Default: pass the value through unchanged
    variableString = valueString

    if replaceParamsFile is not None:
        if variableString == REPLACE_NO_VALUE:
            # Sentinel meaning "no value was assigned"
            variableString = '[NO_VARIABLE]'
        else:
            # Narrowed from a bare except: only the int() conversion is
            # expected to fail (non-numeric string, or None).
            try:
                number = int(valueString)
            except (TypeError, ValueError):
                pass
            else:
                if number < 0:
                    parameterID = -number
                    # Find the parameter whose id matches
                    for targetParam in replaceParamsFile.targetParameters:
                        if targetParam.id == parameterID:
                            variableString = targetParam.targetVariable
                            break
    return variableString
| 4,708
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/parsetools.py#L106-L144
|
[
"def",
"not_storable",
"(",
"_type",
")",
":",
"return",
"Storable",
"(",
"_type",
",",
"handlers",
"=",
"StorableHandler",
"(",
"poke",
"=",
"fake_poke",
",",
"peek",
"=",
"fail_peek",
"(",
"_type",
")",
")",
")"
] |
Run all FeatureExtractors and output results to CSV .
|
def run(self, dataset_path):
    """Run all FeatureExtractors and output results to CSV.

    :param dataset_path: path the combined feature DataFrame is written to.
    """
    feature_frame = self._generate_features(self._feature_extractors)
    feature_frame.to_csv(dataset_path)
| 4,709
|
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/collection.py#L28-L31
|
[
"def",
"cluster_types",
"(",
"types",
",",
"max_clust",
"=",
"12",
")",
":",
"if",
"len",
"(",
"types",
")",
"<",
"max_clust",
":",
"max_clust",
"=",
"len",
"(",
"types",
")",
"# Do actual clustering",
"cluster_dict",
"=",
"do_clustering",
"(",
"types",
",",
"max_clust",
")",
"cluster_ranks",
"=",
"rank_clusters",
"(",
"cluster_dict",
")",
"# Create a dictionary mapping binary numbers to indices",
"ranks",
"=",
"{",
"}",
"for",
"key",
"in",
"cluster_dict",
":",
"for",
"typ",
"in",
"cluster_dict",
"[",
"key",
"]",
":",
"ranks",
"[",
"typ",
"]",
"=",
"cluster_ranks",
"[",
"key",
"]",
"return",
"ranks"
] |
Run all FeatureExtractors and record results in a key - value format .
|
def _generate_features(self, feature_extractors):
    """Run all FeatureExtractors and collect their results column-wise.

    Extractors whose state matches the cached copy are served from the
    cache instead of re-running; fresh results are stored back into the
    cache, and the cache is pickled to disk when ``cache_path`` is set.
    Returns a single DataFrame with all extractor results concatenated
    along the columns.
    """
    # assumes self._cache maps extractor name -> extractor and always has
    # an entry to compare against (e.g. a defaultdict) — TODO confirm
    frames = [pd.DataFrame()]
    total = len(feature_extractors)
    for position, extractor in enumerate(feature_extractors):
        log.info("generating: '%s' (%d/%d)", extractor.name, position + 1, total)
        cached = self._cache[extractor.name]
        if extractor.same(cached):
            log.info('pulling from cache')
            extractor = cached
        else:
            log.info('running...')
            extractor.extract()
        frames.append(extractor.result)
        if self.cache_path:
            self._cache[extractor.name] = extractor
    if self.cache_path:
        # Persist the whole cache so later runs can skip unchanged extractors
        with open(self.cache_path, 'wb') as cache_file:
            pickle.dump(self._cache, cache_file)
    return pd.concat(frames, axis=1)
| 4,710
|
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/collection.py#L33-L58
|
[
"def",
"start",
"(",
"self",
")",
":",
"def",
"_heartbeat",
"(",
")",
":",
"if",
"not",
"self",
".",
"_client",
".",
"lifecycle",
".",
"is_live",
":",
"return",
"self",
".",
"_heartbeat",
"(",
")",
"self",
".",
"_heartbeat_timer",
"=",
"self",
".",
"_client",
".",
"reactor",
".",
"add_timer",
"(",
"self",
".",
"_heartbeat_interval",
",",
"_heartbeat",
")",
"self",
".",
"_heartbeat_timer",
"=",
"self",
".",
"_client",
".",
"reactor",
".",
"add_timer",
"(",
"self",
".",
"_heartbeat_interval",
",",
"_heartbeat",
")"
] |
Generic read file into database method .
|
def read(self, directory, filename, session, spatial=False,
         spatialReferenceID=4236, replaceParamFile=None, **kwargs):
    """Generic read-file-into-database method.

    Adds this object to the session, delegates parsing to the subclass
    ``_read`` implementation, and commits. If the file does not exist the
    session is rolled back and a warning is logged instead.

    NOTE(review): the default spatialReferenceID 4236 looks like a typo
    for EPSG 4326 (WGS84) — confirm upstream before changing.
    """
    path = os.path.join(directory, filename)

    # Derive base name and (optional) extension from the filename
    parts = filename.split('.')
    name = parts[0]
    extension = parts[-1] if len(parts) >= 2 else ''

    if not os.path.isfile(path):
        # Roll back the session since nothing was read
        session.rollback()
        log.warning('Could not find file named {0}. File not read.'.format(filename))
        return

    # Register this object with the session before parsing
    session.add(self)

    # Delegate the actual parsing to the subclass implementation
    self._read(directory, filename, session, path, name, extension,
               spatial, spatialReferenceID, replaceParamFile, **kwargs)

    # Commit to database
    self._commit(session, self.COMMIT_ERROR_MESSAGE)
| 4,711
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/file_base.py#L37-L80
|
[
"def",
"hellman",
"(",
"wind_speed",
",",
"wind_speed_height",
",",
"hub_height",
",",
"roughness_length",
"=",
"None",
",",
"hellman_exponent",
"=",
"None",
")",
":",
"if",
"hellman_exponent",
"is",
"None",
":",
"if",
"roughness_length",
"is",
"not",
"None",
":",
"# Return np.array if wind_speed is np.array",
"if",
"(",
"isinstance",
"(",
"wind_speed",
",",
"np",
".",
"ndarray",
")",
"and",
"isinstance",
"(",
"roughness_length",
",",
"pd",
".",
"Series",
")",
")",
":",
"roughness_length",
"=",
"np",
".",
"array",
"(",
"roughness_length",
")",
"hellman_exponent",
"=",
"1",
"/",
"np",
".",
"log",
"(",
"hub_height",
"/",
"roughness_length",
")",
"else",
":",
"hellman_exponent",
"=",
"1",
"/",
"7",
"return",
"wind_speed",
"*",
"(",
"hub_height",
"/",
"wind_speed_height",
")",
"**",
"hellman_exponent"
] |
Write from database back to file .
|
def write(self, session, directory, name, replaceParamFile=None, **kwargs):
    """Write from database back to file.

    :param session: SQLAlchemy session passed through to ``_write``.
    :param directory: target directory for the output file.
    :param name: output file name; an explicit extension overrides the
        class default ``self.fileExtension``.
    :param replaceParamFile: optional replace-parameters file.
    """
    # Split the name into base and (optional) extension
    name_split = name.split('.')
    name = name_split[0]
    extension = name_split[-1] if len(name_split) >= 2 else ''

    # Run the name preprocessor method if the subclass provides one.
    # Best-effort by design; was a bare except with a no-op string
    # statement ('DO NOTHING') — narrowed to Exception so Ctrl-C and
    # SystemExit are no longer swallowed.
    try:
        name = self._namePreprocessor(name)
    except Exception:
        pass

    # Fall back to the class's default extension when none was given
    if extension == '':
        filename = '{0}.{1}'.format(name, self.fileExtension)
    else:
        filename = '{0}.{1}'.format(name, extension)
    filePath = os.path.join(directory, filename)

    with io_open(filePath, 'w') as openFile:
        # Delegate line writing to the subclass implementation
        self._write(session=session,
                    openFile=openFile,
                    replaceParamFile=replaceParamFile,
                    **kwargs)
| 4,712
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/file_base.py#L82-L122
|
[
"def",
"makeAggShkDstn",
"(",
"self",
")",
":",
"TranShkAggDstn",
"=",
"[",
"]",
"PermShkAggDstn",
"=",
"[",
"]",
"AggShkDstn",
"=",
"[",
"]",
"StateCount",
"=",
"self",
".",
"MrkvArray",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"StateCount",
")",
":",
"TranShkAggDstn",
".",
"append",
"(",
"approxMeanOneLognormal",
"(",
"sigma",
"=",
"self",
".",
"TranShkAggStd",
"[",
"i",
"]",
",",
"N",
"=",
"self",
".",
"TranShkAggCount",
")",
")",
"PermShkAggDstn",
".",
"append",
"(",
"approxMeanOneLognormal",
"(",
"sigma",
"=",
"self",
".",
"PermShkAggStd",
"[",
"i",
"]",
",",
"N",
"=",
"self",
".",
"PermShkAggCount",
")",
")",
"AggShkDstn",
".",
"append",
"(",
"combineIndepDstns",
"(",
"PermShkAggDstn",
"[",
"-",
"1",
"]",
",",
"TranShkAggDstn",
"[",
"-",
"1",
"]",
")",
")",
"self",
".",
"TranShkAggDstn",
"=",
"TranShkAggDstn",
"self",
".",
"PermShkAggDstn",
"=",
"PermShkAggDstn",
"self",
".",
"AggShkDstn",
"=",
"AggShkDstn"
] |
Custom commit function for file objects
|
def _commit(self, session, errorMessage):
    """Custom commit function for file objects.

    Commits the session, logging a file-specific message when the commit
    fails with an IntegrityError (typically caused by empty files).
    """
    try:
        session.commit()
    except IntegrityError:
        # NOTE(review): the original comment says "Raise special error"
        # but the exception is logged and swallowed, so callers see a
        # silent failure — behavior preserved here; confirm intent.
        log.error('Commit to database failed. %s' % errorMessage)
    except:
        # Every other error propagates unchanged
        raise
| 4,713
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/file_base.py#L124-L135
|
[
"def",
"group_experiments_greedy",
"(",
"tomo_expt",
":",
"TomographyExperiment",
")",
":",
"diag_sets",
"=",
"_max_tpb_overlap",
"(",
"tomo_expt",
")",
"grouped_expt_settings_list",
"=",
"list",
"(",
"diag_sets",
".",
"values",
"(",
")",
")",
"grouped_tomo_expt",
"=",
"TomographyExperiment",
"(",
"grouped_expt_settings_list",
",",
"program",
"=",
"tomo_expt",
".",
"program",
")",
"return",
"grouped_tomo_expt"
] |
Create the versioneer.py file.
|
def run(self):
    """Create the versioneer.py file.

    Writes the VCS-specific version file, appends the bootstrap snippet to
    the package __init__.py when missing, ensures both versioneer.py and
    the version file are listed in MANIFEST.in, then performs the
    VCS-specific install step (e.g. .gitattributes for git).
    """
    print(" creating %s" % versionfile_source)
    with open(versionfile_source, "w") as out:
        out.write(get_vcs_code())

    ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
    try:
        with open(ipy, "r") as src:
            existing = src.read()
    except EnvironmentError:
        existing = ""
    if INIT_PY_SNIPPET not in existing:
        print(" appending to %s" % ipy)
        with open(ipy, "a") as out:
            out.write(INIT_PY_SNIPPET)
    else:
        print(" %s unmodified" % ipy)

    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(get_root(), "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as src:
            for line in src:
                if line.startswith("include "):
                    simple_includes.update(line.split()[1:])
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as out:
            out.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in"
              % versionfile_source)
        with open(manifest_in, "a") as out:
            out.write("include %s\n" % versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")

    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-time keyword
    # substitution.
    do_vcs_install_f = getattr(sys.modules[__name__], VCS + '_do_vcs_install')
    do_vcs_install_f(manifest_in, versionfile_source, ipy)
| 4,714
|
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/versioneer.py#L1378-L1435
|
[
"def",
"get_stores_secrets_volumes",
"(",
"cls",
",",
"stores_secrets",
")",
":",
"volumes",
"=",
"[",
"]",
"volume_mounts",
"=",
"[",
"]",
"for",
"store_secret",
"in",
"stores_secrets",
":",
"store",
"=",
"store_secret",
"[",
"'store'",
"]",
"if",
"store",
"in",
"{",
"GCS",
",",
"S3",
"}",
":",
"secrets_volumes",
",",
"secrets_volume_mounts",
"=",
"get_volume_from_secret",
"(",
"volume_name",
"=",
"cls",
".",
"STORE_SECRET_VOLUME_NAME",
".",
"format",
"(",
"store",
")",
",",
"mount_path",
"=",
"cls",
".",
"STORE_SECRET_KEY_MOUNT_PATH",
".",
"format",
"(",
"store",
")",
",",
"secret_name",
"=",
"store_secret",
"[",
"'persistence_secret'",
"]",
",",
")",
"volumes",
"+=",
"secrets_volumes",
"volume_mounts",
"+=",
"secrets_volume_mounts",
"return",
"volumes",
",",
"volume_mounts"
] |
Parse LINK Chunk Method
|
def linkChunk(key, chunk):
    """Parse LINK Chunk Method.

    Dispatches a LINK chunk to the handler matching its link-type card
    (second line of the chunk): DX -> cross section, STRUCTURE ->
    structure, RESERVOIR/LAKE -> reservoir.

    :raises ValueError: if the link type is not recognized (previously
        this crashed with an opaque NameError on the unbound ``result``).
    """
    # Extract link type card from the second line of the chunk
    linkType = chunk[1].strip().split()[0]

    # Cases
    if linkType == 'DX':
        # Cross section link type handler
        result = xSectionLink(chunk)
    elif linkType == 'STRUCTURE':
        # Structure link type handler
        result = structureLink(chunk)
    elif linkType in ('RESERVOIR', 'LAKE'):
        # Reservoir link type handler
        result = reservoirLink(chunk)
    else:
        raise ValueError('Unsupported link type: {0}'.format(linkType))

    return result
| 4,715
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L45-L64
|
[
"def",
"rate_limit",
"(",
"f",
")",
":",
"def",
"new_f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"errors",
"=",
"0",
"while",
"True",
":",
"resp",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"resp",
".",
"status_code",
"==",
"200",
":",
"errors",
"=",
"0",
"return",
"resp",
"elif",
"resp",
".",
"status_code",
"==",
"401",
":",
"# Hack to retain the original exception, but augment it with",
"# additional context for the user to interpret it. In a Python",
"# 3 only future we can raise a new exception of the same type",
"# with a new message from the old error.",
"try",
":",
"resp",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"HTTPError",
"as",
"e",
":",
"message",
"=",
"\"\\nThis is a protected or locked account, or\"",
"+",
"\" the credentials provided are no longer valid.\"",
"e",
".",
"args",
"=",
"(",
"e",
".",
"args",
"[",
"0",
"]",
"+",
"message",
",",
")",
"+",
"e",
".",
"args",
"[",
"1",
":",
"]",
"log",
".",
"warning",
"(",
"\"401 Authentication required for %s\"",
",",
"resp",
".",
"url",
")",
"raise",
"elif",
"resp",
".",
"status_code",
"==",
"429",
":",
"reset",
"=",
"int",
"(",
"resp",
".",
"headers",
"[",
"'x-rate-limit-reset'",
"]",
")",
"now",
"=",
"time",
".",
"time",
"(",
")",
"seconds",
"=",
"reset",
"-",
"now",
"+",
"10",
"if",
"seconds",
"<",
"1",
":",
"seconds",
"=",
"10",
"log",
".",
"warning",
"(",
"\"rate limit exceeded: sleeping %s secs\"",
",",
"seconds",
")",
"time",
".",
"sleep",
"(",
"seconds",
")",
"elif",
"resp",
".",
"status_code",
">=",
"500",
":",
"errors",
"+=",
"1",
"if",
"errors",
">",
"30",
":",
"log",
".",
"warning",
"(",
"\"too many errors from Twitter, giving up\"",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"seconds",
"=",
"60",
"*",
"errors",
"log",
".",
"warning",
"(",
"\"%s from Twitter API, sleeping %s\"",
",",
"resp",
".",
"status_code",
",",
"seconds",
")",
"time",
".",
"sleep",
"(",
"seconds",
")",
"else",
":",
"resp",
".",
"raise_for_status",
"(",
")",
"return",
"new_f"
] |
Parse STRUCTURE LINK Method
|
def structureLink ( lines ) : # Constants KEYWORDS = ( 'LINK' , 'STRUCTURE' , 'NUMSTRUCTS' , 'STRUCTTYPE' ) WEIR_KEYWORDS = ( 'STRUCTTYPE' , 'CREST_LENGTH' , 'CREST_LOW_ELEV' , 'DISCHARGE_COEFF_FORWARD' , 'DISCHARGE_COEFF_REVERSE' , 'CREST_LOW_LOC' , 'STEEP_SLOPE' , 'SHALLOW_SLOPE' ) CULVERT_KEYWORDS = ( 'STRUCTTYPE' , 'UPINVERT' , 'DOWNINVERT' , 'INLET_DISCH_COEFF' , 'REV_FLOW_DISCH_COEFF' , 'SLOPE' , 'LENGTH' , 'ROUGH_COEFF' , 'DIAMETER' , 'WIDTH' , 'HEIGHT' ) WEIRS = ( 'WEIR' , 'SAG_WEIR' ) CULVERTS = ( 'ROUND_CULVERT' , 'RECT_CULVERT' ) CURVES = ( 'RATING_CURVE' , 'SCHEDULED_RELEASE' , 'RULE_CURVE' ) result = { 'type' : 'STRUCTURE' , 'header' : { 'link' : None , 'numstructs' : None } , 'structures' : [ ] } chunks = pt . chunk ( KEYWORDS , lines ) # Parse chunks associated with each key for key , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : # Cases if key == 'STRUCTTYPE' : # Structure handler structType = chunk [ 0 ] . strip ( ) . split ( ) [ 1 ] # Cases if structType in WEIRS : weirResult = { 'structtype' : None , 'crest_length' : None , 'crest_low_elev' : None , 'discharge_coeff_forward' : None , 'discharge_coeff_reverse' : None , 'crest_low_loc' : None , 'steep_slope' : None , 'shallow_slope' : None } # Weir type structures handler result [ 'structures' ] . append ( structureChunk ( WEIR_KEYWORDS , weirResult , chunk ) ) elif structType in CULVERTS : culvertResult = { 'structtype' : None , 'upinvert' : None , 'downinvert' : None , 'inlet_disch_coeff' : None , 'rev_flow_disch_coeff' : None , 'slope' : None , 'length' : None , 'rough_coeff' : None , 'diameter' : None , 'width' : None , 'height' : None } # Culvert type structures handler result [ 'structures' ] . append ( structureChunk ( CULVERT_KEYWORDS , culvertResult , chunk ) ) elif structType in CURVES : # Curve type handler pass elif key != 'STRUCTURE' : # All other variables header result [ 'header' ] [ key . lower ( ) ] = chunk [ 0 ] . strip ( ) . 
split ( ) [ 1 ] return result
| 4,716
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L66-L158
|
[
"def",
"gen",
"(",
"id_",
"=",
"None",
",",
"keysize",
"=",
"2048",
")",
":",
"if",
"id_",
"is",
"None",
":",
"id_",
"=",
"hashlib",
".",
"sha512",
"(",
"os",
".",
"urandom",
"(",
"32",
")",
")",
".",
"hexdigest",
"(",
")",
"else",
":",
"id_",
"=",
"clean",
".",
"filename",
"(",
"id_",
")",
"ret",
"=",
"{",
"'priv'",
":",
"''",
",",
"'pub'",
":",
"''",
"}",
"priv",
"=",
"salt",
".",
"crypt",
".",
"gen_keys",
"(",
"__opts__",
"[",
"'pki_dir'",
"]",
",",
"id_",
",",
"keysize",
")",
"pub",
"=",
"'{0}.pub'",
".",
"format",
"(",
"priv",
"[",
":",
"priv",
".",
"rindex",
"(",
"'.'",
")",
"]",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"priv",
")",
"as",
"fp_",
":",
"ret",
"[",
"'priv'",
"]",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"fp_",
".",
"read",
"(",
")",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"pub",
")",
"as",
"fp_",
":",
"ret",
"[",
"'pub'",
"]",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"fp_",
".",
"read",
"(",
")",
")",
"# The priv key is given the Read-Only attribute. The causes `os.remove` to",
"# fail in Windows.",
"if",
"salt",
".",
"utils",
".",
"platform",
".",
"is_windows",
"(",
")",
":",
"os",
".",
"chmod",
"(",
"priv",
",",
"128",
")",
"os",
".",
"remove",
"(",
"priv",
")",
"os",
".",
"remove",
"(",
"pub",
")",
"return",
"ret"
] |
Parse Cross Section Links Method
|
def xSectionLink ( lines ) : # Constants KEYWORDS = ( 'LINK' , 'DX' , 'TRAPEZOID' , 'TRAPEZOID_ERODE' , 'TRAPEZOID_SUBSURFACE' , 'ERODE_TRAPEZOID' , 'ERODE_SUBSURFACE' , 'SUBSURFACE_TRAPEZOID' , 'SUBSURFACE_ERODE' , 'TRAPEZOID_ERODE_SUBSURFACE' , 'TRAPEZOID_SUBSURFACE_ERODE' , 'ERODE_TRAPEZOID_SUBSURFACE' , 'ERODE_SUBSURFACE_TRAPEZOID' , 'SUBSURFACE_TRAPEZOID_ERODE' , 'SUBSURFACE_ERODE_TRAPEZOID' , 'BREAKPOINT' , 'BREAKPOINT_ERODE' , 'BREAKPOINT_SUBSURFACE' , 'ERODE_BREAKPOINT' , 'ERODE_SUBSURFACE' , 'SUBSURFACE_BREAKPOINT' , 'SUBSURFACE_ERODE' , 'BREAKPOINT_ERODE_SUBSURFACE' , 'BREAKPOINT_SUBSURFACE_ERODE' , 'ERODE_BREAKPOINT_SUBSURFACE' , 'ERODE_SUBSURFACE_BREAKPOINT' , 'SUBSURFACE_BREAKPOINT_ERODE' , 'SUBSURFACE_ERODE_BREAKPOINT' , 'TRAP' , 'TRAP_ERODE' , 'TRAP_SUBSURFACE' , 'ERODE_TRAP' , 'ERODE_SUBSURFACE' , 'SUBSURFACE_TRAP' , 'SUBSURFACE_ERODE' , 'TRAP_ERODE_SUBSURFACE' , 'TRAP_SUBSURFACE_ERODE' , 'ERODE_TRAP_SUBSURFACE' , 'ERODE_SUBSURFACE_TRAP' , 'SUBSURFACE_TRAP_ERODE' , 'SUBSURFACE_ERODE_TRAP' , 'NODES' , 'NODE' , 'XSEC' ) ERODE = ( 'TRAPEZOID_ERODE' , 'TRAP_ERODE' , 'TRAP_SUBSURFACE_ERODE' , 'TRAP_ERODE_SUBSURFACE' , 'BREAKPOINT_ERODE' , 'TRAPEZOID_SUBSURFACE_ERODE' , 'TRAPEZOID_ERODE_SUBSURFACE' , 'BREAKPOINT_SUBSURFACE_ERODE' , 'BREAKPOINT_ERODE_SUBSURFACE' ) SUBSURFACE = ( 'TRAPEZOID_SUBSURFACE' , 'TRAP_SUBSURFACE' , 'TRAP_SUBSURFACE_ERODE' , 'TRAP_ERODE_SUBSURFACE' , 'BREAKPOINT_SUBSURFACE' , 'TRAPEZOID_SUBSURFACE_ERODE' , 'TRAPEZOID_ERODE_SUBSURFACE' , 'BREAKPOINT_SUBSURFACE_ERODE' , 'BREAKPOINT_ERODE_SUBSURFACE' ) result = { 'type' : 'XSEC' , 'header' : { 'link' : None , 'dx' : None , 'xSecType' : None , 'nodes' : None , 'erode' : False , 'subsurface' : False } , 'xSection' : None , 'nodes' : [ ] } chunks = pt . 
chunk ( KEYWORDS , lines ) # Parse chunks associated with each key for key , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : # Cases if key == 'NODE' : # Extract node x and y result [ 'nodes' ] . append ( nodeChunk ( chunk ) ) elif key == 'XSEC' : # Extract cross section information result [ 'xSection' ] = xSectionChunk ( chunk ) elif ( 'TRAPEZOID' in key ) or ( 'BREAKPOINT' in key ) or ( 'TRAP' in key ) : # Cross section type handler result [ 'header' ] [ 'xSecType' ] = key elif key in ERODE : # Erode handler result [ 'header' ] [ 'erode' ] = True elif key in SUBSURFACE : # Subsurface handler result [ 'header' ] [ 'subsurface' ] = True else : # Extract all other variables into header result [ 'header' ] [ key . lower ( ) ] = chunk [ 0 ] . strip ( ) . split ( ) [ 1 ] return result
| 4,717
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L160-L273
|
[
"def",
"get_changed_devices",
"(",
"self",
",",
"timestamp",
")",
":",
"if",
"timestamp",
"is",
"None",
":",
"payload",
"=",
"{",
"}",
"else",
":",
"payload",
"=",
"{",
"'timeout'",
":",
"SUBSCRIPTION_WAIT",
",",
"'minimumdelay'",
":",
"SUBSCRIPTION_MIN_WAIT",
"}",
"payload",
".",
"update",
"(",
"timestamp",
")",
"# double the timeout here so requests doesn't timeout before vera",
"payload",
".",
"update",
"(",
"{",
"'id'",
":",
"'lu_sdata'",
",",
"}",
")",
"logger",
".",
"debug",
"(",
"\"get_changed_devices() requesting payload %s\"",
",",
"str",
"(",
"payload",
")",
")",
"r",
"=",
"self",
".",
"data_request",
"(",
"payload",
",",
"TIMEOUT",
"*",
"2",
")",
"r",
".",
"raise_for_status",
"(",
")",
"# If the Vera disconnects before writing a full response (as lu_sdata",
"# will do when interrupted by a Luup reload), the requests module will",
"# happily return 200 with an empty string. So, test for empty response,",
"# so we don't rely on the JSON parser to throw an exception.",
"if",
"r",
".",
"text",
"==",
"\"\"",
":",
"raise",
"PyveraError",
"(",
"\"Empty response from Vera\"",
")",
"# Catch a wide swath of what the JSON parser might throw, within",
"# reason. Unfortunately, some parsers don't specifically return",
"# json.decode.JSONDecodeError, but so far most seem to derive what",
"# they do throw from ValueError, so that's helpful.",
"try",
":",
"result",
"=",
"r",
".",
"json",
"(",
")",
"except",
"ValueError",
"as",
"ex",
":",
"raise",
"PyveraError",
"(",
"\"JSON decode error: \"",
"+",
"str",
"(",
"ex",
")",
")",
"if",
"not",
"(",
"type",
"(",
"result",
")",
"is",
"dict",
"and",
"'loadtime'",
"in",
"result",
"and",
"'dataversion'",
"in",
"result",
")",
":",
"raise",
"PyveraError",
"(",
"\"Unexpected/garbled response from Vera\"",
")",
"# At this point, all good. Update timestamp and return change data.",
"device_data",
"=",
"result",
".",
"get",
"(",
"'devices'",
")",
"timestamp",
"=",
"{",
"'loadtime'",
":",
"result",
".",
"get",
"(",
"'loadtime'",
")",
",",
"'dataversion'",
":",
"result",
".",
"get",
"(",
"'dataversion'",
")",
"}",
"return",
"[",
"device_data",
",",
"timestamp",
"]"
] |
Parse RESERVOIR Link Method
|
def reservoirLink ( lines ) : # Constants KEYWORDS = ( 'LINK' , 'RESERVOIR' , 'RES_MINWSE' , 'RES_INITWSE' , 'RES_MAXWSE' , 'RES_NUMPTS' , 'LAKE' , 'MINWSE' , 'INITWSE' , 'MAXWSE' , 'NUMPTS' ) result = { 'header' : { 'link' : None , 'res_minwse' : None , 'res_initwse' : None , 'res_maxwse' : None , 'res_numpts' : None , 'minwse' : None , 'initwse' : None , 'maxwse' : None , 'numpts' : None } , 'type' : None , 'points' : [ ] } pair = { 'i' : None , 'j' : None } # Rechunk the chunk chunks = pt . chunk ( KEYWORDS , lines ) # Parse chunks associated with each key for key , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : schunk = chunk [ 0 ] . strip ( ) . split ( ) # Cases if key in ( 'NUMPTS' , 'RES_NUMPTS' ) : # Points handler result [ 'header' ] [ key . lower ( ) ] = schunk [ 1 ] # Parse points for idx in range ( 1 , len ( chunk ) ) : schunk = chunk [ idx ] . strip ( ) . split ( ) for count , ordinate in enumerate ( schunk ) : # Divide ordinates into ij pairs if ( count % 2 ) == 0 : pair [ 'i' ] = ordinate else : pair [ 'j' ] = ordinate result [ 'points' ] . append ( pair ) pair = { 'i' : None , 'j' : None } elif key in ( 'LAKE' , 'RESERVOIR' ) : # Type handler result [ 'type' ] = schunk [ 0 ] else : # Header variables handler result [ 'header' ] [ key . lower ( ) ] = schunk [ 1 ] return result
| 4,718
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L275-L342
|
[
"def",
"flush",
"(",
"self",
")",
":",
"writer",
"=",
"self",
".",
"writer",
"if",
"writer",
"is",
"None",
":",
"raise",
"GaugedUseAfterFreeError",
"self",
".",
"flush_writer_position",
"(",
")",
"keys",
"=",
"self",
".",
"translate_keys",
"(",
")",
"blocks",
"=",
"[",
"]",
"current_block",
"=",
"self",
".",
"current_block",
"statistics",
"=",
"self",
".",
"statistics",
"driver",
"=",
"self",
".",
"driver",
"flags",
"=",
"0",
"# for future extensions, e.g. block compression",
"for",
"namespace",
",",
"key",
",",
"block",
"in",
"self",
".",
"pending_blocks",
"(",
")",
":",
"length",
"=",
"block",
".",
"byte_length",
"(",
")",
"if",
"not",
"length",
":",
"continue",
"key_id",
"=",
"keys",
"[",
"(",
"namespace",
",",
"key",
")",
"]",
"statistics",
"[",
"namespace",
"]",
".",
"byte_count",
"+=",
"length",
"blocks",
".",
"append",
"(",
"(",
"namespace",
",",
"current_block",
",",
"key_id",
",",
"block",
".",
"buffer",
"(",
")",
",",
"flags",
")",
")",
"if",
"self",
".",
"config",
".",
"overwrite_blocks",
":",
"driver",
".",
"replace_blocks",
"(",
"blocks",
")",
"else",
":",
"driver",
".",
"insert_or_append_blocks",
"(",
"blocks",
")",
"if",
"not",
"Gauged",
".",
"writer_flush_maps",
"(",
"writer",
",",
"True",
")",
":",
"raise",
"MemoryError",
"update_namespace",
"=",
"driver",
".",
"add_namespace_statistics",
"for",
"namespace",
",",
"stats",
"in",
"statistics",
".",
"iteritems",
"(",
")",
":",
"update_namespace",
"(",
"namespace",
",",
"self",
".",
"current_block",
",",
"stats",
".",
"data_points",
",",
"stats",
".",
"byte_count",
")",
"statistics",
".",
"clear",
"(",
")",
"driver",
".",
"commit",
"(",
")",
"self",
".",
"flush_now",
"=",
"False"
] |
Parse NODE Method
|
def nodeChunk ( lines ) : # Constants KEYWORDS = ( 'NODE' , 'X_Y' , 'ELEV' ) result = { 'node' : None , 'x' : None , 'y' : None , 'elev' : None } chunks = pt . chunk ( KEYWORDS , lines ) # Parse chunks associated with each key for key , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : schunk = chunk [ 0 ] . strip ( ) . split ( ) if key == 'X_Y' : result [ 'x' ] = schunk [ 1 ] result [ 'y' ] = schunk [ 2 ] else : result [ key . lower ( ) ] = schunk [ 1 ] return result
| 4,719
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L344-L371
|
[
"def",
"rate_limit",
"(",
"f",
")",
":",
"def",
"new_f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"errors",
"=",
"0",
"while",
"True",
":",
"resp",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"resp",
".",
"status_code",
"==",
"200",
":",
"errors",
"=",
"0",
"return",
"resp",
"elif",
"resp",
".",
"status_code",
"==",
"401",
":",
"# Hack to retain the original exception, but augment it with",
"# additional context for the user to interpret it. In a Python",
"# 3 only future we can raise a new exception of the same type",
"# with a new message from the old error.",
"try",
":",
"resp",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"HTTPError",
"as",
"e",
":",
"message",
"=",
"\"\\nThis is a protected or locked account, or\"",
"+",
"\" the credentials provided are no longer valid.\"",
"e",
".",
"args",
"=",
"(",
"e",
".",
"args",
"[",
"0",
"]",
"+",
"message",
",",
")",
"+",
"e",
".",
"args",
"[",
"1",
":",
"]",
"log",
".",
"warning",
"(",
"\"401 Authentication required for %s\"",
",",
"resp",
".",
"url",
")",
"raise",
"elif",
"resp",
".",
"status_code",
"==",
"429",
":",
"reset",
"=",
"int",
"(",
"resp",
".",
"headers",
"[",
"'x-rate-limit-reset'",
"]",
")",
"now",
"=",
"time",
".",
"time",
"(",
")",
"seconds",
"=",
"reset",
"-",
"now",
"+",
"10",
"if",
"seconds",
"<",
"1",
":",
"seconds",
"=",
"10",
"log",
".",
"warning",
"(",
"\"rate limit exceeded: sleeping %s secs\"",
",",
"seconds",
")",
"time",
".",
"sleep",
"(",
"seconds",
")",
"elif",
"resp",
".",
"status_code",
">=",
"500",
":",
"errors",
"+=",
"1",
"if",
"errors",
">",
"30",
":",
"log",
".",
"warning",
"(",
"\"too many errors from Twitter, giving up\"",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"seconds",
"=",
"60",
"*",
"errors",
"log",
".",
"warning",
"(",
"\"%s from Twitter API, sleeping %s\"",
",",
"resp",
".",
"status_code",
",",
"seconds",
")",
"time",
".",
"sleep",
"(",
"seconds",
")",
"else",
":",
"resp",
".",
"raise_for_status",
"(",
")",
"return",
"new_f"
] |
Parse XSEC Method
|
def xSectionChunk ( lines ) : # Constants KEYWORDS = ( 'MANNINGS_N' , 'BOTTOM_WIDTH' , 'BANKFULL_DEPTH' , 'SIDE_SLOPE' , 'NPAIRS' , 'NUM_INTERP' , 'X1' , 'ERODE' , 'MAX_EROSION' , 'SUBSURFACE' , 'M_RIVER' , 'K_RIVER' ) result = { 'mannings_n' : None , 'bottom_width' : None , 'bankfull_depth' : None , 'side_slope' : None , 'npairs' : None , 'num_interp' : None , 'erode' : False , 'subsurface' : False , 'max_erosion' : None , 'm_river' : None , 'k_river' : None , 'breakpoints' : [ ] } chunks = pt . chunk ( KEYWORDS , lines ) # Parse chunks associated with each key for key , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : # Strip and split the line (only one item in each list) schunk = chunk [ 0 ] . strip ( ) . split ( ) # Cases if key == 'X1' : # Extract breakpoint XY pairs x = schunk [ 1 ] y = schunk [ 2 ] result [ 'breakpoints' ] . append ( { 'x' : x , 'y' : y } ) if key in ( 'SUBSURFACE' , 'ERODE' ) : # Set booleans result [ key . lower ( ) ] = True else : # Extract value result [ key . lower ( ) ] = schunk [ 1 ] return result
| 4,720
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L373-L427
|
[
"def",
"database_backup",
"(",
"self",
",",
"data_directory",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"upload_good",
"=",
"False",
"backup_stop_good",
"=",
"False",
"while_offline",
"=",
"False",
"start_backup_info",
"=",
"None",
"if",
"'while_offline'",
"in",
"kwargs",
":",
"while_offline",
"=",
"kwargs",
".",
"pop",
"(",
"'while_offline'",
")",
"try",
":",
"if",
"not",
"while_offline",
":",
"start_backup_info",
"=",
"PgBackupStatements",
".",
"run_start_backup",
"(",
")",
"version",
"=",
"PgBackupStatements",
".",
"pg_version",
"(",
")",
"[",
"'version'",
"]",
"else",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_directory",
",",
"'postmaster.pid'",
")",
")",
":",
"hint",
"=",
"(",
"'Shut down postgres. '",
"'If there is a stale lockfile, '",
"'then remove it after being very sure postgres '",
"'is not running.'",
")",
"raise",
"UserException",
"(",
"msg",
"=",
"'while_offline set, but pg looks to be running'",
",",
"detail",
"=",
"'Found a postmaster.pid lockfile, and aborting'",
",",
"hint",
"=",
"hint",
")",
"ctrl_data",
"=",
"PgControlDataParser",
"(",
"data_directory",
")",
"start_backup_info",
"=",
"ctrl_data",
".",
"last_xlog_file_name_and_offset",
"(",
")",
"version",
"=",
"ctrl_data",
".",
"pg_version",
"(",
")",
"ret_tuple",
"=",
"self",
".",
"_upload_pg_cluster_dir",
"(",
"start_backup_info",
",",
"data_directory",
",",
"version",
"=",
"version",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"spec",
",",
"uploaded_to",
",",
"expanded_size_bytes",
"=",
"ret_tuple",
"upload_good",
"=",
"True",
"finally",
":",
"if",
"not",
"upload_good",
":",
"logger",
".",
"warning",
"(",
"'blocking on sending WAL segments'",
",",
"detail",
"=",
"(",
"'The backup was not completed successfully, '",
"'but we have to wait anyway. '",
"'See README: TODO about pg_cancel_backup'",
")",
")",
"if",
"not",
"while_offline",
":",
"stop_backup_info",
"=",
"PgBackupStatements",
".",
"run_stop_backup",
"(",
")",
"else",
":",
"stop_backup_info",
"=",
"start_backup_info",
"backup_stop_good",
"=",
"True",
"# XXX: Ugly, this is more of a 'worker' task because it might",
"# involve retries and error messages, something that is not",
"# treated by the \"operator\" category of modules. So",
"# basically, if this small upload fails, the whole upload",
"# fails!",
"if",
"upload_good",
"and",
"backup_stop_good",
":",
"# Try to write a sentinel file to the cluster backup",
"# directory that indicates that the base backup upload has",
"# definitely run its course and also communicates what WAL",
"# segments are needed to get to consistency.",
"sentinel_content",
"=",
"json",
".",
"dumps",
"(",
"{",
"'wal_segment_backup_stop'",
":",
"stop_backup_info",
"[",
"'file_name'",
"]",
",",
"'wal_segment_offset_backup_stop'",
":",
"stop_backup_info",
"[",
"'file_offset'",
"]",
",",
"'expanded_size_bytes'",
":",
"expanded_size_bytes",
",",
"'spec'",
":",
"spec",
"}",
")",
"# XXX: should use the storage operators.",
"#",
"# XXX: distinguish sentinels by *PREFIX* not suffix,",
"# which makes searching harder. (For the next version",
"# bump).",
"uri_put_file",
"(",
"self",
".",
"creds",
",",
"uploaded_to",
"+",
"'_backup_stop_sentinel.json'",
",",
"BytesIO",
"(",
"sentinel_content",
".",
"encode",
"(",
"\"utf8\"",
")",
")",
",",
"content_type",
"=",
"'application/json'",
")",
"else",
":",
"# NB: Other exceptions should be raised before this that",
"# have more informative results, it is intended that this",
"# exception never will get raised.",
"raise",
"UserCritical",
"(",
"'could not complete backup process'",
")"
] |
Parse Weir and Culvert Structures Method
|
def structureChunk ( keywords , resultDict , lines ) : chunks = pt . chunk ( keywords , lines ) # Parse chunks associated with each key for key , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : # Strip and split the line (only one item in each list) schunk = chunk [ 0 ] . strip ( ) . split ( ) # Extract values and assign to appropriate key in resultDict resultDict [ key . lower ( ) ] = schunk [ 1 ] return resultDict
| 4,721
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/cif_chunk.py#L429-L445
|
[
"def",
"set_nonblock",
"(",
"fd",
")",
":",
"# type: (int) -> None",
"fcntl",
".",
"fcntl",
"(",
"fd",
",",
"fcntl",
".",
"F_SETFL",
",",
"fcntl",
".",
"fcntl",
"(",
"fd",
",",
"fcntl",
".",
"F_GETFL",
")",
"|",
"os",
".",
"O_NONBLOCK",
")"
] |
Returns the completed progress bar . Every time this is called the animation moves .
|
def bar ( self , width , * * _ ) : width -= self . _width_offset self . _position += self . _direction # Change direction. if self . _position <= 0 and self . _direction < 0 : self . _position = 0 self . _direction = 1 elif self . _position > width : self . _position = width - 1 self . _direction = - 1 final_bar = ( self . CHAR_LEFT_BORDER + self . CHAR_EMPTY * self . _position + self . CHAR_ANIMATED + self . CHAR_EMPTY * ( width - self . _position ) + self . CHAR_RIGHT_BORDER ) return final_bar
| 4,722
|
https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/bars.py#L34-L58
|
[
"def",
"external_metadata",
"(",
"self",
",",
"datasource_type",
"=",
"None",
",",
"datasource_id",
"=",
"None",
")",
":",
"if",
"datasource_type",
"==",
"'druid'",
":",
"datasource",
"=",
"ConnectorRegistry",
".",
"get_datasource",
"(",
"datasource_type",
",",
"datasource_id",
",",
"db",
".",
"session",
")",
"elif",
"datasource_type",
"==",
"'table'",
":",
"database",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"Database",
")",
".",
"filter_by",
"(",
"id",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'db_id'",
")",
")",
".",
"one",
"(",
")",
")",
"Table",
"=",
"ConnectorRegistry",
".",
"sources",
"[",
"'table'",
"]",
"datasource",
"=",
"Table",
"(",
"database",
"=",
"database",
",",
"table_name",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'table_name'",
")",
",",
"schema",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'schema'",
")",
"or",
"None",
",",
")",
"external_metadata",
"=",
"datasource",
".",
"external_metadata",
"(",
")",
"return",
"self",
".",
"json_response",
"(",
"external_metadata",
")"
] |
Mapping Table Read from File Method
|
def _read ( self , directory , filename , session , path , name , extension , spatial = False , spatialReferenceID = 4236 , replaceParamFile = None , readIndexMaps = True ) : # Set file extension property self . fileExtension = extension # Dictionary of keywords/cards and parse function names KEYWORDS = { 'INDEX_MAP' : mtc . indexMapChunk , 'ROUGHNESS' : mtc . mapTableChunk , 'INTERCEPTION' : mtc . mapTableChunk , 'RETENTION' : mtc . mapTableChunk , 'GREEN_AMPT_INFILTRATION' : mtc . mapTableChunk , 'GREEN_AMPT_INITIAL_SOIL_MOISTURE' : mtc . mapTableChunk , 'RICHARDS_EQN_INFILTRATION_BROOKS' : mtc . mapTableChunk , 'RICHARDS_EQN_INFILTRATION_HAVERCAMP' : mtc . mapTableChunk , 'EVAPOTRANSPIRATION' : mtc . mapTableChunk , 'WELL_TABLE' : mtc . mapTableChunk , 'OVERLAND_BOUNDARY' : mtc . mapTableChunk , 'TIME_SERIES_INDEX' : mtc . mapTableChunk , 'GROUNDWATER' : mtc . mapTableChunk , 'GROUNDWATER_BOUNDARY' : mtc . mapTableChunk , 'AREA_REDUCTION' : mtc . mapTableChunk , 'WETLAND_PROPERTIES' : mtc . mapTableChunk , 'MULTI_LAYER_SOIL' : mtc . mapTableChunk , 'SOIL_EROSION_PROPS' : mtc . mapTableChunk , 'CONTAMINANT_TRANSPORT' : mtc . contamChunk , 'SEDIMENTS' : mtc . sedimentChunk } indexMaps = dict ( ) mapTables = [ ] # Parse file into chunks associated with keywords/cards with io_open ( path , 'r' ) as f : chunks = pt . chunk ( KEYWORDS , f ) # Parse chunks associated with each key for key , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : # Call chunk specific parsers for each chunk result = KEYWORDS [ key ] ( key , chunk ) # Index Map handler if key == 'INDEX_MAP' : # Create GSSHAPY IndexMap object from result object indexMap = IndexMap ( name = result [ 'idxName' ] ) # Dictionary used to map index maps to mapping tables indexMaps [ result [ 'idxName' ] ] = indexMap # Associate IndexMap with MapTableFile indexMap . mapTableFile = self if readIndexMaps : # Invoke IndexMap read method indexMap . 
read ( directory = directory , filename = result [ 'filename' ] , session = session , spatial = spatial , spatialReferenceID = spatialReferenceID ) else : # add path to file indexMap . filename = result [ 'filename' ] # Map Table handler else : # Create a list of all the map tables in the file if result : mapTables . append ( result ) # Create GSSHAPY ORM objects with the resulting objects that are # returned from the parser functions self . _createGsshaPyObjects ( mapTables , indexMaps , replaceParamFile , directory , session , spatial , spatialReferenceID )
| 4,723
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L86-L159
|
[
"def",
"syzygyJD",
"(",
"jd",
")",
":",
"sun",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"SUN",
",",
"jd",
")",
"moon",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"MOON",
",",
"jd",
")",
"dist",
"=",
"angle",
".",
"distance",
"(",
"sun",
",",
"moon",
")",
"# Offset represents the Syzygy type. ",
"# Zero is conjunction and 180 is opposition.",
"offset",
"=",
"180",
"if",
"(",
"dist",
">=",
"180",
")",
"else",
"0",
"while",
"abs",
"(",
"dist",
")",
">",
"MAX_ERROR",
":",
"jd",
"=",
"jd",
"-",
"dist",
"/",
"13.1833",
"# Moon mean daily motion",
"sun",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"SUN",
",",
"jd",
")",
"moon",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"MOON",
",",
"jd",
")",
"dist",
"=",
"angle",
".",
"closestdistance",
"(",
"sun",
"-",
"offset",
",",
"moon",
")",
"return",
"jd"
] |
Map Table Write to File Method
|
def _write ( self , session , openFile , replaceParamFile = None , writeIndexMaps = True ) : # Extract directory directory = os . path . split ( openFile . name ) [ 0 ] # Derive a Unique Set of Contaminants for mapTable in self . getOrderedMapTables ( session ) : if mapTable . name == 'CONTAMINANT_TRANSPORT' : contaminantList = [ ] for mtValue in mapTable . values : if mtValue . contaminant not in contaminantList : contaminantList . append ( mtValue . contaminant ) contaminants = sorted ( contaminantList , key = lambda x : ( x . indexMap . name , x . name ) ) # Write first line to file openFile . write ( 'GSSHA_INDEX_MAP_TABLES\n' ) # Write list of index maps for indexMap in self . indexMaps : # Write to map table file openFile . write ( 'INDEX_MAP%s"%s" "%s"\n' % ( ' ' * 16 , indexMap . filename , indexMap . name ) ) if writeIndexMaps : # Initiate index map write indexMap . write ( directory , session = session ) for mapTable in self . getOrderedMapTables ( session ) : if mapTable . name == 'SEDIMENTS' : self . _writeSedimentTable ( session = session , fileObject = openFile , mapTable = mapTable , replaceParamFile = replaceParamFile ) elif mapTable . name == 'CONTAMINANT_TRANSPORT' : self . _writeContaminantTable ( session = session , fileObject = openFile , mapTable = mapTable , contaminants = contaminants , replaceParamFile = replaceParamFile ) else : self . _writeMapTable ( session = session , fileObject = openFile , mapTable = mapTable , replaceParamFile = replaceParamFile )
| 4,724
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L161-L206
|
[
"def",
"devices",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw",
"# from Android system/core/adb/transport.c statename()",
"re_device_info",
"=",
"re",
".",
"compile",
"(",
"r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'",
")",
"devices",
"=",
"[",
"]",
"lines",
"=",
"self",
".",
"command_output",
"(",
"[",
"\"devices\"",
",",
"\"-l\"",
"]",
",",
"timeout",
"=",
"timeout",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
"==",
"'List of devices attached '",
":",
"continue",
"match",
"=",
"re_device_info",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"device",
"=",
"{",
"'device_serial'",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"'state'",
":",
"match",
".",
"group",
"(",
"2",
")",
"}",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"2",
")",
":",
"]",
".",
"strip",
"(",
")",
"if",
"remainder",
":",
"try",
":",
"device",
".",
"update",
"(",
"dict",
"(",
"[",
"j",
".",
"split",
"(",
"':'",
")",
"for",
"j",
"in",
"remainder",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"'devices: Unable to parse '",
"'remainder for device %s'",
"%",
"line",
")",
"devices",
".",
"append",
"(",
"device",
")",
"return",
"devices"
] |
Retrieve the map tables ordered by name
|
def getOrderedMapTables ( self , session ) : return session . query ( MapTable ) . filter ( MapTable . mapTableFile == self ) . order_by ( MapTable . name ) . all ( )
| 4,725
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L208-L212
|
[
"def",
"run",
"(",
")",
":",
"print",
"(",
"\"Environment\"",
",",
"os",
".",
"environ",
")",
"try",
":",
"os",
".",
"environ",
"[",
"\"SELENIUM\"",
"]",
"except",
"KeyError",
":",
"print",
"(",
"\"Please set the environment variable SELENIUM to Selenium URL\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"driver",
"=",
"WhatsAPIDriver",
"(",
"client",
"=",
"'remote'",
",",
"command_executor",
"=",
"os",
".",
"environ",
"[",
"\"SELENIUM\"",
"]",
")",
"print",
"(",
"\"Waiting for QR\"",
")",
"driver",
".",
"wait_for_login",
"(",
")",
"print",
"(",
"\"Bot started\"",
")",
"driver",
".",
"subscribe_new_messages",
"(",
"NewMessageObserver",
"(",
")",
")",
"print",
"(",
"\"Waiting for new messages...\"",
")",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"60",
")"
] |
Remove duplicate map table if it exists
|
def deleteMapTable ( self , name , session ) : duplicate_map_tables = session . query ( MapTable ) . filter ( MapTable . mapTableFile == self ) . filter ( MapTable . name == name ) . all ( ) for duplicate_map_table in duplicate_map_tables : if duplicate_map_table . indexMap : session . delete ( duplicate_map_table . indexMap ) session . delete ( duplicate_map_table ) session . commit ( )
| 4,726
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L214-L224
|
[
"def",
"_wrap_functions",
"(",
"self",
",",
"client",
")",
":",
"def",
"wrap",
"(",
"fn",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Simple wrapper for to catch dead clients.\"\"\"",
"try",
":",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"(",
"ConnectionError",
",",
"TimeoutError",
")",
":",
"# TO THE PENALTY BOX!",
"self",
".",
"_penalize_client",
"(",
"client",
")",
"raise",
"return",
"functools",
".",
"update_wrapper",
"(",
"wrapper",
",",
"fn",
")",
"for",
"name",
"in",
"dir",
"(",
"client",
")",
":",
"if",
"name",
".",
"startswith",
"(",
"\"_\"",
")",
":",
"continue",
"# Some things aren't wrapped",
"if",
"name",
"in",
"(",
"\"echo\"",
",",
"\"execute_command\"",
",",
"\"parse_response\"",
")",
":",
"continue",
"obj",
"=",
"getattr",
"(",
"client",
",",
"name",
")",
"if",
"not",
"callable",
"(",
"obj",
")",
":",
"continue",
"log",
".",
"debug",
"(",
"\"Wrapping %s\"",
",",
"name",
")",
"setattr",
"(",
"client",
",",
"name",
",",
"wrap",
"(",
"obj",
")",
")"
] |
Create GSSHAPY Mapping Table ORM Objects Method
|
def _createGsshaPyObjects ( self , mapTables , indexMaps , replaceParamFile , directory , session , spatial , spatialReferenceID ) : for mt in mapTables : # Create GSSHAPY MapTable object try : # Make sure the index map name listed with the map table is in the list of # index maps read from the top of the mapping table file (Note that the index maps for the sediment # and contaminant tables will have names of None, so we skip these cases. if mt [ 'indexMapName' ] is not None : indexMaps [ mt [ 'indexMapName' ] ] mapTable = MapTable ( name = mt [ 'name' ] , numIDs = mt [ 'numVars' ] [ 'NUM_IDS' ] , maxNumCells = mt [ 'numVars' ] [ 'MAX_NUMBER_CELLS' ] , numSed = mt [ 'numVars' ] . get ( 'NUM_SED' ) , numContam = mt [ 'numVars' ] . get ( 'NUM_CONTAM' ) , maxSoilID = mt [ 'numVars' ] . get ( 'MAX_SOIL_ID' ) ) # Associate MapTable with this MapTableFile and IndexMaps mapTable . mapTableFile = self ## NOTE: Index maps are associated wth contaminants for CONTAMINANT_TRANSPORT map ## tables. The SEDIMENTS map table are associated with index maps via the ## SOIL_EROSION_PROPS map table. if mt [ 'indexMapName' ] : mapTable . indexMap = indexMaps [ mt [ 'indexMapName' ] ] # CONTAMINANT_TRANSPORT map table handler if mt [ 'name' ] == 'CONTAMINANT_TRANSPORT' : for contam in mt [ 'contaminants' ] : # Preprocess the contaminant output paths to be relative outputBaseFilename = self . _preprocessContaminantOutFilePath ( contam [ 'outPath' ] ) # Initialize GSSHAPY MTContaminant object contaminant = MTContaminant ( name = contam [ 'name' ] , outputFilename = outputBaseFilename , precipConc = vrp ( contam [ 'contamVars' ] [ 'PRECIP_CONC' ] , replaceParamFile ) , partition = vrp ( contam [ 'contamVars' ] [ 'PARTITION' ] , replaceParamFile ) , numIDs = contam [ 'contamVars' ] [ 'NUM_IDS' ] ) # Associate MTContaminant with appropriate IndexMap indexMap = indexMaps [ contam [ 'indexMapName' ] ] contaminant . indexMap = indexMap self . 
_createValueObjects ( contam [ 'valueList' ] , contam [ 'varList' ] , mapTable , indexMap , contaminant , replaceParamFile ) # Read any output files if they are present self . _readContaminantOutputFiles ( directory , outputBaseFilename , session , spatial , spatialReferenceID ) # SEDIMENTS map table handler elif mt [ 'name' ] == 'SEDIMENTS' : for line in mt [ 'valueList' ] : # Create GSSHAPY MTSediment object sediment = MTSediment ( description = line [ 0 ] , specificGravity = vrp ( line [ 1 ] , replaceParamFile ) , particleDiameter = vrp ( line [ 2 ] , replaceParamFile ) , outputFilename = line [ 3 ] ) # Associate the MTSediment with the MapTable sediment . mapTable = mapTable # All other map table handler else : indexMap = indexMaps [ mt [ 'indexMapName' ] ] # Create MTValue and MTIndex objects self . _createValueObjects ( mt [ 'valueList' ] , mt [ 'varList' ] , mapTable , indexMap , None , replaceParamFile ) except KeyError : log . info ( ( 'Index Map "%s" for Mapping Table "%s" not found in list of index maps in the mapping ' 'table file. The Mapping Table was not read into the database.' ) % ( mt [ 'indexMapName' ] , mt [ 'name' ] ) )
| 4,727
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L226-L301
|
[
"def",
"load_stats",
"(",
"self",
",",
"cache",
"=",
"None",
",",
"wait",
"=",
"None",
")",
":",
"if",
"cache",
"is",
"None",
":",
"cache",
"=",
"not",
"self",
".",
"debug",
"if",
"wait",
"is",
"None",
":",
"wait",
"=",
"self",
".",
"debug",
"if",
"not",
"cache",
"or",
"self",
".",
"_stats",
"is",
"None",
":",
"self",
".",
"_stats",
"=",
"self",
".",
"_load_stats",
"(",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"while",
"wait",
"and",
"self",
".",
"_stats",
".",
"get",
"(",
"'status'",
")",
"==",
"'compiling'",
":",
"if",
"self",
".",
"timeout",
"and",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
">",
"self",
".",
"timeout",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Webpack {0!r} timed out while compiling\"",
".",
"format",
"(",
"self",
".",
"stats_file",
".",
"path",
")",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"self",
".",
"_stats",
"=",
"self",
".",
"_load_stats",
"(",
")",
"return",
"self",
".",
"_stats"
] |
Populate GSSHAPY MTValue and MTIndex Objects Method
|
def _createValueObjects ( self , valueList , varList , mapTable , indexMap , contaminant , replaceParamFile ) : def assign_values_to_table ( value_list , layer_id ) : for i , value in enumerate ( value_list ) : value = vrp ( value , replaceParamFile ) # Create MTValue object and associate with MTIndex and MapTable mtValue = MTValue ( variable = varList [ i ] , value = float ( value ) ) mtValue . index = mtIndex mtValue . mapTable = mapTable mtValue . layer_id = layer_id # MTContaminant handler (associate MTValue with MTContaminant) if contaminant : mtValue . contaminant = contaminant for row in valueList : # Create GSSHAPY MTIndex object and associate with IndexMap mtIndex = MTIndex ( index = row [ 'index' ] , description1 = row [ 'description1' ] , description2 = row [ 'description2' ] ) mtIndex . indexMap = indexMap if len ( np . shape ( row [ 'values' ] ) ) == 2 : # this is for ids with multiple layers for layer_id , values in enumerate ( row [ 'values' ] ) : assign_values_to_table ( values , layer_id ) else : assign_values_to_table ( row [ 'values' ] , 0 )
| 4,728
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L303-L329
|
[
"def",
"create_atomic_wrapper",
"(",
"cls",
",",
"wrapped_func",
")",
":",
"def",
"_create_atomic_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Actual wrapper.\"\"\"",
"# When a view call fails due to a permissions error, it raises an exception.",
"# An uncaught exception breaks the DB transaction for any following DB operations",
"# unless it's wrapped in a atomic() decorator or context manager.",
"with",
"transaction",
".",
"atomic",
"(",
")",
":",
"return",
"wrapped_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_create_atomic_wrapper"
] |
Read any contaminant output files if available
|
def _readContaminantOutputFiles ( self , directory , baseFileName , session , spatial , spatialReferenceID ) : if not os . path . isdir ( directory ) : return if baseFileName == '' : return # Look for channel output files denoted by the ".chan" after the base filename chanBaseFileName = '.' . join ( [ baseFileName , 'chan' ] ) # Get contents of directory directoryList = os . listdir ( directory ) # Compile a list of files with "basename.chan" in them chanFiles = [ ] for thing in directoryList : if chanBaseFileName in thing : chanFiles . append ( thing ) # Assume all "chan" files are link node dataset files and try to read them for chanFile in chanFiles : linkNodeDatasetFile = LinkNodeDatasetFile ( ) linkNodeDatasetFile . projectFile = self . projectFile try : linkNodeDatasetFile . read ( directory = directory , filename = chanFile , session = session , spatial = spatial , spatialReferenceID = spatialReferenceID ) except : log . warning ( 'Attempted to read Contaminant Transport Output file {0}, but failed.' . format ( chanFile ) )
| 4,729
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L331-L364
|
[
"def",
"_ValidateValue",
"(",
"value",
",",
"type_check",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"type_check",
")",
":",
"return",
"isinstance",
"(",
"value",
",",
"type_check",
")",
"if",
"isinstance",
"(",
"type_check",
",",
"tuple",
")",
":",
"return",
"_ValidateTuple",
"(",
"value",
",",
"type_check",
")",
"elif",
"callable",
"(",
"type_check",
")",
":",
"return",
"type_check",
"(",
"value",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Invalid type check '%s'\"",
"%",
"repr",
"(",
"type_check",
")",
")"
] |
Write Generic Map Table Method
|
def _writeMapTable ( self , session , fileObject , mapTable , replaceParamFile ) : # Write mapping name fileObject . write ( '%s "%s"\n' % ( mapTable . name , mapTable . indexMap . name ) ) # Write mapping table global variables if mapTable . numIDs : fileObject . write ( 'NUM_IDS %s\n' % ( mapTable . numIDs ) ) if mapTable . maxNumCells : fileObject . write ( 'MAX_NUMBER_CELLS %s\n' % ( mapTable . maxNumCells ) ) if mapTable . numSed : fileObject . write ( 'NUM_SED %s\n' % ( mapTable . numSed ) ) if mapTable . maxSoilID : fileObject . write ( 'MAX_SOIL_ID %s\n' % ( mapTable . maxSoilID ) ) # Write value lines from the database self . _writeValues ( session , fileObject , mapTable , None , replaceParamFile )
| 4,730
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L366-L395
|
[
"def",
"get_changed_devices",
"(",
"self",
",",
"timestamp",
")",
":",
"if",
"timestamp",
"is",
"None",
":",
"payload",
"=",
"{",
"}",
"else",
":",
"payload",
"=",
"{",
"'timeout'",
":",
"SUBSCRIPTION_WAIT",
",",
"'minimumdelay'",
":",
"SUBSCRIPTION_MIN_WAIT",
"}",
"payload",
".",
"update",
"(",
"timestamp",
")",
"# double the timeout here so requests doesn't timeout before vera",
"payload",
".",
"update",
"(",
"{",
"'id'",
":",
"'lu_sdata'",
",",
"}",
")",
"logger",
".",
"debug",
"(",
"\"get_changed_devices() requesting payload %s\"",
",",
"str",
"(",
"payload",
")",
")",
"r",
"=",
"self",
".",
"data_request",
"(",
"payload",
",",
"TIMEOUT",
"*",
"2",
")",
"r",
".",
"raise_for_status",
"(",
")",
"# If the Vera disconnects before writing a full response (as lu_sdata",
"# will do when interrupted by a Luup reload), the requests module will",
"# happily return 200 with an empty string. So, test for empty response,",
"# so we don't rely on the JSON parser to throw an exception.",
"if",
"r",
".",
"text",
"==",
"\"\"",
":",
"raise",
"PyveraError",
"(",
"\"Empty response from Vera\"",
")",
"# Catch a wide swath of what the JSON parser might throw, within",
"# reason. Unfortunately, some parsers don't specifically return",
"# json.decode.JSONDecodeError, but so far most seem to derive what",
"# they do throw from ValueError, so that's helpful.",
"try",
":",
"result",
"=",
"r",
".",
"json",
"(",
")",
"except",
"ValueError",
"as",
"ex",
":",
"raise",
"PyveraError",
"(",
"\"JSON decode error: \"",
"+",
"str",
"(",
"ex",
")",
")",
"if",
"not",
"(",
"type",
"(",
"result",
")",
"is",
"dict",
"and",
"'loadtime'",
"in",
"result",
"and",
"'dataversion'",
"in",
"result",
")",
":",
"raise",
"PyveraError",
"(",
"\"Unexpected/garbled response from Vera\"",
")",
"# At this point, all good. Update timestamp and return change data.",
"device_data",
"=",
"result",
".",
"get",
"(",
"'devices'",
")",
"timestamp",
"=",
"{",
"'loadtime'",
":",
"result",
".",
"get",
"(",
"'loadtime'",
")",
",",
"'dataversion'",
":",
"result",
".",
"get",
"(",
"'dataversion'",
")",
"}",
"return",
"[",
"device_data",
",",
"timestamp",
"]"
] |
This method writes the contaminant transport mapping table case .
|
def _writeContaminantTable ( self , session , fileObject , mapTable , contaminants , replaceParamFile ) : # Write the contaminant mapping table header fileObject . write ( '%s\n' % ( mapTable . name ) ) fileObject . write ( 'NUM_CONTAM %s\n' % ( mapTable . numContam ) ) # Write out each contaminant and it's values for contaminant in contaminants : fileObject . write ( '"%s" "%s" %s\n' % ( contaminant . name , contaminant . indexMap . name , contaminant . outputFilename ) ) # Add trailing zeros to values / replacement parameter precipConcString = vwp ( contaminant . precipConc , replaceParamFile ) partitionString = vwp ( contaminant . partition , replaceParamFile ) try : precipConc = '%.2f' % precipConcString except : precipConc = '%s' % precipConcString try : partition = '%.2f' % partitionString except : partition = '%s' % partitionString # Write global variables for the contaminant fileObject . write ( 'PRECIP_CONC%s%s\n' % ( ' ' * 10 , precipConc ) ) fileObject . write ( 'PARTITION%s%s\n' % ( ' ' * 12 , partition ) ) fileObject . write ( 'NUM_IDS %s\n' % contaminant . numIDs ) # Write value lines self . _writeValues ( session , fileObject , mapTable , contaminant , replaceParamFile )
| 4,731
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L398-L430
|
[
"def",
"refresh_devices",
"(",
"self",
")",
":",
"try",
":",
"response",
"=",
"self",
".",
"api",
".",
"get",
"(",
"\"/api/v2/devices\"",
",",
"{",
"'properties'",
":",
"'all'",
"}",
")",
"for",
"device_data",
"in",
"response",
"[",
"'DeviceList'",
"]",
":",
"self",
".",
"devices",
".",
"append",
"(",
"Device",
"(",
"device_data",
",",
"self",
")",
")",
"except",
"APIError",
"as",
"e",
":",
"print",
"(",
"\"API error: \"",
")",
"for",
"key",
",",
"value",
"in",
"e",
".",
"data",
".",
"iteritems",
":",
"print",
"(",
"str",
"(",
"key",
")",
"+",
"\": \"",
"+",
"str",
"(",
"value",
")",
")"
] |
Write Sediment Mapping Table Method
|
def _writeSedimentTable ( self , session , fileObject , mapTable , replaceParamFile ) : # Write the sediment mapping table header fileObject . write ( '%s\n' % ( mapTable . name ) ) fileObject . write ( 'NUM_SED %s\n' % ( mapTable . numSed ) ) # Write the value header line fileObject . write ( 'Sediment Description%sSpec. Grav%sPart. Dia%sOutput Filename\n' % ( ' ' * 22 , ' ' * 3 , ' ' * 5 ) ) # Retrive the sediment mapping table values sediments = session . query ( MTSediment ) . filter ( MTSediment . mapTable == mapTable ) . order_by ( MTSediment . id ) . all ( ) # Write sediments out to file for sediment in sediments : # Determine spacing for aesthetics space1 = 42 - len ( sediment . description ) # Pad values with zeros / Get replacement variable specGravString = vwp ( sediment . specificGravity , replaceParamFile ) partDiamString = vwp ( sediment . particleDiameter , replaceParamFile ) try : specGrav = '%.6f' % specGravString except : specGrav = '%s' % specGravString try : partDiam = '%.6f' % partDiamString except : partDiam = '%s' % partDiamString fileObject . write ( '%s%s%s%s%s%s%s\n' % ( sediment . description , ' ' * space1 , specGrav , ' ' * 5 , partDiam , ' ' * 6 , sediment . outputFilename ) )
| 4,732
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L433-L475
|
[
"def",
"stream",
"(",
"command",
",",
"stdin",
"=",
"None",
",",
"env",
"=",
"os",
".",
"environ",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"command",
",",
"list",
")",
":",
"command",
"=",
"shlex",
".",
"split",
"(",
"command",
")",
"cmd",
"=",
"which",
"(",
"command",
"[",
"0",
"]",
")",
"if",
"cmd",
"is",
"None",
":",
"path",
"=",
"env",
".",
"get",
"(",
"\"PATH\"",
",",
"\"\"",
")",
"raise",
"Exception",
"(",
"\"Command [%s] not in PATH [%s]\"",
"%",
"(",
"command",
"[",
"0",
"]",
",",
"path",
")",
")",
"command",
"[",
"0",
"]",
"=",
"cmd",
"if",
"timeout",
":",
"if",
"not",
"timeout_command",
"[",
"0",
"]",
":",
"raise",
"Exception",
"(",
"\"Timeout specified but timeout command not available.\"",
")",
"command",
"=",
"timeout_command",
"+",
"[",
"str",
"(",
"timeout",
")",
"]",
"+",
"command",
"output",
"=",
"None",
"try",
":",
"output",
"=",
"Popen",
"(",
"command",
",",
"env",
"=",
"env",
",",
"stdin",
"=",
"stdin",
",",
"*",
"*",
"stream_options",
")",
"yield",
"output",
".",
"stdout",
"finally",
":",
"if",
"output",
":",
"output",
".",
"wait",
"(",
")"
] |
This function retrieves the values of a mapping table from the database and pivots them into the format that is required by the mapping table file . This function returns a list of strings that can be printed to the file directly .
|
def _valuePivot ( self , session , mapTable , contaminant , replaceParaFile ) : # Retrieve the indices for the current mapping table and mapping table file indexes = session . query ( MTIndex ) . join ( MTValue . index ) . filter ( MTValue . mapTable == mapTable ) . filter ( MTValue . contaminant == contaminant ) . order_by ( MTIndex . index ) . all ( ) # determine number of layers layer_indices = [ 0 ] if mapTable . name in ( 'MULTI_LAYER_SOIL' , 'RICHARDS_EQN_INFILTRATION_BROOKS' ) : layer_indices = range ( 3 ) # ---------------------------------------- # Construct each line in the mapping table #----------------------------------------- # All lines will be compiled into this list lines = [ ] values = { } for idx in indexes : for layer_index in layer_indices : # Retrieve values for the current index values = session . query ( MTValue ) . filter ( MTValue . mapTable == mapTable ) . filter ( MTValue . contaminant == contaminant ) . filter ( MTValue . index == idx ) . filter ( MTValue . layer_id == layer_index ) . order_by ( MTValue . id ) . all ( ) # NOTE: The second order_by modifier in the query above handles the special ordering of XSEDIMENT columns # in soil erosion properties table (i.e. these columns must be in the same order as the sediments in the # sediments table. Accomplished by using the sedimentID field). Similarly, the contaminant filter is only # used in the case of the contaminant transport table. Values that don't belong to a contaminant will have # a contaminant attribute equal to None. Compare usage of this function by _writeMapTable and # _writeContaminant. #Value string valString = '' # Define valString for val in values : if val . value <= - 9999 : continue # Format value with trailing zeros up to 6 digits processedValue = vwp ( val . 
value , replaceParaFile ) try : numString = '%.6f' % processedValue except : numString = '%s' % processedValue valString = '%s%s%s' % ( valString , numString , ' ' * 3 ) # Determine spacing for aesthetics (so each column lines up) spacing1 = max ( 1 , 6 - len ( str ( idx . index ) ) ) spacing2 = max ( 1 , 40 - len ( idx . description1 ) ) spacing3 = max ( 1 , 40 - len ( idx . description2 ) ) # Compile each mapping table line if layer_index == 0 : line = '%s%s%s%s%s%s%s\n' % ( idx . index , ' ' * spacing1 , idx . description1 , ' ' * spacing2 , idx . description2 , ' ' * spacing3 , valString ) else : num_prepend_spaces = len ( str ( idx . index ) ) + spacing1 + len ( idx . description1 ) + spacing2 + len ( idx . description2 ) + spacing3 line = '{0}{1}\n' . format ( ' ' * num_prepend_spaces , valString ) # Compile each lines into a list lines . append ( line ) #----------------------------- # Define the value header line #----------------------------- # Define varString for the header line varString = '' # Compile list of variables (from MTValue object list) into a single string of variables for idx , val in enumerate ( values ) : if val . variable == 'XSEDIMENT' : # Special case for XSEDIMENT variable if idx >= len ( values ) - 1 : varString = '%s%s%s%s' % ( varString , mapTable . numSed , ' SEDIMENTS....' , ' ' * 2 ) else : varString = '%s%s%s' % ( varString , val . variable , ' ' * 2 ) # Compile the mapping table header header = 'ID%sDESCRIPTION1%sDESCRIPTION2%s%s\n' % ( ' ' * 4 , ' ' * 28 , ' ' * 28 , varString ) # Prepend the header line to the list of lines lines . insert ( 0 , header ) # Return the list of lines return lines
| 4,733
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L477-L576
|
[
"def",
"validate_experimental",
"(",
"context",
",",
"param",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"config",
"=",
"ExperimentConfiguration",
"(",
"value",
")",
"config",
".",
"validate",
"(",
")",
"return",
"config"
] |
Preprocess the contaminant output file path to a relative path .
|
def _preprocessContaminantOutFilePath ( outPath ) : if '/' in outPath : splitPath = outPath . split ( '/' ) elif '\\' in outPath : splitPath = outPath . split ( '\\' ) else : splitPath = [ outPath , ] if splitPath [ - 1 ] == '' : outputFilename = splitPath [ - 2 ] else : outputFilename = splitPath [ - 1 ] if '.' in outputFilename : outputFilename = outputFilename . split ( '.' ) [ 0 ] return outputFilename
| 4,734
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L587-L609
|
[
"def",
"toner_status",
"(",
"self",
",",
"filter_supported",
":",
"bool",
"=",
"True",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"toner_status",
"=",
"{",
"}",
"for",
"color",
"in",
"self",
".",
"COLOR_NAMES",
":",
"try",
":",
"toner_stat",
"=",
"self",
".",
"data",
".",
"get",
"(",
"'{}_{}'",
".",
"format",
"(",
"SyncThru",
".",
"TONER",
",",
"color",
")",
",",
"{",
"}",
")",
"if",
"filter_supported",
"and",
"toner_stat",
".",
"get",
"(",
"'opt'",
",",
"0",
")",
"==",
"0",
":",
"continue",
"else",
":",
"toner_status",
"[",
"color",
"]",
"=",
"toner_stat",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"toner_status",
"[",
"color",
"]",
"=",
"{",
"}",
"return",
"toner_status"
] |
Adds a roughness map from land use file
|
def addRoughnessMapFromLandUse ( self , name , session , land_use_grid , land_use_to_roughness_table = None , land_use_grid_id = None , ) : LAND_USE_GRID_TABLES = { 'nga' : 'land_cover_nga.txt' , 'glcf' : 'land_cover_glcf_modis.txt' , 'nlcd' : 'land_cover_nlcd.txt' , } # read in table if isinstance ( land_use_to_roughness_table , pd . DataFrame ) : df = land_use_to_roughness_table else : if land_use_to_roughness_table is None : if land_use_grid_id is None : raise ValueError ( "Must have land_use_to_roughness_table or land_use_grid_id set ..." ) land_use_to_roughness_table = os . path . join ( os . path . dirname ( os . path . realpath ( __file__ ) ) , '..' , 'grid' , 'land_cover' , LAND_USE_GRID_TABLES [ land_use_grid_id ] ) # make sure paths are absolute as the working directory changes land_use_to_roughness_table = os . path . abspath ( land_use_to_roughness_table ) df = pd . read_table ( land_use_to_roughness_table , delim_whitespace = True , header = None , skiprows = 1 , names = ( 'id' , 'description' , 'roughness' ) , dtype = { 'id' : 'int' , 'description' : 'str' , 'roughness' : 'float' } , ) # make sure paths are absolute as the working directory changes land_use_grid = os . path . abspath ( land_use_grid ) # resample land use grid to gssha grid land_use_resampled = resample_grid ( land_use_grid , self . projectFile . getGrid ( ) , resample_method = gdalconst . GRA_NearestNeighbour , as_gdal_grid = True ) unique_land_use_ids = np . unique ( land_use_resampled . np_array ( ) ) # only add ids in index map subset df = df [ df . id . isin ( unique_land_use_ids ) ] # make sure all needed land use IDs exist for land_use_id in unique_land_use_ids : if land_use_id not in df . id . values : raise IndexError ( "Land use ID {0} not found in table." . format ( land_use_id ) ) # delete duplicate/old tables with same name if they exist self . deleteMapTable ( "ROUGHNESS" , session ) # get num ids mapTable = MapTable ( name = "ROUGHNESS" , numIDs = len ( df . 
index ) , maxNumCells = 0 , numSed = 0 , numContam = 0 ) # Create GSSHAPY IndexMap object from result object indexMap = IndexMap ( name = name ) indexMap . mapTableFile = self mapTable . indexMap = indexMap # Associate MapTable with this MapTableFile and IndexMaps mapTable . mapTableFile = self # add values to table for row in df . itertuples ( ) : idx = MTIndex ( str ( row . id ) , row . description , '' ) idx . indexMap = indexMap val = MTValue ( 'ROUGH' , row . roughness ) val . index = idx val . mapTable = mapTable # remove MANNING_N card becasue it is mutually exclusive manningn_card = self . projectFile . getCard ( 'MANNING_N' ) if manningn_card : session . delete ( manningn_card ) session . commit ( ) mapTable . indexMap . filename = '{0}.idx' . format ( name ) # write file with tmp_chdir ( self . projectFile . project_directory ) : land_use_resampled . to_grass_ascii ( mapTable . indexMap . filename , print_nodata = False ) # update project card if not self . projectFile . getCard ( 'MAPPING_TABLE' ) : self . projectFile . setCard ( 'MAPPING_TABLE' , '{0}.cmt' . format ( self . projectFile . name ) , add_quotes = True )
| 4,735
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L611-L744
|
[
"def",
"FromJson",
"(",
"json",
")",
":",
"type",
"=",
"ContractParameterType",
".",
"FromString",
"(",
"json",
"[",
"'type'",
"]",
")",
"value",
"=",
"json",
"[",
"'value'",
"]",
"param",
"=",
"ContractParameter",
"(",
"type",
"=",
"type",
",",
"value",
"=",
"None",
")",
"if",
"type",
"==",
"ContractParameterType",
".",
"Signature",
"or",
"type",
"==",
"ContractParameterType",
".",
"ByteArray",
":",
"param",
".",
"Value",
"=",
"bytearray",
".",
"fromhex",
"(",
"value",
")",
"elif",
"type",
"==",
"ContractParameterType",
".",
"Boolean",
":",
"param",
".",
"Value",
"=",
"bool",
"(",
"value",
")",
"elif",
"type",
"==",
"ContractParameterType",
".",
"Integer",
":",
"param",
".",
"Value",
"=",
"int",
"(",
"value",
")",
"elif",
"type",
"==",
"ContractParameterType",
".",
"Hash160",
":",
"param",
".",
"Value",
"=",
"UInt160",
".",
"ParseString",
"(",
"value",
")",
"elif",
"type",
"==",
"ContractParameterType",
".",
"Hash256",
":",
"param",
".",
"Value",
"=",
"UInt256",
".",
"ParseString",
"(",
"value",
")",
"# @TODO Not sure if this is working...",
"elif",
"type",
"==",
"ContractParameterType",
".",
"PublicKey",
":",
"param",
".",
"Value",
"=",
"ECDSA",
".",
"decode_secp256r1",
"(",
"value",
")",
".",
"G",
"elif",
"type",
"==",
"ContractParameterType",
".",
"String",
":",
"param",
".",
"Value",
"=",
"str",
"(",
"value",
")",
"elif",
"type",
"==",
"ContractParameterType",
".",
"Array",
":",
"val",
"=",
"[",
"ContractParameter",
".",
"FromJson",
"(",
"item",
")",
"for",
"item",
"in",
"value",
"]",
"param",
".",
"Value",
"=",
"val",
"return",
"param"
] |
Update wildcards set with the input data nodes that are also outputs .
|
def _set_wildcards ( self , inputs = None , outputs = None ) : w = self . _wildcards = set ( ) # Clean wildcards. if outputs and inputs : node , wi = self . nodes , self . _wait_in . get # Namespace shortcut. # Input data nodes that are in output_targets. w_crd = { u : node [ u ] for u in inputs if u in outputs or wi ( u , False ) } # Data nodes without the wildcard. w . update ( [ k for k , v in w_crd . items ( ) if v . get ( 'wildcard' , True ) ] )
| 4,736
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L108-L130
|
[
"def",
"disconnect",
"(",
"service_instance",
")",
":",
"log",
".",
"trace",
"(",
"'Disconnecting'",
")",
"try",
":",
"Disconnect",
"(",
"service_instance",
")",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"'Not enough permissions. Required privilege: '",
"'{}'",
".",
"format",
"(",
"exc",
".",
"privilegeId",
")",
")",
"except",
"vim",
".",
"fault",
".",
"VimFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"exc",
".",
"msg",
")",
"except",
"vmodl",
".",
"RuntimeFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareRuntimeError",
"(",
"exc",
".",
"msg",
")"
] |
Set all asynchronous results .
|
def result ( self , timeout = None ) : it , exceptions , future_lists = [ ] , [ ] , [ ] from concurrent . futures import Future , wait as wait_fut def update ( fut , data , key ) : if isinstance ( fut , Future ) : it . append ( ( fut , data , key ) ) elif isinstance ( fut , AsyncList ) and fut not in future_lists : future_lists . append ( fut ) it . extend ( [ ( j , fut , i ) for i , j in enumerate ( fut ) if isinstance ( j , Future ) ] [ : : - 1 ] ) for s in self . sub_sol . values ( ) : for k , v in list ( s . items ( ) ) : update ( v , s , k ) for d in s . workflow . nodes . values ( ) : if 'results' in d : update ( d [ 'results' ] , d , 'results' ) for d in s . workflow . edges . values ( ) : if 'value' in d : update ( d [ 'value' ] , d , 'value' ) wait_fut ( { v [ 0 ] for v in it } , timeout ) for f , d , k in it : try : d [ k ] = await_result ( f , 0 ) except SkipNode as e : exceptions . append ( ( f , d , k , e . ex ) ) del d [ k ] except ( Exception , ExecutorShutdown , DispatcherAbort ) as ex : exceptions . append ( ( f , d , k , ex ) ) del d [ k ] if exceptions : raise exceptions [ 0 ] [ - 1 ] return self
| 4,737
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L194-L245
|
[
"def",
"delete_group",
"(",
"group_id",
",",
"purge_data",
",",
"*",
"*",
"kwargs",
")",
":",
"user_id",
"=",
"kwargs",
".",
"get",
"(",
"'user_id'",
")",
"try",
":",
"group_i",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"ResourceGroup",
")",
".",
"filter",
"(",
"ResourceGroup",
".",
"id",
"==",
"group_id",
")",
".",
"one",
"(",
")",
"except",
"NoResultFound",
":",
"raise",
"ResourceNotFoundError",
"(",
"\"Group %s not found\"",
"%",
"(",
"group_id",
")",
")",
"group_items",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"ResourceGroupItem",
")",
".",
"filter",
"(",
"ResourceGroupItem",
".",
"group_id",
"==",
"group_id",
")",
".",
"all",
"(",
")",
"for",
"gi",
"in",
"group_items",
":",
"db",
".",
"DBSession",
".",
"delete",
"(",
"gi",
")",
"if",
"purge_data",
"==",
"'Y'",
":",
"_purge_datasets_unique_to_resource",
"(",
"'GROUP'",
",",
"group_id",
")",
"log",
".",
"info",
"(",
"\"Deleting group %s, id=%s\"",
",",
"group_i",
".",
"name",
",",
"group_id",
")",
"group_i",
".",
"network",
".",
"check_write_permission",
"(",
"user_id",
")",
"db",
".",
"DBSession",
".",
"delete",
"(",
"group_i",
")",
"db",
".",
"DBSession",
".",
"flush",
"(",
")"
] |
Returns a function to terminate the ArciDispatch algorithm when all targets have been visited .
|
def _check_targets ( self ) : if self . outputs : targets = self . outputs . copy ( ) # Namespace shortcut for speed. def check_targets ( node_id ) : """
Terminates ArciDispatch algorithm when all targets have been
visited.
:param node_id:
Data or function node id.
:type node_id: str
:return:
True if all targets have been visited, otherwise False.
:rtype: bool
""" try : targets . remove ( node_id ) # Remove visited node. return not targets # If no targets terminate the algorithm. except KeyError : # The node is not in the targets set. return False else : # noinspection PyUnusedLocal def check_targets ( node_id ) : return False return check_targets
| 4,738
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L375-L413
|
[
"def",
"ensure_compatible_admin",
"(",
"view",
")",
":",
"def",
"wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"user_roles",
"=",
"request",
".",
"user",
".",
"user_data",
".",
"get",
"(",
"'roles'",
",",
"[",
"]",
")",
"if",
"len",
"(",
"user_roles",
")",
"!=",
"1",
":",
"context",
"=",
"{",
"'message'",
":",
"'I need to be able to manage user accounts. '",
"'My username is %s'",
"%",
"request",
".",
"user",
".",
"username",
"}",
"return",
"render",
"(",
"request",
",",
"'mtp_common/user_admin/incompatible-admin.html'",
",",
"context",
"=",
"context",
")",
"return",
"view",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] |
Returns the data nodes estimations and wait_inputs flag .
|
def _get_node_estimations ( self , node_attr , node_id ) : # Get data node estimations. estimations = self . _wf_pred [ node_id ] wait_in = node_attr [ 'wait_inputs' ] # Namespace shortcut. # Check if node has multiple estimations and it is not waiting inputs. if len ( estimations ) > 1 and not self . _wait_in . get ( node_id , wait_in ) : # Namespace shortcuts. dist , edg_length , adj = self . dist , self . _edge_length , self . dmap . adj est = [ ] # Estimations' heap. for k , v in estimations . items ( ) : # Calculate length. if k is not START : d = dist [ k ] + edg_length ( adj [ k ] [ node_id ] , node_attr ) heapq . heappush ( est , ( d , k , v ) ) # The estimation with minimum distance from the starting node. estimations = { est [ 0 ] [ 1 ] : est [ 0 ] [ 2 ] } # Remove unused workflow edges. self . workflow . remove_edges_from ( [ ( v [ 1 ] , node_id ) for v in est [ 1 : ] ] ) return estimations , wait_in
| 4,739
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L498-L540
|
[
"def",
"render_category",
"(",
"category",
"=",
"''",
",",
"template",
"=",
"None",
")",
":",
"# pylint:disable=too-many-return-statements",
"# See if this is an aliased path",
"redir",
"=",
"get_redirect",
"(",
")",
"if",
"redir",
":",
"return",
"redir",
"# Forbidden template types",
"if",
"template",
"and",
"template",
".",
"startswith",
"(",
"'_'",
")",
":",
"raise",
"http_error",
".",
"Forbidden",
"(",
"\"Template is private\"",
")",
"if",
"template",
"in",
"[",
"'entry'",
",",
"'error'",
"]",
":",
"raise",
"http_error",
".",
"BadRequest",
"(",
"\"Invalid view requested\"",
")",
"if",
"category",
":",
"# See if there's any entries for the view...",
"if",
"not",
"orm",
".",
"select",
"(",
"e",
"for",
"e",
"in",
"model",
".",
"Entry",
"if",
"e",
".",
"category",
"==",
"category",
"or",
"e",
".",
"category",
".",
"startswith",
"(",
"category",
"+",
"'/'",
")",
")",
":",
"raise",
"http_error",
".",
"NotFound",
"(",
"\"No such category\"",
")",
"if",
"not",
"template",
":",
"template",
"=",
"Category",
"(",
"category",
")",
".",
"get",
"(",
"'Index-Template'",
")",
"or",
"'index'",
"tmpl",
"=",
"map_template",
"(",
"category",
",",
"template",
")",
"if",
"not",
"tmpl",
":",
"# this might actually be a malformed category URL",
"test_path",
"=",
"'/'",
".",
"join",
"(",
"(",
"category",
",",
"template",
")",
")",
"if",
"category",
"else",
"template",
"logger",
".",
"debug",
"(",
"\"Checking for malformed category %s\"",
",",
"test_path",
")",
"record",
"=",
"orm",
".",
"select",
"(",
"e",
"for",
"e",
"in",
"model",
".",
"Entry",
"if",
"e",
".",
"category",
"==",
"test_path",
")",
".",
"exists",
"(",
")",
"if",
"record",
":",
"return",
"redirect",
"(",
"url_for",
"(",
"'category'",
",",
"category",
"=",
"test_path",
",",
"*",
"*",
"request",
".",
"args",
")",
")",
"# nope, we just don't know what this is",
"raise",
"http_error",
".",
"NotFound",
"(",
"\"No such view\"",
")",
"view_spec",
"=",
"view",
".",
"parse_view_spec",
"(",
"request",
".",
"args",
")",
"view_spec",
"[",
"'category'",
"]",
"=",
"category",
"view_obj",
"=",
"view",
".",
"View",
"(",
"view_spec",
")",
"rendered",
",",
"etag",
"=",
"render_publ_template",
"(",
"tmpl",
",",
"_url_root",
"=",
"request",
".",
"url_root",
",",
"category",
"=",
"Category",
"(",
"category",
")",
",",
"view",
"=",
"view_obj",
")",
"if",
"request",
".",
"if_none_match",
".",
"contains",
"(",
"etag",
")",
":",
"return",
"'Not modified'",
",",
"304",
"return",
"rendered",
",",
"{",
"'Content-Type'",
":",
"mime_type",
"(",
"tmpl",
")",
",",
"'ETag'",
":",
"etag",
"}"
] |
Set the node outputs from node inputs .
|
def _set_node_output ( self , node_id , no_call , next_nds = None , * * kw ) : # Namespace shortcuts. node_attr = self . nodes [ node_id ] node_type = node_attr [ 'type' ] if node_type == 'data' : # Set data node. return self . _set_data_node_output ( node_id , node_attr , no_call , next_nds , * * kw ) elif node_type == 'function' : # Set function node. return self . _set_function_node_output ( node_id , node_attr , no_call , next_nds , * * kw )
| 4,740
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L554-L581
|
[
"def",
"update",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_md",
".",
"update",
"(",
"data",
")",
"bufpos",
"=",
"self",
".",
"_nbytes",
"&",
"63",
"self",
".",
"_nbytes",
"+=",
"len",
"(",
"data",
")",
"if",
"self",
".",
"_rarbug",
"and",
"len",
"(",
"data",
")",
">",
"64",
":",
"dpos",
"=",
"self",
".",
"block_size",
"-",
"bufpos",
"while",
"dpos",
"+",
"self",
".",
"block_size",
"<=",
"len",
"(",
"data",
")",
":",
"self",
".",
"_corrupt",
"(",
"data",
",",
"dpos",
")",
"dpos",
"+=",
"self",
".",
"block_size"
] |
Set the data node output from node estimations .
|
def _set_data_node_output ( self , node_id , node_attr , no_call , next_nds = None , * * kw ) : # Get data node estimations. est , wait_in = self . _get_node_estimations ( node_attr , node_id ) if not no_call : if node_id is PLOT : est = est . copy ( ) est [ PLOT ] = { 'value' : { 'obj' : self } } sf , args = False , ( { k : v [ 'value' ] for k , v in est . items ( ) } , ) if not ( wait_in or 'function' in node_attr ) : # Data node that has just one estimation value. sf , args = True , tuple ( args [ 0 ] . values ( ) ) try : # Final estimation of the node and node status. value = async_thread ( self , args , node_attr , node_id , sf , * * kw ) except SkipNode : return False if value is not NONE : # Set data output. self [ node_id ] = value value = { 'value' : value } # Output value. else : self [ node_id ] = NONE # Set data output. value = { } # Output value. if next_nds : # namespace shortcuts for speed. wf_add_edge = self . _wf_add_edge for u in next_nds : # Set workflow. wf_add_edge ( node_id , u , * * value ) else : # namespace shortcuts for speed. n , has , sub_sol = self . nodes , self . workflow . has_edge , self . sub_sol def no_visited_in_sub_dsp ( i ) : node = n [ i ] if node [ 'type' ] == 'dispatcher' and has ( i , node_id ) : visited = sub_sol [ self . index + node [ 'index' ] ] . _visited return node [ 'inputs' ] [ node_id ] not in visited return True # List of functions. succ_fun = [ u for u in self . _succ [ node_id ] if no_visited_in_sub_dsp ( u ) ] # Check if it has functions as outputs and wildcard condition. if succ_fun and succ_fun [ 0 ] not in self . _visited : # namespace shortcuts for speed. wf_add_edge = self . _wf_add_edge for u in succ_fun : # Set workflow. wf_add_edge ( node_id , u , * * value ) return True
| 4,741
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L642-L721
|
[
"def",
"merged",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"name_",
"!=",
"other",
".",
"name_",
":",
"return",
"None",
"# cannot merge across object names",
"def",
"_r",
"(",
"r_",
")",
":",
"r",
"=",
"Requirement",
"(",
"None",
")",
"r",
".",
"name_",
"=",
"r_",
".",
"name_",
"r",
".",
"negate_",
"=",
"r_",
".",
"negate_",
"r",
".",
"conflict_",
"=",
"r_",
".",
"conflict_",
"r",
".",
"sep_",
"=",
"r_",
".",
"sep_",
"return",
"r",
"if",
"self",
".",
"range",
"is",
"None",
":",
"return",
"other",
"elif",
"other",
".",
"range",
"is",
"None",
":",
"return",
"self",
"elif",
"self",
".",
"conflict",
":",
"if",
"other",
".",
"conflict",
":",
"r",
"=",
"_r",
"(",
"self",
")",
"r",
".",
"range_",
"=",
"self",
".",
"range_",
"|",
"other",
".",
"range_",
"r",
".",
"negate_",
"=",
"(",
"self",
".",
"negate_",
"and",
"other",
".",
"negate_",
"and",
"not",
"r",
".",
"range_",
".",
"is_any",
"(",
")",
")",
"return",
"r",
"else",
":",
"range_",
"=",
"other",
".",
"range",
"-",
"self",
".",
"range",
"if",
"range_",
"is",
"None",
":",
"return",
"None",
"else",
":",
"r",
"=",
"_r",
"(",
"other",
")",
"r",
".",
"range_",
"=",
"range_",
"return",
"r",
"elif",
"other",
".",
"conflict",
":",
"range_",
"=",
"self",
".",
"range_",
"-",
"other",
".",
"range_",
"if",
"range_",
"is",
"None",
":",
"return",
"None",
"else",
":",
"r",
"=",
"_r",
"(",
"self",
")",
"r",
".",
"range_",
"=",
"range_",
"return",
"r",
"else",
":",
"range_",
"=",
"self",
".",
"range_",
"&",
"other",
".",
"range_",
"if",
"range_",
"is",
"None",
":",
"return",
"None",
"else",
":",
"r",
"=",
"_r",
"(",
"self",
")",
"r",
".",
"range_",
"=",
"range_",
"return",
"r"
] |
Set the function node output from node inputs .
|
def _set_function_node_output ( self , node_id , node_attr , no_call , next_nds = None , * * kw ) : # Namespace shortcuts for speed. o_nds , dist = node_attr [ 'outputs' ] , self . dist # List of nodes that can still be estimated by the function node. output_nodes = next_nds or set ( self . _succ [ node_id ] ) . difference ( dist ) if not output_nodes : # This function is not needed. self . workflow . remove_node ( node_id ) # Remove function node. return False wf_add_edge = self . _wf_add_edge # Namespace shortcuts for speed. if no_call : for u in output_nodes : # Set workflow out. wf_add_edge ( node_id , u ) return True args = self . _wf_pred [ node_id ] # List of the function's arguments. args = [ args [ k ] [ 'value' ] for k in node_attr [ 'inputs' ] ] try : self . _check_function_domain ( args , node_attr , node_id ) res = async_thread ( self , args , node_attr , node_id , * * kw ) # noinspection PyUnresolvedReferences self . workflow . node [ node_id ] [ 'results' ] = res except SkipNode : return False # Set workflow. for k , v in zip ( o_nds , res if len ( o_nds ) > 1 else [ res ] ) : if k in output_nodes and v is not NONE : wf_add_edge ( node_id , k , value = v ) return True
| 4,742
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L743-L798
|
[
"def",
"_add_section",
"(",
"self",
",",
"section",
")",
":",
"section",
".",
"rid",
"=",
"0",
"plen",
"=",
"0",
"while",
"self",
".",
"_merge",
"and",
"self",
".",
"_sections",
"and",
"plen",
"!=",
"len",
"(",
"self",
".",
"_sections",
")",
":",
"plen",
"=",
"len",
"(",
"self",
".",
"_sections",
")",
"self",
".",
"_sections",
"=",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"_sections",
"if",
"not",
"section",
".",
"join",
"(",
"s",
")",
"]",
"self",
".",
"_sections",
".",
"append",
"(",
"section",
")"
] |
Add initial values updating workflow seen and fringe .
|
def _add_initial_value ( self , data_id , value , initial_dist = 0.0 , fringe = None , check_cutoff = None , no_call = None ) : # Namespace shortcuts for speed. nodes , seen , edge_weight = self . nodes , self . seen , self . _edge_length wf_remove_edge , check_wait_in = self . _wf_remove_edge , self . check_wait_in wf_add_edge , dsp_in = self . _wf_add_edge , self . _set_sub_dsp_node_input update_view = self . _update_meeting if fringe is None : fringe = self . fringe if no_call is None : no_call = self . no_call check_cutoff = check_cutoff or self . check_cutoff if data_id not in nodes : # Data node is not in the dmap. return False wait_in = nodes [ data_id ] [ 'wait_inputs' ] # Store wait inputs flag. index = nodes [ data_id ] [ 'index' ] # Store node index. wf_add_edge ( START , data_id , * * value ) # Add edge. if data_id in self . _wildcards : # Check if the data node has wildcard. self . _visited . add ( data_id ) # Update visited nodes. self . workflow . add_node ( data_id ) # Add node to workflow. for w , edge_data in self . dmap [ data_id ] . items ( ) : # See func node. wf_add_edge ( data_id , w , * * value ) # Set workflow. node = nodes [ w ] # Node attributes. # Evaluate distance. vw_dist = initial_dist + edge_weight ( edge_data , node ) update_view ( w , vw_dist ) # Update view distance. # Check the cutoff limit and if all inputs are satisfied. if check_cutoff ( vw_dist ) : wf_remove_edge ( data_id , w ) # Remove workflow edge. continue # Pass the node. elif node [ 'type' ] == 'dispatcher' : dsp_in ( data_id , w , fringe , check_cutoff , no_call , vw_dist ) elif check_wait_in ( True , w ) : continue # Pass the node. seen [ w ] = vw_dist # Update distance. vd = ( True , w , self . index + node [ 'index' ] ) # Virtual distance. heapq . heappush ( fringe , ( vw_dist , vd , ( w , self ) ) ) # Add 2 heapq. return True update_view ( data_id , initial_dist ) # Update view distance. if check_cutoff ( initial_dist ) : # Check the cutoff limit. 
wf_remove_edge ( START , data_id ) # Remove workflow edge. elif not check_wait_in ( wait_in , data_id ) : # Check inputs. seen [ data_id ] = initial_dist # Update distance. vd = ( wait_in , data_id , self . index + index ) # Virtual distance. # Add node to heapq. heapq . heappush ( fringe , ( initial_dist , vd , ( data_id , self ) ) ) return True return False
| 4,743
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L800-L903
|
[
"def",
"CreateCustomizerFeedItems",
"(",
"client",
",",
"adgroup_ids",
",",
"ad_customizer_feed",
")",
":",
"# Get the FeedItemService",
"feed_item_service",
"=",
"client",
".",
"GetService",
"(",
"'FeedItemService'",
",",
"'v201809'",
")",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"mars_date",
"=",
"datetime",
"(",
"now",
".",
"year",
",",
"now",
".",
"month",
",",
"1",
",",
"0",
",",
"0",
")",
"venus_date",
"=",
"datetime",
"(",
"now",
".",
"year",
",",
"now",
".",
"month",
",",
"15",
",",
"0",
",",
"0",
")",
"time_format",
"=",
"'%Y%m%d %H%M%S'",
"feed_item_operations",
"=",
"[",
"CreateFeedItemAddOperation",
"(",
"'Mars'",
",",
"'$1234.56'",
",",
"mars_date",
".",
"strftime",
"(",
"time_format",
")",
",",
"ad_customizer_feed",
")",
",",
"CreateFeedItemAddOperation",
"(",
"'Venus'",
",",
"'$1450.00'",
",",
"venus_date",
".",
"strftime",
"(",
"time_format",
")",
",",
"ad_customizer_feed",
")",
"]",
"response",
"=",
"feed_item_service",
".",
"mutate",
"(",
"feed_item_operations",
")",
"if",
"'value'",
"in",
"response",
":",
"for",
"feed_item",
"in",
"response",
"[",
"'value'",
"]",
":",
"print",
"'Added FeedItem with ID %d.'",
"%",
"feed_item",
"[",
"'feedItemId'",
"]",
"else",
":",
"raise",
"errors",
".",
"GoogleAdsError",
"(",
"'No FeedItems were added.'",
")",
"for",
"feed_item",
",",
"adgroup_id",
"in",
"zip",
"(",
"response",
"[",
"'value'",
"]",
",",
"adgroup_ids",
")",
":",
"RestrictFeedItemToAdGroup",
"(",
"client",
",",
"feed_item",
",",
"adgroup_id",
")"
] |
Visits a node updating workflow seen and fringe ..
|
def _visit_nodes ( self , node_id , dist , fringe , check_cutoff , no_call = False , * * kw ) : # Namespace shortcuts. wf_rm_edge , wf_has_edge = self . _wf_remove_edge , self . workflow . has_edge edge_weight , nodes = self . _edge_length , self . nodes self . dist [ node_id ] = dist # Set minimum dist. self . _visited . add ( node_id ) # Update visited nodes. if not self . _set_node_output ( node_id , no_call , * * kw ) : # Set output. # Some error occurs or inputs are not in the function domain. return True if self . check_targets ( node_id ) : # Check if the targets are satisfied. return False # Stop loop. for w , e_data in self . dmap [ node_id ] . items ( ) : if not wf_has_edge ( node_id , w ) : # Check wildcard option. continue node = nodes [ w ] # Get node attributes. vw_d = dist + edge_weight ( e_data , node ) # Evaluate dist. if check_cutoff ( vw_d ) : # Check the cutoff limit. wf_rm_edge ( node_id , w ) # Remove edge that cannot be see. continue if node [ 'type' ] == 'dispatcher' : self . _set_sub_dsp_node_input ( node_id , w , fringe , check_cutoff , no_call , vw_d ) else : # See the node. self . _see_node ( w , fringe , vw_d ) return True
| 4,744
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L912-L976
|
[
"def",
"static_cdn_url",
"(",
"request",
")",
":",
"cdn_url",
",",
"ssl_url",
"=",
"_get_container_urls",
"(",
"CumulusStaticStorage",
"(",
")",
")",
"static_url",
"=",
"settings",
".",
"STATIC_URL",
"return",
"{",
"\"STATIC_URL\"",
":",
"cdn_url",
"+",
"static_url",
",",
"\"STATIC_SSL_URL\"",
":",
"ssl_url",
"+",
"static_url",
",",
"\"LOCAL_STATIC_URL\"",
":",
"static_url",
",",
"}"
] |
See a node updating seen and fringe .
|
def _see_node ( self , node_id , fringe , dist , w_wait_in = 0 ) : # Namespace shortcuts. seen , dists = self . seen , self . dist wait_in = self . nodes [ node_id ] [ 'wait_inputs' ] # Wait inputs flag. self . _update_meeting ( node_id , dist ) # Update view distance. # Check if inputs are satisfied. if self . check_wait_in ( wait_in , node_id ) : pass # Pass the node elif node_id in dists : # The node w already estimated. if dist < dists [ node_id ] : # Error for negative paths. raise DispatcherError ( 'Contradictory paths found: ' 'negative weights?' , sol = self ) elif node_id not in seen or dist < seen [ node_id ] : # Check min dist. seen [ node_id ] = dist # Update dist. index = self . nodes [ node_id ] [ 'index' ] # Node index. # Virtual distance. vd = ( w_wait_in + int ( wait_in ) , node_id , self . index + index ) # Add to heapq. heapq . heappush ( fringe , ( dist , vd , ( node_id , self ) ) ) return True # The node is visible. return False
| 4,745
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L978-L1030
|
[
"def",
"validate_instance_dbname",
"(",
"self",
",",
"dbname",
")",
":",
"# 1-64 alphanumeric characters, cannot be a reserved MySQL word",
"if",
"re",
".",
"match",
"(",
"'[\\w-]+$'",
",",
"dbname",
")",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"dbname",
")",
"<=",
"41",
"and",
"len",
"(",
"dbname",
")",
">=",
"1",
":",
"if",
"dbname",
".",
"lower",
"(",
")",
"not",
"in",
"MYSQL_RESERVED_WORDS",
":",
"return",
"True",
"return",
"'*** Error: Database names must be 1-64 alphanumeric characters,\\\n cannot be a reserved MySQL word.'"
] |
Removes unused function and sub - dispatcher nodes .
|
def _remove_unused_nodes ( self ) : # Namespace shortcuts. nodes , wf_remove_node = self . nodes , self . workflow . remove_node add_visited , succ = self . _visited . add , self . workflow . succ # Remove unused function and sub-dispatcher nodes. for n in ( set ( self . _wf_pred ) - set ( self . _visited ) ) : node_type = nodes [ n ] [ 'type' ] # Node type. if node_type == 'data' : continue # Skip data node. if node_type == 'dispatcher' and succ [ n ] : add_visited ( n ) # Add to visited nodes. i = self . index + nodes [ n ] [ 'index' ] self . sub_sol [ i ] . _remove_unused_nodes ( ) continue # Skip sub-dispatcher node with outputs. wf_remove_node ( n )
| 4,746
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L1032-L1054
|
[
"def",
"format_rst",
"(",
"self",
")",
":",
"res",
"=",
"''",
"num_cols",
"=",
"len",
"(",
"self",
".",
"header",
")",
"col_width",
"=",
"25",
"for",
"_",
"in",
"range",
"(",
"num_cols",
")",
":",
"res",
"+=",
"''",
".",
"join",
"(",
"[",
"'='",
"for",
"_",
"in",
"range",
"(",
"col_width",
"-",
"1",
")",
"]",
")",
"+",
"' '",
"res",
"+=",
"'\\n'",
"for",
"c",
"in",
"self",
".",
"header",
":",
"res",
"+=",
"c",
".",
"ljust",
"(",
"col_width",
")",
"res",
"+=",
"'\\n'",
"for",
"_",
"in",
"range",
"(",
"num_cols",
")",
":",
"res",
"+=",
"''",
".",
"join",
"(",
"[",
"'='",
"for",
"_",
"in",
"range",
"(",
"col_width",
"-",
"1",
")",
"]",
")",
"+",
"' '",
"res",
"+=",
"'\\n'",
"for",
"row",
"in",
"self",
".",
"arr",
":",
"for",
"c",
"in",
"row",
":",
"res",
"+=",
"self",
".",
"force_to_string",
"(",
"c",
")",
".",
"ljust",
"(",
"col_width",
")",
"res",
"+=",
"'\\n'",
"for",
"_",
"in",
"range",
"(",
"num_cols",
")",
":",
"res",
"+=",
"''",
".",
"join",
"(",
"[",
"'='",
"for",
"_",
"in",
"range",
"(",
"col_width",
"-",
"1",
")",
"]",
")",
"+",
"' '",
"res",
"+=",
"'\\n'",
"return",
"res"
] |
Initialize the dispatcher as sub - dispatcher and update the fringe .
|
def _init_sub_dsp ( self , dsp , fringe , outputs , no_call , initial_dist , index , full_name ) : # Initialize as sub-dispatcher. sol = self . __class__ ( dsp , { } , outputs , False , None , None , no_call , False , wait_in = self . _wait_in . get ( dsp , None ) , index = self . index + index , full_name = full_name ) sol . sub_sol = self . sub_sol for f in sol . fringe : # Update the fringe. item = ( initial_dist + f [ 0 ] , ( 2 , ) + f [ 1 ] [ 1 : ] , f [ - 1 ] ) heapq . heappush ( fringe , item ) return sol
| 4,747
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L1056-L1087
|
[
"def",
"from_millis",
"(",
"cls",
",",
"timeout_ms",
")",
":",
"if",
"hasattr",
"(",
"timeout_ms",
",",
"'has_expired'",
")",
":",
"return",
"timeout_ms",
"if",
"timeout_ms",
"is",
"None",
":",
"return",
"cls",
"(",
"None",
")",
"return",
"cls",
"(",
"timeout_ms",
"/",
"1000.0",
")"
] |
Initializes the sub - dispatcher and set its inputs .
|
def _set_sub_dsp_node_input ( self , node_id , dsp_id , fringe , check_cutoff , no_call , initial_dist ) : # Namespace shortcuts. node = self . nodes [ dsp_id ] dsp , pred = node [ 'function' ] , self . _wf_pred [ dsp_id ] distances , sub_sol = self . dist , self . sub_sol iv_nodes = [ node_id ] # Nodes do be added as initial values. self . _meet [ dsp_id ] = initial_dist # Set view distance. # Check if inputs are satisfied. if self . check_wait_in ( node [ 'wait_inputs' ] , dsp_id ) : return False # Pass the node if dsp_id not in distances : kw = { } dom = self . _check_sub_dsp_domain ( dsp_id , node , pred , kw ) if dom is True : iv_nodes = pred # Args respect the domain. elif dom is False : return False # Initialize the sub-dispatcher. sub_sol [ self . index + node [ 'index' ] ] = sol = self . _init_sub_dsp ( dsp , fringe , node [ 'outputs' ] , no_call , initial_dist , node [ 'index' ] , self . full_name + ( dsp_id , ) ) self . workflow . add_node ( dsp_id , solution = sol , * * kw ) distances [ dsp_id ] = initial_dist # Update min distance. else : sol = sub_sol [ self . index + node [ 'index' ] ] for n_id in iv_nodes : # Namespace shortcuts. val = pred [ n_id ] for n in stlp ( node [ 'inputs' ] [ n_id ] ) : # Add initial value to the sub-dispatcher. sol . _add_initial_value ( n , val , initial_dist , fringe , check_cutoff , no_call ) return True
| 4,748
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L1142-L1218
|
[
"def",
"op_token",
"(",
"self",
",",
"display_name",
",",
"opt",
")",
":",
"args",
"=",
"{",
"'lease'",
":",
"opt",
".",
"lease",
",",
"'display_name'",
":",
"display_name",
",",
"'meta'",
":",
"token_meta",
"(",
"opt",
")",
"}",
"try",
":",
"token",
"=",
"self",
".",
"create_token",
"(",
"*",
"*",
"args",
")",
"except",
"(",
"hvac",
".",
"exceptions",
".",
"InvalidRequest",
",",
"hvac",
".",
"exceptions",
".",
"Forbidden",
")",
"as",
"vault_exception",
":",
"if",
"vault_exception",
".",
"errors",
"[",
"0",
"]",
"==",
"'permission denied'",
":",
"emsg",
"=",
"\"Permission denied creating operational token\"",
"raise",
"aomi",
".",
"exceptions",
".",
"AomiCredentials",
"(",
"emsg",
")",
"else",
":",
"raise",
"LOG",
".",
"debug",
"(",
"\"Created operational token with lease of %s\"",
",",
"opt",
".",
"lease",
")",
"return",
"token",
"[",
"'auth'",
"]",
"[",
"'client_token'",
"]"
] |
Handles the error messages .
|
def _warning ( self , msg , node_id , ex , * args , * * kwargs ) : raises = self . raises ( ex ) if callable ( self . raises ) else self . raises if raises and isinstance ( ex , DispatcherError ) : ex . update ( self ) raise ex self . _errors [ node_id ] = msg % ( ( node_id , ex ) + args ) node_id = '/' . join ( self . full_name + ( node_id , ) ) if raises : raise DispatcherError ( msg , node_id , ex , * args , sol = self , * * kwargs ) else : kwargs [ 'exc_info' ] = kwargs . get ( 'exc_info' , 1 ) log . error ( msg , node_id , ex , * args , * * kwargs )
| 4,749
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L1220-L1241
|
[
"def",
"OnAdjustVolume",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"volume",
"=",
"self",
".",
"player",
".",
"audio_get_volume",
"(",
")",
"if",
"event",
".",
"GetWheelRotation",
"(",
")",
"<",
"0",
":",
"self",
".",
"volume",
"=",
"max",
"(",
"0",
",",
"self",
".",
"volume",
"-",
"10",
")",
"elif",
"event",
".",
"GetWheelRotation",
"(",
")",
">",
"0",
":",
"self",
".",
"volume",
"=",
"min",
"(",
"200",
",",
"self",
".",
"volume",
"+",
"10",
")",
"self",
".",
"player",
".",
"audio_set_volume",
"(",
"self",
".",
"volume",
")"
] |
Grid Stream File Read from File Method
|
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Set file extension property self . fileExtension = extension # Keywords KEYWORDS = ( 'STREAMCELLS' , 'CELLIJ' ) # Parse file into chunks associated with keywords/cards with open ( path , 'r' ) as f : chunks = pt . chunk ( KEYWORDS , f ) # Parse chunks associated with each key for key , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : # Cases if key == 'STREAMCELLS' : # PIPECELLS Handler schunk = chunk [ 0 ] . strip ( ) . split ( ) self . streamCells = schunk [ 1 ] elif key == 'CELLIJ' : # CELLIJ Handler # Parse CELLIJ Chunk result = self . _cellChunk ( chunk ) # Create GSSHAPY object self . _createGsshaPyObjects ( result )
| 4,750
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gst.py#L60-L92
|
[
"async",
"def",
"description",
"(",
"self",
")",
":",
"resp",
"=",
"await",
"self",
".",
"_call_web",
"(",
"f'nation={self.id}'",
")",
"return",
"html",
".",
"unescape",
"(",
"re",
".",
"search",
"(",
"'<div class=\"nationsummary\">(.+?)<p class=\"nationranktext\">'",
",",
"resp",
".",
"text",
",",
"flags",
"=",
"re",
".",
"DOTALL",
")",
".",
"group",
"(",
"1",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"replace",
"(",
"'</p>'",
",",
"''",
")",
".",
"replace",
"(",
"'<p>'",
",",
"'\\n\\n'",
")",
".",
"strip",
"(",
")",
")"
] |
Grid Stream File Write to File Method
|
def _write ( self , session , openFile , replaceParamFile ) : # Write lines openFile . write ( 'GRIDSTREAMFILE\n' ) openFile . write ( 'STREAMCELLS %s\n' % self . streamCells ) for cell in self . gridStreamCells : openFile . write ( 'CELLIJ %s %s\n' % ( cell . cellI , cell . cellJ ) ) openFile . write ( 'NUMNODES %s\n' % cell . numNodes ) for node in cell . gridStreamNodes : openFile . write ( 'LINKNODE %s %s %.6f\n' % ( node . linkNumber , node . nodeNumber , node . nodePercentGrid ) )
| 4,751
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gst.py#L95-L111
|
[
"def",
"devices",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw",
"# from Android system/core/adb/transport.c statename()",
"re_device_info",
"=",
"re",
".",
"compile",
"(",
"r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'",
")",
"devices",
"=",
"[",
"]",
"lines",
"=",
"self",
".",
"command_output",
"(",
"[",
"\"devices\"",
",",
"\"-l\"",
"]",
",",
"timeout",
"=",
"timeout",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
"==",
"'List of devices attached '",
":",
"continue",
"match",
"=",
"re_device_info",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"device",
"=",
"{",
"'device_serial'",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"'state'",
":",
"match",
".",
"group",
"(",
"2",
")",
"}",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"2",
")",
":",
"]",
".",
"strip",
"(",
")",
"if",
"remainder",
":",
"try",
":",
"device",
".",
"update",
"(",
"dict",
"(",
"[",
"j",
".",
"split",
"(",
"':'",
")",
"for",
"j",
"in",
"remainder",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"'devices: Unable to parse '",
"'remainder for device %s'",
"%",
"line",
")",
"devices",
".",
"append",
"(",
"device",
")",
"return",
"devices"
] |
Create GSSHAPY PipeGridCell and PipeGridNode Objects Method
|
def _createGsshaPyObjects ( self , cell ) : # Initialize GSSHAPY PipeGridCell object gridCell = GridStreamCell ( cellI = cell [ 'i' ] , cellJ = cell [ 'j' ] , numNodes = cell [ 'numNodes' ] ) # Associate GridStreamCell with GridStreamFile gridCell . gridStreamFile = self for linkNode in cell [ 'linkNodes' ] : # Create GSSHAPY GridStreamNode object gridNode = GridStreamNode ( linkNumber = linkNode [ 'linkNumber' ] , nodeNumber = linkNode [ 'nodeNumber' ] , nodePercentGrid = linkNode [ 'percent' ] ) # Associate GridStreamNode with GridStreamCell gridNode . gridStreamCell = gridCell
| 4,752
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gst.py#L113-L132
|
[
"def",
"update",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_md",
".",
"update",
"(",
"data",
")",
"bufpos",
"=",
"self",
".",
"_nbytes",
"&",
"63",
"self",
".",
"_nbytes",
"+=",
"len",
"(",
"data",
")",
"if",
"self",
".",
"_rarbug",
"and",
"len",
"(",
"data",
")",
">",
"64",
":",
"dpos",
"=",
"self",
".",
"block_size",
"-",
"bufpos",
"while",
"dpos",
"+",
"self",
".",
"block_size",
"<=",
"len",
"(",
"data",
")",
":",
"self",
".",
"_corrupt",
"(",
"data",
",",
"dpos",
")",
"dpos",
"+=",
"self",
".",
"block_size"
] |
Index Map Read from File Method
|
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Set file extension property self . fileExtension = extension # Open file and read plain text into text field with open ( path , 'r' ) as f : self . rasterText = f . read ( ) # Retrieve metadata from header lines = self . rasterText . split ( '\n' ) for line in lines [ 0 : 6 ] : spline = line . split ( ) if 'north' in spline [ 0 ] . lower ( ) : self . north = float ( spline [ 1 ] ) elif 'south' in spline [ 0 ] . lower ( ) : self . south = float ( spline [ 1 ] ) elif 'east' in spline [ 0 ] . lower ( ) : self . east = float ( spline [ 1 ] ) elif 'west' in spline [ 0 ] . lower ( ) : self . west = float ( spline [ 1 ] ) elif 'rows' in spline [ 0 ] . lower ( ) : self . rows = int ( spline [ 1 ] ) elif 'cols' in spline [ 0 ] . lower ( ) : self . columns = int ( spline [ 1 ] ) if spatial : # Get well known binary from the raster file using the MapKit RasterLoader wkbRaster = RasterLoader . grassAsciiRasterToWKB ( session = session , grassRasterPath = path , srid = str ( spatialReferenceID ) , noData = '-1' ) self . raster = wkbRaster self . srid = spatialReferenceID # Assign other properties self . filename = filename
| 4,753
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/idx.py#L96-L135
|
[
"def",
"syzygyJD",
"(",
"jd",
")",
":",
"sun",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"SUN",
",",
"jd",
")",
"moon",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"MOON",
",",
"jd",
")",
"dist",
"=",
"angle",
".",
"distance",
"(",
"sun",
",",
"moon",
")",
"# Offset represents the Syzygy type. ",
"# Zero is conjunction and 180 is opposition.",
"offset",
"=",
"180",
"if",
"(",
"dist",
">=",
"180",
")",
"else",
"0",
"while",
"abs",
"(",
"dist",
")",
">",
"MAX_ERROR",
":",
"jd",
"=",
"jd",
"-",
"dist",
"/",
"13.1833",
"# Moon mean daily motion",
"sun",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"SUN",
",",
"jd",
")",
"moon",
"=",
"swe",
".",
"sweObjectLon",
"(",
"const",
".",
"MOON",
",",
"jd",
")",
"dist",
"=",
"angle",
".",
"closestdistance",
"(",
"sun",
"-",
"offset",
",",
"moon",
")",
"return",
"jd"
] |
Index Map Write to File Method
|
def write ( self , directory , name = None , session = None , replaceParamFile = None ) : # Initiate file if name != None : filename = '%s.%s' % ( name , self . fileExtension ) filePath = os . path . join ( directory , filename ) else : filePath = os . path . join ( directory , self . filename ) # If the raster field is not empty, write from this field if type ( self . raster ) != type ( None ) : # Configure RasterConverter converter = RasterConverter ( session ) # Use MapKit RasterConverter to retrieve the raster as a GRASS ASCII Grid grassAsciiGrid = converter . getAsGrassAsciiRaster ( rasterFieldName = 'raster' , tableName = self . __tablename__ , rasterIdFieldName = 'id' , rasterId = self . id ) # Write to file with open ( filePath , 'w' ) as mapFile : mapFile . write ( grassAsciiGrid ) else : if self . rasterText is not None : # Open file and write, raster_text only with open ( filePath , 'w' ) as mapFile : mapFile . write ( self . rasterText )
| 4,754
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/idx.py#L137-L168
|
[
"def",
"devices",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw",
"# from Android system/core/adb/transport.c statename()",
"re_device_info",
"=",
"re",
".",
"compile",
"(",
"r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'",
")",
"devices",
"=",
"[",
"]",
"lines",
"=",
"self",
".",
"command_output",
"(",
"[",
"\"devices\"",
",",
"\"-l\"",
"]",
",",
"timeout",
"=",
"timeout",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
"==",
"'List of devices attached '",
":",
"continue",
"match",
"=",
"re_device_info",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"device",
"=",
"{",
"'device_serial'",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"'state'",
":",
"match",
".",
"group",
"(",
"2",
")",
"}",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"2",
")",
":",
"]",
".",
"strip",
"(",
")",
"if",
"remainder",
":",
"try",
":",
"device",
".",
"update",
"(",
"dict",
"(",
"[",
"j",
".",
"split",
"(",
"':'",
")",
"for",
"j",
"in",
"remainder",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"'devices: Unable to parse '",
"'remainder for device %s'",
"%",
"line",
")",
"devices",
".",
"append",
"(",
"device",
")",
"return",
"devices"
] |
Grid Pipe File Write to File Method
|
def _write ( self , session , openFile , replaceParamFile ) : # Write Lines openFile . write ( 'GRIDPIPEFILE\n' ) openFile . write ( 'PIPECELLS %s\n' % self . pipeCells ) for cell in self . gridPipeCells : openFile . write ( 'CELLIJ %s %s\n' % ( cell . cellI , cell . cellJ ) ) openFile . write ( 'NUMPIPES %s\n' % cell . numPipes ) for node in cell . gridPipeNodes : openFile . write ( 'SPIPE %s %s %.6f\n' % ( node . linkNumber , node . nodeNumber , node . fractPipeLength ) )
| 4,755
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gpi.py#L94-L110
|
[
"def",
"devices",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw",
"# from Android system/core/adb/transport.c statename()",
"re_device_info",
"=",
"re",
".",
"compile",
"(",
"r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'",
")",
"devices",
"=",
"[",
"]",
"lines",
"=",
"self",
".",
"command_output",
"(",
"[",
"\"devices\"",
",",
"\"-l\"",
"]",
",",
"timeout",
"=",
"timeout",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
"==",
"'List of devices attached '",
":",
"continue",
"match",
"=",
"re_device_info",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"device",
"=",
"{",
"'device_serial'",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"'state'",
":",
"match",
".",
"group",
"(",
"2",
")",
"}",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"2",
")",
":",
"]",
".",
"strip",
"(",
")",
"if",
"remainder",
":",
"try",
":",
"device",
".",
"update",
"(",
"dict",
"(",
"[",
"j",
".",
"split",
"(",
"':'",
")",
"for",
"j",
"in",
"remainder",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"'devices: Unable to parse '",
"'remainder for device %s'",
"%",
"line",
")",
"devices",
".",
"append",
"(",
"device",
")",
"return",
"devices"
] |
Create GSSHAPY GridPipeCell and GridPipeNode Objects Method
|
def _createGsshaPyObjects ( self , cell ) : # Initialize GSSHAPY GridPipeCell object gridCell = GridPipeCell ( cellI = cell [ 'i' ] , cellJ = cell [ 'j' ] , numPipes = cell [ 'numPipes' ] ) # Associate GridPipeCell with GridPipeFile gridCell . gridPipeFile = self for spipe in cell [ 'spipes' ] : # Create GSSHAPY GridPipeNode object gridNode = GridPipeNode ( linkNumber = spipe [ 'linkNumber' ] , nodeNumber = spipe [ 'nodeNumber' ] , fractPipeLength = spipe [ 'fraction' ] ) # Associate GridPipeNode with GridPipeCell gridNode . gridPipeCell = gridCell
| 4,756
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gpi.py#L112-L131
|
[
"def",
"permutation_entropy",
"(",
"x",
",",
"n",
",",
"tau",
")",
":",
"PeSeq",
"=",
"[",
"]",
"Em",
"=",
"embed_seq",
"(",
"x",
",",
"tau",
",",
"n",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"Em",
")",
")",
":",
"r",
"=",
"[",
"]",
"z",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"Em",
"[",
"i",
"]",
")",
")",
":",
"z",
".",
"append",
"(",
"Em",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"Em",
"[",
"i",
"]",
")",
")",
":",
"z",
".",
"sort",
"(",
")",
"r",
".",
"append",
"(",
"z",
".",
"index",
"(",
"Em",
"[",
"i",
"]",
"[",
"j",
"]",
")",
")",
"z",
"[",
"z",
".",
"index",
"(",
"Em",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"]",
"=",
"-",
"1",
"PeSeq",
".",
"append",
"(",
"r",
")",
"RankMat",
"=",
"[",
"]",
"while",
"len",
"(",
"PeSeq",
")",
">",
"0",
":",
"RankMat",
".",
"append",
"(",
"PeSeq",
".",
"count",
"(",
"PeSeq",
"[",
"0",
"]",
")",
")",
"x",
"=",
"PeSeq",
"[",
"0",
"]",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"PeSeq",
".",
"count",
"(",
"PeSeq",
"[",
"0",
"]",
")",
")",
":",
"PeSeq",
".",
"pop",
"(",
"PeSeq",
".",
"index",
"(",
"x",
")",
")",
"RankMat",
"=",
"numpy",
".",
"array",
"(",
"RankMat",
")",
"RankMat",
"=",
"numpy",
".",
"true_divide",
"(",
"RankMat",
",",
"RankMat",
".",
"sum",
"(",
")",
")",
"EntropyMat",
"=",
"numpy",
".",
"multiply",
"(",
"numpy",
".",
"log2",
"(",
"RankMat",
")",
",",
"RankMat",
")",
"PE",
"=",
"-",
"1",
"*",
"EntropyMat",
".",
"sum",
"(",
")",
"return",
"PE"
] |
Parse CELLIJ Chunk Method
|
def _cellChunk ( self , lines ) : KEYWORDS = ( 'CELLIJ' , 'NUMPIPES' , 'SPIPE' ) result = { 'i' : None , 'j' : None , 'numPipes' : None , 'spipes' : [ ] } chunks = pt . chunk ( KEYWORDS , lines ) # Parse chunks associated with each key for card , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : schunk = chunk [ 0 ] . strip ( ) . split ( ) # Cases if card == 'CELLIJ' : # CELLIJ handler result [ 'i' ] = schunk [ 1 ] result [ 'j' ] = schunk [ 2 ] elif card == 'NUMPIPES' : # NUMPIPES handler result [ 'numPipes' ] = schunk [ 1 ] elif card == 'SPIPE' : # SPIPE handler pipe = { 'linkNumber' : schunk [ 1 ] , 'nodeNumber' : schunk [ 2 ] , 'fraction' : schunk [ 3 ] } result [ 'spipes' ] . append ( pipe ) return result
| 4,757
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gpi.py#L133-L172
|
[
"def",
"tree",
"(",
"path",
",",
"load_path",
"=",
"None",
")",
":",
"load_path",
"=",
"_check_load_paths",
"(",
"load_path",
")",
"aug",
"=",
"_Augeas",
"(",
"loadpath",
"=",
"load_path",
")",
"path",
"=",
"path",
".",
"rstrip",
"(",
"'/'",
")",
"+",
"'/'",
"match_path",
"=",
"path",
"return",
"dict",
"(",
"[",
"i",
"for",
"i",
"in",
"_recurmatch",
"(",
"match_path",
",",
"aug",
")",
"]",
")"
] |
Replace Param File Read from File Method
|
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Set file extension property self . fileExtension = extension # Open file and parse into a data structure with open ( path , 'r' ) as f : for line in f : sline = line . strip ( ) . split ( ) if len ( sline ) == 1 : self . numParameters = sline [ 0 ] else : # Create GSSHAPY TargetParameter object target = TargetParameter ( targetVariable = sline [ 0 ] , varFormat = sline [ 1 ] ) # Associate TargetParameter with ReplaceParamFile target . replaceParamFile = self
| 4,758
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/rep.py#L55-L74
|
[
"async",
"def",
"description",
"(",
"self",
")",
":",
"resp",
"=",
"await",
"self",
".",
"_call_web",
"(",
"f'nation={self.id}'",
")",
"return",
"html",
".",
"unescape",
"(",
"re",
".",
"search",
"(",
"'<div class=\"nationsummary\">(.+?)<p class=\"nationranktext\">'",
",",
"resp",
".",
"text",
",",
"flags",
"=",
"re",
".",
"DOTALL",
")",
".",
"group",
"(",
"1",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"replace",
"(",
"'</p>'",
",",
"''",
")",
".",
"replace",
"(",
"'<p>'",
",",
"'\\n\\n'",
")",
".",
"strip",
"(",
")",
")"
] |
Replace Param File Write to File Method
|
def _write ( self , session , openFile , replaceParamFile ) : # Retrieve TargetParameter objects targets = self . targetParameters # Write lines openFile . write ( '%s\n' % self . numParameters ) for target in targets : openFile . write ( '%s %s\n' % ( target . targetVariable , target . varFormat ) )
| 4,759
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/rep.py#L76-L87
|
[
"def",
"devices",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw",
"# from Android system/core/adb/transport.c statename()",
"re_device_info",
"=",
"re",
".",
"compile",
"(",
"r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'",
")",
"devices",
"=",
"[",
"]",
"lines",
"=",
"self",
".",
"command_output",
"(",
"[",
"\"devices\"",
",",
"\"-l\"",
"]",
",",
"timeout",
"=",
"timeout",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
"==",
"'List of devices attached '",
":",
"continue",
"match",
"=",
"re_device_info",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"device",
"=",
"{",
"'device_serial'",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"'state'",
":",
"match",
".",
"group",
"(",
"2",
")",
"}",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"2",
")",
":",
"]",
".",
"strip",
"(",
")",
"if",
"remainder",
":",
"try",
":",
"device",
".",
"update",
"(",
"dict",
"(",
"[",
"j",
".",
"split",
"(",
"':'",
")",
"for",
"j",
"in",
"remainder",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"'devices: Unable to parse '",
"'remainder for device %s'",
"%",
"line",
")",
"devices",
".",
"append",
"(",
"device",
")",
"return",
"devices"
] |
Replace Val File Read from File Method
|
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Set file extension property self . fileExtension = extension # Open file and parse into a data structure with open ( path , 'r' ) as f : for line in f : valLine = ReplaceValLine ( ) valLine . contents = line valLine . replaceValFile = self
| 4,760
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/rep.py#L147-L159
|
[
"async",
"def",
"description",
"(",
"self",
")",
":",
"resp",
"=",
"await",
"self",
".",
"_call_web",
"(",
"f'nation={self.id}'",
")",
"return",
"html",
".",
"unescape",
"(",
"re",
".",
"search",
"(",
"'<div class=\"nationsummary\">(.+?)<p class=\"nationranktext\">'",
",",
"resp",
".",
"text",
",",
"flags",
"=",
"re",
".",
"DOTALL",
")",
".",
"group",
"(",
"1",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"replace",
"(",
"'</p>'",
",",
"''",
")",
".",
"replace",
"(",
"'<p>'",
",",
"'\\n\\n'",
")",
".",
"strip",
"(",
")",
")"
] |
Replace Val File Write to File Method
|
def _write ( self , session , openFile , replaceParamFile ) : # Write lines for line in self . lines : openFile . write ( line . contents )
| 4,761
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/rep.py#L161-L167
|
[
"def",
"devices",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw",
"# from Android system/core/adb/transport.c statename()",
"re_device_info",
"=",
"re",
".",
"compile",
"(",
"r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'",
")",
"devices",
"=",
"[",
"]",
"lines",
"=",
"self",
".",
"command_output",
"(",
"[",
"\"devices\"",
",",
"\"-l\"",
"]",
",",
"timeout",
"=",
"timeout",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
"==",
"'List of devices attached '",
":",
"continue",
"match",
"=",
"re_device_info",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"device",
"=",
"{",
"'device_serial'",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"'state'",
":",
"match",
".",
"group",
"(",
"2",
")",
"}",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"2",
")",
":",
"]",
".",
"strip",
"(",
")",
"if",
"remainder",
":",
"try",
":",
"device",
".",
"update",
"(",
"dict",
"(",
"[",
"j",
".",
"split",
"(",
"':'",
")",
"for",
"j",
"in",
"remainder",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"'devices: Unable to parse '",
"'remainder for device %s'",
"%",
"line",
")",
"devices",
".",
"append",
"(",
"device",
")",
"return",
"devices"
] |
Use this function in emit data into the store .
|
def emit ( self , data_frame ) : if self . result is not None : raise MultipleEmitsError ( ) data_frame . columns = [ self . prefix + '__' + c for c in data_frame . columns ] self . result = data_frame
| 4,762
|
https://github.com/bayesimpact/fex/blob/2d9b4e9be2bf98847a36055b907411fd5557eb77/fex/feature_extractor.py#L41-L50
|
[
"def",
"get_placement_solver",
"(",
"service_instance",
")",
":",
"stub",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"get_new_service_instance_stub",
"(",
"service_instance",
",",
"ns",
"=",
"'pbm/2.0'",
",",
"path",
"=",
"'/pbm/sdk'",
")",
"pbm_si",
"=",
"pbm",
".",
"ServiceInstance",
"(",
"'ServiceInstance'",
",",
"stub",
")",
"try",
":",
"profile_manager",
"=",
"pbm_si",
".",
"RetrieveContent",
"(",
")",
".",
"placementSolver",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"VMwareApiError",
"(",
"'Not enough permissions. Required privilege: '",
"'{0}'",
".",
"format",
"(",
"exc",
".",
"privilegeId",
")",
")",
"except",
"vim",
".",
"fault",
".",
"VimFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"VMwareApiError",
"(",
"exc",
".",
"msg",
")",
"except",
"vmodl",
".",
"RuntimeFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"VMwareRuntimeError",
"(",
"exc",
".",
"msg",
")",
"return",
"profile_manager"
] |
Returns a list with each script that is triggered .
|
def trigger_hats ( self , command , arg = None , callback = None ) : threads = [ ] for scriptable in [ self . project . stage ] + self . project . sprites : threads += self . trigger_scriptable_hats ( scriptable , command , arg , callback ) return threads
| 4,763
|
https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L115-L121
|
[
"def",
"render_category_averages",
"(",
"obj",
",",
"normalize_to",
"=",
"100",
")",
":",
"context",
"=",
"{",
"'reviewed_item'",
":",
"obj",
"}",
"ctype",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"obj",
")",
"reviews",
"=",
"models",
".",
"Review",
".",
"objects",
".",
"filter",
"(",
"content_type",
"=",
"ctype",
",",
"object_id",
"=",
"obj",
".",
"id",
")",
"category_averages",
"=",
"{",
"}",
"for",
"review",
"in",
"reviews",
":",
"review_category_averages",
"=",
"review",
".",
"get_category_averages",
"(",
"normalize_to",
")",
"if",
"review_category_averages",
":",
"for",
"category",
",",
"average",
"in",
"review_category_averages",
".",
"items",
"(",
")",
":",
"if",
"category",
"not",
"in",
"category_averages",
":",
"category_averages",
"[",
"category",
"]",
"=",
"review_category_averages",
"[",
"category",
"]",
"else",
":",
"category_averages",
"[",
"category",
"]",
"+=",
"review_category_averages",
"[",
"category",
"]",
"if",
"reviews",
"and",
"category_averages",
":",
"for",
"category",
",",
"average",
"in",
"category_averages",
".",
"items",
"(",
")",
":",
"category_averages",
"[",
"category",
"]",
"=",
"category_averages",
"[",
"category",
"]",
"/",
"models",
".",
"Rating",
".",
"objects",
".",
"filter",
"(",
"category",
"=",
"category",
",",
"value__isnull",
"=",
"False",
",",
"review__content_type",
"=",
"ctype",
",",
"review__object_id",
"=",
"obj",
".",
"id",
")",
".",
"exclude",
"(",
"value",
"=",
"''",
")",
".",
"count",
"(",
")",
"else",
":",
"category_averages",
"=",
"{",
"}",
"for",
"category",
"in",
"models",
".",
"RatingCategory",
".",
"objects",
".",
"filter",
"(",
"counts_for_average",
"=",
"True",
")",
":",
"category_averages",
"[",
"category",
"]",
"=",
"0.0",
"context",
".",
"update",
"(",
"{",
"'category_averages'",
":",
"category_averages",
"}",
")",
"return",
"context"
] |
Run the script and add it to the list of threads .
|
def push_script ( self , scriptable , script , callback = None ) : if script in self . threads : self . threads [ script ] . finish ( ) thread = Thread ( self . run_script ( scriptable , script ) , scriptable , callback ) self . new_threads [ script ] = thread return thread
| 4,764
|
https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L134-L141
|
[
"def",
"get_columns",
"(",
"self",
",",
"connection",
",",
"table_name",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"cols",
"=",
"self",
".",
"_get_redshift_columns",
"(",
"connection",
",",
"table_name",
",",
"schema",
",",
"*",
"*",
"kw",
")",
"if",
"not",
"self",
".",
"_domains",
":",
"self",
".",
"_domains",
"=",
"self",
".",
"_load_domains",
"(",
"connection",
")",
"domains",
"=",
"self",
".",
"_domains",
"columns",
"=",
"[",
"]",
"for",
"col",
"in",
"cols",
":",
"column_info",
"=",
"self",
".",
"_get_column_info",
"(",
"name",
"=",
"col",
".",
"name",
",",
"format_type",
"=",
"col",
".",
"format_type",
",",
"default",
"=",
"col",
".",
"default",
",",
"notnull",
"=",
"col",
".",
"notnull",
",",
"domains",
"=",
"domains",
",",
"enums",
"=",
"[",
"]",
",",
"schema",
"=",
"col",
".",
"schema",
",",
"encode",
"=",
"col",
".",
"encode",
")",
"columns",
".",
"append",
"(",
"column_info",
")",
"return",
"columns"
] |
Execute one frame of the interpreter .
|
def tick ( self , events ) : self . add_new_threads ( ) if self . drag_sprite : ( mx , my ) = self . screen . get_mouse_pos ( ) ( ox , oy ) = self . drag_offset new_position = ( mx + ox , my + oy ) if self . drag_sprite . position != new_position : self . has_dragged = True self . drag_sprite . position = new_position for event in events : if event . kind == "key_pressed" : assert event . value in kurt . Insert ( None , "key" ) . options ( ) self . trigger_hats ( "whenKeyPressed" , event . value ) elif event . kind == "mouse_down" : mouse_pos = self . screen . get_mouse_pos ( ) for sprite in reversed ( self . project . sprites ) : rect = bounds ( sprite ) if rect . collide_point ( mouse_pos ) : if self . screen . touching_mouse ( sprite ) : scriptable = sprite break else : scriptable = self . project . stage if scriptable . is_draggable : ( mx , my ) = self . screen . get_mouse_pos ( ) ( x , y ) = scriptable . position self . drag_offset = ( x - mx , y - my ) self . drag_sprite = scriptable self . has_dragged = False go_to_front ( scriptable ) else : self . trigger_scriptable_hats ( scriptable , "whenClicked" ) elif event . kind == "mouse_up" : if self . drag_sprite : if not self . has_dragged : self . trigger_scriptable_hats ( self . drag_sprite , "whenClicked" ) self . drag_sprite = None remove_threads = [ ] while 1 : for ( script , thread ) in self . threads . items ( ) : modified = False for event in thread . tick ( ) : if event . kind == "stop" : if event . value == "all" : self . stop ( ) return elif event . value == "other scripts in sprite" : for ( script , other ) in self . threads . items ( ) : if other . scriptable == thread . scriptable : other . finish ( ) del self . threads [ script ] modified = True break else : thread . finish ( ) del self . threads [ script ] modified = True break else : # Pass to Screen yield event if modified : break else : break self . add_new_threads ( )
| 4,765
|
https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L147-L224
|
[
"def",
"get_capacity_grav",
"(",
"self",
",",
"min_voltage",
"=",
"None",
",",
"max_voltage",
"=",
"None",
",",
"use_overall_normalization",
"=",
"True",
")",
":",
"pairs_in_range",
"=",
"self",
".",
"_select_in_voltage_range",
"(",
"min_voltage",
",",
"max_voltage",
")",
"normalization_mass",
"=",
"self",
".",
"normalization_mass",
"if",
"use_overall_normalization",
"or",
"len",
"(",
"pairs_in_range",
")",
"==",
"0",
"else",
"pairs_in_range",
"[",
"-",
"1",
"]",
".",
"mass_discharge",
"return",
"sum",
"(",
"[",
"pair",
".",
"mAh",
"for",
"pair",
"in",
"pairs_in_range",
"]",
")",
"/",
"normalization_mass"
] |
Stop running threads .
|
def stop ( self ) : self . threads = { } self . new_threads = { } self . answer = "" self . ask_lock = False
| 4,766
|
https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L226-L231
|
[
"def",
"read_avro",
"(",
"file_path_or_buffer",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"file_path_or_buffer",
",",
"six",
".",
"string_types",
")",
":",
"with",
"open",
"(",
"file_path_or_buffer",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"__file_to_dataframe",
"(",
"f",
",",
"schema",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"__file_to_dataframe",
"(",
"file_path_or_buffer",
",",
"schema",
",",
"*",
"*",
"kwargs",
")"
] |
Expression evaluator .
|
def evaluate ( self , s , value , insert = None ) : assert not isinstance ( value , kurt . Script ) if insert and insert . unevaluated : return value if isinstance ( value , kurt . Block ) : if value . type . shape == "hat" : return [ ] if value . type not in self . COMMANDS : if getattr ( value . type , '_workaround' , None ) : value = value . type . _workaround ( value ) if not value : raise kurt . BlockNotSupported ( value . type ) else : raise kurt . BlockNotSupported ( value . type ) f = self . COMMANDS [ value . type ] args = [ self . evaluate ( s , arg , arg_insert ) for ( arg , arg_insert ) in zip ( list ( value . args ) , value . type . inserts ) ] value = f ( s , * args ) def flatten_generators ( gen ) : for item in gen : if inspect . isgenerator ( item ) : for x in flatten_generators ( item ) : yield x else : yield item if inspect . isgenerator ( value ) : value = flatten_generators ( value ) if value is None : value = [ ] if insert : if isinstance ( value , basestring ) : value = unicode ( value ) if insert . shape in ( "number" , "number-menu" , "string" ) : try : value = float ( value ) except ( TypeError , ValueError ) : if insert . shape == "number" : value = 0 if isinstance ( value , float ) and value == int ( value ) : value = int ( value ) if insert . kind in ( "spriteOrStage" , "spriteOrMouse" , "stageOrThis" , "spriteOnly" , "touching" ) : if value not in ( "mouse-pointer" , "edge" ) : value = ( self . project . stage if value == "Stage" else self . project . get_sprite ( value ) ) elif insert . kind == "var" : if value in s . variables : value = s . variables [ value ] else : value = s . project . variables [ value ] elif insert . kind == "list" : if value in s . lists : value = s . lists [ value ] else : value = s . project . lists [ value ] elif insert . kind == "sound" : for sound in s . sounds : if sound . name == value : value = sound break return value
| 4,767
|
https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L240-L320
|
[
"def",
"_add_dependency",
"(",
"self",
",",
"dependency",
",",
"var_name",
"=",
"None",
")",
":",
"if",
"var_name",
"is",
"None",
":",
"var_name",
"=",
"next",
"(",
"self",
".",
"temp_var_names",
")",
"# Don't add duplicate dependencies",
"if",
"(",
"dependency",
",",
"var_name",
")",
"not",
"in",
"self",
".",
"dependencies",
":",
"self",
".",
"dependencies",
".",
"append",
"(",
"(",
"dependency",
",",
"var_name",
")",
")",
"return",
"var_name"
] |
Name identifying this RabbitMQ cluster .
|
def get_cluster_name ( self ) : return self . _get ( url = self . url + '/api/cluster-name' , headers = self . headers , auth = self . auth )
| 4,768
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L16-L24
|
[
"def",
"__texUpdate",
"(",
"self",
",",
"frame",
")",
":",
"# Retrieve buffer from videosink",
"if",
"self",
".",
"texture_locked",
":",
"return",
"self",
".",
"buffer",
"=",
"frame",
"self",
".",
"texUpdated",
"=",
"True"
] |
An individual connection .
|
def get_connection ( self , name ) : return self . _api_get ( '/api/connections/{0}' . format ( urllib . parse . quote_plus ( name ) ) )
| 4,769
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L93-L102
|
[
"def",
"setOverlayTransformTrackedDeviceRelative",
"(",
"self",
",",
"ulOverlayHandle",
",",
"unTrackedDevice",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"setOverlayTransformTrackedDeviceRelative",
"pmatTrackedDeviceToOverlayTransform",
"=",
"HmdMatrix34_t",
"(",
")",
"result",
"=",
"fn",
"(",
"ulOverlayHandle",
",",
"unTrackedDevice",
",",
"byref",
"(",
"pmatTrackedDeviceToOverlayTransform",
")",
")",
"return",
"result",
",",
"pmatTrackedDeviceToOverlayTransform"
] |
Closes an individual connection . Give an optional reason
|
def delete_connection ( self , name , reason = None ) : headers = { 'X-Reason' : reason } if reason else { } self . _api_delete ( '/api/connections/{0}' . format ( urllib . parse . quote_plus ( name ) ) , headers = headers , )
| 4,770
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L104-L121
|
[
"def",
"DetectExecutablePaths",
"(",
"source_values",
",",
"vars_map",
"=",
"None",
")",
":",
"detector",
"=",
"CreateWindowsRegistryExecutablePathsDetector",
"(",
"vars_map",
"=",
"vars_map",
")",
"for",
"source_value",
"in",
"source_values",
":",
"for",
"result",
"in",
"detector",
".",
"Detect",
"(",
"source_value",
")",
":",
"yield",
"result"
] |
List of all channels for a given connection .
|
def list_connection_channels ( self , name ) : return self . _api_get ( '/api/connections/{0}/channels' . format ( urllib . parse . quote_plus ( name ) ) )
| 4,771
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L123-L132
|
[
"def",
"get_metadata",
"(",
"self",
",",
"digest",
",",
"content",
",",
"mime_type",
")",
":",
"# XXX: ad-hoc for now, refactor later",
"if",
"mime_type",
".",
"startswith",
"(",
"\"image/\"",
")",
":",
"img",
"=",
"Image",
".",
"open",
"(",
"BytesIO",
"(",
"content",
")",
")",
"ret",
"=",
"{",
"}",
"if",
"not",
"hasattr",
"(",
"img",
",",
"\"_getexif\"",
")",
":",
"return",
"{",
"}",
"info",
"=",
"img",
".",
"_getexif",
"(",
")",
"if",
"not",
"info",
":",
"return",
"{",
"}",
"for",
"tag",
",",
"value",
"in",
"info",
".",
"items",
"(",
")",
":",
"decoded",
"=",
"TAGS",
".",
"get",
"(",
"tag",
",",
"tag",
")",
"ret",
"[",
"\"EXIF:\"",
"+",
"str",
"(",
"decoded",
")",
"]",
"=",
"value",
"return",
"ret",
"else",
":",
"if",
"mime_type",
"!=",
"\"application/pdf\"",
":",
"content",
"=",
"self",
".",
"to_pdf",
"(",
"digest",
",",
"content",
",",
"mime_type",
")",
"with",
"make_temp_file",
"(",
"content",
")",
"as",
"in_fn",
":",
"try",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"\"pdfinfo\"",
",",
"in_fn",
"]",
")",
"except",
"OSError",
":",
"logger",
".",
"error",
"(",
"\"Conversion failed, probably pdfinfo is not installed\"",
")",
"raise",
"ret",
"=",
"{",
"}",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"b\"\\n\"",
")",
":",
"if",
"b\":\"",
"in",
"line",
":",
"key",
",",
"value",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"b\":\"",
",",
"1",
")",
"key",
"=",
"str",
"(",
"key",
")",
"ret",
"[",
"\"PDF:\"",
"+",
"key",
"]",
"=",
"str",
"(",
"value",
".",
"strip",
"(",
")",
",",
"errors",
"=",
"\"replace\"",
")",
"return",
"ret"
] |
Details about an individual channel .
|
def get_channel ( self , name ) : return self . _api_get ( '/api/channels/{0}' . format ( urllib . parse . quote_plus ( name ) ) )
| 4,772
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L140-L149
|
[
"def",
"write_backup_state_to_json_file",
"(",
"self",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"state_file_path",
"=",
"self",
".",
"config",
"[",
"\"json_state_file_path\"",
"]",
"self",
".",
"state",
"[",
"\"walreceivers\"",
"]",
"=",
"{",
"key",
":",
"{",
"\"latest_activity\"",
":",
"value",
".",
"latest_activity",
",",
"\"running\"",
":",
"value",
".",
"running",
",",
"\"last_flushed_lsn\"",
":",
"value",
".",
"last_flushed_lsn",
"}",
"for",
"key",
",",
"value",
"in",
"self",
".",
"walreceivers",
".",
"items",
"(",
")",
"}",
"self",
".",
"state",
"[",
"\"pg_receivexlogs\"",
"]",
"=",
"{",
"key",
":",
"{",
"\"latest_activity\"",
":",
"value",
".",
"latest_activity",
",",
"\"running\"",
":",
"value",
".",
"running",
"}",
"for",
"key",
",",
"value",
"in",
"self",
".",
"receivexlogs",
".",
"items",
"(",
")",
"}",
"self",
".",
"state",
"[",
"\"pg_basebackups\"",
"]",
"=",
"{",
"key",
":",
"{",
"\"latest_activity\"",
":",
"value",
".",
"latest_activity",
",",
"\"running\"",
":",
"value",
".",
"running",
"}",
"for",
"key",
",",
"value",
"in",
"self",
".",
"basebackups",
".",
"items",
"(",
")",
"}",
"self",
".",
"state",
"[",
"\"compressors\"",
"]",
"=",
"[",
"compressor",
".",
"state",
"for",
"compressor",
"in",
"self",
".",
"compressors",
"]",
"self",
".",
"state",
"[",
"\"transfer_agents\"",
"]",
"=",
"[",
"ta",
".",
"state",
"for",
"ta",
"in",
"self",
".",
"transfer_agents",
"]",
"self",
".",
"state",
"[",
"\"queues\"",
"]",
"=",
"{",
"\"compression_queue\"",
":",
"self",
".",
"compression_queue",
".",
"qsize",
"(",
")",
",",
"\"transfer_queue\"",
":",
"self",
".",
"transfer_queue",
".",
"qsize",
"(",
")",
",",
"}",
"self",
".",
"log",
".",
"debug",
"(",
"\"Writing JSON state file to %r\"",
",",
"state_file_path",
")",
"write_json_file",
"(",
"state_file_path",
",",
"self",
".",
"state",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Wrote JSON state file to disk, took %.4fs\"",
",",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")"
] |
A list of all consumers in a given virtual host .
|
def list_consumers_for_vhost ( self , vhost ) : return self . _api_get ( '/api/consumers/{0}' . format ( urllib . parse . quote_plus ( vhost ) ) )
| 4,773
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L157-L166
|
[
"def",
"trace_integration",
"(",
"tracer",
"=",
"None",
")",
":",
"log",
".",
"info",
"(",
"'Integrated module: {}'",
".",
"format",
"(",
"MODULE_NAME",
")",
")",
"# Wrap the httplib request function",
"request_func",
"=",
"getattr",
"(",
"httplib",
".",
"HTTPConnection",
",",
"HTTPLIB_REQUEST_FUNC",
")",
"wrapped_request",
"=",
"wrap_httplib_request",
"(",
"request_func",
")",
"setattr",
"(",
"httplib",
".",
"HTTPConnection",
",",
"request_func",
".",
"__name__",
",",
"wrapped_request",
")",
"# Wrap the httplib response function",
"response_func",
"=",
"getattr",
"(",
"httplib",
".",
"HTTPConnection",
",",
"HTTPLIB_RESPONSE_FUNC",
")",
"wrapped_response",
"=",
"wrap_httplib_response",
"(",
"response_func",
")",
"setattr",
"(",
"httplib",
".",
"HTTPConnection",
",",
"response_func",
".",
"__name__",
",",
"wrapped_response",
")"
] |
A list of all exchanges in a given virtual host .
|
def list_exchanges_for_vhost ( self , vhost ) : return self . _api_get ( '/api/exchanges/{0}' . format ( urllib . parse . quote_plus ( vhost ) ) )
| 4,774
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L174-L183
|
[
"def",
"_wrap_result",
"(",
"data",
",",
"columns",
",",
"index_col",
"=",
"None",
",",
"coerce_float",
"=",
"True",
",",
"parse_dates",
"=",
"None",
")",
":",
"frame",
"=",
"DataFrame",
".",
"from_records",
"(",
"data",
",",
"columns",
"=",
"columns",
",",
"coerce_float",
"=",
"coerce_float",
")",
"frame",
"=",
"_parse_date_columns",
"(",
"frame",
",",
"parse_dates",
")",
"if",
"index_col",
"is",
"not",
"None",
":",
"frame",
".",
"set_index",
"(",
"index_col",
",",
"inplace",
"=",
"True",
")",
"return",
"frame"
] |
An individual exchange
|
def get_exchange_for_vhost ( self , exchange , vhost ) : return self . _api_get ( '/api/exchanges/{0}/{1}' . format ( urllib . parse . quote_plus ( vhost ) , urllib . parse . quote_plus ( exchange ) ) )
| 4,775
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L185-L198
|
[
"def",
"to_volume",
"(",
"mesh",
",",
"file_name",
"=",
"None",
",",
"max_element",
"=",
"None",
",",
"mesher_id",
"=",
"1",
")",
":",
"# checks mesher selection",
"if",
"mesher_id",
"not",
"in",
"[",
"1",
",",
"4",
",",
"7",
",",
"10",
"]",
":",
"raise",
"ValueError",
"(",
"'unavilable mesher selected!'",
")",
"else",
":",
"mesher_id",
"=",
"int",
"(",
"mesher_id",
")",
"# set max element length to a best guess if not specified",
"if",
"max_element",
"is",
"None",
":",
"max_element",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"mesh",
".",
"area_faces",
")",
")",
"if",
"file_name",
"is",
"not",
"None",
":",
"# check extensions to make sure it is supported format",
"if",
"not",
"any",
"(",
"file_name",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"e",
")",
"for",
"e",
"in",
"[",
"'.bdf'",
",",
"'.msh'",
",",
"'.inp'",
",",
"'.diff'",
",",
"'.mesh'",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'Only Nastran (.bdf), Gmsh (.msh), Abaqus (*.inp), '",
"+",
"'Diffpack (*.diff) and Inria Medit (*.mesh) formats '",
"+",
"'are available!'",
")",
"# exports to disk for gmsh to read using a temp file",
"mesh_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.stl'",
",",
"delete",
"=",
"False",
")",
"mesh_file",
".",
"close",
"(",
")",
"mesh",
".",
"export",
"(",
"mesh_file",
".",
"name",
")",
"# starts Gmsh Python API script",
"gmsh",
".",
"initialize",
"(",
")",
"gmsh",
".",
"option",
".",
"setNumber",
"(",
"\"General.Terminal\"",
",",
"1",
")",
"gmsh",
".",
"model",
".",
"add",
"(",
"'Nastran_stl'",
")",
"gmsh",
".",
"merge",
"(",
"mesh_file",
".",
"name",
")",
"dimtag",
"=",
"gmsh",
".",
"model",
".",
"getEntities",
"(",
")",
"[",
"0",
"]",
"dim",
"=",
"dimtag",
"[",
"0",
"]",
"tag",
"=",
"dimtag",
"[",
"1",
"]",
"surf_loop",
"=",
"gmsh",
".",
"model",
".",
"geo",
".",
"addSurfaceLoop",
"(",
"[",
"tag",
"]",
")",
"gmsh",
".",
"model",
".",
"geo",
".",
"addVolume",
"(",
"[",
"surf_loop",
"]",
")",
"gmsh",
".",
"model",
".",
"geo",
".",
"synchronize",
"(",
")",
"# We can then generate a 3D mesh...",
"gmsh",
".",
"option",
".",
"setNumber",
"(",
"\"Mesh.Algorithm3D\"",
",",
"mesher_id",
")",
"gmsh",
".",
"option",
".",
"setNumber",
"(",
"\"Mesh.CharacteristicLengthMax\"",
",",
"max_element",
")",
"gmsh",
".",
"model",
".",
"mesh",
".",
"generate",
"(",
"3",
")",
"dimtag2",
"=",
"gmsh",
".",
"model",
".",
"getEntities",
"(",
")",
"[",
"1",
"]",
"dim2",
"=",
"dimtag2",
"[",
"0",
"]",
"tag2",
"=",
"dimtag2",
"[",
"1",
"]",
"p2",
"=",
"gmsh",
".",
"model",
".",
"addPhysicalGroup",
"(",
"dim2",
",",
"[",
"tag2",
"]",
")",
"gmsh",
".",
"model",
".",
"setPhysicalName",
"(",
"dim",
",",
"p2",
",",
"'Nastran_bdf'",
")",
"data",
"=",
"None",
"# if file name is None, return msh data using a tempfile",
"if",
"file_name",
"is",
"None",
":",
"out_data",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.msh'",
",",
"delete",
"=",
"False",
")",
"# windows gets mad if two processes try to open the same file",
"out_data",
".",
"close",
"(",
")",
"gmsh",
".",
"write",
"(",
"out_data",
".",
"name",
")",
"with",
"open",
"(",
"out_data",
".",
"name",
",",
"'rb'",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"else",
":",
"gmsh",
".",
"write",
"(",
"file_name",
")",
"# close up shop",
"gmsh",
".",
"finalize",
"(",
")",
"return",
"data"
] |
Delete an individual exchange . You can add the parameter if_unused = True . This prevents the delete from succeeding if the exchange is bound to a queue or as a source to another exchange .
|
def delete_exchange_for_vhost ( self , exchange , vhost , if_unused = False ) : self . _api_delete ( '/api/exchanges/{0}/{1}' . format ( urllib . parse . quote_plus ( vhost ) , urllib . parse . quote_plus ( exchange ) ) , params = { 'if-unused' : if_unused } , )
| 4,776
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L232-L254
|
[
"def",
"on_train_end",
"(",
"self",
",",
"logs",
")",
":",
"duration",
"=",
"timeit",
".",
"default_timer",
"(",
")",
"-",
"self",
".",
"train_start",
"print",
"(",
"'done, took {:.3f} seconds'",
".",
"format",
"(",
"duration",
")",
")"
] |
A list of all bindings in a given virtual host .
|
def list_bindings_for_vhost ( self , vhost ) : return self . _api_get ( '/api/bindings/{}' . format ( urllib . parse . quote_plus ( vhost ) ) )
| 4,777
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L262-L271
|
[
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"_initialize_run",
"(",
")",
"stimuli",
"=",
"self",
".",
"protocol_model",
".",
"allTests",
"(",
")",
"self",
".",
"acq_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_worker",
",",
"args",
"=",
"(",
"stimuli",
",",
")",
",",
")",
"# save the current calibration to data file doc ",
"if",
"self",
".",
"save_data",
":",
"info",
"=",
"{",
"'calibration_used'",
":",
"self",
".",
"calname",
",",
"'calibration_range'",
":",
"self",
".",
"cal_frange",
"}",
"self",
".",
"datafile",
".",
"set_metadata",
"(",
"self",
".",
"current_dataset_name",
",",
"info",
")",
"# save the start time and set last tick to expired, so first",
"# acquisition loop iteration executes immediately",
"self",
".",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"last_tick",
"=",
"self",
".",
"start_time",
"-",
"(",
"self",
".",
"interval",
"/",
"1000",
")",
"self",
".",
"acq_thread",
".",
"start",
"(",
")",
"return",
"self",
".",
"acq_thread"
] |
Details about an individual vhost .
|
def get_vhost ( self , name ) : return self . _api_get ( '/api/vhosts/{0}' . format ( urllib . parse . quote_plus ( name ) ) )
| 4,778
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L279-L288
|
[
"def",
"wrap_conn",
"(",
"conn_func",
")",
":",
"def",
"call",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"conn",
"=",
"conn_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"cursor_func",
"=",
"getattr",
"(",
"conn",
",",
"CURSOR_WRAP_METHOD",
")",
"wrapped",
"=",
"wrap_cursor",
"(",
"cursor_func",
")",
"setattr",
"(",
"conn",
",",
"cursor_func",
".",
"__name__",
",",
"wrapped",
")",
"return",
"conn",
"except",
"Exception",
":",
"# pragma: NO COVER",
"logging",
".",
"warning",
"(",
"'Fail to wrap conn, mysql not traced.'",
")",
"return",
"conn_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"call"
] |
Create an individual vhost .
|
def create_vhost ( self , name , tracing = False ) : data = { 'tracing' : True } if tracing else { } self . _api_put ( '/api/vhosts/{0}' . format ( urllib . parse . quote_plus ( name ) ) , data = data , )
| 4,779
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L301-L315
|
[
"def",
"apply_crop_list",
"(",
"crop_list",
",",
"input_doc",
",",
"page_nums_to_crop",
",",
"already_cropped_by_this_program",
")",
":",
"if",
"args",
".",
"restore",
"and",
"not",
"already_cropped_by_this_program",
":",
"print",
"(",
"\"\\nWarning from pdfCropMargins: The Producer string indicates that\"",
"\"\\neither this document was not previously cropped by pdfCropMargins\"",
"\"\\nor else it was modified by another program after that. Trying the\"",
"\"\\nundo anyway...\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"args",
".",
"restore",
"and",
"args",
".",
"verbose",
":",
"print",
"(",
"\"\\nRestoring the document to margins saved for each page in the ArtBox.\"",
")",
"if",
"args",
".",
"verbose",
"and",
"not",
"args",
".",
"restore",
":",
"print",
"(",
"\"\\nNew full page sizes after cropping, in PDF format (lbrt):\"",
")",
"# Copy over each page, after modifying the appropriate PDF boxes.",
"for",
"page_num",
"in",
"range",
"(",
"input_doc",
".",
"getNumPages",
"(",
")",
")",
":",
"curr_page",
"=",
"input_doc",
".",
"getPage",
"(",
"page_num",
")",
"# Restore any rotation which was originally on the page.",
"curr_page",
".",
"rotateClockwise",
"(",
"curr_page",
".",
"rotationAngle",
")",
"# Only do the restore from ArtBox if '--restore' option was selected.",
"if",
"args",
".",
"restore",
":",
"if",
"not",
"curr_page",
".",
"artBox",
":",
"print",
"(",
"\"\\nWarning from pdfCropMargins: Attempting to restore pages from\"",
"\"\\nthe ArtBox in each page, but page\"",
",",
"page_num",
",",
"\"has no readable\"",
"\"\\nArtBox. Leaving that page unchanged.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"continue",
"curr_page",
".",
"mediaBox",
"=",
"curr_page",
".",
"artBox",
"curr_page",
".",
"cropBox",
"=",
"curr_page",
".",
"artBox",
"continue",
"# Do the save to ArtBox if that option is chosen and Producer is set.",
"if",
"not",
"args",
".",
"noundosave",
"and",
"not",
"already_cropped_by_this_program",
":",
"curr_page",
".",
"artBox",
"=",
"intersect_boxes",
"(",
"curr_page",
".",
"mediaBox",
",",
"curr_page",
".",
"cropBox",
")",
"# Reset the CropBox and MediaBox to their saved original values",
"# (which were set in getFullPageBox, in the curr_page object's namespace).",
"curr_page",
".",
"mediaBox",
"=",
"curr_page",
".",
"originalMediaBox",
"curr_page",
".",
"cropBox",
"=",
"curr_page",
".",
"originalCropBox",
"# Copy the original page without further mods if it wasn't in the range",
"# selected for cropping.",
"if",
"page_num",
"not",
"in",
"page_nums_to_crop",
":",
"continue",
"# Convert the computed \"box to crop to\" into a RectangleObject (for pyPdf).",
"new_cropped_box",
"=",
"RectangleObject",
"(",
"crop_list",
"[",
"page_num",
"]",
")",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"\\t\"",
"+",
"str",
"(",
"page_num",
"+",
"1",
")",
"+",
"\"\\t\"",
",",
"new_cropped_box",
")",
"# page numbering from 1",
"if",
"not",
"args",
".",
"boxesToSet",
":",
"args",
".",
"boxesToSet",
"=",
"[",
"\"m\"",
",",
"\"c\"",
"]",
"# Now set any boxes which were selected to be set via the --boxesToSet option.",
"if",
"\"m\"",
"in",
"args",
".",
"boxesToSet",
":",
"curr_page",
".",
"mediaBox",
"=",
"new_cropped_box",
"if",
"\"c\"",
"in",
"args",
".",
"boxesToSet",
":",
"curr_page",
".",
"cropBox",
"=",
"new_cropped_box",
"if",
"\"t\"",
"in",
"args",
".",
"boxesToSet",
":",
"curr_page",
".",
"trimBox",
"=",
"new_cropped_box",
"if",
"\"a\"",
"in",
"args",
".",
"boxesToSet",
":",
"curr_page",
".",
"artBox",
"=",
"new_cropped_box",
"if",
"\"b\"",
"in",
"args",
".",
"boxesToSet",
":",
"curr_page",
".",
"bleedBox",
"=",
"new_cropped_box",
"return"
] |
Details about an individual user .
|
def get_user ( self , name ) : return self . _api_get ( '/api/users/{0}' . format ( urllib . parse . quote_plus ( name ) ) )
| 4,780
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L323-L332
|
[
"def",
"aux",
"(",
"self",
",",
"aux",
")",
":",
"if",
"aux",
"==",
"self",
".",
"_aux",
":",
"return",
"if",
"self",
".",
"_aux",
":",
"self",
".",
"_manager",
".",
"port_manager",
".",
"release_tcp_port",
"(",
"self",
".",
"_aux",
",",
"self",
".",
"_project",
")",
"self",
".",
"_aux",
"=",
"None",
"if",
"aux",
"is",
"not",
"None",
":",
"self",
".",
"_aux",
"=",
"self",
".",
"_manager",
".",
"port_manager",
".",
"reserve_tcp_port",
"(",
"aux",
",",
"self",
".",
"_project",
")",
"log",
".",
"info",
"(",
"\"{module}: '{name}' [{id}]: aux port set to {port}\"",
".",
"format",
"(",
"module",
"=",
"self",
".",
"manager",
".",
"module_name",
",",
"name",
"=",
"self",
".",
"name",
",",
"id",
"=",
"self",
".",
"id",
",",
"port",
"=",
"aux",
")",
")"
] |
A list of all permissions for a given user .
|
def list_user_permissions ( self , name ) : return self . _api_get ( '/api/users/{0}/permissions' . format ( urllib . parse . quote_plus ( name ) ) )
| 4,781
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L376-L385
|
[
"def",
"unindex_layers_with_issues",
"(",
"self",
",",
"use_cache",
"=",
"False",
")",
":",
"from",
"hypermap",
".",
"aggregator",
".",
"models",
"import",
"Issue",
",",
"Layer",
",",
"Service",
"from",
"django",
".",
"contrib",
".",
"contenttypes",
".",
"models",
"import",
"ContentType",
"layer_type",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"Layer",
")",
"service_type",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"Service",
")",
"for",
"issue",
"in",
"Issue",
".",
"objects",
".",
"filter",
"(",
"content_type__pk",
"=",
"layer_type",
".",
"id",
")",
":",
"unindex_layer",
"(",
"issue",
".",
"content_object",
".",
"id",
",",
"use_cache",
")",
"for",
"issue",
"in",
"Issue",
".",
"objects",
".",
"filter",
"(",
"content_type__pk",
"=",
"service_type",
".",
"id",
")",
":",
"for",
"layer",
"in",
"issue",
".",
"content_object",
".",
"layer_set",
".",
"all",
"(",
")",
":",
"unindex_layer",
"(",
"layer",
".",
"id",
",",
"use_cache",
")"
] |
A list of all policies for a vhost .
|
def list_policies_for_vhost ( self , vhost ) : return self . _api_get ( '/api/policies/{0}' . format ( urllib . parse . quote_plus ( vhost ) ) )
| 4,782
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L468-L474
|
[
"def",
"match_color_index",
"(",
"self",
",",
"color",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"webcolors",
"import",
"color_diff",
"if",
"isinstance",
"(",
"color",
",",
"int",
")",
":",
"return",
"color",
"if",
"color",
":",
"if",
"isinstance",
"(",
"color",
",",
"six",
".",
"string_types",
")",
":",
"rgb",
"=",
"map",
"(",
"int",
",",
"color",
".",
"split",
"(",
"','",
")",
")",
"else",
":",
"rgb",
"=",
"color",
".",
"Get",
"(",
")",
"logging",
".",
"disable",
"(",
"logging",
".",
"DEBUG",
")",
"distances",
"=",
"[",
"color_diff",
"(",
"rgb",
",",
"x",
")",
"for",
"x",
"in",
"self",
".",
"xlwt_colors",
"]",
"logging",
".",
"disable",
"(",
"logging",
".",
"NOTSET",
")",
"result",
"=",
"distances",
".",
"index",
"(",
"min",
"(",
"distances",
")",
")",
"self",
".",
"unused_colors",
".",
"discard",
"(",
"self",
".",
"xlwt_colors",
"[",
"result",
"]",
")",
"return",
"result"
] |
Get a specific policy for a vhost .
|
def get_policy_for_vhost ( self , vhost , name ) : return self . _api_get ( '/api/policies/{0}/{1}' . format ( urllib . parse . quote_plus ( vhost ) , urllib . parse . quote_plus ( name ) , ) )
| 4,783
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L476-L488
|
[
"def",
"summary_width",
"(",
"self",
")",
":",
"chunk_counts",
"=",
"[",
"chunk",
".",
"count",
"for",
"chunk",
"in",
"self",
".",
"_progress_chunks",
"]",
"numbers_width",
"=",
"sum",
"(",
"max",
"(",
"1",
",",
"ceil",
"(",
"log10",
"(",
"count",
"+",
"1",
")",
")",
")",
"for",
"count",
"in",
"chunk_counts",
")",
"separators_with",
"=",
"len",
"(",
"chunk_counts",
")",
"-",
"1",
"return",
"numbers_width",
"+",
"separators_with"
] |
Create a policy for a vhost .
|
def create_policy_for_vhost ( self , vhost , name , definition , pattern = None , priority = 0 , apply_to = 'all' ) : data = { "pattern" : pattern , "definition" : definition , "priority" : priority , "apply-to" : apply_to } self . _api_put ( '/api/policies/{0}/{1}' . format ( urllib . parse . quote_plus ( vhost ) , urllib . parse . quote_plus ( name ) , ) , data = data , )
| 4,784
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L490-L537
|
[
"def",
"rename_sectors",
"(",
"self",
",",
"sectors",
")",
":",
"if",
"type",
"(",
"sectors",
")",
"is",
"list",
":",
"sectors",
"=",
"{",
"old",
":",
"new",
"for",
"old",
",",
"new",
"in",
"zip",
"(",
"self",
".",
"get_sectors",
"(",
")",
",",
"sectors",
")",
"}",
"for",
"df",
"in",
"self",
".",
"get_DataFrame",
"(",
"data",
"=",
"True",
")",
":",
"df",
".",
"rename",
"(",
"index",
"=",
"sectors",
",",
"columns",
"=",
"sectors",
",",
"inplace",
"=",
"True",
")",
"try",
":",
"for",
"ext",
"in",
"self",
".",
"get_extensions",
"(",
"data",
"=",
"True",
")",
":",
"for",
"df",
"in",
"ext",
".",
"get_DataFrame",
"(",
"data",
"=",
"True",
")",
":",
"df",
".",
"rename",
"(",
"index",
"=",
"sectors",
",",
"columns",
"=",
"sectors",
",",
"inplace",
"=",
"True",
")",
"except",
":",
"pass",
"self",
".",
"meta",
".",
"_add_modify",
"(",
"\"Changed sector names\"",
")",
"return",
"self"
] |
Delete a specific policy for a vhost .
|
def delete_policy_for_vhost ( self , vhost , name ) : self . _api_delete ( '/api/policies/{0}/{1}/' . format ( urllib . parse . quote_plus ( vhost ) , urllib . parse . quote_plus ( name ) , ) )
| 4,785
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L539-L551
|
[
"def",
"summary_width",
"(",
"self",
")",
":",
"chunk_counts",
"=",
"[",
"chunk",
".",
"count",
"for",
"chunk",
"in",
"self",
".",
"_progress_chunks",
"]",
"numbers_width",
"=",
"sum",
"(",
"max",
"(",
"1",
",",
"ceil",
"(",
"log10",
"(",
"count",
"+",
"1",
")",
")",
")",
"for",
"count",
"in",
"chunk_counts",
")",
"separators_with",
"=",
"len",
"(",
"chunk_counts",
")",
"-",
"1",
"return",
"numbers_width",
"+",
"separators_with"
] |
Declares a test queue then publishes and consumes a message . Intended for use by monitoring tools .
|
def is_vhost_alive ( self , vhost ) : return self . _api_get ( '/api/aliveness-test/{0}' . format ( urllib . parse . quote_plus ( vhost ) ) )
| 4,786
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L553-L563
|
[
"def",
"add_optional_arg_param",
"(",
"self",
",",
"param_name",
",",
"layer_index",
",",
"blob_index",
")",
":",
"blobs",
"=",
"self",
".",
"layers",
"[",
"layer_index",
"]",
".",
"blobs",
"if",
"blob_index",
"<",
"len",
"(",
"blobs",
")",
":",
"self",
".",
"add_arg_param",
"(",
"param_name",
",",
"layer_index",
",",
"blob_index",
")"
] |
Write from database to file .
|
def write ( self , session , directory , name , maskMap ) : # Assemble Path to file name_split = name . split ( '.' ) name = name_split [ 0 ] # Default extension extension = '' if len ( name_split ) >= 2 : extension = name_split [ - 1 ] # Run name preprocessor method if present try : name = self . _namePreprocessor ( name ) except : 'DO NOTHING' if extension == '' : filename = '{0}.{1}' . format ( name , self . fileExtension ) else : filename = '{0}.{1}' . format ( name , extension ) filePath = os . path . join ( directory , filename ) with open ( filePath , 'w' ) as openFile : # Write Lines self . _write ( session = session , openFile = openFile , maskMap = maskMap )
| 4,787
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L144-L180
|
[
"def",
"makeAggShkDstn",
"(",
"self",
")",
":",
"TranShkAggDstn",
"=",
"[",
"]",
"PermShkAggDstn",
"=",
"[",
"]",
"AggShkDstn",
"=",
"[",
"]",
"StateCount",
"=",
"self",
".",
"MrkvArray",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"StateCount",
")",
":",
"TranShkAggDstn",
".",
"append",
"(",
"approxMeanOneLognormal",
"(",
"sigma",
"=",
"self",
".",
"TranShkAggStd",
"[",
"i",
"]",
",",
"N",
"=",
"self",
".",
"TranShkAggCount",
")",
")",
"PermShkAggDstn",
".",
"append",
"(",
"approxMeanOneLognormal",
"(",
"sigma",
"=",
"self",
".",
"PermShkAggStd",
"[",
"i",
"]",
",",
"N",
"=",
"self",
".",
"PermShkAggCount",
")",
")",
"AggShkDstn",
".",
"append",
"(",
"combineIndepDstns",
"(",
"PermShkAggDstn",
"[",
"-",
"1",
"]",
",",
"TranShkAggDstn",
"[",
"-",
"1",
"]",
")",
")",
"self",
".",
"TranShkAggDstn",
"=",
"TranShkAggDstn",
"self",
".",
"PermShkAggDstn",
"=",
"PermShkAggDstn",
"self",
".",
"AggShkDstn",
"=",
"AggShkDstn"
] |
Retrieve the WMS dataset as a gridded time stamped KML string .
|
def getAsKmlGridAnimation ( self , session , projectFile = None , path = None , documentName = None , colorRamp = None , alpha = 1.0 , noDataValue = 0.0 ) : # Prepare rasters timeStampedRasters = self . _assembleRasterParams ( projectFile , self . rasters ) # Create a raster converter converter = RasterConverter ( sqlAlchemyEngineOrSession = session ) # Configure color ramp if isinstance ( colorRamp , dict ) : converter . setCustomColorRamp ( colorRamp [ 'colors' ] , colorRamp [ 'interpolatedPoints' ] ) else : converter . setDefaultColorRamp ( colorRamp ) if documentName is None : documentName = self . fileExtension kmlString = converter . getAsKmlGridAnimation ( tableName = WMSDatasetRaster . tableName , timeStampedRasters = timeStampedRasters , rasterIdFieldName = 'id' , rasterFieldName = 'raster' , documentName = documentName , alpha = alpha , noDataValue = noDataValue ) if path : with open ( path , 'w' ) as f : f . write ( kmlString ) return kmlString
| 4,788
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L184-L234
|
[
"def",
"substitute_partner",
"(",
"self",
",",
"state",
",",
"partners_recp",
",",
"recp",
",",
"alloc_id",
")",
":",
"partner",
"=",
"state",
".",
"partners",
".",
"find",
"(",
"recipient",
".",
"IRecipient",
"(",
"partners_recp",
")",
")",
"if",
"not",
"partner",
":",
"msg",
"=",
"'subsitute_partner() did not find the partner %r'",
"%",
"partners_recp",
"self",
".",
"error",
"(",
"msg",
")",
"return",
"fiber",
".",
"fail",
"(",
"partners",
".",
"FindPartnerError",
"(",
"msg",
")",
")",
"return",
"self",
".",
"establish_partnership",
"(",
"recp",
",",
"partner",
".",
"allocation_id",
",",
"alloc_id",
",",
"substitute",
"=",
"partner",
")"
] |
Retrieve the WMS dataset as a PNG time stamped KMZ
|
def getAsKmlPngAnimation ( self , session , projectFile = None , path = None , documentName = None , colorRamp = None , alpha = 1.0 , noDataValue = 0 , drawOrder = 0 , cellSize = None , resampleMethod = 'NearestNeighbour' ) : # Prepare rasters timeStampedRasters = self . _assembleRasterParams ( projectFile , self . rasters ) # Make sure the raster field is valid converter = RasterConverter ( sqlAlchemyEngineOrSession = session ) # Configure color ramp if isinstance ( colorRamp , dict ) : converter . setCustomColorRamp ( colorRamp [ 'colors' ] , colorRamp [ 'interpolatedPoints' ] ) else : converter . setDefaultColorRamp ( colorRamp ) if documentName is None : documentName = self . fileExtension kmlString , binaryPngStrings = converter . getAsKmlPngAnimation ( tableName = WMSDatasetRaster . tableName , timeStampedRasters = timeStampedRasters , rasterIdFieldName = 'id' , rasterFieldName = 'raster' , documentName = documentName , alpha = alpha , drawOrder = drawOrder , cellSize = cellSize , noDataValue = noDataValue , resampleMethod = resampleMethod ) if path : directory = os . path . dirname ( path ) archiveName = ( os . path . split ( path ) [ 1 ] ) . split ( '.' ) [ 0 ] kmzPath = os . path . join ( directory , ( archiveName + '.kmz' ) ) with ZipFile ( kmzPath , 'w' ) as kmz : kmz . writestr ( archiveName + '.kml' , kmlString ) for index , binaryPngString in enumerate ( binaryPngStrings ) : kmz . writestr ( 'raster{0}.png' . format ( index ) , binaryPngString ) return kmlString , binaryPngStrings
| 4,789
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L236-L306
|
[
"def",
"gipool",
"(",
"name",
",",
"start",
",",
"room",
")",
":",
"name",
"=",
"stypes",
".",
"stringToCharP",
"(",
"name",
")",
"start",
"=",
"ctypes",
".",
"c_int",
"(",
"start",
")",
"ivals",
"=",
"stypes",
".",
"emptyIntVector",
"(",
"room",
")",
"room",
"=",
"ctypes",
".",
"c_int",
"(",
"room",
")",
"n",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"found",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"libspice",
".",
"gipool_c",
"(",
"name",
",",
"start",
",",
"room",
",",
"ctypes",
".",
"byref",
"(",
"n",
")",
",",
"ivals",
",",
"ctypes",
".",
"byref",
"(",
"found",
")",
")",
"return",
"stypes",
".",
"cVectorToPython",
"(",
"ivals",
")",
"[",
"0",
":",
"n",
".",
"value",
"]",
",",
"bool",
"(",
"found",
".",
"value",
")"
] |
WMS Dataset File Read from File Method
|
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , maskMap ) : # Assign file extension attribute to file object self . fileExtension = extension if isinstance ( maskMap , RasterMapFile ) and maskMap . fileExtension == 'msk' : # Vars from mask map columns = maskMap . columns rows = maskMap . rows upperLeftX = maskMap . west upperLeftY = maskMap . north # Derive the cell size (GSSHA cells are square, so it is the same in both directions) cellSizeX = int ( abs ( maskMap . west - maskMap . east ) / columns ) cellSizeY = - 1 * cellSizeX # Dictionary of keywords/cards and parse function names KEYWORDS = { 'DATASET' : wdc . datasetHeaderChunk , 'TS' : wdc . datasetScalarTimeStepChunk } # Open file and read plain text into text field with open ( path , 'r' ) as f : chunks = pt . chunk ( KEYWORDS , f ) # Parse header chunk first header = wdc . datasetHeaderChunk ( 'DATASET' , chunks [ 'DATASET' ] [ 0 ] ) # Parse each time step chunk and aggregate timeStepRasters = [ ] for chunk in chunks [ 'TS' ] : timeStepRasters . append ( wdc . datasetScalarTimeStepChunk ( chunk , columns , header [ 'numberCells' ] ) ) # Set WMS dataset file properties self . name = header [ 'name' ] self . numberCells = header [ 'numberCells' ] self . numberData = header [ 'numberData' ] self . objectID = header [ 'objectID' ] if header [ 'type' ] == 'BEGSCL' : self . objectType = header [ 'objectType' ] self . type = self . SCALAR_TYPE elif header [ 'type' ] == 'BEGVEC' : self . vectorType = header [ 'objectType' ] self . type = self . VECTOR_TYPE # Create WMS raster dataset files for each raster for timeStep , timeStepRaster in enumerate ( timeStepRasters ) : # Create new WMS raster dataset file object wmsRasterDatasetFile = WMSDatasetRaster ( ) # Set the wms dataset for this WMS raster dataset file wmsRasterDatasetFile . wmsDataset = self # Set the time step and timestamp and other properties wmsRasterDatasetFile . 
iStatus = timeStepRaster [ 'iStatus' ] wmsRasterDatasetFile . timestamp = timeStepRaster [ 'timestamp' ] wmsRasterDatasetFile . timeStep = timeStep + 1 # If spatial is enabled create PostGIS rasters if spatial : # Process the values/cell array wmsRasterDatasetFile . raster = RasterLoader . makeSingleBandWKBRaster ( session , columns , rows , upperLeftX , upperLeftY , cellSizeX , cellSizeY , 0 , 0 , spatialReferenceID , timeStepRaster [ 'cellArray' ] ) # Otherwise, set the raster text properties else : wmsRasterDatasetFile . rasterText = timeStepRaster [ 'rasterText' ] # Add current file object to the session session . add ( self ) else : log . warning ( "Could not read {0}. Mask Map must be supplied " "to read WMS Datasets." . format ( filename ) )
| 4,790
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L308-L390
|
[
"def",
"remove_stale_javascripts",
"(",
"portal",
")",
":",
"logger",
".",
"info",
"(",
"\"Removing stale javascripts ...\"",
")",
"for",
"js",
"in",
"JAVASCRIPTS_TO_REMOVE",
":",
"logger",
".",
"info",
"(",
"\"Unregistering JS %s\"",
"%",
"js",
")",
"portal",
".",
"portal_javascripts",
".",
"unregisterResource",
"(",
"js",
")"
] |
WMS Dataset File Write to File Method
|
def _write ( self , session , openFile , maskMap ) : # Magic numbers FIRST_VALUE_INDEX = 12 # Write the header openFile . write ( 'DATASET\r\n' ) if self . type == self . SCALAR_TYPE : openFile . write ( 'OBJTYPE {0}\r\n' . format ( self . objectType ) ) openFile . write ( 'BEGSCL\r\n' ) elif self . type == self . VECTOR_TYPE : openFile . write ( 'VECTYPE {0}\r\n' . format ( self . vectorType ) ) openFile . write ( 'BEGVEC\r\n' ) openFile . write ( 'OBJID {0}\r\n' . format ( self . objectID ) ) openFile . write ( 'ND {0}\r\n' . format ( self . numberData ) ) openFile . write ( 'NC {0}\r\n' . format ( self . numberCells ) ) openFile . write ( 'NAME {0}\r\n' . format ( self . name ) ) # Retrieve the mask map to use as the status rasters statusString = '' if isinstance ( maskMap , RasterMapFile ) : # Convert Mask Map to GRASS ASCII Raster statusGrassRasterString = maskMap . getAsGrassAsciiGrid ( session ) if statusGrassRasterString is not None : # Split by lines statusValues = statusGrassRasterString . split ( ) else : statusValues = maskMap . rasterText . split ( ) # Assemble into a string in the WMS Dataset format for i in range ( FIRST_VALUE_INDEX , len ( statusValues ) ) : statusString += statusValues [ i ] + '\r\n' # Write time steps for timeStepRaster in self . rasters : # Write time step header openFile . write ( 'TS {0} {1}\r\n' . format ( timeStepRaster . iStatus , timeStepRaster . timestamp ) ) # Write status raster (mask map) if applicable if timeStepRaster . iStatus == 1 : openFile . write ( statusString ) # Write value raster valueString = timeStepRaster . getAsWmsDatasetString ( session ) if valueString is not None : openFile . write ( valueString ) else : openFile . write ( timeStepRaster . rasterText ) # Write ending tag for the dataset openFile . write ( 'ENDDS\r\n' )
| 4,791
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L392-L450
|
[
"def",
"remove_stale_javascripts",
"(",
"portal",
")",
":",
"logger",
".",
"info",
"(",
"\"Removing stale javascripts ...\"",
")",
"for",
"js",
"in",
"JAVASCRIPTS_TO_REMOVE",
":",
"logger",
".",
"info",
"(",
"\"Unregistering JS %s\"",
"%",
"js",
")",
"portal",
".",
"portal_javascripts",
".",
"unregisterResource",
"(",
"js",
")"
] |
Retrieve the WMS Raster as a string in the WMS Dataset format
|
def getAsWmsDatasetString ( self , session ) : # Magic numbers FIRST_VALUE_INDEX = 12 # Write value raster if type ( self . raster ) != type ( None ) : # Convert to GRASS ASCII Raster valueGrassRasterString = self . getAsGrassAsciiGrid ( session ) # Split by lines values = valueGrassRasterString . split ( ) # Assemble into string wmsDatasetString = '' for i in range ( FIRST_VALUE_INDEX , len ( values ) ) : wmsDatasetString += '{0:.6f}\r\n' . format ( float ( values [ i ] ) ) return wmsDatasetString else : wmsDatasetString = self . rasterText
| 4,792
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L530-L553
|
[
"def",
"leave_group",
"(",
"self",
",",
"group_id",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"_post",
"(",
"'/v2/bot/group/{group_id}/leave'",
".",
"format",
"(",
"group_id",
"=",
"group_id",
")",
",",
"timeout",
"=",
"timeout",
")"
] |
Make sure that there are no random artifacts in the file .
|
def check_watershed_boundary_geometry ( shapefile_path ) : wfg = gpd . read_file ( shapefile_path ) first_shape = wfg . iloc [ 0 ] . geometry if hasattr ( first_shape , 'geoms' ) : raise ValueError ( "Invalid watershed boundary geometry. " "To fix this, remove disconnected shapes or run " "gsshapy.modeling.GSSHAModel.clean_boundary_shapefile" )
| 4,793
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/check_geometry.py#L4-L12
|
[
"def",
"cublasDestroy",
"(",
"handle",
")",
":",
"status",
"=",
"_libcublas",
".",
"cublasDestroy_v2",
"(",
"ctypes",
".",
"c_void_p",
"(",
"handle",
")",
")",
"cublasCheckStatus",
"(",
"status",
")"
] |
Get a batch of inputs and outputs from given sentences .
|
def get_batch ( sentences , token_dict , ignore_case = False , unk_index = 1 , eos_index = 2 ) : batch_size = len ( sentences ) max_sentence_len = max ( map ( len , sentences ) ) inputs = [ [ 0 ] * max_sentence_len for _ in range ( batch_size ) ] outputs_forward = [ [ 0 ] * max_sentence_len for _ in range ( batch_size ) ] outputs_backward = [ [ 0 ] * max_sentence_len for _ in range ( batch_size ) ] for i , sentence in enumerate ( sentences ) : outputs_forward [ i ] [ len ( sentence ) - 1 ] = eos_index outputs_backward [ i ] [ 0 ] = eos_index for j , token in enumerate ( sentence ) : if ignore_case : index = token_dict . get ( token . lower ( ) , unk_index ) else : index = token_dict . get ( token , unk_index ) inputs [ i ] [ j ] = index if j - 1 >= 0 : outputs_forward [ i ] [ j - 1 ] = index if j + 1 < len ( sentence ) : outputs_backward [ i ] [ j + 1 ] = index outputs_forward = np . expand_dims ( np . asarray ( outputs_forward ) , axis = - 1 ) outputs_backward = np . expand_dims ( np . asarray ( outputs_backward ) , axis = - 1 ) return np . asarray ( inputs ) , [ outputs_forward , outputs_backward ]
| 4,794
|
https://github.com/CyberZHG/keras-bi-lm/blob/615e1131052d488420d759bab2370d504c9fc074/keras_bi_lm/model.py#L168-L203
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_access",
"is",
"not",
"None",
":",
"_logger",
".",
"debug",
"(",
"\"Cleaning up\"",
")",
"pci_cleanup",
"(",
"self",
".",
"_access",
")",
"self",
".",
"_access",
"=",
"None"
] |
Simple wrapper of model . fit .
|
def fit ( self , inputs , outputs , epochs = 1 ) : self . model . fit ( inputs , outputs , epochs = epochs )
| 4,795
|
https://github.com/CyberZHG/keras-bi-lm/blob/615e1131052d488420d759bab2370d504c9fc074/keras_bi_lm/model.py#L205-L214
|
[
"def",
"is_redundant_multiplicon",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_redundant_multiplicon_cache'",
")",
":",
"sql",
"=",
"'''SELECT id FROM multiplicons WHERE is_redundant=\"-1\"'''",
"cur",
"=",
"self",
".",
"_dbconn",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"sql",
",",
"{",
"'id'",
":",
"str",
"(",
"value",
")",
"}",
")",
"result",
"=",
"[",
"int",
"(",
"r",
"[",
"0",
"]",
")",
"for",
"r",
"in",
"cur",
".",
"fetchall",
"(",
")",
"]",
"self",
".",
"_redundant_multiplicon_cache",
"=",
"set",
"(",
"result",
")",
"if",
"value",
"in",
"self",
".",
"_redundant_multiplicon_cache",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Get layers that output the Bi - LM feature .
|
def get_feature_layers ( self , input_layer = None , trainable = False , use_weighted_sum = False ) : model = keras . models . clone_model ( self . model , input_layer ) if not trainable : for layer in model . layers : layer . trainable = False if use_weighted_sum : rnn_layers_forward = list ( map ( lambda x : model . get_layer ( x . name . split ( '/' ) [ 0 ] . split ( ':' ) [ 0 ] . split ( '_' ) [ 0 ] ) . output , self . rnn_layers_forward , ) ) rnn_layers_backward = list ( map ( lambda x : model . get_layer ( x . name . split ( '/' ) [ 0 ] . split ( ':' ) [ 0 ] . split ( '_' ) [ 0 ] ) . output , self . rnn_layers_backward , ) ) forward_layer = WeightedSum ( name = 'Bi-LM-Forward-Sum' ) ( rnn_layers_forward ) backward_layer_rev = WeightedSum ( name = 'Bi-LM-Backward-Sum-Rev' ) ( rnn_layers_backward ) backward_layer = keras . layers . Lambda ( function = self . _reverse_x , mask = lambda _ , mask : self . _reverse_x ( mask ) , name = 'Bi-LM-Backward-Sum' ) ( backward_layer_rev ) else : forward_layer = model . get_layer ( name = 'Bi-LM-Forward' ) . output backward_layer = model . get_layer ( name = 'Bi-LM-Backward' ) . output output_layer = keras . layers . Concatenate ( name = 'Bi-LM-Feature' ) ( [ forward_layer , backward_layer ] ) if input_layer is None : input_layer = model . layers [ 0 ] . input return input_layer , output_layer return output_layer
| 4,796
|
https://github.com/CyberZHG/keras-bi-lm/blob/615e1131052d488420d759bab2370d504c9fc074/keras_bi_lm/model.py#L225-L261
|
[
"def",
"make_random_models_table",
"(",
"n_sources",
",",
"param_ranges",
",",
"random_state",
"=",
"None",
")",
":",
"prng",
"=",
"check_random_state",
"(",
"random_state",
")",
"sources",
"=",
"Table",
"(",
")",
"for",
"param_name",
",",
"(",
"lower",
",",
"upper",
")",
"in",
"param_ranges",
".",
"items",
"(",
")",
":",
"# Generate a column for every item in param_ranges, even if it",
"# is not in the model (e.g. flux). However, such columns will",
"# be ignored when rendering the image.",
"sources",
"[",
"param_name",
"]",
"=",
"prng",
".",
"uniform",
"(",
"lower",
",",
"upper",
",",
"n_sources",
")",
"return",
"sources"
] |
if join returns false the node did not entry the ring . Retry it
|
def join ( self , n1 ) : if self . id == n1 . get_id ( ) : for i in range ( k ) : self . finger [ i ] = self . proxy self . predecessor = self . proxy self . run = True return True else : try : self . init_finger_table ( n1 ) except Exception : print 'Join failed' # raise Exception('Join failed') return False else : self . run = True return True
| 4,797
|
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/examples/chord/chord.py#L123-L140
|
[
"def",
"adjust_privileges",
"(",
"state",
",",
"privileges",
")",
":",
"with",
"win32",
".",
"OpenProcessToken",
"(",
"win32",
".",
"GetCurrentProcess",
"(",
")",
",",
"win32",
".",
"TOKEN_ADJUST_PRIVILEGES",
")",
"as",
"hToken",
":",
"NewState",
"=",
"(",
"(",
"priv",
",",
"state",
")",
"for",
"priv",
"in",
"privileges",
")",
"win32",
".",
"AdjustTokenPrivileges",
"(",
"hToken",
",",
"NewState",
")"
] |
NWSRFS Read from File Method
|
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Set file extension property self . fileExtension = extension # Open file and parse with open ( path , 'r' ) as nwsrfsFile : for line in nwsrfsFile : sline = line . strip ( ) . split ( ) # Cases if sline [ 0 ] . lower ( ) == 'number_bands:' : self . numBands = sline [ 1 ] elif sline [ 0 ] . lower ( ) == 'lower_elevation' : """DO NOTHING""" else : # Create GSSHAPY NwsrfsRecord object record = NwsrfsRecord ( lowerElev = sline [ 0 ] , upperElev = sline [ 1 ] , mfMin = sline [ 2 ] , mfMax = sline [ 3 ] , scf = sline [ 4 ] , frUse = sline [ 5 ] , tipm = sline [ 6 ] , nmf = sline [ 7 ] , fua = sline [ 8 ] , plwhc = sline [ 9 ] ) # Associate NwsrfsRecord with NwsrfsFile record . nwsrfsFile = self
| 4,798
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/snw.py#L56-L87
|
[
"def",
"label_to_latex",
"(",
"text",
")",
":",
"# pylint: disable=anomalous-backslash-in-string",
"if",
"text",
"is",
"None",
":",
"return",
"''",
"out",
"=",
"[",
"]",
"x",
"=",
"None",
"# loop over matches in reverse order and replace",
"for",
"m",
"in",
"re_latex_control",
".",
"finditer",
"(",
"text",
")",
":",
"a",
",",
"b",
"=",
"m",
".",
"span",
"(",
")",
"char",
"=",
"m",
".",
"group",
"(",
")",
"[",
"0",
"]",
"out",
".",
"append",
"(",
"text",
"[",
"x",
":",
"a",
"]",
")",
"out",
".",
"append",
"(",
"r'\\%s'",
"%",
"char",
")",
"x",
"=",
"b",
"if",
"not",
"x",
":",
"# no match",
"return",
"text",
"# append prefix and return joined components",
"out",
".",
"append",
"(",
"text",
"[",
"b",
":",
"]",
")",
"return",
"''",
".",
"join",
"(",
"out",
")"
] |
NWSRFS Write to File Method
|
def _write ( self , session , openFile , replaceParamFile ) : # Write lines openFile . write ( 'Number_Bands: %s\n' % self . numBands ) openFile . write ( 'Lower_Elevation Upper_Elevation MF_Min MF_Max SCF FR_USE TIPM NMF FUA PCWHC\n' ) # Retrieve NwsrfsRecords records = self . nwsrfsRecords for record in records : openFile . write ( '%s%s%s%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f\n' % ( record . lowerElev , ' ' * ( 17 - len ( str ( record . lowerElev ) ) ) , # Num Spaces record . upperElev , ' ' * ( 17 - len ( str ( record . upperElev ) ) ) , # Num Spaces record . mfMin , ' ' * ( 8 - len ( str ( record . mfMin ) ) ) , # Num Spaces record . mfMax , ' ' * ( 8 - len ( str ( record . mfMax ) ) ) , # Num Spaces record . scf , ' ' * ( 5 - len ( str ( record . scf ) ) ) , # Num Spaces record . frUse , ' ' * ( 8 - len ( str ( record . frUse ) ) ) , # Num Spaces record . tipm , ' ' * ( 6 - len ( str ( record . tipm ) ) ) , # Num Spaces record . nmf , ' ' * ( 5 - len ( str ( record . nmf ) ) ) , # Num Spaces record . fua , ' ' * ( 5 - len ( str ( record . fua ) ) ) , # Num Spaces record . plwhc ) )
| 4,799
|
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/snw.py#L89-L120
|
[
"def",
"get_gmn_version",
"(",
"base_url",
")",
":",
"home_url",
"=",
"d1_common",
".",
"url",
".",
"joinPathElements",
"(",
"base_url",
",",
"'home'",
")",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"home_url",
",",
"verify",
"=",
"False",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
"as",
"e",
":",
"return",
"False",
",",
"str",
"(",
"e",
")",
"if",
"not",
"response",
".",
"ok",
":",
"return",
"False",
",",
"'invalid /home. status={}'",
".",
"format",
"(",
"response",
".",
"status_code",
")",
"soup",
"=",
"bs4",
".",
"BeautifulSoup",
"(",
"response",
".",
"content",
",",
"'html.parser'",
")",
"version_str",
"=",
"soup",
".",
"find",
"(",
"string",
"=",
"'GMN version:'",
")",
".",
"find_next",
"(",
"'td'",
")",
".",
"string",
"if",
"version_str",
"is",
"None",
":",
"return",
"False",
",",
"'Parse failed'",
"return",
"True",
",",
"version_str"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.