query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Seaborn Joint Hexplot with marginal KDE + hists .
def joint_hex ( x , y , * * kwargs ) : return sns . jointplot ( x , y , kind = 'hex' , stat_func = None , marginal_kws = { 'kde' : True } , * * kwargs )
9,800
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/plot.py#L111-L115
[ "def", "scrape_links", "(", "self", ",", "text", ",", "context", "=", "False", ")", ":", "return", "self", ".", "iter_processed_links", "(", "io", ".", "StringIO", "(", "text", ")", ",", "context", "=", "context", ")" ]
Here we execute the factors over the streams in the workflow Execute the factors in reverse order . We can t just execute the last factor because there may be multiple leaf factors that aren t triggered by upstream computations .
def execute ( self , time_interval ) : # TODO: What if the leaf nodes have different time intervals? # if not self._hyperstream: # raise ValueError("") with WorkflowMonitor ( self ) : # First look for asset writers for factor in self . factors [ : : - 1 ] : if factor . tool . name == "asset_writer" : factor . execute ( time_interval ) for factor in self . factors [ : : - 1 ] : if factor . sink is None or factor . sink . is_leaf and factor . tool . name != "asset_writer" : factor . execute ( time_interval )
9,801
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L87-L107
[ "def", "union", "(", "self", ",", "other", ")", ":", "union", "=", "Rect", "(", ")", "lib", ".", "SDL_UnionRect", "(", "self", ".", "_ptr", ",", "other", ".", "_ptr", ",", "union", ".", "_ptr", ")", "return", "union" ]
Add a node to the workflow
def _add_node ( self , node ) : self . nodes [ node . node_id ] = node logging . info ( "Added node with id {} containing {} streams" . format ( node . node_id , len ( node . streams ) ) )
9,802
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L109-L118
[ "def", "vapour_pressure", "(", "Temperature", ",", "element", ")", ":", "if", "element", "==", "\"Rb\"", ":", "Tmelt", "=", "39.30", "+", "273.15", "# K.", "if", "Temperature", "<", "Tmelt", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.857", "-", "4215.0", "/", "Temperature", ")", "# Torr.", "else", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.312", "-", "4040.0", "/", "Temperature", ")", "# Torr.", "elif", "element", "==", "\"Cs\"", ":", "Tmelt", "=", "28.5", "+", "273.15", "# K.", "if", "Temperature", "<", "Tmelt", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.711", "-", "3999.0", "/", "Temperature", ")", "# Torr.", "else", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.165", "-", "3830.0", "/", "Temperature", ")", "# Torr.", "else", ":", "s", "=", "str", "(", "element", ")", "s", "+=", "\" is not an element in the database for this function.\"", "raise", "ValueError", "(", "s", ")", "P", "=", "P", "*", "101325.0", "/", "760.0", "# Pascals.", "return", "P" ]
Add a factor to the workflow
def _add_factor ( self , factor ) : self . factors . append ( factor ) logging . info ( "Added factor with tool {} " . format ( factor . tool ) )
9,803
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L120-L129
[ "def", "vapour_pressure", "(", "Temperature", ",", "element", ")", ":", "if", "element", "==", "\"Rb\"", ":", "Tmelt", "=", "39.30", "+", "273.15", "# K.", "if", "Temperature", "<", "Tmelt", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.857", "-", "4215.0", "/", "Temperature", ")", "# Torr.", "else", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.312", "-", "4040.0", "/", "Temperature", ")", "# Torr.", "elif", "element", "==", "\"Cs\"", ":", "Tmelt", "=", "28.5", "+", "273.15", "# K.", "if", "Temperature", "<", "Tmelt", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.711", "-", "3999.0", "/", "Temperature", ")", "# Torr.", "else", ":", "P", "=", "10", "**", "(", "2.881", "+", "4.165", "-", "3830.0", "/", "Temperature", ")", "# Torr.", "else", ":", "s", "=", "str", "(", "element", ")", "s", "+=", "\" is not an element in the database for this function.\"", "raise", "ValueError", "(", "s", ")", "P", "=", "P", "*", "101325.0", "/", "760.0", "# Pascals.", "return", "P" ]
General signature for factor creation that tries each of the factor creation types using duck typing
def create_factor_general ( self , * args , * * kwargs ) : try : return self . create_factor ( * args , * * kwargs ) except TypeError : pass try : return self . create_multi_output_factor ( * args , * * kwargs ) except TypeError : pass try : return self . create_node_creation_factor ( * args , * * kwargs ) except TypeError : pass raise FactorDefinitionError ( "Could not find a matching signature" )
9,804
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L164-L184
[ "def", "release", "(", "self", ")", ":", "if", "self", ".", "_subscription", "and", "self", ".", "_subscription", ".", "subscribed", ":", "self", ".", "_subscription", ".", "unsubscribe", "(", ")", "self", ".", "_subscription", ".", "reset", "(", ")" ]
Creates a multi - output factor . This takes a single node applies a MultiOutputTool to create multiple nodes on a new plate Instantiates a single tool for all of the input plate values and connects the source and sink nodes with that tool .
def create_multi_output_factor ( self , tool , source , splitting_node , sink ) : if source and not isinstance ( source , Node ) : raise ValueError ( "Expected Node, got {}" . format ( type ( source ) ) ) if not isinstance ( sink , Node ) : raise ValueError ( "Expected Node, got {}" . format ( type ( sink ) ) ) # if isinstance(tool, dict): # tool = self.channels.get_tool(**tool) if not isinstance ( tool , MultiOutputTool ) : raise ValueError ( "Expected MultiOutputTool, got {}" . format ( type ( tool ) ) ) # Check that the input_plate are compatible - note this is the opposite way round to a normal factor input_plates = source . plates if source else [ ] output_plates = sink . plates if len ( input_plates ) > 1 : raise NotImplementedError if len ( output_plates ) == 0 : raise ValueError ( "No output plate found" ) if len ( output_plates ) == 1 : if not self . check_multi_output_plate_compatibility ( input_plates , output_plates [ 0 ] ) : raise IncompatiblePlatesError ( "Parent plate does not match input plate" ) factor = MultiOutputFactor ( tool = tool , source_node = source , splitting_node = splitting_node , sink_node = sink , input_plate = input_plates [ 0 ] if input_plates else None , output_plates = output_plates [ 0 ] ) else : # The output plates should be the same as the input plates, except for one # additional plate. Since we're currently only supporting one input plate, # we can safely assume that there is a single matching plate. # Finally, note that the output plate must either have no parents # (i.e. 
it is at the root of the tree), or the parent plate is somewhere # in the input plate's ancestry if len ( output_plates ) > 2 : raise NotImplementedError if len ( input_plates ) != 1 : raise IncompatiblePlatesError ( "Require an input plate to match all but one of the output plates" ) if output_plates [ 0 ] == input_plates [ 0 ] : # Found a match, so the output plate should be the other plate output_plate = output_plates [ 1 ] else : if output_plates [ 1 ] . plate_id != input_plates [ 0 ] . plate_id : raise IncompatiblePlatesError ( "Require an input plate to match all but one of the output plates" ) output_plate = output_plates [ 0 ] # Swap them round so the new plate is the last plate - this is required by the factor output_plates [ 1 ] , output_plates [ 0 ] = output_plates [ 0 ] , output_plates [ 1 ] if not output_plate . is_root : # We need to walk up the input plate's parent tree match = False parent = input_plates [ 0 ] . parent while parent is not None : if parent . plate_id == output_plate . parent . plate_id : match = True break parent = parent . parent if not match : raise IncompatiblePlatesError ( "Require an input plate to match all but one of the output plates" ) factor = MultiOutputFactor ( tool = tool , source_node = source , sink_node = sink , splitting_node = splitting_node , input_plate = input_plates [ 0 ] , output_plates = output_plates ) self . _add_factor ( factor ) return factor
9,805
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L287-L374
[ "def", "reset_flags", "(", "self", ")", ":", "self", ".", "C", "=", "None", "self", ".", "Z", "=", "None", "self", ".", "P", "=", "None", "self", ".", "S", "=", "None" ]
Creates a factor that itself creates an output node and ensures that the plate for the output node exists along with all relevant meta - data
def create_node_creation_factor ( self , tool , source , output_plate , plate_manager ) : # if isinstance(tool, dict): # tool = self.channels.get_tool(**tool) if not isinstance ( tool , PlateCreationTool ) : raise ValueError ( "Expected PlateCreationTool, got {}" . format ( type ( tool ) ) ) input_plates = source . plates if source else [ ] if len ( input_plates ) > 1 : raise NotImplementedError factor = NodeCreationFactor ( tool = tool , source_node = source , input_plate = input_plates [ 0 ] if input_plates else None , output_plate = output_plate , plate_manager = plate_manager ) self . _add_factor ( factor ) return factor
9,806
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L376-L409
[ "def", "_convert_volume", "(", "self", ",", "volume", ")", ":", "data", "=", "{", "'host'", ":", "volume", ".", "get", "(", "'hostPath'", ")", ",", "'container'", ":", "volume", ".", "get", "(", "'containerPath'", ")", ",", "'readonly'", ":", "volume", ".", "get", "(", "'mode'", ")", "==", "'RO'", ",", "}", "return", "data" ]
Checks whether the source and sink plate are compatible given the tool
def check_plate_compatibility ( tool , source_plate , sink_plate ) : if sink_plate == source_plate . parent : return None # could be that they have the same meta data, but the sink plate is a simplification of the source # plate (e.g. when using IndexOf tool) if sink_plate . meta_data_id == source_plate . meta_data_id : if sink_plate . is_sub_plate ( source_plate ) : return None return "Sink plate {} is not a simplification of source plate {}" . format ( sink_plate . plate_id , source_plate . plate_id ) # Also check to see if the meta data differs by only one value meta_data_diff = set ( source_plate . ancestor_meta_data_ids ) - set ( sink_plate . ancestor_meta_data_ids ) if len ( meta_data_diff ) == 1 : # Is the diff value the same as the aggregation meta id passed to the aggregate tool if tool . aggregation_meta_data not in meta_data_diff : return "Aggregate tool meta data ({}) " "does not match the diff between source and sink plates ({})" . format ( tool . aggregation_meta_data , list ( meta_data_diff ) [ 0 ] ) else : return "{} not in source's parent plates" . format ( sink_plate . plate_id )
9,807
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L412-L445
[ "def", "divide", "(", "cls", ",", "jobs", ",", "count", ")", ":", "jobs", "=", "list", "(", "zip", "(", "*", "zip_longest", "(", "*", "[", "iter", "(", "jobs", ")", "]", "*", "count", ")", ")", ")", "# If we had no jobs to resume, then we get an empty list", "jobs", "=", "jobs", "or", "[", "(", ")", "]", "*", "count", "for", "index", "in", "range", "(", "count", ")", ":", "# Filter out the items in jobs that are Nones", "jobs", "[", "index", "]", "=", "[", "j", "for", "j", "in", "jobs", "[", "index", "]", "if", "j", "!=", "None", "]", "return", "jobs" ]
Check multi - output plate compatibility . This ensures that the source plates and sink plates match for a multi - output plate
def check_multi_output_plate_compatibility ( source_plates , sink_plate ) : if len ( source_plates ) == 0 : if sink_plate . parent is not None : return False else : if sink_plate . parent is None : return False else : if sink_plate . parent . plate_id != source_plates [ 0 ] . plate_id : return False return True
9,808
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L448-L466
[ "def", "copy", "(", "self", ")", ":", "return", "Character", "(", "self", ".", "name", ",", "self", ".", "race", ",", "self", ".", "ch_class", ",", "self", ".", "stats", ",", "self", ".", "skills", ",", "self", ".", "story", ",", "self", ".", "inventory", ")" ]
Get a representation of the workflow as a dictionary for display purposes
def to_dict ( self , tool_long_names = True ) : d = dict ( nodes = [ ] , factors = [ ] , plates = defaultdict ( list ) ) for node in self . nodes : node_id = self . nodes [ node ] . node_id d [ 'nodes' ] . append ( { 'id' : node_id } ) for plate_id in self . nodes [ node ] . plate_ids : d [ 'plates' ] [ plate_id ] . append ( { 'id' : node_id , 'type' : 'node' } ) for factor in self . factors : tool = str ( factor . tool ) if tool_long_names else factor . tool . name try : sources = [ s . node_id for s in factor . sources ] except AttributeError : if factor . source : sources = [ factor . source . node_id ] else : sources = [ ] d [ 'factors' ] . append ( { 'id' : tool , 'sources' : sources , 'sink' : factor . sink . node_id } ) try : if factor . plates : for plate in factor . plates : d [ 'plates' ] [ plate . plate_id ] . append ( { 'id' : tool , 'type' : 'factor' } ) else : d [ 'plates' ] [ 'root' ] . append ( { 'id' : tool , 'type' : 'factor' } ) except AttributeError : pass d [ 'plates' ] = dict ( d [ 'plates' ] ) return d
9,809
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L510-L553
[ "def", "update_reg", "(", "self", ",", "addr", ",", "mask", ",", "new_val", ")", ":", "shift", "=", "_mask_to_shift", "(", "mask", ")", "val", "=", "self", ".", "read_reg", "(", "addr", ")", "val", "&=", "~", "mask", "val", "|=", "(", "new_val", "<<", "shift", ")", "&", "mask", "self", ".", "write_reg", "(", "addr", ",", "val", ")", "return", "val" ]
Get a JSON representation of the workflow
def to_json ( self , formatter = None , tool_long_names = True , * * kwargs ) : d = self . to_dict ( tool_long_names = tool_long_names ) if formatter : d = formatter ( d ) return json . dumps ( d , * * kwargs )
9,810
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L555-L570
[ "def", "_make_ta_service_dict", "(", "self", ")", ":", "res", "=", "{", "}", "for", "svc_name", "in", "self", ".", "all_services", ":", "svc_obj", "=", "self", ".", "all_services", "[", "svc_name", "]", "for", "lim_name", ",", "lim", "in", "svc_obj", ".", "get_limits", "(", ")", ".", "items", "(", ")", ":", "if", "lim", ".", "ta_service_name", "not", "in", "res", ":", "res", "[", "lim", ".", "ta_service_name", "]", "=", "{", "}", "res", "[", "lim", ".", "ta_service_name", "]", "[", "lim", ".", "ta_limit_name", "]", "=", "lim", "return", "res" ]
Get the tool parameters as a simple dictionary
def parameters_dict ( self ) : d = { } for k , v in self . __dict__ . items ( ) : if not k . startswith ( "_" ) : d [ k ] = v return d
9,811
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L85-L95
[ "def", "visit_compare", "(", "self", ",", "node", ")", ":", "rhs_str", "=", "\" \"", ".", "join", "(", "[", "\"%s %s\"", "%", "(", "op", ",", "self", ".", "_precedence_parens", "(", "node", ",", "expr", ",", "is_left", "=", "False", ")", ")", "for", "op", ",", "expr", "in", "node", ".", "ops", "]", ")", "return", "\"%s %s\"", "%", "(", "self", ".", "_precedence_parens", "(", "node", ",", "node", ".", "left", ")", ",", "rhs_str", ")" ]
Get the tool parameters
def parameters ( self ) : parameters = [ ] for k , v in self . __dict__ . items ( ) : if k . startswith ( "_" ) : continue is_function = False is_set = False if callable ( v ) : value = pickle . dumps ( func_dump ( v ) ) is_function = True elif isinstance ( v , set ) : value = list ( v ) is_set = True else : value = v parameters . append ( dict ( key = k , value = value , is_function = is_function , is_set = is_set ) ) return parameters
9,812
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L98-L128
[ "def", "_timestamp_regulator", "(", "self", ")", ":", "unified_timestamps", "=", "_PrettyDefaultDict", "(", "list", ")", "staged_files", "=", "self", ".", "_list_audio_files", "(", "sub_dir", "=", "\"staging\"", ")", "for", "timestamp_basename", "in", "self", ".", "__timestamps_unregulated", ":", "if", "len", "(", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", ")", ">", "1", ":", "# File has been splitted", "timestamp_name", "=", "''", ".", "join", "(", "timestamp_basename", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "staged_splitted_files_of_timestamp", "=", "list", "(", "filter", "(", "lambda", "staged_file", ":", "(", "timestamp_name", "==", "staged_file", "[", ":", "-", "3", "]", "and", "all", "(", "[", "(", "x", "in", "set", "(", "map", "(", "str", ",", "range", "(", "10", ")", ")", ")", ")", "for", "x", "in", "staged_file", "[", "-", "3", ":", "]", "]", ")", ")", ",", "staged_files", ")", ")", "if", "len", "(", "staged_splitted_files_of_timestamp", ")", "==", "0", ":", "self", ".", "__errors", "[", "(", "time", "(", ")", ",", "timestamp_basename", ")", "]", "=", "{", "\"reason\"", ":", "\"Missing staged file\"", ",", "\"current_staged_files\"", ":", "staged_files", "}", "continue", "staged_splitted_files_of_timestamp", ".", "sort", "(", ")", "unified_timestamp", "=", "list", "(", ")", "for", "staging_digits", ",", "splitted_file", "in", "enumerate", "(", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", ")", ":", "prev_splits_sec", "=", "0", "if", "int", "(", "staging_digits", ")", "!=", "0", ":", "prev_splits_sec", "=", "self", ".", "_get_audio_duration_seconds", "(", "\"{}/staging/{}{:03d}\"", ".", "format", "(", "self", ".", "src_dir", ",", "timestamp_name", ",", "staging_digits", "-", "1", ")", ")", "for", "word_block", "in", "splitted_file", ":", "unified_timestamp", ".", "append", "(", "_WordBlock", "(", "word", "=", "word_block", ".", "word", ",", "start", "=", "round", "(", 
"word_block", ".", "start", "+", "prev_splits_sec", ",", "2", ")", ",", "end", "=", "round", "(", "word_block", ".", "end", "+", "prev_splits_sec", ",", "2", ")", ")", ")", "unified_timestamps", "[", "str", "(", "timestamp_basename", ")", "]", "+=", "unified_timestamp", "else", ":", "unified_timestamps", "[", "timestamp_basename", "]", "+=", "self", ".", "__timestamps_unregulated", "[", "timestamp_basename", "]", "[", "0", "]", "self", ".", "__timestamps", ".", "update", "(", "unified_timestamps", ")", "self", ".", "__timestamps_unregulated", "=", "_PrettyDefaultDict", "(", "list", ")" ]
Get the tool parameters model from dictionaries
def parameters_from_model ( parameters_model ) : parameters = { } for p in parameters_model : if p . is_function : code , defaults , closure = pickle . loads ( p . value ) parameters [ p . key ] = func_load ( code , defaults , closure , globs = globals ( ) ) elif p . is_set : parameters [ p . key ] = set ( p . value ) else : parameters [ p . key ] = p . value return parameters
9,813
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L131-L147
[ "def", "bind", "(", "self", ",", "prefix", ":", "str", ",", "namespace", ":", "str", ",", "override", "=", "True", ",", "replace", "=", "False", ")", ":", "namespace", "=", "URIRef", "(", "str", "(", "namespace", ")", ")", "# When documenting explain that override only applies in what cases", "if", "prefix", "is", "None", ":", "prefix", "=", "''", "bound_namespace", "=", "self", ".", "store", ".", "namespace", "(", "prefix", ")", "# Check if the bound_namespace contains a URI and if so convert it into a URIRef for", "# comparison. This is to prevent duplicate namespaces with the same URI.", "if", "bound_namespace", ":", "bound_namespace", "=", "URIRef", "(", "bound_namespace", ")", "if", "bound_namespace", "and", "bound_namespace", "!=", "namespace", ":", "if", "replace", ":", "self", ".", "store", ".", "bind", "(", "prefix", ",", "namespace", ")", "# prefix already in use for different namespace", "raise", "PrefixAlreadyUsedException", "(", "\"Prefix (%s, %s) already used, instead of (%s, %s).\"", ",", "prefix", ",", "self", ".", "store", ".", "namespace", "(", "prefix", ")", ".", "toPython", "(", ")", ",", "prefix", ",", "namespace", ".", "toPython", "(", ")", ")", "else", ":", "bound_prefix", "=", "self", ".", "store", ".", "prefix", "(", "namespace", ")", "if", "bound_prefix", "is", "None", ":", "self", ".", "store", ".", "bind", "(", "prefix", ",", "namespace", ")", "elif", "bound_prefix", "==", "prefix", ":", "pass", "# already bound", "else", ":", "if", "override", "or", "bound_prefix", ".", "startswith", "(", "\"_\"", ")", ":", "self", ".", "store", ".", "bind", "(", "prefix", ",", "namespace", ")" ]
Gets the mongoengine model for this tool which serializes parameters that are functions
def get_model ( self ) : return ToolModel ( name = self . name , version = "0.0.0" , parameters = self . parameters_from_dicts ( self . parameters ) )
9,814
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L159-L170
[ "def", "close", "(", "self", ")", "->", "Awaitable", "[", "None", "]", ":", "for", "ev", "in", "self", ".", "_throttle_dns_events", ".", "values", "(", ")", ":", "ev", ".", "cancel", "(", ")", "return", "super", "(", ")", ".", "close", "(", ")" ]
Write to the history of executions of this tool
def write_to_history ( * * kwargs ) : from hyperstream import HyperStream hs = HyperStream ( loglevel = logging . CRITICAL , file_logger = False , console_logger = False , mqtt_logger = None ) if hs . current_session : hs . current_session . write_to_history ( * * kwargs )
9,815
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L173-L183
[ "def", "return_port", "(", "port", ")", ":", "if", "port", "in", "_random_ports", ":", "_random_ports", ".", "remove", "(", "port", ")", "elif", "port", "in", "_owned_ports", ":", "_owned_ports", ".", "remove", "(", "port", ")", "_free_ports", ".", "add", "(", "port", ")", "elif", "port", "in", "_free_ports", ":", "logging", ".", "info", "(", "\"Returning a port that was already returned: %s\"", ",", "port", ")", "else", ":", "logging", ".", "info", "(", "\"Returning a port that wasn't given by portpicker: %s\"", ",", "port", ")" ]
Creates a plot in the classical monitoring . km3net . de style .
def plot_dom_parameters ( data , detector , filename , label , title , vmin = 0.0 , vmax = 10.0 , cmap = 'RdYlGn_r' , under = 'deepskyblue' , over = 'deeppink' , underfactor = 1.0 , overfactor = 1.0 , missing = 'lightgray' , hide_limits = False ) : x , y , _ = zip ( * detector . doms . values ( ) ) fig , ax = plt . subplots ( figsize = ( 10 , 6 ) ) cmap = plt . get_cmap ( cmap ) cmap . set_over ( over , 1.0 ) cmap . set_under ( under , 1.0 ) m_size = 100 scatter_args = { 'edgecolors' : 'None' , 'vmin' : vmin , 'vmax' : vmax , } sc_inactive = ax . scatter ( x , y , c = missing , label = 'missing' , s = m_size * 0.9 , * * scatter_args ) xa , ya = map ( np . array , zip ( * data . keys ( ) ) ) zs = np . array ( list ( data . values ( ) ) ) in_range_idx = np . logical_and ( zs >= vmin , zs <= vmax ) sc = ax . scatter ( xa [ in_range_idx ] , ya [ in_range_idx ] , c = zs [ in_range_idx ] , cmap = cmap , s = m_size , * * scatter_args ) if not hide_limits : under_idx = zs < vmin ax . scatter ( xa [ under_idx ] , ya [ under_idx ] , c = under , label = '< {0}' . format ( vmin ) , s = m_size * underfactor , * * scatter_args ) over_idx = zs > vmax ax . scatter ( xa [ over_idx ] , ya [ over_idx ] , c = over , label = '> {0}' . format ( vmax ) , s = m_size * overfactor , * * scatter_args ) cb = plt . colorbar ( sc ) cb . set_label ( label ) ax . set_title ( "{0}\n{1} UTC" . format ( title , datetime . utcnow ( ) . strftime ( "%c" ) ) ) ax . set_xlabel ( "DU" ) ax . set_ylabel ( "DOM" ) ax . set_ylim ( - 2 ) ax . set_yticks ( range ( 1 , 18 + 1 ) ) major_locator = pylab . MaxNLocator ( integer = True ) sc_inactive . axes . xaxis . set_major_locator ( major_locator ) ax . legend ( bbox_to_anchor = ( 0. , - .16 , 1. , .102 ) , loc = 1 , ncol = 2 , mode = "expand" , borderaxespad = 0. ) fig . tight_layout ( ) plt . savefig ( filename , dpi = 120 , bbox_inches = "tight" ) plt . close ( 'all' )
9,816
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/plot.py#L27-L129
[ "def", "_create_peephole_variables", "(", "self", ",", "dtype", ")", ":", "self", ".", "_w_f_diag", "=", "tf", ".", "get_variable", "(", "self", ".", "W_F_DIAG", ",", "shape", "=", "[", "self", ".", "_hidden_size", "]", ",", "dtype", "=", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "self", ".", "W_F_DIAG", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "self", ".", "W_F_DIAG", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "self", ".", "W_F_DIAG", ")", ")", "self", ".", "_w_i_diag", "=", "tf", ".", "get_variable", "(", "self", ".", "W_I_DIAG", ",", "shape", "=", "[", "self", ".", "_hidden_size", "]", ",", "dtype", "=", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "self", ".", "W_I_DIAG", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "self", ".", "W_I_DIAG", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "self", ".", "W_I_DIAG", ")", ")", "self", ".", "_w_o_diag", "=", "tf", ".", "get_variable", "(", "self", ".", "W_O_DIAG", ",", "shape", "=", "[", "self", ".", "_hidden_size", "]", ",", "dtype", "=", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "self", ".", "W_O_DIAG", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "self", ".", "W_O_DIAG", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "self", ".", "W_O_DIAG", ")", ")" ]
Create a mollweide projection of a DOM with given PMTs .
def make_dom_map ( pmt_directions , values , nside = 512 , d = 0.2 , smoothing = 0.1 ) : import healpy as hp discs = [ hp . query_disc ( nside , dir , 0.2 ) for dir in pmt_directions ] npix = hp . nside2npix ( nside ) pixels = np . zeros ( npix ) for disc , value in zip ( discs , values ) : for d in disc : pixels [ d ] = value if smoothing > 0 : return hp . sphtfunc . smoothing ( pixels , fwhm = smoothing , iter = 1 ) return pixels
9,817
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/plot.py#L132-L146
[ "def", "split", "(", "self", ",", "verbose", "=", "None", ",", "end_in_new_line", "=", "None", ")", ":", "elapsed_time", "=", "self", ".", "get_elapsed_time", "(", ")", "self", ".", "split_elapsed_time", ".", "append", "(", "elapsed_time", ")", "self", ".", "_cumulative_elapsed_time", "+=", "elapsed_time", "self", ".", "_elapsed_time", "=", "datetime", ".", "timedelta", "(", ")", "if", "verbose", "is", "None", ":", "verbose", "=", "self", ".", "verbose_end", "if", "verbose", ":", "if", "end_in_new_line", "is", "None", ":", "end_in_new_line", "=", "self", ".", "end_in_new_line", "if", "end_in_new_line", ":", "self", ".", "log", "(", "\"{} done in {}\"", ".", "format", "(", "self", ".", "description", ",", "elapsed_time", ")", ")", "else", ":", "self", ".", "log", "(", "\" done in {}\"", ".", "format", "(", "elapsed_time", ")", ")", "self", ".", "_start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")" ]
Set the calculated intervals This will be written to the stream_status collection if it s in the database channel
def calculated_intervals ( self , value ) : if not value : self . _calculated_intervals = TimeIntervals ( ) return if isinstance ( value , TimeInterval ) : value = TimeIntervals ( [ value ] ) elif isinstance ( value , TimeIntervals ) : pass elif isinstance ( value , list ) : value = TimeIntervals ( value ) else : raise TypeError ( "Expected list/TimeInterval/TimeIntervals, got {}" . format ( type ( value ) ) ) for interval in value : if interval . end > utcnow ( ) : raise ValueError ( "Calculated intervals should not be in the future" ) self . _calculated_intervals = value
9,818
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L115-L140
[ "def", "_read_vmx_file", "(", "self", ")", ":", "try", ":", "self", ".", "_vmx_pairs", "=", "self", ".", "manager", ".", "parse_vmware_file", "(", "self", ".", "_vmx_path", ")", "except", "OSError", "as", "e", ":", "raise", "VMwareError", "(", "'Could not read VMware VMX file \"{}\": {}'", ".", "format", "(", "self", ".", "_vmx_path", ",", "e", ")", ")" ]
Purge the stream . This removes all data and clears the calculated intervals
def purge ( self ) : self . channel . purge_stream ( self . stream_id , remove_definition = False , sandbox = None )
9,819
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L142-L148
[ "def", "update_group_alias", "(", "self", ",", "entity_id", ",", "name", ",", "mount_accessor", "=", "\"\"", ",", "canonical_id", "=", "\"\"", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'name'", ":", "name", ",", "'mount_accessor'", ":", "mount_accessor", ",", "'canonical_id'", ":", "canonical_id", ",", "}", "api_path", "=", "'/v1/{mount_point}/group-alias/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "entity_id", ",", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
Gets a view on this stream for the time interval given
def window ( self , time_interval = None , force_calculation = False ) : if not time_interval : if self . calculated_intervals : time_interval = self . calculated_intervals [ - 1 ] else : raise ValueError ( "No calculations have been performed and no time interval was provided" ) elif isinstance ( time_interval , TimeInterval ) : time_interval = TimeInterval ( time_interval . start , time_interval . end ) elif isinstance ( time_interval , Iterable ) : time_interval = parse_time_tuple ( * time_interval ) if isinstance ( time_interval , RelativeTimeInterval ) : raise NotImplementedError elif isinstance ( time_interval , RelativeTimeInterval ) : raise NotImplementedError else : raise TypeError ( "Expected TimeInterval or (start, end) tuple of type str or datetime, got {}" . format ( type ( time_interval ) ) ) return StreamView ( stream = self , time_interval = time_interval , force_calculation = force_calculation )
9,820
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L154-L180
[ "def", "make_choice_validator", "(", "choices", ",", "default_key", "=", "None", ",", "normalizer", "=", "None", ")", ":", "def", "normalize_all", "(", "_choices", ")", ":", "# normalize all the keys for easier comparison", "if", "normalizer", ":", "_choices", "=", "[", "(", "normalizer", "(", "key", ")", ",", "value", ")", "for", "key", ",", "value", "in", "choices", "]", "return", "_choices", "choices", "=", "normalize_all", "(", "choices", ")", "def", "choice_validator", "(", "value", ")", ":", "if", "normalizer", ":", "value", "=", "normalizer", "(", "value", ")", "if", "not", "value", "and", "default_key", ":", "value", "=", "choices", "[", "default_key", "]", "[", "0", "]", "results", "=", "[", "]", "for", "choice", ",", "mapped", "in", "choices", ":", "if", "value", "==", "choice", ":", "return", "mapped", "if", "choice", ".", "startswith", "(", "value", ")", ":", "results", ".", "append", "(", "(", "choice", ",", "mapped", ")", ")", "if", "len", "(", "results", ")", "==", "1", ":", "return", "results", "[", "0", "]", "[", "1", "]", "elif", "not", "results", ":", "raise", "ValueError", "(", "'Invalid choice.'", ")", "else", ":", "raise", "ValueError", "(", "'Choice ambiguous between (%s)'", "%", "', '", ".", "join", "(", "k", "for", "k", ",", "v", "in", "normalize_all", "(", "results", ")", ")", ")", "return", "choice_validator" ]
Load the stream definition from the database
def load(self):
    """Fetch this stream's definition document from the 'hyperstream'
    database alias and cache its calculated intervals locally."""
    raw_id = self.stream_id.as_raw()
    with switch_db(StreamDefinitionModel, 'hyperstream'):
        document = StreamDefinitionModel.objects.get(__raw__=raw_id)
        self.mongo_model = document
        self._calculated_intervals = document.get_calculated_intervals()
9,821
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L212-L220
[ "def", "verts_str", "(", "verts", ",", "pad", "=", "1", ")", ":", "if", "verts", "is", "None", ":", "return", "'None'", "fmtstr", "=", "', '", ".", "join", "(", "[", "'%'", "+", "six", ".", "text_type", "(", "pad", ")", "+", "'d'", "+", "', %'", "+", "six", ".", "text_type", "(", "pad", ")", "+", "'d'", "]", "*", "1", ")", "return", "', '", ".", "join", "(", "[", "'('", "+", "fmtstr", "%", "vert", "+", "')'", "for", "vert", "in", "verts", "]", ")" ]
Gets the calculated intervals from the database
def calculated_intervals(self):
    """Return the calculated intervals, loading them from the database on
    first access (when the local cache is still None)."""
    cached = self._calculated_intervals
    if cached is not None:
        return cached
    logging.debug("get calculated intervals")
    self.load()
    return self.mongo_model.get_calculated_intervals()
9,822
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L233-L243
[ "def", "hugoniot_t", "(", "rho", ",", "rho0", ",", "c0", ",", "s", ",", "gamma0", ",", "q", ",", "theta0", ",", "n", ",", "mass", ",", "three_r", "=", "3.", "*", "constants", ".", "R", ",", "t_ref", "=", "300.", ",", "c_v", "=", "0.", ")", ":", "if", "isuncertainties", "(", "[", "rho", ",", "rho0", ",", "c0", ",", "s", ",", "gamma0", ",", "q", ",", "theta0", "]", ")", ":", "f_v", "=", "np", ".", "vectorize", "(", "uct", ".", "wrap", "(", "hugoniot_t_single", ")", ",", "excluded", "=", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", ",", "8", ",", "9", ",", "10", ",", "11", "]", ")", "else", ":", "f_v", "=", "np", ".", "vectorize", "(", "hugoniot_t_single", ",", "excluded", "=", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", ",", "8", ",", "9", ",", "10", ",", "11", "]", ")", "return", "f_v", "(", "rho", ",", "rho0", ",", "c0", ",", "s", ",", "gamma0", ",", "q", ",", "theta0", ",", "n", ",", "mass", ",", "three_r", "=", "three_r", ",", "t_ref", "=", "t_ref", ",", "c_v", "=", "c_v", ")" ]
A generic pump which utilises the appropriate pump .
def GenericPump(filenames, use_jppy=False, name="GenericPump", **kwargs):
    """A generic pump which utilises the appropriate pump for the file type.

    :param filenames: a single filename or an iterable of filenames; all
      files must share the same extension.
    :param use_jppy: use the jppy-based EventPump for '.root' files
      instead of the AanetPump.
    :param name: name handed to the instantiated pump.
    :raises TypeError: if ``filenames`` is not iterable.
    :raises IOError: if mixed file extensions are given.
    :raises ValueError: if no pump exists for the extension.
    :raises SystemExit: if none of the given files exist.
    """
    if isinstance(filenames, str):
        filenames = [filenames]
    try:
        iter(filenames)
    except TypeError:
        log.critical("Don't know how to iterate through filenames.")
        raise TypeError("Invalid filenames.")
    extensions = set(os.path.splitext(fn)[1] for fn in filenames)
    if len(extensions) > 1:
        log.critical("Mixed filetypes, please use only files of the same type")
        raise IOError("Mixed filetypes.")
    extension = list(extensions)[0]
    io = {
        '.evt': EvtPump,
        '.h5': HDF5Pump,
        '.root': EventPump if use_jppy else AanetPump,
        '.dat': DAQPump,
        '.dqd': CLBPump,
    }
    if extension not in io:
        log.critical("No pump found for file extension '{0}'".format(extension))
        raise ValueError("Unknown filetype")
    missing_files = [fn for fn in filenames if not os.path.exists(fn)]
    if missing_files:
        if len(missing_files) == len(filenames):
            message = "None of the given files could be found."
            log.critical(message)
            raise SystemExit(message)
        log.warning(
            "The following files are missing and ignored: {}".format(
                ', '.join(missing_files)))
    # BUG FIX: previously the original (possibly missing) ``filenames`` were
    # still passed to the pump, so "ignored" files were not actually ignored
    # and ``input_files`` could be referenced while unbound.  A list
    # comprehension (rather than a set difference) preserves caller order.
    input_files = [fn for fn in filenames if fn not in missing_files]
    if len(input_files) == 1:
        return io[extension](filename=input_files[0], name=name, **kwargs)
    return io[extension](filenames=input_files, name=name, **kwargs)
9,823
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/__init__.py#L36-L87
[ "def", "validate_account_status", "(", "self", ",", "data", ")", ":", "deleted_status", "=", "'deleted'", "region_status", "=", "data", ".", "get", "(", "'status'", ")", "account_status", "=", "data", ".", "get", "(", "'account_status'", ")", "for", "region", "in", "region_status", ":", "if", "region", "[", "'status'", "]", "!=", "deleted_status", "and", "account_status", "==", "deleted_status", ":", "raise", "ValidationError", "(", "'Account Status cannot be \"deleted\" if a region is not \"deleted\"'", ")" ]
Retrieve calibration from a detx file or from the DB.
def read_calibration ( detx = None , det_id = None , from_file = False , det_id_table = None ) : from km3pipe . calib import Calibration # noqa if not ( detx or det_id or from_file ) : return None if detx is not None : return Calibration ( filename = detx ) if from_file : det_ids = np . unique ( det_id_table ) if len ( det_ids ) > 1 : log . critical ( "Multiple detector IDs found in events." ) det_id = det_ids [ 0 ] if det_id is not None : if det_id < 0 : log . warning ( "Negative detector ID found ({0}). This is a MC " "detector and cannot be retrieved from the DB." . format ( det_id ) ) return None return Calibration ( det_id = det_id ) return None
9,824
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/__init__.py#L90-L113
[ "def", "_put_bucket_policy", "(", "self", ")", ":", "if", "self", ".", "s3props", "[", "'bucket_policy'", "]", ":", "policy_str", "=", "json", ".", "dumps", "(", "self", ".", "s3props", "[", "'bucket_policy'", "]", ")", "_response", "=", "self", ".", "s3client", ".", "put_bucket_policy", "(", "Bucket", "=", "self", ".", "bucket", ",", "Policy", "=", "policy_str", ")", "else", ":", "_response", "=", "self", ".", "s3client", ".", "delete_bucket_policy", "(", "Bucket", "=", "self", ".", "bucket", ")", "LOG", ".", "debug", "(", "'Response adding bucket policy: %s'", ",", "_response", ")", "LOG", ".", "info", "(", "'S3 Bucket Policy Attached'", ")" ]
Edit a text using an external editor .
def edit(self, text):
    """Edit a text using an external editor.

    The text is written to a temporary file, the configured editor is
    launched on it (blocking until it exits), and the possibly-modified
    content is returned.  If no editor is configured, the text is
    returned unchanged after a warning.
    """
    if isinstance(text, unicode):  # Python 2: encode before writing to disk
        text = text.encode(self._encoding)
    if self._editor is None:
        printer.p('Warning: no editor found, skipping edit')
        return text
    with tempfile.NamedTemporaryFile(mode='w+', suffix='kolekto-edit') as ftmp:
        ftmp.write(text)
        ftmp.flush()  # ensure the editor sees the full content
        subprocess.Popen([self._editor, ftmp.name]).wait()
        ftmp.seek(0)  # re-read what the editor wrote in place
        edited = ftmp.read()
        return edited
9,825
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/printer.py#L276-L290
[ "def", "_build_prior", "(", "self", ",", "unconstrained_tensor", ",", "constrained_tensor", ")", ":", "if", "not", "misc", ".", "is_tensor", "(", "unconstrained_tensor", ")", ":", "raise", "GPflowError", "(", "\"Unconstrained input must be a tensor.\"", ")", "if", "not", "misc", ".", "is_tensor", "(", "constrained_tensor", ")", ":", "raise", "GPflowError", "(", "\"Constrained input must be a tensor.\"", ")", "prior_name", "=", "'prior'", "if", "self", ".", "prior", "is", "None", ":", "return", "tf", ".", "constant", "(", "0.0", ",", "settings", ".", "float_type", ",", "name", "=", "prior_name", ")", "log_jacobian", "=", "self", ".", "transform", ".", "log_jacobian_tensor", "(", "unconstrained_tensor", ")", "logp_var", "=", "self", ".", "prior", ".", "logp", "(", "constrained_tensor", ")", "return", "tf", ".", "squeeze", "(", "tf", ".", "add", "(", "logp_var", ",", "log_jacobian", ",", "name", "=", "prior_name", ")", ")" ]
Take a feather . plugin . Plugin and tell our dispatcher about it .
def register(self, plugin):
    """Take a feather plugin and tell our dispatcher about it.

    The plugin's listeners and messengers are subtracted from the sets we
    still need; once both sets are exhausted the application is marked
    valid.  The plugin is always registered with the dispatcher.
    """
    self.needed_listeners -= plugin.listeners
    self.needed_messengers -= plugin.messengers
    nothing_missing = not self.needed_messengers and not self.needed_listeners
    if nothing_missing:
        self.valid = True
    self.dispatcher.register(plugin)
9,826
https://github.com/jdodds/feather/blob/92a9426e692b33c7fddf758df8dbc99a9a1ba8ef/feather/application.py#L29-L43
[ "def", "get_conflicts", "(", "self", ")", ":", "conflicts", "=", "[", "]", "if", "self", ".", "_array", "and", "self", ".", "_range", ":", "conflicts", ".", "append", "(", "'cannot use range expressions on arrays'", ")", "return", "conflicts" ]
If we have a set of plugins that provide our expected listeners and messengers, tell our dispatcher to start up. Otherwise raise InvalidApplication.
def start(self):
    """Start the dispatcher if all expected listeners and messengers have
    been provided; otherwise raise InvalidApplication listing what is
    still missing."""
    if self.valid:
        self.dispatcher.start()
        return
    err = ("\nMessengers and listeners that still need set:\n\n"
           "messengers : %s\n\n"
           "listeners : %s\n")
    raise InvalidApplication(err % (self.needed_messengers, self.needed_listeners))
9,827
https://github.com/jdodds/feather/blob/92a9426e692b33c7fddf758df8dbc99a9a1ba8ef/feather/application.py#L45-L56
[ "def", "map_pixels_to_ascii_chars", "(", "image", ",", "range_width", "=", "25", ")", ":", "pixels_in_image", "=", "list", "(", "image", ".", "getdata", "(", ")", ")", "pixels_to_chars", "=", "[", "ASCII_CHARS", "[", "pixel_value", "/", "range_width", "]", "for", "pixel_value", "in", "pixels_in_image", "]", "return", "\"\"", ".", "join", "(", "pixels_to_chars", ")" ]
Get a rule instance for given operator and return condition lambda func
def execute_condition(cond):
    """Resolve and run the condition function for ``cond``.

    Looks for an operator-specific implementation
    (``c_<data_type>_<operator>``) first and falls back to the
    data-type-only function (``c_<data_type>``) when none exists, then
    applies it to the executable form of the condition.
    """
    specific = 'rulengine.conditions.c_{0}_{1}'.format(cond.data_type, cond.operator)
    try:
        handler = import_class(specific)
    except AttributeError:
        # No operator-specific implementation available
        generic = 'rulengine.conditions.c_{0}'.format(cond.data_type)
        handler = import_class(generic)
    return handler(convert_condition_to_executable(cond))
9,828
https://github.com/baranbartu/rulengine/blob/f4d1e6258927cb171cb7fc8a90a3cba546a2aee5/rulengine/conditions.py#L5-L21
[ "def", "setOverlayTransformTrackedDeviceRelative", "(", "self", ",", "ulOverlayHandle", ",", "unTrackedDevice", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTransformTrackedDeviceRelative", "pmatTrackedDeviceToOverlayTransform", "=", "HmdMatrix34_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "unTrackedDevice", ",", "byref", "(", "pmatTrackedDeviceToOverlayTransform", ")", ")", "return", "result", ",", "pmatTrackedDeviceToOverlayTransform" ]
Do database migrations: 1. Creates new tables from models 2. Updates and drops columns to match the models
def makemigrations(self):
    """Do database migrations.

    1. Creates missing tables from the registered models.
    2. For tables that already exist, reconciles their columns via
       ``UpdateColums`` (adding new model fields and dropping stale ones).

    :returns: True on completion.
    :raises mysql.Error: re-raised if the CREATE TABLE statement fails.
    """
    UNCHANGED = []
    with Session(self.settings) as conn:
        cursor = conn.cursor()
        for name, model in self.models.items():
            print("Running migrations... on table: %s" % model.__name__.lower())
            # Current columns of the table; empty when the table is new
            columns = self.description(model)
            table = name.lower()
            QUERY = "CREATE TABLE IF NOT EXISTS %s (" % table
            for field, FieldType in model.columns.items():
                QUERY += "%s %s, " % (field, FieldType)
                # If no columns --> Table not created yet
                if columns:
                    self.UpdateColums(cursor, field, FieldType, model, columns, UNCHANGED)
            # Strip the trailing ", " before closing the definition
            QUERY = QUERY[:-2] + ") ENGINE=InnoDB"
            print(QUERY)
            try:
                cursor.execute(QUERY)
            except mysql.Error as e:
                raise e
    return True
9,829
https://github.com/abiiranathan/db2/blob/347319e421921517bcae7639f524c3c3eb5446e6/db2/session.py#L307-L342
[ "def", "wait", "(", "self", ",", "sensor_name", ",", "condition_or_value", ",", "timeout", "=", "5", ")", ":", "sensor_name", "=", "escape_name", "(", "sensor_name", ")", "sensor", "=", "self", ".", "sensor", "[", "sensor_name", "]", "try", ":", "yield", "sensor", ".", "wait", "(", "condition_or_value", ",", "timeout", ")", "except", "tornado", ".", "gen", ".", "TimeoutError", ":", "raise", "tornado", ".", "gen", ".", "Return", "(", "False", ")", "else", ":", "raise", "tornado", ".", "gen", ".", "Return", "(", "True", ")" ]
Updates the columns. Do not call directly.
def UpdateColums(self, cursor, field, FieldType, model, columns, UNCHANGED):
    """Reconcile one model field with an existing table.  Do not call directly.

    Adds ``field`` to the table when missing (anchored after the last
    already-present field), records present fields in ``UNCHANGED``, and
    drops table columns that no longer exist on the model.

    :param cursor: DB cursor used to execute the ALTER statements.
    :param field: column name from the model.
    :param FieldType: SQL type declaration for the column.
    :param model: model class; ``__name__`` gives the table name and
      ``_fields`` the authoritative column list.
    :param columns: mutable list of the table's current columns
      (mutated in place when stale columns are dropped).
    :param UNCHANGED: mutable list of already-present fields, shared
      across calls for one table.
    :returns: True
    """
    table = model.__name__.lower()
    if field not in columns:
        # BUG FIX: the anchor used to be pop()ed from UNCHANGED, which
        # raised IndexError when a new field preceded any existing one and
        # consumed the anchor needed by subsequent new fields.  Peek
        # instead, and fall back to FIRST when there is no anchor yet.
        if UNCHANGED:
            anchor = UNCHANGED[-1]
            new_sql = f"ALTER TABLE {table} ADD COLUMN {field} {FieldType} AFTER {anchor}"
        else:
            new_sql = f"ALTER TABLE {table} ADD COLUMN {field} {FieldType} FIRST"
        cursor.execute(new_sql)
        print("\n\n", new_sql)
    else:
        UNCHANGED.append(field)
    # We drop the fields in the table not in models
    TCOLS = set(columns) - set(model._fields)
    for col in TCOLS:
        columns.remove(col)
        QRY = f"ALTER TABLE {table} DROP COLUMN {col}"
        cursor.execute(QRY)
        print("\n\n", QRY)
    return True
9,830
https://github.com/abiiranathan/db2/blob/347319e421921517bcae7639f524c3c3eb5446e6/db2/session.py#L344-L364
[ "def", "_create_download_failed_message", "(", "exception", ",", "url", ")", ":", "message", "=", "'Failed to download from:\\n{}\\nwith {}:\\n{}'", ".", "format", "(", "url", ",", "exception", ".", "__class__", ".", "__name__", ",", "exception", ")", "if", "_is_temporal_problem", "(", "exception", ")", ":", "if", "isinstance", "(", "exception", ",", "requests", ".", "ConnectionError", ")", ":", "message", "+=", "'\\nPlease check your internet connection and try again.'", "else", ":", "message", "+=", "'\\nThere might be a problem in connection or the server failed to process '", "'your request. Please try again.'", "elif", "isinstance", "(", "exception", ",", "requests", ".", "HTTPError", ")", ":", "try", ":", "server_message", "=", "''", "for", "elem", "in", "decode_data", "(", "exception", ".", "response", ".", "content", ",", "MimeType", ".", "XML", ")", ":", "if", "'ServiceException'", "in", "elem", ".", "tag", "or", "'Message'", "in", "elem", ".", "tag", ":", "server_message", "+=", "elem", ".", "text", ".", "strip", "(", "'\\n\\t '", ")", "except", "ElementTree", ".", "ParseError", ":", "server_message", "=", "exception", ".", "response", ".", "text", "message", "+=", "'\\nServer response: \"{}\"'", ".", "format", "(", "server_message", ")", "return", "message" ]
Serve event to RainbowAlga
def srv_event(token, hits, url=RBA_URL):
    """Serve event to RainbowAlga.

    ``hits`` may be a pandas DataFrame (columns x/y/z/time/tot) or a
    km3pipe Table (pos_x/pos_y/pos_z/time/tot); any other type is logged
    as an error and nothing is sent.  A missing URL is also an error.
    """
    if url is None:
        log.error("Please provide a valid RainbowAlga URL.")
        return
    ws_url = url + '/message'
    if isinstance(hits, pd.core.frame.DataFrame):
        pos = [tuple(x) for x in hits[['x', 'y', 'z']].values]
        time = list(hits['time'])
        tot = list(hits['tot'])
    elif isinstance(hits, Table):
        pos = list(zip(hits.pos_x, hits.pos_y, hits.pos_z))
        time = list(hits.time)
        tot = list(hits.tot)
    else:
        log.error("No calibration information found in hits (type: {0})".format(type(hits)))
        return
    event = {
        "hits": {
            'pos': pos,
            'time': time,
            'tot': tot,
        }
    }
    srv_data(ws_url, token, event, 'event')
9,831
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/srv.py#L289-L322
[ "def", "_intersection", "(", "self", ",", "keys", ",", "rows", ")", ":", "# If there are no other keys with start and end date (i.e. nothing to merge) return immediately.", "if", "not", "keys", ":", "return", "rows", "ret", "=", "list", "(", ")", "for", "row", "in", "rows", ":", "start_date", "=", "row", "[", "self", ".", "_key_start_date", "]", "end_date", "=", "row", "[", "self", ".", "_key_end_date", "]", "for", "key_start_date", ",", "key_end_date", "in", "keys", ":", "start_date", ",", "end_date", "=", "Type2JoinHelper", ".", "_intersect", "(", "start_date", ",", "end_date", ",", "row", "[", "key_start_date", "]", ",", "row", "[", "key_end_date", "]", ")", "if", "not", "start_date", ":", "break", "if", "key_start_date", "not", "in", "[", "self", ".", "_key_start_date", ",", "self", ".", "_key_end_date", "]", ":", "del", "row", "[", "key_start_date", "]", "if", "key_end_date", "not", "in", "[", "self", ".", "_key_start_date", ",", "self", ".", "_key_end_date", "]", ":", "del", "row", "[", "key_end_date", "]", "if", "start_date", ":", "row", "[", "self", ".", "_key_start_date", "]", "=", "start_date", "row", "[", "self", ".", "_key_end_date", "]", "=", "end_date", "ret", ".", "append", "(", "row", ")", "return", "ret" ]
Serve data to RainbowAlga
def srv_data(url, token, data, kind):
    """Open a websocket to ``url``, send one JSON message carrying the
    token, payload and message kind, then close the connection."""
    payload = {'token': token, 'data': data, 'kind': kind}
    connection = websocket.create_connection(url)
    connection.send(pd.io.json.dumps(payload))
    connection.close()
9,832
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/srv.py#L325-L330
[ "def", "block", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'/users/%s/block'", "%", "self", ".", "id", "server_data", "=", "self", ".", "manager", ".", "gitlab", ".", "http_post", "(", "path", ",", "*", "*", "kwargs", ")", "if", "server_data", "is", "True", ":", "self", ".", "_attrs", "[", "'state'", "]", "=", "'blocked'", "return", "server_data" ]
Convert message to JSON and send it to the client with token
def raw_message_to(self, token, message):
    """Send ``message`` as-is to the client registered under ``token``.

    Unknown tokens are logged critically; a dropped connection is logged
    as an error instead of raising."""
    try:
        client = self._clients[token]
    except KeyError:
        log.critical("Client with token '{0}' not found!".format(token))
        return
    try:
        client.write_message(message)
    except (AttributeError, tornado.websocket.WebSocketClosedError):
        log.error("Lost connection to client '{0}'".format(client))
    else:
        print("Sent {0} bytes.".format(len(message)))
9,833
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/srv.py#L114-L125
[ "def", "remove_reaction", "(", "self", ",", "reaction", ")", ":", "if", "reaction", "not", "in", "self", ".", "_reaction_set", ":", "return", "self", ".", "_reaction_set", ".", "remove", "(", "reaction", ")", "self", ".", "_limits_lower", ".", "pop", "(", "reaction", ",", "None", ")", "self", ".", "_limits_upper", ".", "pop", "(", "reaction", ",", "None", ")", "# Remove compound from compound_set if it is not referenced", "# by any other reactions in the model.", "for", "compound", ",", "value", "in", "self", ".", "_database", ".", "get_reaction_values", "(", "reaction", ")", ":", "reactions", "=", "frozenset", "(", "self", ".", "_database", ".", "get_compound_reactions", "(", "compound", ")", ")", "if", "all", "(", "other_reaction", "not", "in", "self", ".", "_reaction_set", "for", "other_reaction", "in", "reactions", ")", ":", "self", ".", "_compound_set", ".", "remove", "(", "compound", ")" ]
Convert message to json and send it to the clients
def message(self, data, kind="info"):
    """JSON-encode ``data`` together with its message kind and push the
    result over this websocket handler."""
    encoded = pd.io.json.dumps({'kind': kind, 'data': data})
    print("Sent {0} bytes.".format(len(encoded)))
    self.write_message(encoded)
9,834
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/srv.py#L282-L286
[ "def", "setOverlayTexelAspect", "(", "self", ",", "ulOverlayHandle", ",", "fTexelAspect", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTexelAspect", "result", "=", "fn", "(", "ulOverlayHandle", ",", "fTexelAspect", ")", "return", "result" ]
Execute only one rule .
def execute_once(self, string):
    """Apply at most one rule: the first rule whose pattern occurs in
    ``string`` replaces its leftmost occurrence, and the applied rule is
    remembered in ``self.last_rule``.  When no rule matches, ``last_rule``
    is cleared and the string is returned unchanged."""
    for rule in self.rules:
        pattern, replacement = rule[0], rule[1]
        index = string.find(pattern)
        if index != -1:
            self.last_rule = rule
            return string[:index] + replacement + string[index + len(pattern):]
    self.last_rule = None
    return string
9,835
https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/markov.py#L51-L59
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Return python code for create and execute algo .
def compile(self):
    """Return python code for create and execute algo.

    Emits the shared TEMPLATE preamble, one ``algo.add_rule`` call per
    rule (rules flagged at index 2 use '=>', presumably terminal rules --
    verify against the algorithm class; others use '->'), followed by a
    loop that runs the algorithm on each whitespace-stripped stdin line.
    """
    result = TEMPLATE
    for rule in self.rules:
        if rule[2]:
            arrow = '=>'
        else:
            arrow = '->'
        repr_rule = repr(rule[0] + arrow + rule[1])
        result += "algo.add_rule({repr_rule})\n".format(repr_rule=repr_rule)
    result += "for line in stdin:\n"
    result += " print(algo.execute(''.join(line.split())))"
    return result
9,836
https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/markov.py#L76-L91
[ "def", "cancelHistoricalData", "(", "self", ",", "bars", ":", "BarDataList", ")", ":", "self", ".", "client", ".", "cancelHistoricalData", "(", "bars", ".", "reqId", ")", "self", ".", "wrapper", ".", "endSubscription", "(", "bars", ")" ]
Gets the source streams for a given plate value on a plate . Also populates with source streams that are valid for the parent plates of this plate with the appropriate meta - data for the parent plate .
def get_sources(self, plate, plate_value, sources=None):
    """Gets the source streams for a given plate value on a plate.

    Also recursively collects source streams that are valid for the
    parent plates of this plate, using the plate value with this plate's
    meta-data removed.

    :param plate: the plate being inspected.
    :param plate_value: the plate value to match against each source.
    :param sources: accumulator used during recursion; a fresh list is
      created when None (avoids the mutable-default pitfall).
    :return: the accumulated list of matching source streams.
    """
    if sources is None:
        sources = []
    if self.sources:
        for si, source in enumerate(self.sources):
            if len(source.streams) == 1 and None in source.streams:
                # Globally valid source: a single stream keyed by None
                sources.append(source.streams[None])
            elif plate_value in source.streams:
                sources.append(source.streams[plate_value])
            else:
                # # TODO: determine whether this should raise an exception or not, or even log a warning
                # logging.warn("{} with value {} not valid for source {}"
                #     .format(plate, plate_value, source))
                pass
    if not plate.is_root:
        # Populate with sources defined on parent plate
        parent_plate_value = tuple(pv for pv in plate_value if pv[0] != plate.meta_data_id)
        sources = self.get_sources(plate.parent, parent_plate_value, sources)
    # sources.extend(self.get_global_sources())
    return sources
9,837
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L204-L240
[ "def", "similarity", "(", "self", ",", "other", ":", "'Trigram'", ")", "->", "float", ":", "if", "not", "len", "(", "self", ".", "_trigrams", ")", "or", "not", "len", "(", "other", ".", "_trigrams", ")", ":", "return", "0", "count", "=", "float", "(", "len", "(", "self", ".", "_trigrams", "&", "other", ".", "_trigrams", ")", ")", "len1", "=", "float", "(", "len", "(", "self", ".", "_trigrams", ")", ")", "len2", "=", "float", "(", "len", "(", "other", ".", "_trigrams", ")", ")", "return", "count", "/", "(", "len1", "+", "len2", "-", "count", ")" ]
Gets streams that live outside of the plates
def get_global_sources(self):
    """Return the source streams that live outside of any plate, i.e.
    those keyed by None rather than by a plate value."""
    if not self.sources:
        return []
    return [source.streams[None] for source in self.sources if None in source.streams]
9,838
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L242-L253
[ "def", "main", "(", "arguments", "=", "None", ",", "config", "=", "None", ")", ":", "# Parse options, initialize gathered stats", "options", "=", "LoggOptions", "(", "arguments", "=", "arguments", ")", ".", "parse", "(", ")", "# FIXME: pass in only config; set config.journal = options.journal", "if", "not", "config", ":", "config", "=", "options", ".", "config_file", "logg", "=", "Logg", "(", "config", ",", "options", ".", "journal", ")", "return", "logg", ".", "logg_record", "(", "options", ".", "logg", ",", "options", ".", "date", ")" ]
Gets the alignment stream for a particular plate value
def get_alignment_stream(self, plate=None, plate_value=None):
    """Return the alignment stream, or None when no alignment node is set.

    Only global alignment (outside all plates) is supported: passing a
    plate or a plate value raises NotImplementedError."""
    node = self.alignment_node
    if not node:
        return None
    if plate is None and plate_value is None:
        # ``plate`` is None here, exactly the key for global streams
        return node.streams[plate]
    # TODO: need to implement alignment nodes that live inside plates
    raise NotImplementedError("Currently only alignment nodes outside of plates are supported")
9,839
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L255-L268
[ "def", "fix_spelling", "(", "self", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "WORDS", ")", ":", "self", ".", "tokenize_words", "(", ")", "text", "=", "self", ".", "text", "fixed", "=", "vabamorf", ".", "fix_spelling", "(", "self", ".", "word_texts", ",", "join", "=", "False", ")", "spans", "=", "self", ".", "word_spans", "assert", "len", "(", "fixed", ")", "==", "len", "(", "spans", ")", "if", "len", "(", "spans", ")", ">", "0", ":", "newtoks", "=", "[", "]", "lastend", "=", "0", "for", "fix", ",", "(", "start", ",", "end", ")", "in", "zip", "(", "fixed", ",", "spans", ")", ":", "newtoks", ".", "append", "(", "text", "[", "lastend", ":", "start", "]", ")", "newtoks", ".", "append", "(", "fix", ")", "lastend", "=", "end", "newtoks", ".", "append", "(", "text", "[", "lastend", ":", "]", ")", "return", "Text", "(", "''", ".", "join", "(", "newtoks", ")", ",", "*", "*", "self", ".", "__kwargs", ")", "return", "self" ]
Get the splitting stream
def get_splitting_stream(self, input_plate_value):
    """Get the splitting stream for the given input plate value.

    The splitting node may live globally (no plates), on the same plate
    as the input, on a direct child of the input plate, or on a deeper
    descendant; anything else is incompatible.

    :raises ValueError: for multiple splitting plates, a splitting plate
      without an input plate, or an unmatched parent plate value.
    :raises IncompatiblePlatesError: when the splitting plate is unrelated
      to the input plate.
    """
    if not self.splitting_node:
        return None
    if len(self.splitting_node.plates) == 0:
        # Use global plate value
        return self.splitting_node.streams[None]
    if len(self.splitting_node.plates) > 1:
        raise ValueError("Splitting node cannot live on multiple plates for factor {}"
                         .format(self.factor_id))
    # now len(self.splitting_node.plates) == 1:
    if not self.input_plate and len(self.splitting_node.plates) > 0:
        raise ValueError("Splitting node cannot live on a plate if there is no input plate")
    splitting_plate = self.splitting_node.plates[0]
    if self.input_plate == splitting_plate:
        # Use matching plate value
        splitting_stream = self.splitting_node.streams[input_plate_value]
    else:
        # First check if it's a direct child
        if splitting_plate.is_child(self.input_plate):
            # BUG FIX: wrap filter() in list() -- on Python 3 filter returns
            # an iterator, so len() and indexing would raise TypeError.
            ppv = list(filter(lambda x: all(p in input_plate_value for p in x),
                              self.input_plate.parent.values))
            if len(ppv) != 1:
                raise ValueError("Parent plate value not found")
            splitting_stream = self.splitting_node.streams[ppv[0]]
        # Then more generally if it's a descendant
        elif splitting_plate.is_descendant(self.input_plate):
            # Find the splitting plate value valid for the current input
            # plate value.  TODO: same logic as the direct-child case?
            ppv = list(filter(lambda x: all(p in input_plate_value for p in x),
                              self.input_plate.parent.values))
            if len(ppv) != 1:
                raise ValueError("Parent plate value not found")
            # BUG FIX: index the single matching value; ``streams[ppv]``
            # used the (unhashable) list itself as the key.
            splitting_stream = self.splitting_node.streams[ppv[0]]
        else:
            raise IncompatiblePlatesError(
                "Splitting node plate {} does not match input plate {} for factor {}"
                .format(self.input_plate, self.splitting_node.plates[0], self.factor_id))
    return splitting_stream
9,840
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L390-L436
[ "def", "paintGL", "(", "self", ")", ":", "if", "self", ".", "post_processing", ":", "# Render to the first framebuffer", "glBindFramebuffer", "(", "GL_FRAMEBUFFER", ",", "self", ".", "fb0", ")", "glViewport", "(", "0", ",", "0", ",", "self", ".", "width", "(", ")", ",", "self", ".", "height", "(", ")", ")", "status", "=", "glCheckFramebufferStatus", "(", "GL_FRAMEBUFFER", ")", "if", "(", "status", "!=", "GL_FRAMEBUFFER_COMPLETE", ")", ":", "reason", "=", "dict", "(", "GL_FRAMEBUFFER_UNDEFINED", "=", "'UNDEFINED'", ",", "GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT", "=", "'INCOMPLETE_ATTACHMENT'", ",", "GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT", "=", "'INCOMPLETE_MISSING_ATTACHMENT'", ",", "GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER", "=", "'INCOMPLETE_DRAW_BUFFER'", ",", "GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER", "=", "'INCOMPLETE_READ_BUFFER'", ",", "GL_FRAMEBUFFER_UNSUPPORTED", "=", "'UNSUPPORTED'", ",", ")", "[", "status", "]", "raise", "Exception", "(", "'Framebuffer is not complete: {}'", ".", "format", "(", "reason", ")", ")", "else", ":", "glBindFramebuffer", "(", "GL_FRAMEBUFFER", ",", "DEFAULT_FRAMEBUFFER", ")", "# Clear color take floats", "bg_r", ",", "bg_g", ",", "bg_b", ",", "bg_a", "=", "self", ".", "background_color", "glClearColor", "(", "bg_r", "/", "255", ",", "bg_g", "/", "255", ",", "bg_b", "/", "255", ",", "bg_a", "/", "255", ")", "glClear", "(", "GL_COLOR_BUFFER_BIT", "|", "GL_DEPTH_BUFFER_BIT", ")", "proj", "=", "self", ".", "camera", ".", "projection", "cam", "=", "self", ".", "camera", ".", "matrix", "self", ".", "mvproj", "=", "np", ".", "dot", "(", "proj", ",", "cam", ")", "self", ".", "ldir", "=", "cam", "[", ":", "3", ",", ":", "3", "]", ".", "T", ".", "dot", "(", "self", ".", "light_dir", ")", "# Draw World", "self", ".", "on_draw_world", "(", ")", "# Iterate over all of the post processing effects", "if", "self", ".", "post_processing", ":", "if", "len", "(", "self", ".", "post_processing", ")", ">", "1", ":", "newarg", "=", "self", ".", 
"textures", ".", "copy", "(", ")", "# Ping-pong framebuffer rendering", "for", "i", ",", "pp", "in", "enumerate", "(", "self", ".", "post_processing", "[", ":", "-", "1", "]", ")", ":", "if", "i", "%", "2", ":", "outfb", "=", "self", ".", "fb1", "outtex", "=", "self", ".", "_extra_textures", "[", "'fb1'", "]", "else", ":", "outfb", "=", "self", ".", "fb2", "outtex", "=", "self", ".", "_extra_textures", "[", "'fb2'", "]", "pp", ".", "render", "(", "outfb", ",", "newarg", ")", "newarg", "[", "'color'", "]", "=", "outtex", "self", ".", "post_processing", "[", "-", "1", "]", ".", "render", "(", "DEFAULT_FRAMEBUFFER", ",", "newarg", ")", "else", ":", "self", ".", "post_processing", "[", "0", "]", ".", "render", "(", "DEFAULT_FRAMEBUFFER", ",", "self", ".", "textures", ")", "# Draw the UI at the very last step", "self", ".", "on_draw_ui", "(", ")" ]
Update computed intervals
def update_computed_intervals(sinks, time_interval):
    """Mark ``time_interval`` as calculated on every sink, then verify the
    tool actually covered the whole interval, raising RuntimeError when
    anything remains uncovered."""
    for sink in sinks:
        sink.calculated_intervals += time_interval
        required_intervals = TimeIntervals([time_interval]) - sink.calculated_intervals
        if required_intervals.is_empty:
            continue
        raise RuntimeError('Tool execution did not cover the time interval {}'
                           .format(required_intervals))
9,841
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L439-L451
[ "def", "vn_release", "(", "call", "=", "None", ",", "kwargs", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The vn_reserve function must be called with -f or --function.'", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "vn_id", "=", "kwargs", ".", "get", "(", "'vn_id'", ",", "None", ")", "vn_name", "=", "kwargs", ".", "get", "(", "'vn_name'", ",", "None", ")", "path", "=", "kwargs", ".", "get", "(", "'path'", ",", "None", ")", "data", "=", "kwargs", ".", "get", "(", "'data'", ",", "None", ")", "if", "vn_id", ":", "if", "vn_name", ":", "log", ".", "warning", "(", "'Both the \\'vn_id\\' and \\'vn_name\\' arguments were provided. '", "'\\'vn_id\\' will take precedence.'", ")", "elif", "vn_name", ":", "vn_id", "=", "get_vn_id", "(", "kwargs", "=", "{", "'name'", ":", "vn_name", "}", ")", "else", ":", "raise", "SaltCloudSystemExit", "(", "'The vn_release function requires a \\'vn_id\\' or a \\'vn_name\\' to '", "'be provided.'", ")", "if", "data", ":", "if", "path", ":", "log", ".", "warning", "(", "'Both the \\'data\\' and \\'path\\' arguments were provided. 
'", "'\\'data\\' will take precedence.'", ")", "elif", "path", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "path", ",", "mode", "=", "'r'", ")", "as", "rfh", ":", "data", "=", "rfh", ".", "read", "(", ")", "else", ":", "raise", "SaltCloudSystemExit", "(", "'The vn_release function requires either \\'data\\' or a \\'path\\' to '", "'be provided.'", ")", "server", ",", "user", ",", "password", "=", "_get_xml_rpc", "(", ")", "auth", "=", "':'", ".", "join", "(", "[", "user", ",", "password", "]", ")", "response", "=", "server", ".", "one", ".", "vn", ".", "release", "(", "auth", ",", "int", "(", "vn_id", ")", ",", "data", ")", "ret", "=", "{", "'action'", ":", "'vn.release'", ",", "'released'", ":", "response", "[", "0", "]", ",", "'resource_id'", ":", "response", "[", "1", "]", ",", "'error_code'", ":", "response", "[", "2", "]", ",", "}", "return", "ret" ]
Download bibtex format and parse it from EvoBib
def getEvoBibAsBibtex ( * keys , * * kw ) : res = [ ] for key in keys : bib = get_url ( "http://bibliography.lingpy.org/raw.php?key=" + key , log = kw . get ( 'log' ) ) . text try : res . append ( '@' + bib . split ( '@' ) [ 1 ] . split ( '</pre>' ) [ 0 ] ) except IndexError : # pragma: no cover res . append ( '@misc{' + key + ',\nNote={missing source}\n\n}' ) return '\n\n' . join ( res )
9,842
https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/util.py#L233-L244
[ "def", "get_config_object", "(", ")", ":", "global", "_DEFAULT_CONFIG_WRAPPER", "if", "_DEFAULT_CONFIG_WRAPPER", "is", "not", "None", ":", "return", "_DEFAULT_CONFIG_WRAPPER", "with", "_DEFAULT_CONFIG_WRAPPER_LOCK", ":", "if", "_DEFAULT_CONFIG_WRAPPER", "is", "not", "None", ":", "return", "_DEFAULT_CONFIG_WRAPPER", "_DEFAULT_CONFIG_WRAPPER", "=", "ConfigWrapper", "(", ")", "return", "_DEFAULT_CONFIG_WRAPPER" ]
Download a zipfile and immediately unpack selected content .
def download_and_unpack ( self , url , * paths , * * kw ) : with self . temp_download ( url , 'ds.zip' , log = kw . pop ( 'log' , None ) ) as zipp : with TemporaryDirectory ( ) as tmpdir : with zipfile . ZipFile ( zipp . as_posix ( ) ) as zipf : for path in paths : zipf . extract ( as_posix ( path ) , path = tmpdir . as_posix ( ) ) copy ( tmpdir . joinpath ( path ) , self )
9,843
https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/util.py#L216-L230
[ "def", "_get_site_amplification_term", "(", "self", ",", "C", ",", "vs30", ")", ":", "s_b", ",", "s_c", ",", "s_d", "=", "self", ".", "_get_site_dummy_variables", "(", "vs30", ")", "return", "(", "C", "[", "\"sB\"", "]", "*", "s_b", ")", "+", "(", "C", "[", "\"sC\"", "]", "*", "s_c", ")", "+", "(", "C", "[", "\"sD\"", "]", "*", "s_d", ")" ]
Returns a list of lists that orders all candidates in tiers from best to worst when we use MCMC approximation to compute Bayesian utilities for an election profile .
def getRanking ( self , profile , sampleFileName = None ) : if sampleFileName != None : candScoresMap = self . getCandScoresMapFromSamplesFile ( profile , sampleFileName ) else : candScoresMap = self . getCandScoresMap ( profile ) # We generate a map that associates each score with the candidates that have that acore. reverseCandScoresMap = dict ( ) for key , value in candScoresMap . items ( ) : if value not in reverseCandScoresMap . keys ( ) : reverseCandScoresMap [ value ] = [ key ] else : reverseCandScoresMap [ value ] . append ( key ) # We sort the scores by either decreasing order or increasing order. if self . maximizeCandScore == True : sortedCandScores = sorted ( reverseCandScoresMap . keys ( ) , reverse = True ) else : sortedCandScores = sorted ( reverseCandScoresMap . keys ( ) ) # We put the candidates into our ranking based on the order in which their score appears ranking = [ ] for candScore in sortedCandScores : for cand in reverseCandScoresMap [ candScore ] : ranking . append ( cand ) return ranking
9,844
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L48-L84
[ "def", "replaceData", "(", "self", ",", "offset", ":", "int", ",", "count", ":", "int", ",", "string", ":", "str", ")", "->", "None", ":", "self", ".", "_replace_data", "(", "offset", ",", "count", ",", "string", ")" ]
Returns a dictonary that associates the integer representation of each candidate with the Bayesian utilities we approximate from our sampling of the profile .
def getCandScoresMap ( self , profile ) : wmg = profile . getWmg ( True ) V = self . getInitialSample ( wmg ) utilities = dict ( ) for cand in profile . candMap . keys ( ) : utilities [ cand ] = 0.0 for i in range ( 0 , self . burnIn ) : V = self . sampleGenerator . getNextSample ( V ) for i in range ( 0 , self . n2 ) : for j in range ( 0 , self . n1 ) : V = self . sampleGenerator . getNextSample ( V ) for cand in profile . candMap . keys ( ) : utilities [ cand ] += self . utilityFunction . getUtility ( [ cand ] , V ) for cand in profile . candMap . keys ( ) : utilities [ cand ] = utilities [ cand ] / self . n2 return utilities
9,845
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L86-L113
[ "def", "is_readable_dir", "(", "path", ")", ":", "return", "os", ".", "path", ".", "isdir", "(", "path", ")", "and", "os", ".", "access", "(", "path", ",", "os", ".", "R_OK", ")", "and", "os", ".", "access", "(", "path", ",", "os", ".", "X_OK", ")" ]
Returns a dictonary that associates the integer representation of each candidate with the Bayesian utilities we approximate from the samples we generated into a file .
def getCandScoresMapFromSamplesFile ( self , profile , sampleFileName ) : wmg = profile . getWmg ( True ) # Initialize our list of expected utilities. utilities = dict ( ) for cand in wmg . keys ( ) : utilities [ cand ] = 0.0 # Open the file and skip the lines of meta data in the file and skip samples for burn-in. sampleFile = open ( sampleFileName ) for i in range ( 0 , SAMPLESFILEMETADATALINECOUNT ) : sampleFile . readline ( ) for i in range ( 0 , self . burnIn ) : sampleFile . readline ( ) # We update our utilities as we read the file. numSamples = 0 for i in range ( 0 , self . n2 * self . n1 ) : line = sampleFile . readline ( ) if i % self . n1 != 0 : continue sample = json . loads ( line ) for cand in wmg . keys ( ) : utilities [ cand ] += self . utilityFunction . getUtility ( [ cand ] , sample ) numSamples += 1 sampleFile . close ( ) for key in utilities . keys ( ) : utilities [ key ] = utilities [ key ] / numSamples return utilities
9,846
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L176-L212
[ "def", "validate", "(", "self", ",", "data", ")", ":", "for", "v", "in", "self", ".", "validators", ":", "v", "(", "self", ",", "data", ")", "return", "data" ]
Generate samples to a file .
def printMcmcSamplesToFile ( self , profile , numSamples , outFileName ) : wmg = profile . getWmg ( True ) V = self . getInitialSample ( wmg ) # Print the number of candidates, phi, and the number of samples. outFile = open ( outFileName , 'w' ) outFile . write ( "m," + str ( profile . numCands ) + '\n' ) outFile . write ( "phi," + str ( self . phi ) + '\n' ) outFile . write ( "numSamples," + str ( numSamples ) ) for i in range ( 0 , numSamples ) : V = self . sampleGenerator . getNextSample ( V ) outFile . write ( "\n" + json . dumps ( V ) ) outFile . close ( )
9,847
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L214-L235
[ "def", "AND", "(", "queryArr", ",", "exclude", "=", "None", ")", ":", "assert", "isinstance", "(", "queryArr", ",", "list", ")", ",", "\"provided argument as not a list\"", "assert", "len", "(", "queryArr", ")", ">", "0", ",", "\"queryArr had an empty list\"", "q", "=", "CombinedQuery", "(", ")", "q", ".", "setQueryParam", "(", "\"$and\"", ",", "[", "]", ")", "for", "item", "in", "queryArr", ":", "assert", "isinstance", "(", "item", ",", "(", "CombinedQuery", ",", "BaseQuery", ")", ")", ",", "\"item in the list was not a CombinedQuery or BaseQuery instance\"", "q", ".", "getQuery", "(", ")", "[", "\"$and\"", "]", ".", "append", "(", "item", ".", "getQuery", "(", ")", ")", "if", "exclude", "!=", "None", ":", "assert", "isinstance", "(", "exclude", ",", "(", "CombinedQuery", ",", "BaseQuery", ")", ")", ",", "\"exclude parameter was not a CombinedQuery or BaseQuery instance\"", "q", ".", "setQueryParam", "(", "\"$not\"", ",", "exclude", ".", "getQuery", "(", ")", ")", "return", "q" ]
Given a ranking for a single vote and a wmg for the entire election calculate the kendall - tau distance . a . k . a the number of discordant pairs between the wmg for the vote and the wmg for the election . Currently we expect the vote to be a strict complete ordering over the candidates .
def kendallTau ( self , orderVector , wmgMap ) : discordantPairs = 0.0 for i in itertools . combinations ( orderVector , 2 ) : discordantPairs = discordantPairs + max ( 0 , wmgMap [ i [ 1 ] ] [ i [ 0 ] ] ) return discordantPairs
9,848
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L253-L270
[ "def", "render_filter", "(", "self", ",", "next_filter", ")", ":", "next", "(", "next_filter", ")", "while", "True", ":", "data", "=", "(", "yield", ")", "res", "=", "[", "self", ".", "cell_format", "(", "access", "(", "data", ")", ")", "for", "access", "in", "self", ".", "accessors", "]", "next_filter", ".", "send", "(", "res", ")" ]
Generate an initial sample for the Markov chain . This function will return a list containing integer representations of each candidate in order of their rank in the current vote from first to last . The list will be a complete strict ordering over the candidates . Initially we rank the candidates in random order .
def getInitialSample ( self , wmg ) : V = copy . deepcopy ( wmg . keys ( ) ) random . shuffle ( V ) return V
9,849
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L272-L287
[ "def", "RecurseKeys", "(", "self", ")", ":", "yield", "self", "for", "subkey", "in", "self", ".", "GetSubkeys", "(", ")", ":", "for", "key", "in", "subkey", ".", "RecurseKeys", "(", ")", ":", "yield", "key" ]
Generate an initial sample for the Markov chain . This function will return a two - dimensional array of integers such that for each pair of candidates cand1 and cand2 the array contains 1 if more votes rank cand1 above cand2 and 0 otherwise .
def getInitialSample ( self , wmg ) : cands = range ( len ( wmg ) ) allPairs = itertools . combinations ( cands , 2 ) V = self . createBinaryRelation ( len ( cands ) ) for pair in allPairs : if wmg [ pair [ 0 ] + 1 ] [ pair [ 1 ] + 1 ] > 0 : V [ pair [ 0 ] ] [ pair [ 1 ] ] = 1 V [ pair [ 1 ] ] [ pair [ 0 ] ] = 0 else : V [ pair [ 0 ] ] [ pair [ 1 ] ] = 0 V [ pair [ 1 ] ] [ pair [ 0 ] ] = 1 return V
9,850
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L341-L363
[ "def", "on_exception", "(", "self", ",", "exception", ")", ":", "logger", ".", "error", "(", "'Exception from stream!'", ",", "exc_info", "=", "True", ")", "self", ".", "streaming_exception", "=", "exception" ]
Adds fancy mouse wheel functionality and VI navigation to ListBox
def filter_input ( keys , raw ) : if len ( keys ) == 1 : if keys [ 0 ] in UI . keys [ 'up' ] : keys [ 0 ] = 'up' elif keys [ 0 ] in UI . keys [ 'down' ] : keys [ 0 ] = 'down' elif len ( keys [ 0 ] ) == 4 and keys [ 0 ] [ 0 ] == 'mouse press' : if keys [ 0 ] [ 1 ] == 4 : keys [ 0 ] = 'up' elif keys [ 0 ] [ 1 ] == 5 : keys [ 0 ] = 'down' return keys
9,851
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/pipeinspector/app.py#L39-L51
[ "def", "countryccys", "(", ")", ":", "global", "_country_ccys", "if", "not", "_country_ccys", ":", "v", "=", "{", "}", "_country_ccys", "=", "v", "ccys", "=", "currencydb", "(", ")", "for", "c", "in", "eurozone", ":", "v", "[", "c", "]", "=", "'EUR'", "for", "c", "in", "ccys", ".", "values", "(", ")", ":", "if", "c", ".", "default_country", ":", "v", "[", "c", ".", "default_country", "]", "=", "c", ".", "code", "return", "_country_ccys" ]
Turn a wordlist into a cognate set list using the cldf parameters .
def wordlist2cognates ( wordlist , source , expert = 'expert' , ref = 'cogid' ) : for k in wordlist : yield dict ( Form_ID = wordlist [ k , 'lid' ] , ID = k , Form = wordlist [ k , 'ipa' ] , Cognateset_ID = '{0}-{1}' . format ( slug ( wordlist [ k , 'concept' ] ) , wordlist [ k , ref ] ) , Cognate_Detection_Method = expert , Source = source )
9,852
https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L9-L19
[ "def", "GetExtractionStatusUpdateCallback", "(", "self", ")", ":", "if", "self", ".", "_mode", "==", "self", ".", "MODE_LINEAR", ":", "return", "self", ".", "_PrintExtractionStatusUpdateLinear", "if", "self", ".", "_mode", "==", "self", ".", "MODE_WINDOW", ":", "return", "self", ".", "_PrintExtractionStatusUpdateWindow", "return", "None" ]
Make lingpy - compatible dictinary out of cldf main data .
def _cldf2wld ( dataset ) : header = [ f for f in dataset . dataset . lexeme_class . fieldnames ( ) if f != 'ID' ] D = { 0 : [ 'lid' ] + [ h . lower ( ) for h in header ] } for idx , row in enumerate ( dataset . objects [ 'FormTable' ] ) : row = deepcopy ( row ) row [ 'Segments' ] = ' ' . join ( row [ 'Segments' ] ) D [ idx + 1 ] = [ row [ 'ID' ] ] + [ row [ h ] for h in header ] return D
9,853
https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L22-L30
[ "def", "namedb_get_names_with_value_hash", "(", "cur", ",", "value_hash", ",", "block_number", ")", ":", "unexpired_query", ",", "unexpired_args", "=", "namedb_select_where_unexpired_names", "(", "block_number", ")", "select_query", "=", "\"SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id \"", "+", "\"WHERE value_hash = ? AND revoked = 0 AND \"", "+", "unexpired_query", "+", "\";\"", "args", "=", "(", "value_hash", ",", ")", "+", "unexpired_args", "name_rows", "=", "namedb_query_execute", "(", "cur", ",", "select_query", ",", "args", ")", "names", "=", "[", "]", "for", "name_row", "in", "name_rows", ":", "names", ".", "append", "(", "name_row", "[", "'name'", "]", ")", "if", "len", "(", "names", ")", "==", "0", ":", "return", "None", "else", ":", "return", "names" ]
Read LexStat object from cldf dataset .
def _cldf2lexstat ( dataset , segments = 'segments' , transcription = 'value' , row = 'parameter_id' , col = 'language_id' ) : D = _cldf2wld ( dataset ) return lingpy . LexStat ( D , segments = segments , transcription = transcription , row = row , col = col )
9,854
https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L33-L41
[ "def", "remove_binary_support", "(", "self", ",", "api_id", ",", "cors", "=", "False", ")", ":", "response", "=", "self", ".", "apigateway_client", ".", "get_rest_api", "(", "restApiId", "=", "api_id", ")", "if", "\"binaryMediaTypes\"", "in", "response", "and", "\"*/*\"", "in", "response", "[", "\"binaryMediaTypes\"", "]", ":", "self", ".", "apigateway_client", ".", "update_rest_api", "(", "restApiId", "=", "api_id", ",", "patchOperations", "=", "[", "{", "'op'", ":", "'remove'", ",", "'path'", ":", "'/binaryMediaTypes/*~1*'", "}", "]", ")", "if", "cors", ":", "# go through each resource and change the contentHandling type", "response", "=", "self", ".", "apigateway_client", ".", "get_resources", "(", "restApiId", "=", "api_id", ")", "resource_ids", "=", "[", "item", "[", "'id'", "]", "for", "item", "in", "response", "[", "'items'", "]", "if", "'OPTIONS'", "in", "item", ".", "get", "(", "'resourceMethods'", ",", "{", "}", ")", "]", "for", "resource_id", "in", "resource_ids", ":", "self", ".", "apigateway_client", ".", "update_integration", "(", "restApiId", "=", "api_id", ",", "resourceId", "=", "resource_id", ",", "httpMethod", "=", "'OPTIONS'", ",", "patchOperations", "=", "[", "{", "\"op\"", ":", "\"replace\"", ",", "\"path\"", ":", "\"/contentHandling\"", ",", "\"value\"", ":", "\"\"", "}", "]", ")" ]
Read worldist object from cldf dataset .
def _cldf2wordlist ( dataset , row = 'parameter_id' , col = 'language_id' ) : return lingpy . Wordlist ( _cldf2wld ( dataset ) , row = row , col = col )
9,855
https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L44-L46
[ "def", "bind", "(", "self", ",", "func", ":", "Callable", "[", "[", "Any", "]", ",", "'Writer'", "]", ")", "->", "'Writer'", ":", "a", ",", "w", "=", "self", ".", "run", "(", ")", "b", ",", "w_", "=", "func", "(", "a", ")", ".", "run", "(", ")", "if", "isinstance", "(", "w_", ",", "Monoid", ")", ":", "w__", "=", "cast", "(", "Monoid", ",", "w", ")", ".", "append", "(", "w_", ")", "else", ":", "w__", "=", "w", "+", "w_", "return", "Writer", "(", "b", ",", "w__", ")" ]
Compute cognates automatically for a given dataset .
def iter_cognates ( dataset , column = 'Segments' , method = 'turchin' , threshold = 0.5 , * * kw ) : if method == 'turchin' : for row in dataset . objects [ 'FormTable' ] : sounds = '' . join ( lingpy . tokens2class ( row [ column ] , 'dolgo' ) ) if sounds . startswith ( 'V' ) : sounds = 'H' + sounds sounds = '-' . join ( [ s for s in sounds if s != 'V' ] [ : 2 ] ) cogid = slug ( row [ 'Parameter_ID' ] ) + '-' + sounds if '0' not in sounds : yield dict ( Form_ID = row [ 'ID' ] , Form = row [ 'Value' ] , Cognateset_ID = cogid , Cognate_Detection_Method = 'CMM' ) if method in [ 'sca' , 'lexstat' ] : lex = _cldf2lexstat ( dataset ) if method == 'lexstat' : lex . get_scorer ( * * kw ) lex . cluster ( method = method , threshold = threshold , ref = 'cogid' ) for k in lex : yield Cognate ( Form_ID = lex [ k , 'lid' ] , Form = lex [ k , 'value' ] , Cognateset_ID = lex [ k , 'cogid' ] , Cognate_Detection_Method = method + '-t{0:.2f}' . format ( threshold ) )
9,856
https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L49-L77
[ "def", "_routes_updated", "(", "self", ",", "ri", ")", ":", "new_routes", "=", "ri", ".", "router", "[", "'routes'", "]", "old_routes", "=", "ri", ".", "routes", "adds", ",", "removes", "=", "bc", ".", "common_utils", ".", "diff_list_of_dict", "(", "old_routes", ",", "new_routes", ")", "for", "route", "in", "adds", ":", "LOG", ".", "debug", "(", "\"Added route entry is '%s'\"", ",", "route", ")", "# remove replaced route from deleted route", "for", "del_route", "in", "removes", ":", "if", "route", "[", "'destination'", "]", "==", "del_route", "[", "'destination'", "]", ":", "removes", ".", "remove", "(", "del_route", ")", "driver", "=", "self", ".", "driver_manager", ".", "get_driver", "(", "ri", ".", "id", ")", "driver", ".", "routes_updated", "(", "ri", ",", "'replace'", ",", "route", ")", "for", "route", "in", "removes", ":", "LOG", ".", "debug", "(", "\"Removed route entry is '%s'\"", ",", "route", ")", "driver", "=", "self", ".", "driver_manager", ".", "get_driver", "(", "ri", ".", "id", ")", "driver", ".", "routes_updated", "(", "ri", ",", "'delete'", ",", "route", ")", "ri", ".", "routes", "=", "new_routes" ]
Function computes automatic alignments and writes them to file .
def iter_alignments ( dataset , cognate_sets , column = 'Segments' , method = 'library' ) : if not isinstance ( dataset , lingpy . basic . parser . QLCParser ) : wordlist = _cldf2wordlist ( dataset ) cognates = { r [ 'Form_ID' ] : r for r in cognate_sets } wordlist . add_entries ( 'cogid' , 'lid' , lambda x : cognates [ x ] [ 'Cognateset_ID' ] if x in cognates else 0 ) alm = lingpy . Alignments ( wordlist , ref = 'cogid' , row = 'parameter_id' , col = 'language_id' , segments = column . lower ( ) ) alm . align ( method = method ) for k in alm : if alm [ k , 'lid' ] in cognates : cognate = cognates [ alm [ k , 'lid' ] ] cognate [ 'Alignment' ] = alm [ k , 'alignment' ] cognate [ 'Alignment_Method' ] = method else : alm = lingpy . Alignments ( dataset , ref = 'cogid' ) alm . align ( method = method ) for cognate in cognate_sets : idx = cognate [ 'ID' ] or cognate [ 'Form_ID' ] cognate [ 'Alignment' ] = alm [ int ( idx ) , 'alignment' ] cognate [ 'Alignment_Method' ] = 'SCA-' + method
9,857
https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L80-L110
[ "def", "fill_hazard_class", "(", "layer", ")", ":", "hazard_field", "=", "layer", ".", "keywords", "[", "'inasafe_fields'", "]", "[", "hazard_class_field", "[", "'key'", "]", "]", "expression", "=", "'\"%s\" is NULL OR \"%s\" = \\'\\''", "%", "(", "hazard_field", ",", "hazard_field", ")", "index", "=", "layer", ".", "fields", "(", ")", ".", "lookupField", "(", "hazard_field", ")", "request", "=", "QgsFeatureRequest", "(", ")", ".", "setFilterExpression", "(", "expression", ")", "layer", ".", "startEditing", "(", ")", "for", "feature", "in", "layer", ".", "getFeatures", "(", "request", ")", ":", "layer", ".", "changeAttributeValue", "(", "feature", ".", "id", "(", ")", ",", "index", ",", "not_exposed_class", "[", "'key'", "]", ")", "layer", ".", "commitChanges", "(", ")", "return", "layer" ]
Convert Any file to HDF5 file
def tohdf5 ( input_files , output_file , n_events , conv_times_to_jte , * * kwargs ) : if len ( input_files ) > 1 : cprint ( "Preparing to convert {} files to HDF5." . format ( len ( input_files ) ) ) from km3pipe import Pipeline # noqa from km3pipe . io import GenericPump , HDF5Sink , HDF5MetaData # noqa for input_file in input_files : cprint ( "Converting '{}'..." . format ( input_file ) ) if len ( input_files ) > 1 : output_file = input_file + '.h5' meta_data = kwargs . copy ( ) meta_data [ 'origin' ] = input_file pipe = Pipeline ( ) pipe . attach ( HDF5MetaData , data = meta_data ) pipe . attach ( GenericPump , filenames = input_file , * * kwargs ) pipe . attach ( StatusBar , every = 250 ) if conv_times_to_jte : from km3modules . mc import MCTimeCorrector pipe . attach ( MCTimeCorrector ) pipe . attach ( HDF5Sink , filename = output_file , * * kwargs ) pipe . drain ( n_events ) cprint ( "File '{}' was converted." . format ( input_file ) )
9,858
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/tohdf5.py#L46-L73
[ "def", "acquire", "(", "self", ",", "*", "*", "kwargs", ")", ":", "token", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "attempted", "=", "False", "while", "self", ".", "token", "is", "None", ":", "try", ":", "self", ".", "client", ".", "test_and_set", "(", "self", ".", "key", ",", "token", ",", "\"0\"", ",", "ttl", "=", "self", ".", "ttl", ")", "self", ".", "token", "=", "token", "except", "etcd", ".", "EtcdKeyNotFound", ",", "e", ":", "try", ":", "self", ".", "client", ".", "write", "(", "self", ".", "key", ",", "token", ",", "prevExist", "=", "False", ",", "recursive", "=", "True", ",", "ttl", "=", "self", ".", "ttl", ")", "self", ".", "token", "=", "token", "except", "etcd", ".", "EtcdAlreadyExist", ",", "e", ":", "pass", "# someone created the right before us", "except", "ValueError", ",", "e", ":", "# someone else has the lock", "if", "'timeout'", "in", "kwargs", "or", "self", ".", "timeout", "is", "not", "None", ":", "if", "attempted", "is", "True", ":", "return", "False", "kwargs", ".", "setdefault", "(", "\"timeout\"", ",", "self", ".", "timeout", ")", "try", ":", "self", ".", "client", ".", "read", "(", "self", ".", "key", ",", "wait", "=", "True", ",", "timeout", "=", "kwargs", "[", "\"timeout\"", "]", ")", "attempted", "=", "True", "except", "etcd", ".", "EtcdException", ",", "e", ":", "return", "False", "else", ":", "self", ".", "client", ".", "watch", "(", "self", ".", "key", ")", "if", "self", ".", "renewSecondsPrior", "is", "not", "None", ":", "timer_ttl", "=", "self", ".", "ttl", "-", "self", ".", "renewSecondsPrior", "if", "timer_ttl", ">", "0", ":", "def", "renew", "(", ")", ":", "if", "self", ".", "renew", "(", ")", ":", "Timer", "(", "timer_ttl", ",", "renew", ")", ".", "start", "(", ")", "Timer", "(", "timer_ttl", ",", "renew", ")", ".", "start", "(", ")", "else", ":", "def", "cleanup", "(", ")", ":", "if", "self", ".", "token", "is", "token", ":", "self", ".", "token", "=", "None", "Timer", "(", "self", ".", "ttl", ",", "cleanup", 
")", ".", "start", "(", ")", "return", "True" ]
Pulls out all of the stream definitions from the database and populates the channels with stream references
def update_channels ( self ) : logging . info ( "Updating channels" ) with switch_db ( StreamDefinitionModel , 'hyperstream' ) : for s in StreamDefinitionModel . objects ( ) : try : stream_id = StreamId ( name = s . stream_id . name , meta_data = s . stream_id . meta_data ) except AttributeError as e : raise e logging . debug ( "Processing {}" . format ( stream_id ) ) try : # This can fail if a plugin has been defined by a different instantiation of HyperStream on the same # database. channel = self . get_channel ( s . channel_id ) except ChannelNotFoundError as e : logging . warn ( e ) continue # calculated_intervals = TimeIntervals(map(lambda x: (x.start, x.end), s.calculated_intervals)) last_accessed = utcnow ( ) last_updated = s . last_updated if s . last_updated else utcnow ( ) if stream_id in channel . streams : if isinstance ( channel , ( AssetsChannel , AssetsFileChannel ) ) : continue raise StreamAlreadyExistsError ( stream_id ) from . import MemoryChannel , DatabaseChannel if isinstance ( channel , MemoryChannel ) : channel . create_stream ( stream_id ) elif isinstance ( channel , DatabaseChannel ) : if channel == self . assets : stream_type = AssetStream else : stream_type = DatabaseStream channel . streams [ stream_id ] = stream_type ( channel = channel , stream_id = stream_id , calculated_intervals = None , # Not required since it's initialised from mongo_model in __init__ last_accessed = last_accessed , last_updated = last_updated , sandbox = s . sandbox , mongo_model = s ) else : logging . warn ( "Unable to parse stream {}" . format ( stream_id ) )
9,859
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/channel_manager.py#L97-L146
[ "def", "clip_joint_velocities", "(", "self", ",", "velocities", ")", ":", "for", "i", "in", "range", "(", "len", "(", "velocities", ")", ")", ":", "if", "velocities", "[", "i", "]", ">=", "1.0", ":", "velocities", "[", "i", "]", "=", "1.0", "elif", "velocities", "[", "i", "]", "<=", "-", "1.0", ":", "velocities", "[", "i", "]", "=", "-", "1.0", "return", "velocities" ]
Gets the actual class which can then be instantiated with its parameters
def get_tool_class ( self , tool ) : if isinstance ( tool , string_types ) : tool_id = StreamId ( tool ) elif isinstance ( tool , StreamId ) : tool_id = tool else : raise TypeError ( tool ) tool_stream_view = None # Look in the main tool channel first if tool_id in self . tools : tool_stream_view = self . tools [ tool_id ] . window ( ( MIN_DATE , self . tools . up_to_timestamp ) ) else : # Otherwise look through all the channels in the order they were defined for tool_channel in self . tool_channels : if tool_channel == self . tools : continue if tool_id in tool_channel : # noinspection PyTypeChecker tool_stream_view = tool_channel [ tool_id ] . window ( ( MIN_DATE , tool_channel . up_to_timestamp ) ) if tool_stream_view is None : raise ToolNotFoundError ( tool ) # TODO: Use tool versions - here we just take the latest one last = tool_stream_view . last ( ) if last is None : raise ToolNotFoundError ( tool ) return tool_stream_view . last ( ) . value
9,860
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/channel_manager.py#L148-L186
[ "def", "get_game_logs", "(", "self", ")", ":", "logs", "=", "self", ".", "response", ".", "json", "(", ")", "[", "'resultSets'", "]", "[", "0", "]", "[", "'rowSet'", "]", "headers", "=", "self", ".", "response", ".", "json", "(", ")", "[", "'resultSets'", "]", "[", "0", "]", "[", "'headers'", "]", "df", "=", "pd", ".", "DataFrame", "(", "logs", ",", "columns", "=", "headers", ")", "df", ".", "GAME_DATE", "=", "pd", ".", "to_datetime", "(", "df", ".", "GAME_DATE", ")", "return", "df" ]
Determines if this plate is a sub - plate of another plate - i . e . has the same meta data but a restricted set of values
def is_sub_plate ( self , other ) : if all ( v in set ( other . values ) for v in self . values ) : return True if all ( any ( all ( spv in m for spv in v ) for m in map ( set , other . values ) ) for v in self . values ) : return True if other in self . ancestor_plates : # added by MK, but still not sure whether all cases are covered return True return False
9,861
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/plate/plate.py#L130-L144
[ "def", "exportpriv", "(", "self", ",", "format", "=", "\"PEM\"", ",", "password", "=", "None", ",", "cipher", "=", "None", ")", ":", "bio", "=", "Membio", "(", ")", "if", "cipher", "is", "None", ":", "evp_cipher", "=", "None", "else", ":", "evp_cipher", "=", "cipher", ".", "cipher", "if", "format", "==", "\"PEM\"", ":", "ret", "=", "libcrypto", ".", "PEM_write_bio_PrivateKey", "(", "bio", ".", "bio", ",", "self", ".", "key", ",", "evp_cipher", ",", "None", ",", "0", ",", "_password_callback", "(", "password", ")", ",", "None", ")", "if", "ret", "==", "0", ":", "raise", "PKeyError", "(", "\"error serializing private key\"", ")", "return", "str", "(", "bio", ")", "else", ":", "ret", "=", "libcrypto", ".", "i2d_PKCS8PrivateKey_bio", "(", "bio", ".", "bio", ",", "self", ".", "key", ",", "evp_cipher", ",", "None", ",", "0", ",", "_password_callback", "(", "password", ")", ",", "None", ")", "if", "ret", "==", "0", ":", "raise", "PKeyError", "(", "\"error serializing private key\"", ")", "return", "bintype", "(", "bio", ")" ]
Normalize metadata value to improve match accuracy .
def normalize_value ( value ) : value = str ( value ) value = value . casefold ( ) value = re . sub ( r'\/\s*\d+' , '' , value ) # Remove "/<totaltracks>" from track number. value = re . sub ( r'^0+([0-9]+)' , r'\1' , value ) # Remove leading zero(s) from track number. value = re . sub ( r'^(\d+)\.+' , r'\1' , value ) # Remove dots from track number. value = re . sub ( r'[^\w\s]' , '' , value ) # Remove leading non-word characters. value = re . sub ( r'^the\s+' , '' , value ) # Remove leading "the". value = re . sub ( r'^\s+' , '' , value ) # Remove leading spaces. value = re . sub ( r'\s+$' , '' , value ) # Remove trailing spaces. value = re . sub ( r'\s+' , ' ' , value ) # Reduce multiple spaces to a single space. return value
9,862
https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/utils.py#L53-L68
[ "def", "write_dict_to_new_file", "(", "file_name", ",", "localization_key_to_comment", ")", ":", "output_file_descriptor", "=", "open_strings_file", "(", "file_name", ",", "\"w\"", ")", "for", "entry_key", ",", "entry_comment", "in", "sorted", "(", "localization_key_to_comment", ".", "iteritems", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ")", ":", "write_entry_to_file", "(", "output_file_descriptor", ",", "entry_comment", ",", "entry_key", ")", "output_file_descriptor", ".", "write", "(", "u'\\n'", ")", "output_file_descriptor", ".", "close", "(", ")" ]
Create detector from detx file .
def _init_from_file ( self , filename ) : if not filename . endswith ( "detx" ) : raise NotImplementedError ( 'Only the detx format is supported.' ) self . _open_file ( filename ) self . _extract_comments ( ) self . _parse_header ( ) self . _parse_doms ( ) self . _det_file . close ( )
9,863
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L92-L100
[ "def", "_sorted_keys", "(", "self", ",", "keys", ")", ":", "sorted_keys", "=", "[", "]", "if", "(", "'epoch'", "in", "keys", ")", "and", "(", "'epoch'", "not", "in", "self", ".", "keys_ignored_", ")", ":", "sorted_keys", ".", "append", "(", "'epoch'", ")", "for", "key", "in", "sorted", "(", "keys", ")", ":", "if", "not", "(", "(", "key", "in", "(", "'epoch'", ",", "'dur'", ")", ")", "or", "(", "key", "in", "self", ".", "keys_ignored_", ")", "or", "key", ".", "endswith", "(", "'_best'", ")", "or", "key", ".", "startswith", "(", "'event_'", ")", ")", ":", "sorted_keys", ".", "append", "(", "key", ")", "for", "key", "in", "sorted", "(", "keys", ")", ":", "if", "key", ".", "startswith", "(", "'event_'", ")", "and", "(", "key", "not", "in", "self", ".", "keys_ignored_", ")", ":", "sorted_keys", ".", "append", "(", "key", ")", "if", "(", "'dur'", "in", "keys", ")", "and", "(", "'dur'", "not", "in", "self", ".", "keys_ignored_", ")", ":", "sorted_keys", ".", "append", "(", "'dur'", ")", "return", "sorted_keys" ]
The next line of the DETX file optionally ignores comments
def _readline ( self , ignore_comments = True ) : while True : line = self . _det_file . readline ( ) if line == '' : return line # To conform the EOF behaviour of .readline() line = line . strip ( ) if line == '' : continue # white-space-only line if line . startswith ( '#' ) : if not ignore_comments : return line else : return line
9,864
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L106-L119
[ "def", "ready", "(", "self", ")", ":", "if", "self", ".", "_clustering", ":", "return", "(", "all", "(", "[", "c", ".", "connected", "for", "c", "in", "self", ".", "_cluster", ".", "values", "(", ")", "]", ")", "and", "len", "(", "self", ".", "_cluster", ")", ")", "return", "(", "self", ".", "_connection", "and", "self", ".", "_connection", ".", "connected", ")" ]
Retrieve all comments from the file
def _extract_comments ( self ) : self . _det_file . seek ( 0 , 0 ) for line in self . _det_file . readlines ( ) : line = line . strip ( ) if line . startswith ( '#' ) : self . add_comment ( line [ 1 : ] )
9,865
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L121-L127
[ "def", "format_log_context", "(", "msg", ",", "connection", "=", "None", ",", "keyspace", "=", "None", ")", ":", "connection_info", "=", "connection", "or", "'DEFAULT_CONNECTION'", "if", "keyspace", ":", "msg", "=", "'[Connection: {0}, Keyspace: {1}] {2}'", ".", "format", "(", "connection_info", ",", "keyspace", ",", "msg", ")", "else", ":", "msg", "=", "'[Connection: {0}] {1}'", ".", "format", "(", "connection_info", ",", "msg", ")", "return", "msg" ]
Extract information from the header of the detector file
def _parse_header ( self ) : self . print ( "Parsing the DETX header" ) self . _det_file . seek ( 0 , 0 ) first_line = self . _readline ( ) try : self . det_id , self . n_doms = split ( first_line , int ) self . version = 'v1' except ValueError : det_id , self . version = first_line . split ( ) self . det_id = int ( det_id ) validity = self . _readline ( ) . strip ( ) self . valid_from , self . valid_until = split ( validity , float ) raw_utm_info = self . _readline ( ) . strip ( ) . split ( ' ' ) try : self . utm_info = UTMInfo ( * raw_utm_info [ 1 : ] ) except TypeError : log . warning ( "Missing UTM information." ) n_doms = self . _readline ( ) self . n_doms = int ( n_doms )
9,866
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L129-L148
[ "def", "update_sandbox_product", "(", "self", ",", "product_id", ",", "surge_multiplier", "=", "None", ",", "drivers_available", "=", "None", ",", ")", ":", "args", "=", "{", "'surge_multiplier'", ":", "surge_multiplier", ",", "'drivers_available'", ":", "drivers_available", ",", "}", "endpoint", "=", "'v1.2/sandbox/products/{}'", ".", "format", "(", "product_id", ")", "return", "self", ".", "_api_call", "(", "'PUT'", ",", "endpoint", ",", "args", "=", "args", ")" ]
The positions of the DOMs calculated from PMT directions .
def dom_positions ( self ) : if not self . _dom_positions : for dom_id in self . dom_ids : mask = self . pmts . dom_id == dom_id pmt_pos = self . pmts [ mask ] . pos pmt_dir = self . pmts [ mask ] . dir centre = intersect_3d ( pmt_pos , pmt_pos - pmt_dir * 10 ) self . _dom_positions [ dom_id ] = centre return self . _dom_positions
9,867
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L252-L261
[ "def", "end_stream", "(", "self", ",", "stream_id", ")", ":", "with", "(", "yield", "from", "self", ".", "_get_stream", "(", "stream_id", ")", ".", "wlock", ")", ":", "yield", "from", "self", ".", "_resumed", ".", "wait", "(", ")", "self", ".", "_conn", ".", "end_stream", "(", "stream_id", ")", "self", ".", "_flush", "(", ")" ]
A Table containing DOM attributes
def dom_table ( self ) : if self . _dom_table is None : data = defaultdict ( list ) for dom_id , ( du , floor , _ ) in self . doms . items ( ) : data [ 'dom_id' ] . append ( dom_id ) data [ 'du' ] . append ( du ) data [ 'floor' ] . append ( floor ) dom_position = self . dom_positions [ dom_id ] data [ 'pos_x' ] . append ( dom_position [ 0 ] ) data [ 'pos_y' ] . append ( dom_position [ 1 ] ) data [ 'pos_z' ] . append ( dom_position [ 2 ] ) self . _dom_table = Table ( data , name = 'DOMs' , h5loc = '/dom_table' ) return self . _dom_table
9,868
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L264-L277
[ "def", "_cmd_create", "(", "self", ")", ":", "assert", "self", ".", "_message", ",", "\"need to supply a message for the \\\"create\\\" command\"", "if", "not", "self", ".", "_revisions", ":", "self", ".", "_revisions", ".", "append", "(", "\"1\"", ")", "# get the migration folder", "rev_folder", "=", "self", ".", "_revisions", "[", "-", "1", "]", "full_rev_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_migration_path", ",", "rev_folder", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "full_rev_path", ")", ":", "os", ".", "mkdir", "(", "full_rev_path", ")", "else", ":", "count", "=", "len", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "full_rev_path", ",", "\"*\"", ")", ")", ")", "# create next revision folder if needed", "if", "count", "and", "self", ".", "_rev", "and", "int", "(", "self", ".", "_rev", ")", "==", "0", ":", "rev_folder", "=", "str", "(", "int", "(", "rev_folder", ")", "+", "1", ")", "full_rev_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_migration_path", ",", "rev_folder", ")", "os", ".", "mkdir", "(", "full_rev_path", ")", "self", ".", "_revisions", ".", "append", "(", "rev_folder", ")", "# format file name", "filename", "=", "'_'", ".", "join", "(", "[", "s", ".", "lower", "(", ")", "for", "s", "in", "self", ".", "_message", ".", "split", "(", "' '", ")", "if", "s", ".", "strip", "(", ")", "]", ")", "for", "p", "in", "string", ".", "punctuation", ":", "if", "p", "in", "filename", ":", "filename", "=", "filename", ".", "replace", "(", "p", ",", "'_'", ")", "filename", "=", "\"%s_%s\"", "%", "(", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "\"%Y%m%d%H%M%S\"", ")", ",", "filename", ".", "replace", "(", "'__'", ",", "'_'", ")", ")", "# create the migration files", "self", ".", "_log", "(", "0", ",", "\"creating files: \"", ")", "for", "s", "in", "(", "'up'", ",", "'down'", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "full_rev_path", ",", 
"\"%s.%s.sql\"", "%", "(", "filename", ",", "s", ")", ")", "with", "open", "(", "file_path", ",", "'a+'", ")", "as", "w", ":", "w", ".", "write", "(", "'\\n'", ".", "join", "(", "[", "'-- *** %s ***'", "%", "s", ".", "upper", "(", ")", ",", "'-- file: %s'", "%", "os", ".", "path", ".", "join", "(", "rev_folder", ",", "filename", ")", ",", "'-- comment: %s'", "%", "self", ".", "_message", "]", ")", ")", "self", ".", "_log", "(", "0", ",", "file_path", ")" ]
Center of mass calculated from the mean of the PMT positions
def com ( self ) : if self . _com is None : self . _com = np . mean ( self . pmts . pos , axis = 0 ) return self . _com
9,869
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L280-L284
[ "def", "initWithComplexQuery", "(", "query", ")", ":", "q", "=", "QueryEvents", "(", ")", "# provided an instance of ComplexEventQuery", "if", "isinstance", "(", "query", ",", "ComplexEventQuery", ")", ":", "q", ".", "_setVal", "(", "\"query\"", ",", "json", ".", "dumps", "(", "query", ".", "getQuery", "(", ")", ")", ")", "# provided query as a string containing the json object", "elif", "isinstance", "(", "query", ",", "six", ".", "string_types", ")", ":", "foo", "=", "json", ".", "loads", "(", "query", ")", "q", ".", "_setVal", "(", "\"query\"", ",", "query", ")", "# provided query as a python dict", "elif", "isinstance", "(", "query", ",", "dict", ")", ":", "q", ".", "_setVal", "(", "\"query\"", ",", "json", ".", "dumps", "(", "query", ")", ")", "# unrecognized value provided", "else", ":", "assert", "False", ",", "\"The instance of query parameter was not a ComplexEventQuery, a string or a python dict\"", "return", "q" ]
XY positions of the DUs given by the DOMs on floor 1 .
def xy_positions ( self ) : if self . _xy_positions is None or len ( self . _xy_positions ) == 0 : xy_pos = [ ] for dom_id , pos in self . dom_positions . items ( ) : if self . domid2floor ( dom_id ) == 1 : xy_pos . append ( np . array ( [ pos [ 0 ] , pos [ 1 ] ] ) ) self . _xy_positions = np . array ( xy_pos ) return self . _xy_positions
9,870
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L287-L295
[ "def", "seed", "(", "vault_client", ",", "opt", ")", ":", "if", "opt", ".", "thaw_from", ":", "opt", ".", "secrets", "=", "tempfile", ".", "mkdtemp", "(", "'aomi-thaw'", ")", "auto_thaw", "(", "vault_client", ",", "opt", ")", "Context", ".", "load", "(", "get_secretfile", "(", "opt", ")", ",", "opt", ")", ".", "fetch", "(", "vault_client", ")", ".", "sync", "(", "vault_client", ",", "opt", ")", "if", "opt", ".", "thaw_from", ":", "rmtree", "(", "opt", ".", "secrets", ")" ]
Translate the detector by a given vector
def translate_detector ( self , vector ) : vector = np . array ( vector , dtype = float ) self . pmts . pos_x += vector [ 0 ] self . pmts . pos_y += vector [ 1 ] self . pmts . pos_z += vector [ 2 ] self . reset_caches ( )
9,871
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L297-L303
[ "def", "get_random", "(", "self", ")", ":", "import", "random", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "session", "=", "self", ".", "Session", "(", ")", "count", "=", "self", ".", "count", "(", ")", "if", "count", "<", "1", ":", "raise", "self", ".", "EmptyDatabaseException", "(", ")", "random_index", "=", "random", ".", "randrange", "(", "0", ",", "count", ")", "random_statement", "=", "session", ".", "query", "(", "Statement", ")", "[", "random_index", "]", "statement", "=", "self", ".", "model_to_object", "(", "random_statement", ")", "session", ".", "close", "(", ")", "return", "statement" ]
A list of PMT directions sorted by PMT channel on DU - 1 floor - 1
def pmt_angles ( self ) : if self . _pmt_angles == [ ] : mask = ( self . pmts . du == 1 ) & ( self . pmts . floor == 1 ) self . _pmt_angles = self . pmts . dir [ mask ] return self . _pmt_angles
9,872
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L351-L356
[ "def", "get_stream_url", "(", "self", ",", "session_id", ",", "stream_id", "=", "None", ")", ":", "url", "=", "self", ".", "api_url", "+", "'/v2/project/'", "+", "self", ".", "api_key", "+", "'/session/'", "+", "session_id", "+", "'/stream'", "if", "stream_id", ":", "url", "=", "url", "+", "'/'", "+", "stream_id", "return", "url" ]
The ascii representation of the detector
def ascii ( self ) : comments = '' if self . version == 'v3' : for comment in self . comments : if not comment . startswith ( ' ' ) : comment = ' ' + comment comments += "#" + comment + "\n" if self . version == 'v1' : header = "{det.det_id} {det.n_doms}" . format ( det = self ) else : header = "{det.det_id} {det.version}" . format ( det = self ) header += "\n{0} {1}" . format ( self . valid_from , self . valid_until ) header += "\n" + str ( self . utm_info ) + "\n" header += str ( self . n_doms ) doms = "" for dom_id , ( line , floor , n_pmts ) in self . doms . items ( ) : doms += "{0} {1} {2} {3}\n" . format ( dom_id , line , floor , n_pmts ) for channel_id in range ( n_pmts ) : pmt_idx = self . _pmt_index_by_omkey [ ( line , floor , channel_id ) ] pmt = self . pmts [ pmt_idx ] doms += " {0} {1} {2} {3} {4} {5} {6} {7}" . format ( pmt . pmt_id , pmt . pos_x , pmt . pos_y , pmt . pos_z , pmt . dir_x , pmt . dir_y , pmt . dir_z , pmt . t0 ) if self . version == 'v3' : doms += " {0}" . format ( pmt . status ) doms += "\n" return comments + header + "\n" + doms
9,873
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L359-L389
[ "def", "return_port", "(", "port", ")", ":", "if", "port", "in", "_random_ports", ":", "_random_ports", ".", "remove", "(", "port", ")", "elif", "port", "in", "_owned_ports", ":", "_owned_ports", ".", "remove", "(", "port", ")", "_free_ports", ".", "add", "(", "port", ")", "elif", "port", "in", "_free_ports", ":", "logging", ".", "info", "(", "\"Returning a port that was already returned: %s\"", ",", "port", ")", "else", ":", "logging", ".", "info", "(", "\"Returning a port that wasn't given by portpicker: %s\"", ",", "port", ")" ]
Save detx file .
def write ( self , filename ) : with open ( filename , 'w' ) as f : f . write ( self . ascii ) self . print ( "Detector file saved as '{0}'" . format ( filename ) )
9,874
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L391-L395
[ "def", "get_listing", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'listing'", ")", ":", "allEvents", "=", "self", ".", "get_allEvents", "(", ")", "openEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", "closedEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", "publicEvents", "=", "allEvents", ".", "instance_of", "(", "PublicEvent", ")", "allSeries", "=", "allEvents", ".", "instance_of", "(", "Series", ")", "self", ".", "listing", "=", "{", "'allEvents'", ":", "allEvents", ",", "'openEvents'", ":", "openEvents", ",", "'closedEvents'", ":", "closedEvents", ",", "'publicEvents'", ":", "publicEvents", ",", "'allSeries'", ":", "allSeries", ",", "'regOpenEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'categorySeparateEvents'", ":", "publicEvents", ".", "filter", "(", "publicevent__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'publicevent__category'", ")", ",", "'regOpenSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", 
"False", ")", ")", ",", "'categorySeparateSeries'", ":", "allSeries", ".", "filter", "(", "series__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'series__category'", ")", ",", "}", "return", "self", ".", "listing" ]
Get PMT with global pmt_id
def pmt_with_id ( self , pmt_id ) : try : return self . pmts [ self . _pmt_index_by_pmt_id [ pmt_id ] ] except KeyError : raise KeyError ( "No PMT found for ID: {0}" . format ( pmt_id ) )
9,875
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L397-L402
[ "def", "link_files", "(", "files", ":", "set", ",", "workspace_src_dir", ":", "str", ",", "common_parent", ":", "str", ",", "conf", ")", ":", "norm_dir", "=", "normpath", "(", "workspace_src_dir", ")", "base_dir", "=", "''", "if", "common_parent", ":", "common_parent", "=", "normpath", "(", "common_parent", ")", "base_dir", "=", "commonpath", "(", "list", "(", "files", ")", "+", "[", "common_parent", "]", ")", "if", "base_dir", "!=", "common_parent", ":", "raise", "ValueError", "(", "'{} is not the common parent of all target '", "'sources and data'", ".", "format", "(", "common_parent", ")", ")", "logger", ".", "debug", "(", "'Rebasing files in image relative to common parent dir {}'", ",", "base_dir", ")", "num_linked", "=", "0", "for", "src", "in", "files", ":", "abs_src", "=", "join", "(", "conf", ".", "project_root", ",", "src", ")", "abs_dest", "=", "join", "(", "conf", ".", "project_root", ",", "workspace_src_dir", ",", "relpath", "(", "src", ",", "base_dir", ")", ")", "link_node", "(", "abs_src", ",", "abs_dest", ",", "conf", ".", "builders_workspace_dir", "in", "src", ")", "num_linked", "+=", "1", "return", "num_linked" ]
Return PMT with DOM ID and DAQ channel ID
def get_pmt ( self , dom_id , channel_id ) : du , floor , _ = self . doms [ dom_id ] pmt = self . pmts [ self . _pmt_index_by_omkey [ ( du , floor , channel_id ) ] ] return pmt
9,876
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L404-L408
[ "def", "expand_file_arguments", "(", ")", ":", "new_args", "=", "[", "]", "expanded", "=", "False", "for", "arg", "in", "sys", ".", "argv", ":", "if", "arg", ".", "startswith", "(", "\"@\"", ")", ":", "expanded", "=", "True", "with", "open", "(", "arg", "[", "1", ":", "]", ",", "\"r\"", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "new_args", "+=", "shlex", ".", "split", "(", "line", ")", "else", ":", "new_args", ".", "append", "(", "arg", ")", "if", "expanded", ":", "print", "(", "\"esptool.py %s\"", "%", "(", "\" \"", ".", "join", "(", "new_args", "[", "1", ":", "]", ")", ")", ")", "sys", ".", "argv", "=", "new_args" ]
Function that converts MC times to JTE times .
def convert_mc_times_to_jte_times ( times_mc , evt_timestamp_in_ns , evt_mc_time ) : # needs to be cast to normal ndarray (not recarray), or else we # would get invalid type promotion times_mc = np . array ( times_mc ) . astype ( float ) times_jte = times_mc - evt_timestamp_in_ns + evt_mc_time return times_jte
9,877
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/mc.py#L86-L108
[ "def", "_put_bucket_policy", "(", "self", ")", ":", "if", "self", ".", "s3props", "[", "'bucket_policy'", "]", ":", "policy_str", "=", "json", ".", "dumps", "(", "self", ".", "s3props", "[", "'bucket_policy'", "]", ")", "_response", "=", "self", ".", "s3client", ".", "put_bucket_policy", "(", "Bucket", "=", "self", ".", "bucket", ",", "Policy", "=", "policy_str", ")", "else", ":", "_response", "=", "self", ".", "s3client", ".", "delete_bucket_policy", "(", "Bucket", "=", "self", ".", "bucket", ")", "LOG", ".", "debug", "(", "'Response adding bucket policy: %s'", ",", "_response", ")", "LOG", ".", "info", "(", "'S3 Bucket Policy Attached'", ")" ]
Returns True of iRODS path exists otherwise False
def iexists ( irods_path ) : try : subprocess . check_output ( 'ils {}' . format ( irods_path ) , shell = True , stderr = subprocess . PIPE , ) return True except subprocess . CalledProcessError : return False
9,878
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L42-L52
[ "def", "_print_download_progress_msg", "(", "self", ",", "msg", ",", "flush", "=", "False", ")", ":", "if", "self", ".", "_interactive_mode", "(", ")", ":", "# Print progress message to console overwriting previous progress", "# message.", "self", ".", "_max_prog_str", "=", "max", "(", "self", ".", "_max_prog_str", ",", "len", "(", "msg", ")", ")", "sys", ".", "stdout", ".", "write", "(", "\"\\r%-{}s\"", ".", "format", "(", "self", ".", "_max_prog_str", ")", "%", "msg", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "if", "flush", ":", "print", "(", "\"\\n\"", ")", "else", ":", "# Interactive progress tracking is disabled. Print progress to the", "# standard TF log.", "logging", ".", "info", "(", "msg", ")" ]
Return a random URL - safe text string in Base64 encoding .
def token_urlsafe ( nbytes = 32 ) : tok = os . urandom ( nbytes ) return base64 . urlsafe_b64encode ( tok ) . rstrip ( b'=' ) . decode ( 'ascii' )
9,879
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L55-L68
[ "def", "parse", "(", "self", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "ev", "=", "'errex'", "in", "kw", "and", "kw", ".", "pop", "(", "'errex'", ")", "if", "ev", "and", "not", "isinstance", "(", "ev", ",", "int", ")", ":", "raise", "TypeError", "(", "\"error exit value should be an integer\"", ")", "try", ":", "self", ".", "cmdline", "=", "self", ".", "parser", ".", "parse_args", "(", "*", "args", ",", "*", "*", "kw", ")", "except", "OptParseError", ":", "if", "ev", ":", "sys", ".", "exit", "(", "ev", ")", "raise", "return", "self", ".", "fuse_args" ]
Wrap text in a pretty line with maximum length .
def prettyln ( text , fill = '-' , align = '^' , prefix = '[ ' , suffix = ' ]' , length = 69 ) : text = '{prefix}{0}{suffix}' . format ( text , prefix = prefix , suffix = suffix ) print ( "{0:{fill}{align}{length}}" . format ( text , fill = fill , align = align , length = length ) )
9,880
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L77-L84
[ "async", "def", "run", "(", "self", ",", "config", ",", "*", ",", "name", "=", "None", ")", ":", "try", ":", "container", "=", "await", "self", ".", "create", "(", "config", ",", "name", "=", "name", ")", "except", "DockerError", "as", "err", ":", "# image not find, try pull it", "if", "err", ".", "status", "==", "404", "and", "\"Image\"", "in", "config", ":", "await", "self", ".", "docker", ".", "pull", "(", "config", "[", "\"Image\"", "]", ")", "container", "=", "await", "self", ".", "create", "(", "config", ",", "name", "=", "name", ")", "else", ":", "raise", "err", "try", ":", "await", "container", ".", "start", "(", ")", "except", "DockerError", "as", "err", ":", "raise", "DockerContainerError", "(", "err", ".", "status", ",", "{", "\"message\"", ":", "err", ".", "message", "}", ",", "container", "[", "\"id\"", "]", ")", "return", "container" ]
Unpack the nfrist items from the list and return the rest .
def unpack_nfirst ( seq , nfirst ) : iterator = iter ( seq ) for _ in range ( nfirst ) : yield next ( iterator , None ) yield tuple ( iterator )
9,881
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L98-L111
[ "def", "sqlite3_find_tool", "(", ")", ":", "# find sqlite3", "path", "=", "os", ".", "environ", ".", "get", "(", "\"PATH\"", ",", "None", ")", "if", "path", "is", "None", ":", "path", "=", "\"/usr/local/bin:/usr/bin:/bin\"", "sqlite3_path", "=", "None", "dirs", "=", "path", ".", "split", "(", "\":\"", ")", "for", "pathdir", "in", "dirs", ":", "if", "len", "(", "pathdir", ")", "==", "0", ":", "continue", "sqlite3_path", "=", "os", ".", "path", ".", "join", "(", "pathdir", ",", "'sqlite3'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "sqlite3_path", ")", ":", "continue", "if", "not", "os", ".", "path", ".", "isfile", "(", "sqlite3_path", ")", ":", "continue", "if", "not", "os", ".", "access", "(", "sqlite3_path", ",", "os", ".", "X_OK", ")", ":", "continue", "break", "if", "sqlite3_path", "is", "None", ":", "log", ".", "error", "(", "\"Could not find sqlite3 binary\"", ")", "return", "None", "return", "sqlite3_path" ]
Split the string and execute the callback function on each part .
def split ( string , callback = None , sep = None ) : if callback is not None : return [ callback ( i ) for i in string . split ( sep ) ] else : return string . split ( sep )
9,882
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L114-L126
[ "def", "_adapt_WSDateTime", "(", "dt", ")", ":", "try", ":", "ts", "=", "int", "(", "(", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "pytz", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", "except", "(", "OverflowError", ",", "OSError", ")", ":", "if", "dt", "<", "datetime", ".", "now", "(", ")", ":", "ts", "=", "0", "else", ":", "ts", "=", "2", "**", "63", "-", "1", "return", "ts" ]
Create a namedtuple with default values
def namedtuple_with_defaults ( typename , field_names , default_values = [ ] ) : the_tuple = collections . namedtuple ( typename , field_names ) the_tuple . __new__ . __defaults__ = ( None , ) * len ( the_tuple . _fields ) if isinstance ( default_values , collections . Mapping ) : prototype = the_tuple ( * * default_values ) else : prototype = the_tuple ( * default_values ) the_tuple . __new__ . __defaults__ = tuple ( prototype ) return the_tuple
9,883
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L129-L151
[ "def", "_index_audio_cmu", "(", "self", ",", "basename", "=", "None", ",", "replace_already_indexed", "=", "False", ")", ":", "self", ".", "_prepare_audio", "(", "basename", "=", "basename", ",", "replace_already_indexed", "=", "replace_already_indexed", ")", "for", "staging_audio_basename", "in", "self", ".", "_list_audio_files", "(", "sub_dir", "=", "\"staging\"", ")", ":", "original_audio_name", "=", "''", ".", "join", "(", "staging_audio_basename", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "[", ":", "-", "3", "]", "pocketsphinx_command", "=", "''", ".", "join", "(", "[", "\"pocketsphinx_continuous\"", ",", "\"-infile\"", ",", "str", "(", "\"{}/staging/{}\"", ".", "format", "(", "self", ".", "src_dir", ",", "staging_audio_basename", ")", ")", ",", "\"-time\"", ",", "\"yes\"", ",", "\"-logfn\"", ",", "\"/dev/null\"", "]", ")", "try", ":", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "\"Now indexing {}\"", ".", "format", "(", "staging_audio_basename", ")", ")", "output", "=", "subprocess", ".", "check_output", "(", "[", "\"pocketsphinx_continuous\"", ",", "\"-infile\"", ",", "str", "(", "\"{}/staging/{}\"", ".", "format", "(", "self", ".", "src_dir", ",", "staging_audio_basename", ")", ")", ",", "\"-time\"", ",", "\"yes\"", ",", "\"-logfn\"", ",", "\"/dev/null\"", "]", ",", "universal_newlines", "=", "True", ")", ".", "split", "(", "'\\n'", ")", "str_timestamps_with_sil_conf", "=", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "split", "(", "\" \"", ")", ",", "filter", "(", "None", ",", "output", "[", "1", ":", "]", ")", ")", ")", "# Timestamps are putted in a list of a single element. 
To match", "# Watson's output.", "self", ".", "__timestamps_unregulated", "[", "original_audio_name", "+", "\".wav\"", "]", "=", "[", "(", "self", ".", "_timestamp_extractor_cmu", "(", "staging_audio_basename", ",", "str_timestamps_with_sil_conf", ")", ")", "]", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "\"Done indexing {}\"", ".", "format", "(", "staging_audio_basename", ")", ")", "except", "OSError", "as", "e", ":", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "e", ",", "\"The command was: {}\"", ".", "format", "(", "pocketsphinx_command", ")", ")", "self", ".", "__errors", "[", "(", "time", "(", ")", ",", "staging_audio_basename", ")", "]", "=", "e", "self", ".", "_timestamp_regulator", "(", ")", "if", "self", ".", "get_verbosity", "(", ")", ":", "print", "(", "\"Finished indexing procedure\"", ")" ]
Remain the file pointer position after calling the decorated function
def remain_file_pointer ( function ) : def wrapper ( * args , * * kwargs ) : """Wrap the function and remain its parameters and return values""" file_obj = args [ - 1 ] old_position = file_obj . tell ( ) return_value = function ( * args , * * kwargs ) file_obj . seek ( old_position , 0 ) return return_value return wrapper
9,884
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L154-L169
[ "def", "variable_summaries", "(", "vars_", ",", "groups", "=", "None", ",", "scope", "=", "'weights'", ")", ":", "groups", "=", "groups", "or", "{", "r'all'", ":", "r'.*'", "}", "grouped", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "var", "in", "vars_", ":", "for", "name", ",", "pattern", "in", "groups", ".", "items", "(", ")", ":", "if", "re", ".", "match", "(", "pattern", ",", "var", ".", "name", ")", ":", "name", "=", "re", ".", "sub", "(", "pattern", ",", "name", ",", "var", ".", "name", ")", "grouped", "[", "name", "]", ".", "append", "(", "var", ")", "for", "name", "in", "groups", ":", "if", "name", "not", "in", "grouped", ":", "tf", ".", "logging", ".", "warn", "(", "\"No variables matching '{}' group.\"", ".", "format", "(", "name", ")", ")", "summaries", "=", "[", "]", "# pylint: disable=redefined-argument-from-local", "for", "name", ",", "vars_", "in", "grouped", ".", "items", "(", ")", ":", "vars_", "=", "[", "tf", ".", "reshape", "(", "var", ",", "[", "-", "1", "]", ")", "for", "var", "in", "vars_", "]", "vars_", "=", "tf", ".", "concat", "(", "vars_", ",", "0", ")", "summaries", ".", "append", "(", "tf", ".", "summary", ".", "histogram", "(", "scope", "+", "'/'", "+", "name", ",", "vars_", ")", ")", "return", "tf", ".", "summary", ".", "merge", "(", "summaries", ")" ]
Convert CamelCase to lower_and_underscore .
def decamelise ( text ) : s = re . sub ( '(.)([A-Z][a-z]+)' , r'\1_\2' , text ) return re . sub ( '([a-z0-9])([A-Z])' , r'\1_\2' , s ) . lower ( )
9,885
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L180-L183
[ "def", "skip_all", "(", "self", ")", ":", "storage", ",", "streaming", "=", "self", ".", "engine", ".", "count", "(", ")", "if", "self", ".", "selector", ".", "output", ":", "self", ".", "offset", "=", "streaming", "else", ":", "self", ".", "offset", "=", "storage", "self", ".", "_count", "=", "0" ]
Convert lower_underscore to CamelCase .
def camelise ( text , capital_first = True ) : def camelcase ( ) : if not capital_first : yield str . lower while True : yield str . capitalize if istype ( text , 'unicode' ) : text = text . encode ( 'utf8' ) c = camelcase ( ) return "" . join ( next ( c ) ( x ) if x else '_' for x in text . split ( "_" ) )
9,886
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L186-L198
[ "def", "ephemerals_info", "(", "self", ",", "hosts", ")", ":", "info_by_path", ",", "info_by_id", "=", "{", "}", ",", "{", "}", "for", "server_endpoint", ",", "dump", "in", "self", ".", "dump_by_server", "(", "hosts", ")", ".", "items", "(", ")", ":", "server_ip", ",", "server_port", "=", "server_endpoint", "sid", "=", "None", "for", "line", "in", "dump", ".", "split", "(", "\"\\n\"", ")", ":", "mat", "=", "self", ".", "SESSION_REGEX", ".", "match", "(", "line", ")", "if", "mat", ":", "sid", "=", "mat", ".", "group", "(", "1", ")", "continue", "mat", "=", "self", ".", "PATH_REGEX", ".", "match", "(", "line", ")", "if", "mat", ":", "info", "=", "info_by_id", ".", "get", "(", "sid", ",", "None", ")", "if", "info", "is", "None", ":", "info", "=", "info_by_id", "[", "sid", "]", "=", "ClientInfo", "(", "sid", ")", "info_by_path", "[", "mat", ".", "group", "(", "1", ")", "]", "=", "info", "continue", "mat", "=", "self", ".", "IP_PORT_REGEX", ".", "match", "(", "line", ")", "if", "mat", ":", "ip", ",", "port", ",", "sid", "=", "mat", ".", "groups", "(", ")", "if", "sid", "not", "in", "info_by_id", ":", "continue", "info_by_id", "[", "sid", "]", "(", "ip", ",", "int", "(", "port", ")", ",", "server_ip", ",", "server_port", ")", "return", "info_by_path" ]
Colorize text while stripping nested ANSI color sequences .
def colored ( text , color = None , on_color = None , attrs = None , ansi_code = None ) : if os . getenv ( 'ANSI_COLORS_DISABLED' ) is None : if ansi_code is not None : return "\033[38;5;{}m{}\033[0m" . format ( ansi_code , text ) fmt_str = '\033[%dm%s' if color is not None : text = re . sub ( COLORS_RE + '(.*?)' + RESET_RE , r'\1' , text ) text = fmt_str % ( COLORS [ color ] , text ) if on_color is not None : text = re . sub ( HIGHLIGHTS_RE + '(.*?)' + RESET_RE , r'\1' , text ) text = fmt_str % ( HIGHLIGHTS [ on_color ] , text ) if attrs is not None : text = re . sub ( ATTRIBUTES_RE + '(.*?)' + RESET_RE , r'\1' , text ) for attr in attrs : text = fmt_str % ( ATTRIBUTES [ attr ] , text ) return text + RESET else : return text
9,887
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L247-L278
[ "def", "upload_all_books", "(", "book_id_start", ",", "book_id_end", ",", "rdf_library", "=", "None", ")", ":", "# TODO refactor appname into variable", "logger", ".", "info", "(", "\"starting a gitberg mass upload: {0} -> {1}\"", ".", "format", "(", "book_id_start", ",", "book_id_end", ")", ")", "for", "book_id", "in", "range", "(", "int", "(", "book_id_start", ")", ",", "int", "(", "book_id_end", ")", "+", "1", ")", ":", "cache", "=", "{", "}", "errors", "=", "0", "try", ":", "if", "int", "(", "book_id", ")", "in", "missing_pgid", ":", "print", "(", "u'missing\\t{}'", ".", "format", "(", "book_id", ")", ")", "continue", "upload_book", "(", "book_id", ",", "rdf_library", "=", "rdf_library", ",", "cache", "=", "cache", ")", "except", "Exception", "as", "e", ":", "print", "(", "u'error\\t{}'", ".", "format", "(", "book_id", ")", ")", "logger", ".", "error", "(", "u\"Error processing: {}\\r{}\"", ".", "format", "(", "book_id", ",", "e", ")", ")", "errors", "+=", "1", "if", "errors", ">", "10", ":", "print", "(", "'error limit reached!'", ")", "break" ]
Pad a matrix with zeros on all sides .
def zero_pad ( m , n = 1 ) : return np . pad ( m , ( n , n ) , mode = 'constant' , constant_values = [ 0 ] )
9,888
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L332-L334
[ "def", "_updateVariantAnnotationSets", "(", "self", ",", "variantFile", ",", "dataUrl", ")", ":", "# TODO check the consistency of this between VCF files.", "if", "not", "self", ".", "isAnnotated", "(", ")", ":", "annotationType", "=", "None", "for", "record", "in", "variantFile", ".", "header", ".", "records", ":", "if", "record", ".", "type", "==", "\"GENERIC\"", ":", "if", "record", ".", "key", "==", "\"SnpEffVersion\"", ":", "annotationType", "=", "ANNOTATIONS_SNPEFF", "elif", "record", ".", "key", "==", "\"VEP\"", ":", "version", "=", "record", ".", "value", ".", "split", "(", ")", "[", "0", "]", "# TODO we need _much_ more sophisticated processing", "# of VEP versions here. When do they become", "# incompatible?", "if", "version", "==", "\"v82\"", ":", "annotationType", "=", "ANNOTATIONS_VEP_V82", "elif", "version", "==", "\"v77\"", ":", "annotationType", "=", "ANNOTATIONS_VEP_V77", "else", ":", "# TODO raise a proper typed exception there with", "# the file name as an argument.", "raise", "ValueError", "(", "\"Unsupported VEP version {} in '{}'\"", ".", "format", "(", "version", ",", "dataUrl", ")", ")", "if", "annotationType", "is", "None", ":", "infoKeys", "=", "variantFile", ".", "header", ".", "info", ".", "keys", "(", ")", "if", "'CSQ'", "in", "infoKeys", "or", "'ANN'", "in", "infoKeys", ":", "# TODO likewise, we want a properly typed exception that", "# we can throw back to the repo manager UI and display", "# as an import error.", "raise", "ValueError", "(", "\"Unsupported annotations in '{}'\"", ".", "format", "(", "dataUrl", ")", ")", "if", "annotationType", "is", "not", "None", ":", "vas", "=", "HtslibVariantAnnotationSet", "(", "self", ",", "self", ".", "getLocalId", "(", ")", ")", "vas", ".", "populateFromFile", "(", "variantFile", ",", "annotationType", ")", "self", ".", "addVariantAnnotationSet", "(", "vas", ")" ]
Checks if the terminal supports color .
def supports_color ( ) : if isnotebook ( ) : return True supported_platform = sys . platform != 'win32' or 'ANSICON' in os . environ is_a_tty = hasattr ( sys . stdout , 'isatty' ) and sys . stdout . isatty ( ) if not supported_platform or not is_a_tty : return False return True
9,889
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L356-L366
[ "def", "returnJobReqs", "(", "self", ",", "jobReqs", ")", ":", "# Since we are only reading this job's specific values from the state file, we don't", "# need a lock", "jobState", "=", "self", ".", "_JobState", "(", "self", ".", "_CacheState", ".", "_load", "(", "self", ".", "cacheStateFile", ")", ".", "jobState", "[", "self", ".", "jobID", "]", ")", "for", "x", "in", "list", "(", "jobState", ".", "jobSpecificFiles", ".", "keys", "(", ")", ")", ":", "self", ".", "deleteLocalFile", "(", "x", ")", "with", "self", ".", "_CacheState", ".", "open", "(", "self", ")", "as", "cacheInfo", ":", "cacheInfo", ".", "sigmaJob", "-=", "jobReqs" ]
Retrieves the Jpp revision number
def get_jpp_revision ( via_command = 'JPrint' ) : try : output = subprocess . check_output ( [ via_command , '-v' ] , stderr = subprocess . STDOUT ) except subprocess . CalledProcessError as e : if e . returncode == 1 : output = e . output else : return None except OSError : return None revision = output . decode ( ) . split ( '\n' ) [ 0 ] . split ( ) [ 1 ] . strip ( ) return revision
9,890
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L369-L382
[ "def", "spawn", "(", "self", ",", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# type: (Callable[..., Any], *Any, **Any) -> Spawned", "assert", "self", ".", "state", "!=", "STOPPED", ",", "\"Can't spawn when process stopped\"", "spawned", "=", "Spawned", "(", "function", ",", "args", ",", "kwargs", ")", "self", ".", "_spawned", ".", "append", "(", "spawned", ")", "self", ".", "_spawn_count", "+=", "1", "# Filter out things that are ready to avoid memory leaks", "if", "self", ".", "_spawn_count", ">", "SPAWN_CLEAR_COUNT", ":", "self", ".", "_clear_spawn_list", "(", ")", "return", "spawned" ]
LRU cache decorator with timeout .
def timed_cache ( * * timed_cache_kwargs ) : def _wrapper ( f ) : maxsize = timed_cache_kwargs . pop ( 'maxsize' , 128 ) typed = timed_cache_kwargs . pop ( 'typed' , False ) update_delta = timedelta ( * * timed_cache_kwargs ) # nonlocal workaround to support Python 2 # https://technotroph.wordpress.com/2012/10/01/python-closures-and-the-python-2-7-nonlocal-solution/ d = { 'next_update' : datetime . utcnow ( ) - update_delta } try : f = functools . lru_cache ( maxsize = maxsize , typed = typed ) ( f ) except AttributeError : print ( "LRU caching is not available in Pyton 2.7, " "this will have no effect!" ) pass @ functools . wraps ( f ) def _wrapped ( * args , * * kwargs ) : now = datetime . utcnow ( ) if now >= d [ 'next_update' ] : try : f . cache_clear ( ) except AttributeError : pass d [ 'next_update' ] = now + update_delta return f ( * args , * * kwargs ) return _wrapped return _wrapper
9,891
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L385-L430
[ "def", "convert_old_publication_info_to_new", "(", "publication_infos", ")", ":", "result", "=", "[", "]", "hidden_publication_infos", "=", "[", "]", "for", "publication_info", "in", "publication_infos", ":", "_publication_info", "=", "copy", ".", "deepcopy", "(", "publication_info", ")", "journal_title", "=", "_publication_info", ".", "get", "(", "'journal_title'", ")", "try", ":", "journal_title", "=", "_JOURNALS_RENAMED_OLD_TO_NEW", "[", "journal_title", "]", "_publication_info", "[", "'journal_title'", "]", "=", "journal_title", "result", ".", "append", "(", "_publication_info", ")", "continue", "except", "KeyError", ":", "pass", "journal_volume", "=", "_publication_info", ".", "get", "(", "'journal_volume'", ")", "if", "journal_title", "in", "_JOURNALS_WITH_YEAR_ADDED_TO_VOLUME", "and", "journal_volume", "and", "len", "(", "journal_volume", ")", "==", "4", ":", "try", ":", "was_last_century", "=", "int", "(", "journal_volume", "[", ":", "2", "]", ")", ">", "50", "except", "ValueError", ":", "pass", "else", ":", "_publication_info", "[", "'year'", "]", "=", "int", "(", "'19'", "+", "journal_volume", "[", ":", "2", "]", "if", "was_last_century", "else", "'20'", "+", "journal_volume", "[", ":", "2", "]", ")", "_publication_info", "[", "'journal_volume'", "]", "=", "journal_volume", "[", "2", ":", "]", "result", ".", "append", "(", "_publication_info", ")", "continue", "if", "journal_title", "and", "journal_volume", "and", "journal_title", ".", "lower", "(", ")", "not", "in", "JOURNALS_IGNORED_IN_OLD_TO_NEW", ":", "volume_starts_with_a_letter", "=", "_RE_VOLUME_STARTS_WITH_A_LETTER", ".", "match", "(", "journal_volume", ")", "volume_ends_with_a_letter", "=", "_RE_VOLUME_ENDS_WITH_A_LETTER", ".", "match", "(", "journal_volume", ")", "match", "=", "volume_starts_with_a_letter", "or", "volume_ends_with_a_letter", "if", "match", ":", "_publication_info", ".", "pop", "(", "'journal_record'", ",", "None", ")", "if", "journal_title", "in", 
"_JOURNALS_RENAMED_OLD_TO_NEW", ".", "values", "(", ")", ":", "_publication_info", "[", "'journal_title'", "]", "=", "journal_title", "else", ":", "_publication_info", "[", "'journal_title'", "]", "=", "''", ".", "join", "(", "[", "journal_title", ",", "''", "if", "journal_title", ".", "endswith", "(", "'.'", ")", "else", "' '", ",", "match", ".", "group", "(", "'letter'", ")", ",", "]", ")", "_publication_info", "[", "'journal_volume'", "]", "=", "match", ".", "group", "(", "'volume'", ")", "hidden", "=", "_publication_info", ".", "pop", "(", "'hidden'", ",", "None", ")", "if", "hidden", ":", "hidden_publication_infos", ".", "append", "(", "_publication_info", ")", "else", ":", "result", ".", "append", "(", "_publication_info", ")", "for", "publication_info", "in", "hidden_publication_infos", ":", "if", "publication_info", "not", "in", "result", ":", "publication_info", "[", "'hidden'", "]", "=", "True", "result", ".", "append", "(", "publication_info", ")", "return", "result" ]
Finds the given point in the profile or adds it in sorted z order .
def _get_point ( self , profile , point ) : cur_points_z = [ p . location . z for p in profile . elements ] try : cur_idx = cur_points_z . index ( point . z ) return profile . elements [ cur_idx ] except ValueError : new_idx = bisect_left ( cur_points_z , point . z ) new_point = Point ( ) new_point . location = sPoint ( point ) new_point . time = profile . time profile . elements . insert ( new_idx , new_point ) return new_point
9,892
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L74-L88
[ "def", "clean_content", "(", "content", ")", ":", "for", "pattern", ",", "subst", "in", "_CLEAN_PATTERNS", ":", "content", "=", "pattern", ".", "sub", "(", "subst", ",", "content", ")", "return", "content" ]
Parses a general DataArray .
def _parse_data_array ( self , data_array ) : # decimalSeparator = data_array.encoding.decimalSeparator tokenSeparator = data_array . encoding . tokenSeparator blockSeparator = data_array . encoding . blockSeparator # collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces data_values = data_array . values lines = [ x for x in data_values . split ( blockSeparator ) if x != "" ] ret_val = [ ] for row in lines : values = row . split ( tokenSeparator ) ret_val . append ( [ float ( v ) if " " not in v . strip ( ) else [ float ( vv ) for vv in v . split ( ) ] for v in values ] ) # transpose into columns return [ list ( x ) for x in zip ( * ret_val ) ]
9,893
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L280-L306
[ "def", "register_vm", "(", "datacenter", ",", "name", ",", "vmx_path", ",", "resourcepool_object", ",", "host_object", "=", "None", ")", ":", "try", ":", "if", "host_object", ":", "task", "=", "datacenter", ".", "vmFolder", ".", "RegisterVM_Task", "(", "path", "=", "vmx_path", ",", "name", "=", "name", ",", "asTemplate", "=", "False", ",", "host", "=", "host_object", ",", "pool", "=", "resourcepool_object", ")", "else", ":", "task", "=", "datacenter", ".", "vmFolder", ".", "RegisterVM_Task", "(", "path", "=", "vmx_path", ",", "name", "=", "name", ",", "asTemplate", "=", "False", ",", "pool", "=", "resourcepool_object", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "try", ":", "vm_ref", "=", "wait_for_task", "(", "task", ",", "name", ",", "'RegisterVM Task'", ")", "except", "salt", ".", "exceptions", ".", "VMwareFileNotFoundError", "as", "exc", ":", "raise", "salt", ".", "exceptions", ".", "VMwareVmRegisterError", "(", "'An error occurred during registration operation, the '", "'configuration file was not found: {0}'", ".", "format", "(", "exc", ")", ")", "return", "vm_ref" ]
Parses values via sensor data record passed in . Returns parsed values AND how many items it consumed out of rem_values .
def _parse_sensor_record ( self , sensor_data_rec , sensor_info , rem_values ) : val_idx = 0 # @TODO seems there is only a single field in each of these assert len ( sensor_data_rec . field ) == 1 sensor_data_array = sensor_data_rec . field [ 0 ] . content # there is probably not going to be a count in the def, it'll be in the data count = None count_text = sensor_data_array . elementCount . text if count_text : count = int ( count_text . strip ( ) ) if not count : count = int ( rem_values [ val_idx ] ) val_idx += 1 parsed = [ ] for recnum in range ( count ) : cur = [ ] for f in sensor_data_array . elementType . field : cur_val = rem_values [ val_idx ] val_idx += 1 m = Member ( name = f . name , standard = f . content . definition ) if hasattr ( f . content , "uom" ) : m [ "units" ] = f . content . uom try : m [ "value" ] = float ( cur_val ) except ValueError : m [ "value" ] = cur_val if len ( f . quality ) : m [ "quality" ] = [ ] for qual in f . quality : cur_qual = rem_values [ val_idx ] val_idx += 1 # @TODO check this against constraints m [ "quality" ] . append ( cur_qual ) cur . append ( m ) parsed . append ( cur ) return parsed , val_idx
9,894
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L393-L446
[ "def", "backup_database", "(", "args", ")", ":", "username", "=", "args", ".", "get", "(", "'<user>'", ")", "password", "=", "args", ".", "get", "(", "'<password>'", ")", "database", "=", "args", "[", "'<database>'", "]", "host", "=", "args", ".", "get", "(", "'<host>'", ")", "or", "'127.0.0.1'", "path", "=", "args", ".", "get", "(", "'--path'", ")", "or", "os", ".", "getcwd", "(", ")", "s3", "=", "args", ".", "get", "(", "'--upload_s3'", ")", "glacier", "=", "args", ".", "get", "(", "'--upload_glacier'", ")", "dropbox", "=", "args", ".", "get", "(", "'--upload_dropbox'", ")", "swift", "=", "args", ".", "get", "(", "'--upload_swift'", ")", "encrypt", "=", "args", ".", "get", "(", "'--encrypt'", ")", "or", "'Y'", "if", "not", "database", ":", "raise", "SystemExit", "(", "_error_codes", ".", "get", "(", "101", ")", ")", "if", "path", "and", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "raise", "SystemExit", "(", "_error_codes", ".", "get", "(", "105", ")", ")", "query", "=", "'mongodump -d {database} --host {host} '", "if", "username", ":", "query", "+=", "'-u {username} '", "if", "password", ":", "query", "+=", "'-p {password} '", "if", "path", ":", "query", "+=", "'-o {path}/dump'", "local", "(", "query", ".", "format", "(", "username", "=", "username", ",", "password", "=", "password", ",", "database", "=", "database", ",", "host", "=", "host", ",", "path", "=", "path", ")", ")", "compress_file", "=", "compress_folder_dump", "(", "normalize_path", "(", "path", ")", "+", "'dump'", ",", "normalize_path", "(", "path", ")", ")", "shutil", ".", "rmtree", "(", "normalize_path", "(", "path", ")", "+", "'dump'", ")", "optional_actions", "(", "encrypt", ",", "path", ",", "compress_file", ",", "s3", "=", "s3", ",", "glacier", "=", "glacier", ",", "dropbox", "=", "dropbox", ",", "swift", "=", "swift", ")" ]
Execute the engine - currently simple executes all workflows .
def execute ( self , debug = False ) : if debug : # Set some default times for execution (debugging) start_time = datetime ( year = 2016 , month = 10 , day = 19 , hour = 12 , minute = 28 , tzinfo = UTC ) duration = timedelta ( seconds = 5 ) end_time = start_time + duration relative_interval = RelativeTimeInterval ( 0 , 0 ) time_interval = TimeInterval ( start_time , end_time ) # workflow_id = "lda_localisation_model_predict" else : duration = 0 # not needed relative_interval = self . hyperstream . config . online_engine . interval time_interval = relative_interval . absolute ( utcnow ( ) ) for _ in range ( self . hyperstream . config . online_engine . iterations ) : if not debug : # if this takes more than x minutes, kill myself signal . alarm ( self . hyperstream . config . online_engine . alarm ) logging . info ( "Online engine starting up." ) # self.hyperstream.workflow_manager.set_requested_intervals(workflow_id, TimeIntervals([time_interval])) self . hyperstream . workflow_manager . set_all_requested_intervals ( TimeIntervals ( [ time_interval ] ) ) self . hyperstream . workflow_manager . execute_all ( ) logging . info ( "Online engine shutting down." ) logging . info ( "" ) sleep ( self . hyperstream . config . online_engine . sleep ) if debug : time_interval += duration else : time_interval = TimeInterval ( time_interval . end , utcnow ( ) + timedelta ( seconds = relative_interval . end ) )
9,895
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/online_engine.py#L47-L85
[ "def", "handlePortfolio", "(", "self", ",", "msg", ")", ":", "# log handler msg", "self", ".", "log_msg", "(", "\"portfolio\"", ",", "msg", ")", "# contract identifier", "contract_tuple", "=", "self", ".", "contract_to_tuple", "(", "msg", ".", "contract", ")", "contractString", "=", "self", ".", "contractString", "(", "contract_tuple", ")", "# try creating the contract", "self", ".", "registerContract", "(", "msg", ".", "contract", ")", "# new account?", "if", "msg", ".", "accountName", "not", "in", "self", ".", "_portfolios", ".", "keys", "(", ")", ":", "self", ".", "_portfolios", "[", "msg", ".", "accountName", "]", "=", "{", "}", "self", ".", "_portfolios", "[", "msg", ".", "accountName", "]", "[", "contractString", "]", "=", "{", "\"symbol\"", ":", "contractString", ",", "\"position\"", ":", "int", "(", "msg", ".", "position", ")", ",", "\"marketPrice\"", ":", "float", "(", "msg", ".", "marketPrice", ")", ",", "\"marketValue\"", ":", "float", "(", "msg", ".", "marketValue", ")", ",", "\"averageCost\"", ":", "float", "(", "msg", ".", "averageCost", ")", ",", "\"unrealizedPNL\"", ":", "float", "(", "msg", ".", "unrealizedPNL", ")", ",", "\"realizedPNL\"", ":", "float", "(", "msg", ".", "realizedPNL", ")", ",", "\"totalPNL\"", ":", "float", "(", "msg", ".", "realizedPNL", ")", "+", "float", "(", "msg", ".", "unrealizedPNL", ")", ",", "\"account\"", ":", "msg", ".", "accountName", "}", "# fire callback", "self", ".", "ibCallback", "(", "caller", "=", "\"handlePortfolio\"", ",", "msg", "=", "msg", ")" ]
Show the movie metadata .
def show ( movie ) : for key , value in sorted ( movie . iteritems ( ) , cmp = metadata_sorter , key = lambda x : x [ 0 ] ) : if isinstance ( value , list ) : if not value : continue other = value [ 1 : ] value = value [ 0 ] else : other = [ ] printer . p ( '<b>{key}</b>: {value}' , key = key , value = value ) for value in other : printer . p ( '{pad}{value}' , value = value , pad = ' ' * ( len ( key ) + 2 ) )
9,896
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/show.py#L11-L24
[ "def", "get_commensurate_points_in_integers", "(", "supercell_matrix", ")", ":", "smat", "=", "np", ".", "array", "(", "supercell_matrix", ",", "dtype", "=", "int", ")", "snf", "=", "SNF3x3", "(", "smat", ".", "T", ")", "snf", ".", "run", "(", ")", "D", "=", "snf", ".", "A", ".", "diagonal", "(", ")", "b", ",", "c", ",", "a", "=", "np", ".", "meshgrid", "(", "range", "(", "D", "[", "1", "]", ")", ",", "range", "(", "D", "[", "2", "]", ")", ",", "range", "(", "D", "[", "0", "]", ")", ")", "lattice_points", "=", "np", ".", "dot", "(", "np", ".", "c_", "[", "a", ".", "ravel", "(", ")", "*", "D", "[", "1", "]", "*", "D", "[", "2", "]", ",", "b", ".", "ravel", "(", ")", "*", "D", "[", "0", "]", "*", "D", "[", "2", "]", ",", "c", ".", "ravel", "(", ")", "*", "D", "[", "0", "]", "*", "D", "[", "1", "]", "]", ",", "snf", ".", "Q", ".", "T", ")", "lattice_points", "=", "np", ".", "array", "(", "lattice_points", "%", "np", ".", "prod", "(", "D", ")", ",", "dtype", "=", "'intc'", ",", "order", "=", "'C'", ")", "return", "lattice_points" ]
Sort metadata keys by priority .
def metadata_sorter ( x , y ) : if x == y : return 0 if x in METADATA_SORTER_FIRST and y in METADATA_SORTER_FIRST : return - 1 if METADATA_SORTER_FIRST . index ( x ) < METADATA_SORTER_FIRST . index ( y ) else 1 elif x in METADATA_SORTER_FIRST : return - 1 elif y in METADATA_SORTER_FIRST : return 1 else : if x . startswith ( '_' ) and y . startswith ( '_' ) : return cmp ( x [ 1 : ] , y [ 1 : ] ) elif x . startswith ( '_' ) : return 1 elif y . startswith ( '_' ) : return - 1 else : return cmp ( x , y )
9,897
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/show.py#L27-L46
[ "def", "render_nocache", "(", "self", ")", ":", "tmpl", "=", "template", ".", "Template", "(", "''", ".", "join", "(", "[", "# start by loading the cache library", "template", ".", "BLOCK_TAG_START", ",", "'load %s'", "%", "self", ".", "get_templatetag_module", "(", ")", ",", "template", ".", "BLOCK_TAG_END", ",", "# and surround the cached template by \"raw\" tags", "self", ".", "RAW_TOKEN_START", ",", "self", ".", "content", ",", "self", ".", "RAW_TOKEN_END", ",", "]", ")", ")", "return", "tmpl", ".", "render", "(", "self", ".", "context", ")" ]
parse lines from the fileinput and send them to the log_parsers
def parse_lines ( log_parsers , fileinp ) : while 1 : logentry = fileinp . readline ( ) if not logentry : break elif not logentry . rstrip ( ) : continue # skip newlines processed = False for lp in log_parsers : if lp . grok ( logentry ) : processed = True if not processed : # error: none of the logparsers worked on the line logger = logging . getLogger ( 'logparser' ) logger . warning ( #'Could not parse line %s, in file %s >>>%s<<<', #fileinp.lineno(), fileinp.filename(), line.rstrip()) 'Could not parse line >>>%s<<<' , logentry . rstrip ( ) ) print ( 'Could not parse line >>>%s<<<' % logentry . rstrip ( ) )
9,898
https://github.com/finklabs/korg/blob/e931a673ce4bc79cdf26cb4f697fa23fa8a72e4f/korg/korg.py#L24-L44
[ "def", "select_between_exonic_splice_site_and_alternate_effect", "(", "effect", ")", ":", "if", "effect", ".", "__class__", "is", "not", "ExonicSpliceSite", ":", "return", "effect", "if", "effect", ".", "alternate_effect", "is", "None", ":", "return", "effect", "splice_priority", "=", "effect_priority", "(", "effect", ")", "alternate_priority", "=", "effect_priority", "(", "effect", ".", "alternate_effect", ")", "if", "splice_priority", ">", "alternate_priority", ":", "return", "effect", "else", ":", "return", "effect", ".", "alternate_effect" ]
Load commands of this profile .
def load_commands ( self , parser ) : entrypoints = self . _get_entrypoints ( ) already_loaded = set ( ) for entrypoint in entrypoints : if entrypoint . name not in already_loaded : command_class = entrypoint . load ( ) command_class ( entrypoint . name , self , parser ) . prepare ( ) already_loaded . add ( entrypoint . name )
9,899
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/profiles/__init__.py#L19-L32
[ "def", "init_attachment_cache", "(", "self", ")", ":", "if", "self", ".", "request", ".", "method", "==", "'GET'", ":", "# Invalidates previous attachments", "attachments_cache", ".", "delete", "(", "self", ".", "get_attachments_cache_key", "(", "self", ".", "request", ")", ")", "return", "# Try to restore previous uploaded attachments if applicable", "attachments_cache_key", "=", "self", ".", "get_attachments_cache_key", "(", "self", ".", "request", ")", "restored_attachments_dict", "=", "attachments_cache", ".", "get", "(", "attachments_cache_key", ")", "if", "restored_attachments_dict", ":", "restored_attachments_dict", ".", "update", "(", "self", ".", "request", ".", "FILES", ")", "self", ".", "request", ".", "_files", "=", "restored_attachments_dict", "# Updates the attachment cache if files are available", "if", "self", ".", "request", ".", "FILES", ":", "attachments_cache", ".", "set", "(", "attachments_cache_key", ",", "self", ".", "request", ".", "FILES", ")" ]