query stringlengths 5 1.23k | positive stringlengths 53 15.2k | id_ int64 0 252k | task_name stringlengths 87 242 | negative listlengths 20 553 |
|---|---|---|---|---|
Copy file in tree show a progress bar during operations and return the sha1 sum of copied file . | def copy ( tree , source_filename ) : #_, ext = os.path.splitext(source_filename) filehash = sha1 ( ) with printer . progress ( os . path . getsize ( source_filename ) ) as update : with open ( source_filename , 'rb' ) as fsource : with NamedTemporaryFile ( dir = os . path . join ( tree , '.kolekto' , 'movies' ) , delete = False ) as fdestination : # Copy the source into the temporary destination: while True : buf = fsource . read ( 10 * 1024 ) if not buf : break filehash . update ( buf ) fdestination . write ( buf ) update ( len ( buf ) ) # Rename the file to its final name or raise an error if # the file already exists: dest = os . path . join ( tree , '.kolekto' , 'movies' , filehash . hexdigest ( ) ) if os . path . exists ( dest ) : raise IOError ( 'This file already exists in tree (%s)' % filehash . hexdigest ( ) ) else : os . rename ( fdestination . name , dest ) return filehash . hexdigest ( ) | 9,900 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/importer.py#L36-L60 | [
"def",
"vehicle_registration_code",
"(",
"self",
",",
"locale",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"str",
":",
"if",
"locale",
":",
"return",
"VRC_BY_LOCALES",
"[",
"locale",
"]",
"return",
"self",
".",
"random",
".",
"choice",
"(",
"VR_CODES",
")"
] |
List attachment for the specified fullname . | def list_attachments ( fullname ) : parent , filename = os . path . split ( fullname ) filename_without_ext , ext = os . path . splitext ( filename ) attachments = [ ] for found_filename in os . listdir ( parent ) : found_filename_without_ext , _ = os . path . splitext ( found_filename ) if filename_without_ext == found_filename_without_ext and found_filename != filename : attachments . append ( os . path . join ( parent , found_filename ) ) return attachments | 9,901 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/importer.py#L88-L98 | [
"def",
"_copy_database_data_clientside",
"(",
"self",
",",
"tables",
",",
"source",
",",
"destination",
")",
":",
"# Retrieve database rows",
"rows",
"=",
"self",
".",
"get_database_rows",
"(",
"tables",
",",
"source",
")",
"# Retrieve database columns",
"cols",
"=",
"self",
".",
"get_database_columns",
"(",
"tables",
",",
"source",
")",
"# Validate rows and columns",
"for",
"r",
"in",
"list",
"(",
"rows",
".",
"keys",
"(",
")",
")",
":",
"assert",
"r",
"in",
"tables",
"for",
"c",
"in",
"list",
"(",
"cols",
".",
"keys",
"(",
")",
")",
":",
"assert",
"c",
"in",
"tables",
"# Change database to destination",
"self",
".",
"change_db",
"(",
"destination",
")",
"# Get insert queries",
"insert_queries",
"=",
"self",
".",
"_get_insert_commands",
"(",
"rows",
",",
"cols",
")",
"# Execute insert queries",
"self",
".",
"_execute_insert_commands",
"(",
"insert_queries",
")"
] |
Write to the stream | def write_to_stream ( self , stream_id , data , sandbox = None ) : if sandbox is not None : raise NotImplementedError if stream_id not in self . streams : raise StreamNotFoundError ( "Stream with id '{}' does not exist" . format ( stream_id ) ) writer = self . get_stream_writer ( self . streams [ stream_id ] ) if isinstance ( data , StreamInstance ) : data = [ data ] for instance in data : if not isinstance ( instance , StreamInstance ) : raise ValueError ( "Expected StreamInstance, got {}" . format ( str ( type ( instance ) ) ) ) writer ( instance ) | 9,902 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/assets_channel.py#L82-L107 | [
"def",
"read_file",
"(",
"filename",
")",
":",
"hits",
"=",
"glob",
".",
"glob",
"(",
"'**/{}'",
".",
"format",
"(",
"filename",
")",
",",
"recursive",
"=",
"True",
")",
"if",
"not",
"len",
"(",
"hits",
")",
":",
"pf",
".",
"debug",
"(",
"'No file \"{}\" found.'",
".",
"format",
"(",
"filename",
")",
")",
"return",
"''",
"elif",
"len",
"(",
"hits",
")",
">",
"1",
":",
"pf",
".",
"debug",
"(",
"'File pattern \"{}\" ambiguous. Using first.'",
".",
"format",
"(",
"filename",
")",
")",
"with",
"open",
"(",
"hits",
"[",
"0",
"]",
",",
"'r'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
] |
Process the start of the top - level xtvd node | def _startXTVDNode ( self , name , attrs ) : schemaVersion = attrs . get ( 'schemaVersion' ) validFrom = self . _parseDateTime ( attrs . get ( 'from' ) ) validTo = self . _parseDateTime ( attrs . get ( 'to' ) ) self . _progress . printMsg ( 'Parsing version %s data from %s to %s' % ( schemaVersion , validFrom . strftime ( '%Y/%m/%d' ) , validTo . strftime ( '%Y/%m/%d' ) ) ) | 9,903 | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L68-L77 | [
"def",
"get_connection_ip_list",
"(",
"as_wmi_format",
"=",
"False",
",",
"server",
"=",
"_DEFAULT_SERVER",
")",
":",
"ret",
"=",
"dict",
"(",
")",
"setting",
"=",
"'IPGrant'",
"reg_separator",
"=",
"r',\\s*'",
"if",
"as_wmi_format",
":",
"ret",
"=",
"list",
"(",
")",
"addresses",
"=",
"_get_wmi_setting",
"(",
"'IIsIPSecuritySetting'",
",",
"setting",
",",
"server",
")",
"# WMI returns the addresses as a tuple of unicode strings, each representing",
"# an address/subnet pair. Remove extra spaces that may be present.",
"for",
"unnormalized_address",
"in",
"addresses",
":",
"ip_address",
",",
"subnet",
"=",
"re",
".",
"split",
"(",
"reg_separator",
",",
"unnormalized_address",
")",
"if",
"as_wmi_format",
":",
"ret",
".",
"append",
"(",
"'{0}, {1}'",
".",
"format",
"(",
"ip_address",
",",
"subnet",
")",
")",
"else",
":",
"ret",
"[",
"ip_address",
"]",
"=",
"subnet",
"if",
"not",
"ret",
":",
"_LOG",
".",
"debug",
"(",
"'%s is empty.'",
",",
"setting",
")",
"return",
"ret"
] |
Callback run at the start of each XML element | def startElement ( self , name , attrs ) : self . _contextStack . append ( self . _context ) self . _contentList = [ ] if name in self . _statusDict : self . _itemTag , itemType = self . _statusDict [ name ] self . _progress . startItem ( itemType ) elif name == self . _itemTag : self . _error = False self . _progress . newItem ( ) try : if self . _context == 'root' : if name == 'xtvd' : self . _context = 'xtvd' self . _startXTVDNode ( name , attrs ) elif self . _context == 'xtvd' : self . _context = name elif self . _context == 'stations' : self . _startStationsNode ( name , attrs ) elif self . _context == 'lineups' : self . _startLineupsNode ( name , attrs ) elif self . _context == 'schedules' : self . _startSchedulesNode ( name , attrs ) elif self . _context == 'programs' : self . _startProgramsNode ( name , attrs ) elif self . _context == 'productionCrew' : self . _startProductionCrewNode ( name , attrs ) elif self . _context == 'genres' : self . _startGenresNode ( name , attrs ) except Exception , e : self . _error = True self . _progress . printMsg ( str ( e ) , error = True ) | 9,904 | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L289-L323 | [
"def",
"create_timeseries",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"ts",
"=",
"predix",
".",
"admin",
".",
"timeseries",
".",
"TimeSeries",
"(",
"*",
"*",
"kwargs",
")",
"ts",
".",
"create",
"(",
")",
"client_id",
"=",
"self",
".",
"get_client_id",
"(",
")",
"if",
"client_id",
":",
"ts",
".",
"grant_client",
"(",
"client_id",
")",
"ts",
".",
"add_to_manifest",
"(",
"self",
")",
"return",
"ts"
] |
Callback run at the end of each XML element | def endElement ( self , name ) : content = '' . join ( self . _contentList ) if name == 'xtvd' : self . _progress . endItems ( ) else : try : if self . _context == 'stations' : self . _endStationsNode ( name , content ) elif self . _context == 'lineups' : self . _endLineupsNode ( name , content ) elif self . _context == 'schedules' : self . _endSchedulesNode ( name , content ) elif self . _context == 'programs' : self . _endProgramsNode ( name , content ) elif self . _context == 'productionCrew' : self . _endProductionCrewNode ( name , content ) elif self . _context == 'genres' : self . _endGenresNode ( name , content ) except Exception , e : self . _error = True self . _progress . printMsg ( str ( e ) , error = True ) self . _context = self . _contextStack . pop ( ) | 9,905 | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L330-L355 | [
"def",
"create_timeseries",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"ts",
"=",
"predix",
".",
"admin",
".",
"timeseries",
".",
"TimeSeries",
"(",
"*",
"*",
"kwargs",
")",
"ts",
".",
"create",
"(",
")",
"client_id",
"=",
"self",
".",
"get_client_id",
"(",
")",
"if",
"client_id",
":",
"ts",
".",
"grant_client",
"(",
"client_id",
")",
"ts",
".",
"add_to_manifest",
"(",
"self",
")",
"return",
"ts"
] |
Callback run when a recoverable parsing error occurs | def error ( self , msg ) : self . _error = True self . _progress . printMsg ( 'XML parse error: %s' % msg , error = True ) | 9,906 | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L357-L361 | [
"def",
"_revcheck",
"(",
"self",
",",
"func",
",",
"version",
")",
":",
"current",
"=",
"self",
".",
"_revint",
"(",
"self",
".",
"version",
")",
"check",
"=",
"self",
".",
"_revint",
"(",
"version",
")",
"if",
"func",
"in",
"(",
"'lt'",
",",
"'<='",
",",
")",
":",
"return",
"check",
"<=",
"current",
"elif",
"func",
"in",
"(",
"'gt'",
",",
"'>='",
")",
":",
"return",
"check",
">=",
"current",
"elif",
"func",
"in",
"(",
"'eq'",
",",
"'='",
",",
"'equals'",
")",
":",
"return",
"check",
"==",
"current",
"else",
":",
"return",
"False"
] |
Format the input string using each possible combination of lists in the provided environment . Returns a list of formated strings . | def format_all ( format_string , env ) : prepared_env = parse_pattern ( format_string , env , lambda x , y : [ FormatWrapper ( x , z ) for z in y ] ) # Generate each possible combination, format the string with it and yield # the resulting string: for field_values in product ( * prepared_env . itervalues ( ) ) : format_env = dict ( izip ( prepared_env . iterkeys ( ) , field_values ) ) yield format_string . format ( * * format_env ) | 9,907 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/link.py#L26-L36 | [
"def",
"disconnect_controller",
"(",
"self",
",",
"vid",
",",
"pid",
",",
"serial",
")",
":",
"self",
".",
"lib",
".",
"tdDisconnectTellStickController",
"(",
"vid",
",",
"pid",
",",
"serial",
")"
] |
Initialization before prompting user for commands . | def preloop ( self ) : Cmd . preloop ( self ) # sets up command completion self . _hist = [ ] # No history yet self . _locals = { } # Initialize execution namespace for user self . _globals = { } | 9,908 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/rba.py#L46-L54 | [
"def",
"dropIndex",
"(",
"cls",
",",
"fields",
")",
":",
"con",
"=",
"RabaConnection",
"(",
"cls",
".",
"_raba_namespace",
")",
"rlf",
",",
"ff",
"=",
"cls",
".",
"_parseIndex",
"(",
"fields",
")",
"for",
"name",
"in",
"rlf",
":",
"con",
".",
"dropIndex",
"(",
"name",
",",
"'anchor_raba_id'",
")",
"con",
".",
"dropIndex",
"(",
"cls",
".",
"__name__",
",",
"ff",
")",
"con",
".",
"commit",
"(",
")"
] |
Executes the stream s tool over the given time interval | def execute_tool ( self , stream , interval ) : if interval . end > self . up_to_timestamp : raise ValueError ( 'The stream is not available after ' + str ( self . up_to_timestamp ) + ' and cannot be calculated' ) required_intervals = TimeIntervals ( [ interval ] ) - stream . calculated_intervals if not required_intervals . is_empty : for interval in required_intervals : stream . tool . execute ( stream . input_streams , stream , interval ) stream . calculated_intervals += interval if not stream . required_intervals . is_empty : raise RuntimeError ( 'Tool execution did not cover the specified time interval.' ) | 9,909 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/base_channel.py#L46-L65 | [
"def",
"read_file",
"(",
"filename",
")",
":",
"hits",
"=",
"glob",
".",
"glob",
"(",
"'**/{}'",
".",
"format",
"(",
"filename",
")",
",",
"recursive",
"=",
"True",
")",
"if",
"not",
"len",
"(",
"hits",
")",
":",
"pf",
".",
"debug",
"(",
"'No file \"{}\" found.'",
".",
"format",
"(",
"filename",
")",
")",
"return",
"''",
"elif",
"len",
"(",
"hits",
")",
">",
"1",
":",
"pf",
".",
"debug",
"(",
"'File pattern \"{}\" ambiguous. Using first.'",
".",
"format",
"(",
"filename",
")",
")",
"with",
"open",
"(",
"hits",
"[",
"0",
"]",
",",
"'r'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
] |
Helper function to get a stream or create one if it s not already defined | def get_or_create_stream ( self , stream_id , try_create = True ) : stream_id = get_stream_id ( stream_id ) if stream_id in self . streams : logging . debug ( "found {}" . format ( stream_id ) ) return self . streams [ stream_id ] elif try_create : # Try to create the stream logging . debug ( "creating {}" . format ( stream_id ) ) return self . create_stream ( stream_id = stream_id ) | 9,910 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/base_channel.py#L76-L91 | [
"def",
"check_image_size_incorrect",
"(",
"self",
")",
":",
"last_virtual_address",
"=",
"0",
"last_virtual_size",
"=",
"0",
"section_alignment",
"=",
"self",
".",
"pefile_handle",
".",
"OPTIONAL_HEADER",
".",
"SectionAlignment",
"total_image_size",
"=",
"self",
".",
"pefile_handle",
".",
"OPTIONAL_HEADER",
".",
"SizeOfImage",
"for",
"section",
"in",
"self",
".",
"pefile_handle",
".",
"sections",
":",
"if",
"section",
".",
"VirtualAddress",
">",
"last_virtual_address",
":",
"last_virtual_address",
"=",
"section",
".",
"VirtualAddress",
"last_virtual_size",
"=",
"section",
".",
"Misc_VirtualSize",
"# Just pad the size to be equal to the alignment and check for mismatch",
"last_virtual_size",
"+=",
"section_alignment",
"-",
"(",
"last_virtual_size",
"%",
"section_alignment",
")",
"if",
"(",
"last_virtual_address",
"+",
"last_virtual_size",
")",
"!=",
"total_image_size",
":",
"return",
"{",
"'description'",
":",
"'Image size does not match reported size'",
",",
"'severity'",
":",
"3",
",",
"'category'",
":",
"'MALFORMED'",
"}",
"return",
"None"
] |
Finds streams with the given meta data values . Useful for debugging purposes . | def find_streams ( self , * * kwargs ) : found = { } if 'name' in kwargs : name = kwargs . pop ( 'name' ) else : name = None for stream_id , stream in self . streams . items ( ) : if name is not None and stream_id . name != name : continue d = dict ( stream_id . meta_data ) if all ( k in d and d [ k ] == str ( v ) for k , v in kwargs . items ( ) ) : found [ stream_id ] = stream return found | 9,911 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/base_channel.py#L100-L121 | [
"def",
"cancel_order",
"(",
"self",
",",
"order_id",
":",
"str",
")",
"->",
"str",
":",
"self",
".",
"log",
".",
"debug",
"(",
"f'Canceling order id={order_id} on {self.name}'",
")",
"if",
"self",
".",
"dry_run",
":",
"# Don't cancel if dry run",
"self",
".",
"log",
".",
"warning",
"(",
"f'DRY RUN: Order cancelled on {self.name}: id={order_id}'",
")",
"return",
"order_id",
"try",
":",
"# Cancel order",
"self",
".",
"_cancel_order",
"(",
"order_id",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"self",
".",
"exception",
"(",
"OrderNotFound",
",",
"f'Failed to cancel order: id={order_id}'",
",",
"e",
")",
"from",
"e",
"self",
".",
"log",
".",
"info",
"(",
"f'Order cancelled on {self.name}: id={order_id}'",
")",
"return",
"order_id"
] |
Finds a single stream with the given meta data values . Useful for debugging purposes . | def find_stream ( self , * * kwargs ) : found = list ( self . find_streams ( * * kwargs ) . values ( ) ) if not found : raise StreamNotFoundError ( kwargs ) if len ( found ) > 1 : raise MultipleStreamsFoundError ( kwargs ) return found [ 0 ] | 9,912 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/base_channel.py#L123-L135 | [
"def",
"get_default_gentation",
"(",
"gender",
",",
"orientation",
")",
":",
"gender",
"=",
"gender",
".",
"lower",
"(",
")",
"[",
"0",
"]",
"orientation",
"=",
"orientation",
".",
"lower",
"(",
")",
"return",
"gender_to_orientation_to_gentation",
"[",
"gender",
"]",
"[",
"orientation",
"]"
] |
Get the next frame from file | def next_blob ( self ) : blob_file = self . blob_file try : preamble = DAQPreamble ( file_obj = blob_file ) except struct . error : raise StopIteration try : data_type = DATA_TYPES [ preamble . data_type ] except KeyError : log . error ( "Unkown datatype: {0}" . format ( preamble . data_type ) ) data_type = 'Unknown' blob = Blob ( ) blob [ data_type ] = None blob [ 'DAQPreamble' ] = preamble if data_type == 'DAQSummaryslice' : daq_frame = DAQSummaryslice ( blob_file ) blob [ data_type ] = daq_frame blob [ 'DAQHeader' ] = daq_frame . header elif data_type == 'DAQEvent' : daq_frame = DAQEvent ( blob_file ) blob [ data_type ] = daq_frame blob [ 'DAQHeader' ] = daq_frame . header else : log . warning ( "Skipping DAQ frame with data type code '{0}'." . format ( preamble . data_type ) ) blob_file . seek ( preamble . length - DAQPreamble . size , 1 ) return blob | 9,913 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L145-L179 | [
"def",
"sshagent_run",
"(",
"cmd",
")",
":",
"# Handle context manager modifications",
"wrapped_cmd",
"=",
"_prefix_commands",
"(",
"_prefix_env_vars",
"(",
"cmd",
")",
",",
"'remote'",
")",
"try",
":",
"host",
",",
"port",
"=",
"env",
".",
"host_string",
".",
"split",
"(",
"':'",
")",
"return",
"local",
"(",
"u\"ssh -p %s -A -o StrictHostKeyChecking=no %s@%s '%s'\"",
"%",
"(",
"port",
",",
"env",
".",
"user",
",",
"host",
",",
"wrapped_cmd",
")",
")",
"except",
"ValueError",
":",
"return",
"local",
"(",
"u\"ssh -A -o StrictHostKeyChecking=no %s@%s '%s'\"",
"%",
"(",
"env",
".",
"user",
",",
"env",
".",
"host_string",
",",
"wrapped_cmd",
")",
")"
] |
Move file pointer to the frame with given index . | def seek_to_frame ( self , index ) : pointer_position = self . frame_positions [ index ] self . blob_file . seek ( pointer_position , 0 ) | 9,914 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L181-L184 | [
"def",
"run",
"(",
"self",
",",
"*",
"*",
"import_params",
")",
":",
"import_params",
"[",
"\"url\"",
"]",
"=",
"self",
".",
"url",
"import_params",
"[",
"\"interval\"",
"]",
"=",
"self",
".",
"interval",
"if",
"\"connection\"",
"in",
"import_params",
":",
"self",
".",
"fields",
".",
"append",
"(",
"\"connector\"",
")",
"import_params",
"[",
"\"connection\"",
"]",
"[",
"\"interval\"",
"]",
"=",
"self",
".",
"interval",
"self",
".",
"update_from_dict",
"(",
"import_params",
"[",
"\"connection\"",
"]",
")",
"self",
".",
"save",
"(",
"force_create",
"=",
"True",
")",
"else",
":",
"return",
"super",
"(",
"SyncTableJob",
",",
"self",
")",
".",
"run",
"(",
"params",
"=",
"import_params",
")"
] |
Directly read from file handler . | def _parse_file ( self , file_obj ) : byte_data = file_obj . read ( self . size ) self . _parse_byte_data ( byte_data ) | 9,915 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L409-L416 | [
"def",
"register",
"(",
"self",
",",
"key",
",",
"*",
"*",
"kwargs",
")",
":",
"dimensions",
"=",
"dict",
"(",
"(",
"k",
",",
"str",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
")",
"composite_key",
"=",
"self",
".",
"_composite_name",
"(",
"key",
",",
"dimensions",
")",
"self",
".",
"_metadata",
"[",
"composite_key",
"]",
"=",
"{",
"'metric'",
":",
"key",
",",
"'dimensions'",
":",
"dimensions",
"}",
"return",
"composite_key"
] |
Iterate through the byte data and fill the summary_frames | def _parse_summary_frames ( self , file_obj ) : for _ in range ( self . n_summary_frames ) : dom_id = unpack ( '<i' , file_obj . read ( 4 ) ) [ 0 ] dq_status = file_obj . read ( 4 ) # probably dom status? # noqa dom_status = unpack ( '<iiii' , file_obj . read ( 16 ) ) raw_rates = unpack ( 'b' * 31 , file_obj . read ( 31 ) ) pmt_rates = [ self . _get_rate ( value ) for value in raw_rates ] self . summary_frames [ dom_id ] = pmt_rates self . dq_status [ dom_id ] = dq_status self . dom_status [ dom_id ] = dom_status self . dom_rates [ dom_id ] = np . sum ( pmt_rates ) | 9,916 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L499-L510 | [
"def",
"disconnect",
"(",
"service_instance",
")",
":",
"log",
".",
"trace",
"(",
"'Disconnecting'",
")",
"try",
":",
"Disconnect",
"(",
"service_instance",
")",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"'Not enough permissions. Required privilege: '",
"'{}'",
".",
"format",
"(",
"exc",
".",
"privilegeId",
")",
")",
"except",
"vim",
".",
"fault",
".",
"VimFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"exc",
".",
"msg",
")",
"except",
"vmodl",
".",
"RuntimeFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareRuntimeError",
"(",
"exc",
".",
"msg",
")"
] |
Return the rate in Hz from the short int value | def _get_rate ( self , value ) : if value == 0 : return 0 else : return MINIMAL_RATE_HZ * math . exp ( value * self . _get_factor ( ) ) | 9,917 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L512-L517 | [
"def",
"_expectation",
"(",
"self",
",",
"X",
")",
":",
"# Calculate probabilities of X belonging to the different clusters",
"weighted_likelihoods",
"=",
"self",
".",
"_get_likelihoods",
"(",
"X",
")",
"*",
"self",
".",
"priors",
"sum_likelihoods",
"=",
"np",
".",
"expand_dims",
"(",
"np",
".",
"sum",
"(",
"weighted_likelihoods",
",",
"axis",
"=",
"1",
")",
",",
"axis",
"=",
"1",
")",
"# Determine responsibility as P(X|y)*P(y)/P(X)",
"self",
".",
"responsibility",
"=",
"weighted_likelihoods",
"/",
"sum_likelihoods",
"# Assign samples to cluster that has largest probability",
"self",
".",
"sample_assignments",
"=",
"self",
".",
"responsibility",
".",
"argmax",
"(",
"axis",
"=",
"1",
")",
"# Save value for convergence check",
"self",
".",
"responsibilities",
".",
"append",
"(",
"np",
".",
"max",
"(",
"self",
".",
"responsibility",
",",
"axis",
"=",
"1",
")",
")"
] |
Parse and store triggered hits . | def _parse_triggered_hits ( self , file_obj ) : for _ in range ( self . n_triggered_hits ) : dom_id , pmt_id = unpack ( '<ib' , file_obj . read ( 5 ) ) tdc_time = unpack ( '>I' , file_obj . read ( 4 ) ) [ 0 ] tot = unpack ( '<b' , file_obj . read ( 1 ) ) [ 0 ] trigger_mask = unpack ( '<Q' , file_obj . read ( 8 ) ) self . triggered_hits . append ( ( dom_id , pmt_id , tdc_time , tot , trigger_mask ) ) | 9,918 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L556-L565 | [
"def",
"create_image_metadata",
"(",
"data",
")",
":",
"# Default metadata",
"meta",
"=",
"{",
"'protected'",
":",
"data",
".",
"get",
"(",
"'protected'",
",",
"False",
")",
",",
"'disk_format'",
":",
"data",
".",
"get",
"(",
"'disk_format'",
",",
"'raw'",
")",
",",
"'container_format'",
":",
"data",
".",
"get",
"(",
"'container_format'",
",",
"'bare'",
")",
",",
"'min_disk'",
":",
"data",
".",
"get",
"(",
"'min_disk'",
")",
"or",
"0",
",",
"'min_ram'",
":",
"data",
".",
"get",
"(",
"'min_ram'",
")",
"or",
"0",
",",
"'name'",
":",
"data",
".",
"get",
"(",
"'name'",
",",
"''",
")",
"}",
"# Glance does not really do anything with container_format at the",
"# moment. It requires it is set to the same disk_format for the three",
"# Amazon image types, otherwise it just treats them as 'bare.' As such",
"# we will just set that to be that here instead of bothering the user",
"# with asking them for information we can already determine.",
"if",
"meta",
"[",
"'disk_format'",
"]",
"in",
"(",
"'ami'",
",",
"'aki'",
",",
"'ari'",
",",
")",
":",
"meta",
"[",
"'container_format'",
"]",
"=",
"meta",
"[",
"'disk_format'",
"]",
"elif",
"meta",
"[",
"'disk_format'",
"]",
"==",
"'docker'",
":",
"# To support docker containers we allow the user to specify",
"# 'docker' as the format. In that case we really want to use",
"# 'raw' as the disk format and 'docker' as the container format.",
"meta",
"[",
"'disk_format'",
"]",
"=",
"'raw'",
"meta",
"[",
"'container_format'",
"]",
"=",
"'docker'",
"elif",
"meta",
"[",
"'disk_format'",
"]",
"==",
"'ova'",
":",
"# If the user wishes to upload an OVA using Horizon, then",
"# 'ova' must be the container format and 'vmdk' must be the disk",
"# format.",
"meta",
"[",
"'container_format'",
"]",
"=",
"'ova'",
"meta",
"[",
"'disk_format'",
"]",
"=",
"'vmdk'",
"properties",
"=",
"{",
"}",
"for",
"prop",
",",
"key",
"in",
"[",
"(",
"'description'",
",",
"'description'",
")",
",",
"(",
"'kernel_id'",
",",
"'kernel'",
")",
",",
"(",
"'ramdisk_id'",
",",
"'ramdisk'",
")",
",",
"(",
"'architecture'",
",",
"'architecture'",
")",
"]",
":",
"if",
"data",
".",
"get",
"(",
"key",
")",
":",
"properties",
"[",
"prop",
"]",
"=",
"data",
"[",
"key",
"]",
"_handle_unknown_properties",
"(",
"data",
",",
"properties",
")",
"if",
"(",
"'visibility'",
"in",
"data",
"and",
"data",
"[",
"'visibility'",
"]",
"not",
"in",
"[",
"'public'",
",",
"'private'",
",",
"'community'",
",",
"'shared'",
"]",
")",
":",
"raise",
"KeyError",
"(",
"'invalid visibility option: %s'",
"%",
"data",
"[",
"'visibility'",
"]",
")",
"_normalize_is_public_filter",
"(",
"data",
")",
"if",
"VERSIONS",
".",
"active",
"<",
"2",
":",
"meta",
"[",
"'properties'",
"]",
"=",
"properties",
"meta",
"[",
"'is_public'",
"]",
"=",
"data",
".",
"get",
"(",
"'is_public'",
",",
"False",
")",
"else",
":",
"meta",
"[",
"'visibility'",
"]",
"=",
"data",
".",
"get",
"(",
"'visibility'",
",",
"'private'",
")",
"meta",
".",
"update",
"(",
"properties",
")",
"return",
"meta"
] |
Parse and store snapshot hits . | def _parse_snapshot_hits ( self , file_obj ) : for _ in range ( self . n_snapshot_hits ) : dom_id , pmt_id = unpack ( '<ib' , file_obj . read ( 5 ) ) tdc_time = unpack ( '>I' , file_obj . read ( 4 ) ) [ 0 ] tot = unpack ( '<b' , file_obj . read ( 1 ) ) [ 0 ] self . snapshot_hits . append ( ( dom_id , pmt_id , tdc_time , tot ) ) | 9,919 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L567-L573 | [
"def",
"_read_console_output",
"(",
"self",
",",
"ws",
",",
"out",
")",
":",
"while",
"True",
":",
"msg",
"=",
"yield",
"from",
"ws",
".",
"receive",
"(",
")",
"if",
"msg",
".",
"tp",
"==",
"aiohttp",
".",
"WSMsgType",
".",
"text",
":",
"out",
".",
"feed_data",
"(",
"msg",
".",
"data",
".",
"encode",
"(",
")",
")",
"elif",
"msg",
".",
"tp",
"==",
"aiohttp",
".",
"WSMsgType",
".",
"BINARY",
":",
"out",
".",
"feed_data",
"(",
"msg",
".",
"data",
")",
"elif",
"msg",
".",
"tp",
"==",
"aiohttp",
".",
"WSMsgType",
".",
"ERROR",
":",
"log",
".",
"critical",
"(",
"\"Docker WebSocket Error: {}\"",
".",
"format",
"(",
"msg",
".",
"data",
")",
")",
"else",
":",
"out",
".",
"feed_eof",
"(",
")",
"ws",
".",
"close",
"(",
")",
"break",
"yield",
"from",
"self",
".",
"stop",
"(",
")"
] |
Print the run table of the last n runs for given detector | def runtable ( det_id , n = 5 , run_range = None , compact = False , sep = '\t' , regex = None ) : db = kp . db . DBManager ( ) df = db . run_table ( det_id ) if run_range is not None : try : from_run , to_run = [ int ( r ) for r in run_range . split ( '-' ) ] except ValueError : log . critical ( "Please specify a valid range (e.g. 3100-3200)!" ) raise SystemExit else : df = df [ ( df . RUN >= from_run ) & ( df . RUN <= to_run ) ] if regex is not None : try : re . compile ( regex ) except re . error : log . error ( "Invalid regex!" ) return df = df [ df [ 'RUNSETUPNAME' ] . str . contains ( regex ) | df [ 'RUNSETUPID' ] . str . contains ( regex ) ] if n is not None : df = df . tail ( n ) if compact : df = df [ [ 'RUN' , 'DATETIME' , 'RUNSETUPNAME' ] ] df . to_csv ( sys . stdout , sep = sep ) | 9,920 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/runtable.py#L35-L65 | [
"def",
"get_license_assignment_manager",
"(",
"service_instance",
")",
":",
"log",
".",
"debug",
"(",
"'Retrieving license assignment manager'",
")",
"try",
":",
"lic_assignment_manager",
"=",
"service_instance",
".",
"content",
".",
"licenseManager",
".",
"licenseAssignmentManager",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"'Not enough permissions. Required privilege: '",
"'{0}'",
".",
"format",
"(",
"exc",
".",
"privilegeId",
")",
")",
"except",
"vim",
".",
"fault",
".",
"VimFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"exc",
".",
"msg",
")",
"except",
"vmodl",
".",
"RuntimeFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareRuntimeError",
"(",
"exc",
".",
"msg",
")",
"if",
"not",
"lic_assignment_manager",
":",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareObjectRetrievalError",
"(",
"'License assignment manager was not retrieved'",
")",
"return",
"lic_assignment_manager"
] |
Extract AHRS calibration information from XML root . | def _extract_calibration ( xroot ) : names = [ c . text for c in xroot . findall ( ".//Name" ) ] val = [ [ i . text for i in c ] for c in xroot . findall ( ".//Values" ) ] # The fields has to be reindeced, these are the index mappings col_ic = [ int ( v ) for v in val [ names . index ( "AHRS_Matrix_Column(-)" ) ] ] try : row_ic = [ int ( v ) for v in val [ names . index ( "AHRS_Matrix_Row(-)" ) ] ] except ValueError : row_ic = [ 2 , 2 , 2 , 1 , 1 , 1 , 0 , 0 , 0 ] try : vec_ic = [ int ( v ) for v in val [ names . index ( "AHRS_Vector_Index(-)" ) ] ] except ValueError : vec_ic = [ 2 , 1 , 0 ] Aoff_ix = names . index ( "AHRS_Acceleration_Offset(g/ms^2-)" ) Arot_ix = names . index ( "AHRS_Acceleration_Rotation(-)" ) Hrot_ix = names . index ( "AHRS_Magnetic_Rotation(-)" ) Aoff = np . array ( val [ Aoff_ix ] ) [ vec_ic ] . astype ( float ) Arot = np . array ( val [ Arot_ix ] ) . reshape ( 3 , 3 ) [ col_ic , row_ic ] . reshape ( 3 , 3 ) . astype ( float ) Hrot = np . array ( val [ Hrot_ix ] ) . reshape ( 3 , 3 ) [ col_ic , row_ic ] . reshape ( 3 , 3 ) . astype ( float ) Hoff = [ ] for q in 'XYZ' : values = [ ] for t in ( 'Min' , 'Max' ) : ix = names . index ( "AHRS_Magnetic_{}{}(G-)" . format ( q , t ) ) values . append ( float ( val [ ix ] [ 0 ] ) ) Hoff . append ( sum ( values ) / 2. ) Hoff = np . array ( Hoff ) return Aoff , Arot , Hoff , Hrot | 9,921 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/ahrs.py#L207-L256 | [
"def",
"serve",
"(",
"content",
")",
":",
"temp_folder",
"=",
"tempfile",
".",
"gettempdir",
"(",
")",
"temp_file_name",
"=",
"tempfile",
".",
"gettempprefix",
"(",
")",
"+",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"+",
"\".html\"",
"# Generate a file path with a random name in temporary dir",
"temp_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_folder",
",",
"temp_file_name",
")",
"# save content to temp file",
"save",
"(",
"temp_file_path",
",",
"content",
")",
"# Open templfile in a browser",
"webbrowser",
".",
"open",
"(",
"\"file://{}\"",
".",
"format",
"(",
"temp_file_path",
")",
")",
"# Block the thread while content is served",
"try",
":",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"except",
"KeyboardInterrupt",
":",
"# cleanup the temp file",
"os",
".",
"remove",
"(",
"temp_file_path",
")"
] |
def calibrate(self):
    """Derive yaw/pitch/roll per DOM from the medians of A and H.

    Consumes the accumulated ``self.A``/``self.H`` samples, fits the AHRS
    calibration for every DOM and resets the accumulators afterwards.

    :return: dict mapping dom_id to
        ``(timestamp, du, floor, yaw, pitch, roll)``
    """
    now = time.time()
    dom_ids = self.A.keys()
    print(
        "Calibrating AHRS from median A and H for {} DOMs."
        .format(len(dom_ids))
    )
    calibrations = {}
    for dom_id in dom_ids:
        print("Calibrating DOM ID {}".format(dom_id))
        clb_upi = self.db.doms.via_dom_id(dom_id).clb_upi
        ahrs_calib = get_latest_ahrs_calibration(clb_upi)
        if ahrs_calib is None:
            # No calibration in the DB for this DOM: skip it, keep going.
            log.warning("AHRS calibration missing for '{}'".format(dom_id))
            continue
        du, floor, _ = self.detector.doms[dom_id]
        median_A = np.median(self.A[dom_id], axis=0)
        median_H = np.median(self.H[dom_id], axis=0)
        cyaw, cpitch, croll = fit_ahrs(median_A, median_H, *ahrs_calib)
        calibrations[dom_id] = (now, du, floor, cyaw, cpitch, croll)
    # Reset the sample accumulators for the next calibration round.
    self.A = defaultdict(list)
    self.H = defaultdict(list)
    return calibrations
"def",
"resume",
"(",
"env",
",",
"identifier",
")",
":",
"vsi",
"=",
"SoftLayer",
".",
"VSManager",
"(",
"env",
".",
"client",
")",
"vs_id",
"=",
"helpers",
".",
"resolve_id",
"(",
"vsi",
".",
"resolve_ids",
",",
"identifier",
",",
"'VS'",
")",
"env",
".",
"client",
"[",
"'Virtual_Guest'",
"]",
".",
"resume",
"(",
"id",
"=",
"vs_id",
")"
] |
def humanize_filesize(value):
    """Return a human-readable string for a file size given in bytes.

    :param value: size in bytes (any numeric type; converted to float)
    :return: e.g. ``'1 Byte'``, ``'512 Bytes'`` or ``'1.5 GB'`` (unit names
        are taken from the module-level ``SUFFIXES`` sequence)
    """
    value = float(value)
    if value == 1:
        return '1 Byte'
    if value < 1024:
        # NOTE: a second, identical ``elif value < 1024`` branch returning
        # '%dB' was unreachable dead code and has been removed.
        return '%d Bytes' % value
    for i, s in enumerate(SUFFIXES):
        unit = 1024 ** (i + 2)
        if value < unit:
            return '%.1f %s' % (1024 * value / unit, s)
    # Larger than the biggest known unit: express it in that largest unit.
    return '%.1f %s' % (1024 * value / unit, s)
"def",
"_handle_interrupt",
"(",
"self",
",",
"fileno",
",",
"val",
")",
":",
"val",
"=",
"int",
"(",
"val",
")",
"# Filter invalid edge values (sometimes 1 comes in when edge=falling)",
"edge",
"=",
"self",
".",
"_map_fileno_to_options",
"[",
"fileno",
"]",
"[",
"\"edge\"",
"]",
"if",
"(",
"edge",
"==",
"'rising'",
"and",
"val",
"==",
"0",
")",
"or",
"(",
"edge",
"==",
"'falling'",
"and",
"val",
"==",
"1",
")",
":",
"return",
"# If user activated debounce for this callback, check timing now",
"debounce",
"=",
"self",
".",
"_map_fileno_to_options",
"[",
"fileno",
"]",
"[",
"\"debounce_timeout_s\"",
"]",
"if",
"debounce",
":",
"t",
"=",
"time",
".",
"time",
"(",
")",
"t_last",
"=",
"self",
".",
"_map_fileno_to_options",
"[",
"fileno",
"]",
"[",
"\"interrupt_last\"",
"]",
"if",
"t",
"-",
"t_last",
"<",
"debounce",
":",
"debug",
"(",
"\"- don't start interrupt callback due to debouncing\"",
")",
"return",
"self",
".",
"_map_fileno_to_options",
"[",
"fileno",
"]",
"[",
"\"interrupt_last\"",
"]",
"=",
"t",
"# Start the callback(s) now",
"gpio_id",
"=",
"self",
".",
"_map_fileno_to_gpioid",
"[",
"fileno",
"]",
"if",
"gpio_id",
"in",
"self",
".",
"_map_gpioid_to_callbacks",
":",
"for",
"cb",
"in",
"self",
".",
"_map_gpioid_to_callbacks",
"[",
"gpio_id",
"]",
":",
"cb",
"(",
"gpio_id",
",",
"val",
")"
] |
Format a top . | def format_top ( counter , top = 3 ) : items = islice ( reversed ( sorted ( counter . iteritems ( ) , key = lambda x : x [ 1 ] ) ) , 0 , top ) return u'; ' . join ( u'{g} ({nb})' . format ( g = g , nb = nb ) for g , nb in items ) | 9,924 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/stats.py#L34-L38 | [
"def",
"start",
"(",
"self",
",",
"driver",
"=",
"None",
",",
"device",
"=",
"None",
",",
"midi_driver",
"=",
"None",
")",
":",
"if",
"driver",
"is",
"not",
"None",
":",
"assert",
"(",
"driver",
"in",
"[",
"'alsa'",
",",
"'oss'",
",",
"'jack'",
",",
"'portaudio'",
",",
"'sndmgr'",
",",
"'coreaudio'",
",",
"'Direct Sound'",
",",
"'pulseaudio'",
"]",
")",
"fluid_settings_setstr",
"(",
"self",
".",
"settings",
",",
"b'audio.driver'",
",",
"driver",
".",
"encode",
"(",
")",
")",
"if",
"device",
"is",
"not",
"None",
":",
"fluid_settings_setstr",
"(",
"self",
".",
"settings",
",",
"str",
"(",
"'audio.%s.device'",
"%",
"(",
"driver",
")",
")",
".",
"encode",
"(",
")",
",",
"device",
".",
"encode",
"(",
")",
")",
"self",
".",
"audio_driver",
"=",
"new_fluid_audio_driver",
"(",
"self",
".",
"settings",
",",
"self",
".",
"synth",
")",
"if",
"midi_driver",
"is",
"not",
"None",
":",
"assert",
"(",
"midi_driver",
"in",
"[",
"'alsa_seq'",
",",
"'alsa_raw'",
",",
"'oss'",
",",
"'winmidi'",
",",
"'midishare'",
",",
"'coremidi'",
"]",
")",
"fluid_settings_setstr",
"(",
"self",
".",
"settings",
",",
"b'midi.driver'",
",",
"midi_driver",
".",
"encode",
"(",
")",
")",
"self",
".",
"router",
"=",
"new_fluid_midi_router",
"(",
"self",
".",
"settings",
",",
"fluid_synth_handle_midi_event",
",",
"self",
".",
"synth",
")",
"fluid_synth_set_midi_router",
"(",
"self",
".",
"synth",
",",
"self",
".",
"router",
")",
"self",
".",
"midi_driver",
"=",
"new_fluid_midi_driver",
"(",
"self",
".",
"settings",
",",
"fluid_midi_router_handle_midi_event",
",",
"self",
".",
"router",
")"
] |
def check_input_stream_count(expected_number_of_streams):
    """Decorator for ``Tool._execute`` validating the number of input streams.

    :param expected_number_of_streams: how many sources the tool requires
    :raises ValueError: when the wrapped call receives a different number
    """
    def stream_count_decorator(wrapped):
        def guarded(*args, **kwargs):
            tool = args[0]
            if 'sources' in kwargs:
                streams = kwargs['sources']
            else:
                streams = args[1]
            if expected_number_of_streams == 0:
                if streams:
                    raise ValueError("No input streams expected")
            else:
                supplied = len(streams) if streams else 0
                if supplied != expected_number_of_streams:
                    raise ValueError(
                        "{} tool takes {} stream(s) as input ({} given)".format(
                            tool.__class__.__name__,
                            expected_number_of_streams, supplied
                        )
                    )
            return wrapped(*args, **kwargs)
        return guarded
    return stream_count_decorator
"def",
"create_or_update",
"(",
"cls",
",",
"bucket",
",",
"key",
",",
"value",
")",
":",
"obj",
"=",
"cls",
".",
"get",
"(",
"bucket",
",",
"key",
")",
"if",
"obj",
":",
"obj",
".",
"value",
"=",
"value",
"db",
".",
"session",
".",
"merge",
"(",
"obj",
")",
"else",
":",
"obj",
"=",
"cls",
".",
"create",
"(",
"bucket",
",",
"key",
",",
"value",
")",
"return",
"obj"
] |
def main():
    """Entry point: mirror messages from one ligier to another."""
    from docopt import docopt
    arguments = docopt(__doc__, version=kp.version)

    kp.logger.set_level("km3pipe", arguments['-d'])

    pipeline = kp.Pipeline()
    pipeline.attach(
        kp.io.ch.CHPump,
        host=arguments['SOURCE_IP'],
        port=int(arguments['-p']),
        tags=arguments['-m'],
        timeout=int(arguments['-x']),
        max_queue=int(arguments['-s'])
    )
    pipeline.attach(
        LigierSender, target_ip=arguments['-t'], port=int(arguments['-q'])
    )
    pipeline.drain()
"def",
"require_session",
"(",
"handler",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"handler",
")",
"async",
"def",
"decorated",
"(",
"request",
":",
"web",
".",
"Request",
")",
"->",
"web",
".",
"Response",
":",
"request_session_token",
"=",
"request",
".",
"match_info",
"[",
"'session'",
"]",
"session",
"=",
"session_from_request",
"(",
"request",
")",
"if",
"not",
"session",
"or",
"request_session_token",
"!=",
"session",
".",
"token",
":",
"LOG",
".",
"warning",
"(",
"f\"request for invalid session {request_session_token}\"",
")",
"return",
"web",
".",
"json_response",
"(",
"data",
"=",
"{",
"'error'",
":",
"'bad-token'",
",",
"'message'",
":",
"f'No such session {request_session_token}'",
"}",
",",
"status",
"=",
"404",
")",
"return",
"await",
"handler",
"(",
"request",
",",
"session",
")",
"return",
"decorated"
] |
def parse(filename):
    """Parse a .ase (Adobe Swatch Exchange) file.

    :param filename: path to the .ase file
    :return: list of colors and color groups found in the file
    :raises AssertionError: on a bad magic header or unsupported version
    """
    with open(filename, "rb") as data:
        # 12-byte file header: magic, version major/minor, chunk count.
        header, v_major, v_minor, chunk_count = struct.unpack(
            "!4sHHI", data.read(12)
        )
        assert header == b"ASEF"
        assert (v_major, v_minor) == (1, 0)
        return list(parser.parse_chunk(data))
"def",
"_accountForNlinkEquals2",
"(",
"self",
",",
"localFilePath",
")",
":",
"fileStats",
"=",
"os",
".",
"stat",
"(",
"localFilePath",
")",
"assert",
"fileStats",
".",
"st_nlink",
">=",
"self",
".",
"nlinkThreshold",
"with",
"self",
".",
"_CacheState",
".",
"open",
"(",
"self",
")",
"as",
"cacheInfo",
":",
"cacheInfo",
".",
"sigmaJob",
"-=",
"fileStats",
".",
"st_size",
"jobState",
"=",
"self",
".",
"_JobState",
"(",
"cacheInfo",
".",
"jobState",
"[",
"self",
".",
"jobID",
"]",
")",
"jobState",
".",
"updateJobReqs",
"(",
"fileStats",
".",
"st_size",
",",
"'remove'",
")"
] |
def dumps(obj):
    """Serialize a swatch structure to the binary .ase representation.

    :param obj: list of colors and color groups
    :return: bytes suitable for writing to a .ase file
    """
    # File header: magic 'ASEF', format version 1.0, number of chunks.
    head = struct.pack('!4sHHI', b'ASEF', 1, 0, writer.chunk_count(obj))
    chunks = [writer.chunk_for_object(entry) for entry in obj]
    return head + b''.join(chunks)
"def",
"GetGroups",
"(",
"location",
"=",
"None",
",",
"alias",
"=",
"None",
")",
":",
"if",
"alias",
"is",
"None",
":",
"alias",
"=",
"clc",
".",
"v1",
".",
"Account",
".",
"GetAlias",
"(",
")",
"if",
"location",
"is",
"None",
":",
"location",
"=",
"clc",
".",
"v1",
".",
"Account",
".",
"GetLocation",
"(",
")",
"r",
"=",
"clc",
".",
"v1",
".",
"API",
".",
"Call",
"(",
"'post'",
",",
"'Group/GetGroups'",
",",
"{",
"'AccountAlias'",
":",
"alias",
",",
"'Location'",
":",
"location",
"}",
")",
"for",
"group",
"in",
"r",
"[",
"'HardwareGroups'",
"]",
":",
"clc",
".",
"_GROUP_MAPPING",
"[",
"group",
"[",
"'UUID'",
"]",
"]",
"=",
"group",
"[",
"'Name'",
"]",
"if",
"int",
"(",
"r",
"[",
"'StatusCode'",
"]",
")",
"==",
"0",
":",
"return",
"(",
"r",
"[",
"'HardwareGroups'",
"]",
")"
] |
def isFullPreferenceOrder(self, candList):
    """Return True when the weighted majority graph compares every pair.

    :param candList: candidates that must all be pairwise compared
    :return: False if a candidate is missing from ``wmgMap`` or any ordered
        pair of distinct candidates lacks an entry; True otherwise
    """
    for cand in candList:
        if cand not in self.wmgMap:
            return False
        comparisons = self.wmgMap[cand]
        for other in candList:
            if other != cand and other not in comparisons:
                return False
    return True
"def",
"ModuleHelp",
"(",
"self",
",",
"module",
")",
":",
"helplist",
"=",
"[",
"]",
"self",
".",
"__RenderOurModuleKeyFlags",
"(",
"module",
",",
"helplist",
")",
"return",
"'\\n'",
".",
"join",
"(",
"helplist",
")"
] |
def containsTie(self):
    """Return True if any pair of candidates is tied in the majority graph.

    A stored weight of exactly 0 is interpreted as a tie.
    """
    return any(
        0 in self.wmgMap[cand].values() for cand in self.wmgMap.keys()
    )
"def",
"Run",
"(",
"self",
")",
":",
"self",
".",
"_GetArgs",
"(",
")",
"goodlogging",
".",
"Log",
".",
"Info",
"(",
"\"CLEAR\"",
",",
"\"Using database: {0}\"",
".",
"format",
"(",
"self",
".",
"_databasePath",
")",
")",
"self",
".",
"_db",
"=",
"database",
".",
"RenamerDB",
"(",
"self",
".",
"_databasePath",
")",
"if",
"self",
".",
"_dbPrint",
"or",
"self",
".",
"_dbUpdate",
":",
"goodlogging",
".",
"Log",
".",
"Seperator",
"(",
")",
"self",
".",
"_db",
".",
"PrintAllTables",
"(",
")",
"if",
"self",
".",
"_dbUpdate",
":",
"goodlogging",
".",
"Log",
".",
"Seperator",
"(",
")",
"self",
".",
"_db",
".",
"ManualUpdateTables",
"(",
")",
"self",
".",
"_GetDatabaseConfig",
"(",
")",
"if",
"self",
".",
"_enableExtract",
":",
"goodlogging",
".",
"Log",
".",
"Seperator",
"(",
")",
"extractFileList",
"=",
"[",
"]",
"goodlogging",
".",
"Log",
".",
"Info",
"(",
"\"CLEAR\"",
",",
"\"Parsing source directory for compressed files\"",
")",
"goodlogging",
".",
"Log",
".",
"IncreaseIndent",
"(",
")",
"extract",
".",
"GetCompressedFilesInDir",
"(",
"self",
".",
"_sourceDir",
",",
"extractFileList",
",",
"self",
".",
"_ignoredDirsList",
")",
"goodlogging",
".",
"Log",
".",
"DecreaseIndent",
"(",
")",
"goodlogging",
".",
"Log",
".",
"Seperator",
"(",
")",
"extract",
".",
"Extract",
"(",
"extractFileList",
",",
"self",
".",
"_supportedFormatsList",
",",
"self",
".",
"_archiveDir",
",",
"self",
".",
"_skipUserInputExtract",
")",
"goodlogging",
".",
"Log",
".",
"Seperator",
"(",
")",
"tvFileList",
"=",
"[",
"]",
"goodlogging",
".",
"Log",
".",
"Info",
"(",
"\"CLEAR\"",
",",
"\"Parsing source directory for compatible files\"",
")",
"goodlogging",
".",
"Log",
".",
"IncreaseIndent",
"(",
")",
"self",
".",
"_GetSupportedFilesInDir",
"(",
"self",
".",
"_sourceDir",
",",
"tvFileList",
",",
"self",
".",
"_supportedFormatsList",
",",
"self",
".",
"_ignoredDirsList",
")",
"goodlogging",
".",
"Log",
".",
"DecreaseIndent",
"(",
")",
"tvRenamer",
"=",
"renamer",
".",
"TVRenamer",
"(",
"self",
".",
"_db",
",",
"tvFileList",
",",
"self",
".",
"_archiveDir",
",",
"guideName",
"=",
"'EPGUIDES'",
",",
"tvDir",
"=",
"self",
".",
"_tvDir",
",",
"inPlaceRename",
"=",
"self",
".",
"_inPlaceRename",
",",
"forceCopy",
"=",
"self",
".",
"_crossSystemCopyEnabled",
",",
"skipUserInput",
"=",
"self",
".",
"_skipUserInputRename",
")",
"tvRenamer",
".",
"Run",
"(",
")"
] |
def getIncEdgesMap(self):
    """Group candidates by their summed positive edge weights.

    :return: dict mapping an edge-weight sum to the list of candidates
        whose positive weights in ``wmgMap[cand]`` add up to that sum
        (NOTE(review): docstring history calls these "incoming" edges while
        the code sums ``wmgMap[cand]`` values — confirm edge orientation)
    """
    incEdgesMap = dict()
    for cand, comparisons in self.wmgMap.items():
        total = sum(w for w in comparisons.values() if w > 0)
        # First candidate with this total creates the bucket.
        incEdgesMap.setdefault(total, []).append(cand)
    return incEdgesMap
"def",
"mode",
"(",
"self",
",",
"mode",
")",
":",
"allowed_values",
"=",
"[",
"\"test\"",
",",
"\"live\"",
"]",
"if",
"mode",
"is",
"not",
"None",
"and",
"mode",
"not",
"in",
"allowed_values",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `mode` ({0}), must be one of {1}\"",
".",
"format",
"(",
"mode",
",",
"allowed_values",
")",
")",
"self",
".",
"_mode",
"=",
"mode"
] |
def getRankMap(self):
    """Map each candidate to its 1-based rank position.

    Candidates with equal edge sums (as grouped by ``getIncEdgesMap``)
    share the same position; positions advance by one per group.
    """
    incEdgesMap = self.getIncEdgesMap()
    rankMap = dict()
    for pos, key in enumerate(sorted(incEdgesMap.keys(), reverse=True), 1):
        for cand in incEdgesMap[key]:
            rankMap[cand] = pos
    return rankMap
"def",
"time",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"start_time",
"=",
"time_module",
".",
"time",
"(",
")",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"end_time",
"=",
"time_module",
".",
"time",
"(",
")",
"return",
"end_time",
"-",
"start_time"
] |
def getReverseRankMap(self):
    """Map each 1-based rank position to the candidates ranked there.

    Positions advance by one per group of candidates sharing an edge sum
    (as grouped by ``getIncEdgesMap``).
    """
    incEdgesMap = self.getIncEdgesMap()
    reverseRankMap = dict()
    for pos, key in enumerate(sorted(incEdgesMap.keys(), reverse=True), 1):
        reverseRankMap[pos] = incEdgesMap[key]
    return reverseRankMap
"def",
"remove",
"(",
"self",
",",
"method",
":",
"Method",
")",
":",
"self",
".",
"_table",
"=",
"[",
"fld",
"for",
"fld",
"in",
"self",
".",
"_table",
"if",
"fld",
"is",
"not",
"method",
"]"
] |
def histogram(a, bins):
    """Compute the histogram of a set of data using only the standard library.

    :param a: input data; a single non-iterable value is treated as ``[a]``
    :param bins: monotonically increasing sequence of bin edges
    :return: tuple ``(counts, bins)`` where ``counts`` are the successive
        differences of the cumulative per-edge counts
    :raises ValueError: if the bin edges do not increase monotonically
    """
    # NOTE: a large block of commented-out NumPy debugging code was removed.
    if any(map(lambda x: x < 0, diff(bins))):
        raise ValueError('bins must increase monotonically.')

    try:
        sa = sorted(a)
    except TypeError:
        # Perhaps just a single value? Treat as a list and carry on.
        sa = sorted([a])

    # Cumulative number of elements whose left-edge insertion index is <= i,
    # for each of the len(bins) - 1 left edges.
    nl = list(
        accumulate([
            Counter(map(lambda x: bisect_left(bins[:-1], x), sa))[i]
            for i in range(len(bins) - 1)
        ])
    )
    # Number of elements at or beyond the second bin edge.
    # NOTE(review): the NumPy reference this mirrors used ``bins[-1]``
    # (``np.searchsorted(sa, bins[-1], 'right')``); ``bins[1]`` only agrees
    # with that when ``len(bins) == 2`` -- verify which edge is intended.
    nr = Counter(map(lambda x: bisect_right([bins[1]], x), sa))[1]
    n = nl + [nr]
    return diff(n), bins
"def",
"asynchronize",
"(",
"framework",
",",
"sync_method",
",",
"doc",
"=",
"None",
",",
"wrap_class",
"=",
"None",
",",
"unwrap_class",
"=",
"None",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"sync_method",
")",
"def",
"method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"unwrap_class",
"is",
"not",
"None",
":",
"# Don't call isinstance(), not checking subclasses.",
"unwrapped_args",
"=",
"[",
"obj",
".",
"delegate",
"if",
"obj",
".",
"__class__",
".",
"__name__",
".",
"endswith",
"(",
"(",
"unwrap_class",
",",
"'MotorClientSession'",
")",
")",
"else",
"obj",
"for",
"obj",
"in",
"args",
"]",
"unwrapped_kwargs",
"=",
"{",
"key",
":",
"(",
"obj",
".",
"delegate",
"if",
"obj",
".",
"__class__",
".",
"__name__",
".",
"endswith",
"(",
"(",
"unwrap_class",
",",
"'MotorClientSession'",
")",
")",
"else",
"obj",
")",
"for",
"key",
",",
"obj",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"else",
":",
"# For speed, don't call unwrap_args_session/unwrap_kwargs_session.",
"unwrapped_args",
"=",
"[",
"obj",
".",
"delegate",
"if",
"obj",
".",
"__class__",
".",
"__name__",
".",
"endswith",
"(",
"'MotorClientSession'",
")",
"else",
"obj",
"for",
"obj",
"in",
"args",
"]",
"unwrapped_kwargs",
"=",
"{",
"key",
":",
"(",
"obj",
".",
"delegate",
"if",
"obj",
".",
"__class__",
".",
"__name__",
".",
"endswith",
"(",
"'MotorClientSession'",
")",
"else",
"obj",
")",
"for",
"key",
",",
"obj",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"loop",
"=",
"self",
".",
"get_io_loop",
"(",
")",
"return",
"framework",
".",
"run_on_executor",
"(",
"loop",
",",
"sync_method",
",",
"self",
".",
"delegate",
",",
"*",
"unwrapped_args",
",",
"*",
"*",
"unwrapped_kwargs",
")",
"if",
"wrap_class",
"is",
"not",
"None",
":",
"method",
"=",
"framework",
".",
"pymongo_class_wrapper",
"(",
"method",
",",
"wrap_class",
")",
"method",
".",
"is_wrap_method",
"=",
"True",
"# For Synchro.",
"# This is for the benefit of motor_extensions.py, which needs this info to",
"# generate documentation with Sphinx.",
"method",
".",
"is_async_method",
"=",
"True",
"name",
"=",
"sync_method",
".",
"__name__",
"method",
".",
"pymongo_method_name",
"=",
"name",
"if",
"doc",
"is",
"not",
"None",
":",
"method",
".",
"__doc__",
"=",
"doc",
"return",
"method"
] |
def deprecation(self, message, *args, **kws):
    """Log *message* at the custom DEPRECATION severity level.

    Positional and keyword arguments are forwarded to the underlying
    ``_log`` call.
    """
    self._log(DEPRECATION, message, args, **kws)
"def",
"get_metric",
"(",
"self",
",",
"slug",
")",
":",
"results",
"=",
"OrderedDict",
"(",
")",
"granularities",
"=",
"self",
".",
"_granularities",
"(",
")",
"keys",
"=",
"self",
".",
"_build_keys",
"(",
"slug",
")",
"for",
"granularity",
",",
"key",
"in",
"zip",
"(",
"granularities",
",",
"keys",
")",
":",
"results",
"[",
"granularity",
"]",
"=",
"self",
".",
"r",
".",
"get",
"(",
"key",
")",
"return",
"results"
] |
def once(self, message, *args, **kws):
    """Log *message* only once per call site or explicit identifier.

    The deduplication key is the ``identifier`` keyword argument when given,
    otherwise the caller's ``filename:lineno``.
    """
    # TODO: after py2 support drop, put this into
    # function signature: identifier=None (between *args and **kws)
    identifier = kws.pop('identifier', None)
    if identifier is None:
        # Derive a stable key from the immediate caller's source position.
        frame_info = getframeinfo(stack()[1][0])
        identifier = "%s:%d" % (frame_info.filename, frame_info.lineno)
    if not hasattr(self, 'once_dict'):
        self.once_dict = {}
    seen = self.once_dict
    if identifier not in seen:
        seen[identifier] = True
        self._log(ONCE, message, args, **kws)
"def",
"setDefaultApplicationForMimeType",
"(",
"self",
",",
"pchAppKey",
",",
"pchMimeType",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"setDefaultApplicationForMimeType",
"result",
"=",
"fn",
"(",
"pchAppKey",
",",
"pchMimeType",
")",
"return",
"result"
] |
def get_logger(name):
    """Return (and cache) a logger with a hash-coloured name prefix.

    :param name: logger name; also seeds the colour hash
    :return: a configured ``logging.Logger`` with an empty ``once_dict``
    """
    if name in loggers:
        return loggers[name]
    logger = logging.getLogger(name)
    logger.propagate = False
    if supports_color():
        pre1, suf1 = hash_coloured_escapes(name)
        pre2, suf2 = hash_coloured_escapes(name + 'salt')
    else:
        pre1, suf1 = ('', '')
        pre2, suf2 = ('', '')
    formatter = logging.Formatter(
        '%(levelname)s {}+{}+{} '
        '%(name)s: %(message)s'.format(pre1, pre2, suf1)
    )
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    loggers[name] = logger
    logger.once_dict = {}
    return logger
"def",
"parse_torrent_properties",
"(",
"table_datas",
")",
":",
"output",
"=",
"{",
"'category'",
":",
"table_datas",
"[",
"0",
"]",
".",
"text",
",",
"'subcategory'",
":",
"None",
",",
"'quality'",
":",
"None",
",",
"'language'",
":",
"None",
"}",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"table_datas",
")",
")",
":",
"td",
"=",
"table_datas",
"[",
"i",
"]",
"url",
"=",
"td",
".",
"get",
"(",
"'href'",
")",
"params",
"=",
"Parser",
".",
"get_params",
"(",
"url",
")",
"if",
"Parser",
".",
"is_subcategory",
"(",
"params",
")",
"and",
"not",
"output",
"[",
"'subcategory'",
"]",
":",
"output",
"[",
"'subcategory'",
"]",
"=",
"td",
".",
"text",
"elif",
"Parser",
".",
"is_quality",
"(",
"params",
")",
"and",
"not",
"output",
"[",
"'quality'",
"]",
":",
"output",
"[",
"'quality'",
"]",
"=",
"td",
".",
"text",
"elif",
"Parser",
".",
"is_language",
"(",
"params",
")",
"and",
"not",
"output",
"[",
"'language'",
"]",
":",
"output",
"[",
"'language'",
"]",
"=",
"td",
".",
"text",
"return",
"output"
] |
def get_printer(name, color=None, ansi_code=None, force_color=False):
    """Return a function that prints messages with a coloured *name* prefix.

    :param name: prefix text (and colour-hash seed when no colour is given)
    :param color: explicit colour name, overrides the hash colouring
    :param ansi_code: explicit ANSI code, overrides the hash colouring
    :param force_color: colourize even when the terminal seems unsupported
    """
    if force_color or supports_color():
        if color is None and ansi_code is None:
            pre_a, suf_a = hash_coloured_escapes(name)
            pre_b, _ = hash_coloured_escapes(name + 'salt')
            name = pre_a + '+' + pre_b + '+' + suf_a + ' ' + name
        else:
            name = colored(name, color=color, ansi_code=ansi_code)

    prefix = name + ': '

    def printer(text):
        print(prefix + str(text))

    return printer
"def",
"__get_axis_events",
"(",
"self",
",",
"state",
",",
"timeval",
"=",
"None",
")",
":",
"axis_changes",
"=",
"self",
".",
"__detect_axis_events",
"(",
"state",
")",
"events",
"=",
"self",
".",
"__emulate_axis",
"(",
"axis_changes",
",",
"timeval",
")",
"return",
"events"
] |
def hash_coloured(text):
    """Return *text* coloured with an ANSI code derived from its SHA-256."""
    digest = sha256(text.encode('utf-8')).hexdigest()
    return colored(text, ansi_code=int(digest, 16) % 230)
"def",
"run_normalization",
"(",
"self",
")",
":",
"for",
"index",
",",
"media_file",
"in",
"enumerate",
"(",
"tqdm",
"(",
"self",
".",
"media_files",
",",
"desc",
"=",
"\"File\"",
",",
"disable",
"=",
"not",
"self",
".",
"progress",
",",
"position",
"=",
"0",
")",
")",
":",
"logger",
".",
"info",
"(",
"\"Normalizing file {} ({} of {})\"",
".",
"format",
"(",
"media_file",
",",
"index",
"+",
"1",
",",
"self",
".",
"file_count",
")",
")",
"media_file",
".",
"run_normalization",
"(",
")",
"logger",
".",
"info",
"(",
"\"Normalized file written to {}\"",
".",
"format",
"(",
"media_file",
".",
"output_file",
")",
")"
] |
def hash_coloured_escapes(text):
    """Return the (prefix, suffix) ANSI escape pair for *text*'s hash colour."""
    digest = sha256(text.encode('utf-8')).hexdigest()
    code = int(digest, 16) % 230
    # Colour a marker word, then split it out to recover the raw escapes.
    prefix, suffix = colored('SPLIT', ansi_code=code).split('SPLIT')
    return prefix, suffix
"def",
"unregisterDataItem",
"(",
"self",
",",
"path",
")",
":",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/unregisterItem\"",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
",",
"\"itempath\"",
":",
"path",
",",
"\"force\"",
":",
"\"true\"",
"}",
"return",
"self",
".",
"_post",
"(",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")"
] |
def tai_timestamp():
    """Return the current timestamp in TAI (UTC plus leap seconds)."""
    utc_now = time.time()
    date = datetime.utcfromtimestamp(utc_now)
    if date.year < 1972:
        # Before the leap-second era TAI and UTC agree (for our purposes).
        return utc_now
    # Initial 10 s TAI-UTC offset at the start of 1972.
    offset = 10 + utc_now
    leap_seconds = [
        (1972, 1, 1), (1972, 7, 1), (1973, 1, 1), (1974, 1, 1), (1975, 1, 1),
        (1976, 1, 1), (1977, 1, 1), (1978, 1, 1), (1979, 1, 1), (1980, 1, 1),
        (1981, 7, 1), (1982, 7, 1), (1983, 7, 1), (1985, 7, 1), (1988, 1, 1),
        (1990, 1, 1), (1991, 1, 1), (1992, 7, 1), (1993, 7, 1), (1994, 7, 1),
        (1996, 1, 1), (1997, 7, 1), (1999, 1, 1), (2006, 1, 1), (2009, 1, 1),
        (2012, 7, 1), (2015, 7, 1), (2017, 1, 1),
    ]
    today = (date.year, date.month, date.day)
    # Every leap date strictly before today adds one second; the first entry
    # marks the 1972 epoch itself, hence the -1.
    return sum(1 for d in leap_seconds if d < today) - 1 + offset
"def",
"_get_sg_name_dict",
"(",
"self",
",",
"data",
",",
"page_size",
",",
"no_nameconv",
")",
":",
"if",
"no_nameconv",
":",
"return",
"{",
"}",
"neutron_client",
"=",
"self",
".",
"get_client",
"(",
")",
"search_opts",
"=",
"{",
"'fields'",
":",
"[",
"'id'",
",",
"'name'",
"]",
"}",
"if",
"self",
".",
"pagination_support",
":",
"if",
"page_size",
":",
"search_opts",
".",
"update",
"(",
"{",
"'limit'",
":",
"page_size",
"}",
")",
"sec_group_ids",
"=",
"set",
"(",
")",
"for",
"rule",
"in",
"data",
":",
"for",
"key",
"in",
"self",
".",
"replace_rules",
":",
"if",
"rule",
".",
"get",
"(",
"key",
")",
":",
"sec_group_ids",
".",
"add",
"(",
"rule",
"[",
"key",
"]",
")",
"sec_group_ids",
"=",
"list",
"(",
"sec_group_ids",
")",
"def",
"_get_sec_group_list",
"(",
"sec_group_ids",
")",
":",
"search_opts",
"[",
"'id'",
"]",
"=",
"sec_group_ids",
"return",
"neutron_client",
".",
"list_security_groups",
"(",
"*",
"*",
"search_opts",
")",
".",
"get",
"(",
"'security_groups'",
",",
"[",
"]",
")",
"try",
":",
"secgroups",
"=",
"_get_sec_group_list",
"(",
"sec_group_ids",
")",
"except",
"exceptions",
".",
"RequestURITooLong",
"as",
"uri_len_exc",
":",
"# Length of a query filter on security group rule id",
"# id=<uuid>& (with len(uuid)=36)",
"sec_group_id_filter_len",
"=",
"40",
"# The URI is too long because of too many sec_group_id filters",
"# Use the excess attribute of the exception to know how many",
"# sec_group_id filters can be inserted into a single request",
"sec_group_count",
"=",
"len",
"(",
"sec_group_ids",
")",
"max_size",
"=",
"(",
"(",
"sec_group_id_filter_len",
"*",
"sec_group_count",
")",
"-",
"uri_len_exc",
".",
"excess",
")",
"chunk_size",
"=",
"max_size",
"//",
"sec_group_id_filter_len",
"secgroups",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"sec_group_count",
",",
"chunk_size",
")",
":",
"secgroups",
".",
"extend",
"(",
"_get_sec_group_list",
"(",
"sec_group_ids",
"[",
"i",
":",
"i",
"+",
"chunk_size",
"]",
")",
")",
"return",
"dict",
"(",
"[",
"(",
"sg",
"[",
"'id'",
"]",
",",
"sg",
"[",
"'name'",
"]",
")",
"for",
"sg",
"in",
"secgroups",
"if",
"sg",
"[",
"'name'",
"]",
"]",
")"
] |
def msg(self, *args, **kwargs):
    """Invoke the callback only on first use or once the interval elapsed.

    All arguments are forwarded to ``self.callback``; the interval timer is
    reset afterwards.
    """
    if self.timestamp is not None and not self._interval_reached():
        return
    self.callback(*args, **kwargs)
    self.reset()
"def",
"_convert_strls",
"(",
"self",
",",
"data",
")",
":",
"convert_cols",
"=",
"[",
"col",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"data",
")",
"if",
"self",
".",
"typlist",
"[",
"i",
"]",
"==",
"32768",
"or",
"col",
"in",
"self",
".",
"_convert_strl",
"]",
"if",
"convert_cols",
":",
"ssw",
"=",
"StataStrLWriter",
"(",
"data",
",",
"convert_cols",
")",
"tab",
",",
"new_data",
"=",
"ssw",
".",
"generate_table",
"(",
")",
"data",
"=",
"new_data",
"self",
".",
"_strl_blob",
"=",
"ssw",
".",
"generate_blob",
"(",
"tab",
")",
"return",
"data"
] |
def _gather_field_values(
        item, *, fields=None, field_map=FIELD_MAP, normalize_values=False,
        normalize_func=normalize_value):
    """Build a tuple of (optionally normalized) metadata field values.

    :param item: the item whose tags are inspected
    :param fields: field names to extract; defaults to all tag keys
    :param field_map: mapping used to resolve field name aliases
    :param normalize_values: when True, run values through *normalize_func*
    :param normalize_func: normalizer applied when *normalize_values* is set
    :return: tuple of string field values in *fields* order
    """
    tags = get_item_tags(item)
    if fields is None:
        fields = list(tags.keys())
    normalize = normalize_func if normalize_values else lambda x: str(x)
    return tuple(
        normalize(
            list_to_single_value(get_field(tags, field, field_map=field_map))
        ) for field in fields
    )
"def",
"replace_heroku_db",
"(",
"self",
",",
"file_url",
")",
":",
"self",
".",
"print_message",
"(",
"\"Replacing database for Heroku app '%s'\"",
"%",
"self",
".",
"args",
".",
"destination_app",
")",
"self",
".",
"reset_heroku_database",
"(",
")",
"if",
"file_url",
":",
"self",
".",
"print_message",
"(",
"\"Restoring from URL '%s'\"",
"%",
"file_url",
")",
"args",
"=",
"[",
"\"heroku\"",
",",
"\"pg:backups:restore\"",
",",
"file_url",
",",
"\"--app=%s\"",
"%",
"self",
".",
"args",
".",
"destination_app",
",",
"\"DATABASE\"",
",",
"\"--confirm\"",
",",
"self",
".",
"args",
".",
"destination_app",
",",
"]",
"if",
"self",
".",
"args",
".",
"use_pgbackups",
":",
"args",
"=",
"[",
"\"heroku\"",
",",
"\"pgbackups:restore\"",
",",
"\"--app=%s\"",
"%",
"self",
".",
"args",
".",
"destination_app",
",",
"\"DATABASE_URL\"",
",",
"\"--confirm\"",
",",
"self",
".",
"args",
".",
"destination_app",
",",
"file_url",
",",
"]",
"subprocess",
".",
"check_call",
"(",
"args",
")",
"else",
":",
"# TODO perhaps add support for file -> heroku by piping to pg:psql",
"self",
".",
"print_message",
"(",
"\"Pushing data from database '%s'\"",
"%",
"self",
".",
"databases",
"[",
"'source'",
"]",
"[",
"'name'",
"]",
")",
"self",
".",
"print_message",
"(",
"\"NOTE: Any postgres authentication settings you passed to paragres \"",
"\"will be ignored.\\nIf desired, you can export PG* variables.\\n\"",
"\"You will be prompted for your psql password.\"",
")",
"args",
"=",
"[",
"\"heroku\"",
",",
"\"pg:push\"",
",",
"self",
".",
"databases",
"[",
"'source'",
"]",
"[",
"'name'",
"]",
",",
"\"DATABASE_URL\"",
",",
"\"--app=%s\"",
"%",
"self",
".",
"args",
".",
"destination_app",
",",
"]",
"subprocess",
".",
"check_call",
"(",
"args",
")"
] |
Find items from an item collection that are in another item collection . | def find_existing_items ( src , dst , * , fields = None , field_map = None , normalize_values = False , normalize_func = normalize_value ) : if field_map is None : field_map = FIELD_MAP dst_keys = { _gather_field_values ( dst_item , fields = fields , field_map = field_map , normalize_values = normalize_values , normalize_func = normalize_func ) for dst_item in dst } for src_item in src : if _gather_field_values ( src_item , fields = fields , field_map = field_map , normalize_values = normalize_values , normalize_func = normalize_func ) in dst_keys : yield src_item | 9,944 | https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/compare.py#L53-L89 | [
"async",
"def",
"set_default_min_hwe_kernel",
"(",
"cls",
",",
"version",
":",
"typing",
".",
"Optional",
"[",
"str",
"]",
")",
":",
"await",
"cls",
".",
"set_config",
"(",
"\"default_min_hwe_kernel\"",
",",
"\"\"",
"if",
"version",
"is",
"None",
"else",
"version",
")"
] |
Define a monitoring logger that will be added to Logger | def monitor ( self , message , * args , * * kws ) : if self . isEnabledFor ( MON ) : # Yes, logger takes its '*args' as 'args'. self . _log ( MON , message , args , * * kws ) | 9,945 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/hyperstream_logger.py#L160-L172 | [
"def",
"_compute_precision",
"(",
"references",
",",
"translation",
",",
"n",
")",
":",
"matches",
"=",
"0",
"candidates",
"=",
"0",
"ref_ngram_counts",
"=",
"Counter",
"(",
")",
"for",
"reference",
"in",
"references",
":",
"ref_ngram_counts",
"|=",
"_ngrams",
"(",
"reference",
",",
"n",
")",
"trans_ngram_counts",
"=",
"_ngrams",
"(",
"translation",
",",
"n",
")",
"overlap_ngram_counts",
"=",
"trans_ngram_counts",
"&",
"ref_ngram_counts",
"matches",
"+=",
"sum",
"(",
"overlap_ngram_counts",
".",
"values",
"(",
")",
")",
"possible_matches",
"=",
"len",
"(",
"translation",
")",
"-",
"n",
"+",
"1",
"if",
"possible_matches",
">",
"0",
":",
"candidates",
"+=",
"possible_matches",
"return",
"matches",
",",
"candidates"
] |
Log a message with severity MON on the root logger . | def monitor ( msg , * args , * * kwargs ) : if len ( logging . root . handlers ) == 0 : logging . basicConfig ( ) logging . root . monitor ( msg , * args , * * kwargs ) | 9,946 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/hyperstream_logger.py#L178-L184 | [
"def",
"set_etag",
"(",
"self",
",",
"etag",
",",
"weak",
"=",
"False",
")",
":",
"self",
".",
"headers",
"[",
"\"ETag\"",
"]",
"=",
"quote_etag",
"(",
"etag",
",",
"weak",
")"
] |
The formatting function | def format ( self , record ) : try : n = record . n except AttributeError : n = 'default' try : message = record . message except AttributeError : message = record . msg senml = OrderedDict ( uid = "hyperstream" , bt = datetime . utcfromtimestamp ( record . created ) . isoformat ( ) [ : - 3 ] + 'Z' , e = [ OrderedDict ( n = n , v = message ) ] ) formatted_json = json . dumps ( senml ) return formatted_json | 9,947 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/hyperstream_logger.py#L197-L222 | [
"def",
"rate_limit",
"(",
"f",
")",
":",
"def",
"new_f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"errors",
"=",
"0",
"while",
"True",
":",
"resp",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"resp",
".",
"status_code",
"==",
"200",
":",
"errors",
"=",
"0",
"return",
"resp",
"elif",
"resp",
".",
"status_code",
"==",
"401",
":",
"# Hack to retain the original exception, but augment it with",
"# additional context for the user to interpret it. In a Python",
"# 3 only future we can raise a new exception of the same type",
"# with a new message from the old error.",
"try",
":",
"resp",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"HTTPError",
"as",
"e",
":",
"message",
"=",
"\"\\nThis is a protected or locked account, or\"",
"+",
"\" the credentials provided are no longer valid.\"",
"e",
".",
"args",
"=",
"(",
"e",
".",
"args",
"[",
"0",
"]",
"+",
"message",
",",
")",
"+",
"e",
".",
"args",
"[",
"1",
":",
"]",
"log",
".",
"warning",
"(",
"\"401 Authentication required for %s\"",
",",
"resp",
".",
"url",
")",
"raise",
"elif",
"resp",
".",
"status_code",
"==",
"429",
":",
"reset",
"=",
"int",
"(",
"resp",
".",
"headers",
"[",
"'x-rate-limit-reset'",
"]",
")",
"now",
"=",
"time",
".",
"time",
"(",
")",
"seconds",
"=",
"reset",
"-",
"now",
"+",
"10",
"if",
"seconds",
"<",
"1",
":",
"seconds",
"=",
"10",
"log",
".",
"warning",
"(",
"\"rate limit exceeded: sleeping %s secs\"",
",",
"seconds",
")",
"time",
".",
"sleep",
"(",
"seconds",
")",
"elif",
"resp",
".",
"status_code",
">=",
"500",
":",
"errors",
"+=",
"1",
"if",
"errors",
">",
"30",
":",
"log",
".",
"warning",
"(",
"\"too many errors from Twitter, giving up\"",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"seconds",
"=",
"60",
"*",
"errors",
"log",
".",
"warning",
"(",
"\"%s from Twitter API, sleeping %s\"",
",",
"resp",
".",
"status_code",
",",
"seconds",
")",
"time",
".",
"sleep",
"(",
"seconds",
")",
"else",
":",
"resp",
".",
"raise_for_status",
"(",
")",
"return",
"new_f"
] |
Tears down the instance removing any remaining sessions that this instance has created . | def teardown ( self ) : with self . _teardown_lock : if not self . _teardown_called : self . _teardown_called = True if len ( self . _acquiring_session_ids ) > 0 : logger . info ( f"Destroying all sessions that have not acquired keys: {self._acquiring_session_ids}..." ) for session_id in self . _acquiring_session_ids : try : self . consul_client . session . destroy ( session_id = session_id ) logger . debug ( f"Destroyed: {session_id}" ) except requests . exceptions . ConnectionError as e : logger . debug ( f"Exception: {e}" ) logger . warning ( f"Could not connect to Consul to clean up session {session_id}" ) atexit . unregister ( self . teardown ) | 9,948 | https://github.com/wtsi-hgi/consul-lock/blob/deb07ab41dabbb49f4d0bbc062bc3b4b6e5d71b2/consullock/managers.py#L356-L375 | [
"def",
"generate_http_manifest",
"(",
"self",
")",
":",
"base_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"translate_path",
"(",
"self",
".",
"path",
")",
")",
"self",
".",
"dataset",
"=",
"dtoolcore",
".",
"DataSet",
".",
"from_uri",
"(",
"base_path",
")",
"admin_metadata_fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"\".dtool\"",
",",
"\"dtool\"",
")",
"with",
"open",
"(",
"admin_metadata_fpath",
")",
"as",
"fh",
":",
"admin_metadata",
"=",
"json",
".",
"load",
"(",
"fh",
")",
"http_manifest",
"=",
"{",
"\"admin_metadata\"",
":",
"admin_metadata",
",",
"\"manifest_url\"",
":",
"self",
".",
"generate_url",
"(",
"\".dtool/manifest.json\"",
")",
",",
"\"readme_url\"",
":",
"self",
".",
"generate_url",
"(",
"\"README.yml\"",
")",
",",
"\"overlays\"",
":",
"self",
".",
"generate_overlay_urls",
"(",
")",
",",
"\"item_urls\"",
":",
"self",
".",
"generate_item_urls",
"(",
")",
"}",
"return",
"bytes",
"(",
"json",
".",
"dumps",
"(",
"http_manifest",
")",
",",
"\"utf-8\"",
")"
] |
Check if we are on a Lyon machine | def we_are_in_lyon ( ) : import socket try : hostname = socket . gethostname ( ) ip = socket . gethostbyname ( hostname ) except socket . gaierror : return False return ip . startswith ( "134.158." ) | 9,949 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L62-L70 | [
"def",
"load",
"(",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"reader",
"=",
"T7Reader",
"(",
"f",
",",
"*",
"*",
"kwargs",
")",
"return",
"reader",
".",
"read_obj",
"(",
")"
] |
Create a DataFrame from CSV text | def read_csv ( text , sep = "\t" ) : import pandas as pd # no top level load to make a faster import of db return pd . read_csv ( StringIO ( text ) , sep = "\t" ) | 9,950 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L73-L76 | [
"def",
"build",
"(",
"port",
"=",
"8000",
",",
"fixtures",
"=",
"None",
")",
":",
"extractor",
"=",
"Extractor",
"(",
")",
"parser",
"=",
"Parser",
"(",
"extractor",
".",
"url_details",
",",
"fixtures",
")",
"parser",
".",
"parse",
"(",
")",
"url_details",
"=",
"parser",
".",
"results",
"_store",
"=",
"get_store",
"(",
"url_details",
")",
"store",
"=",
"json",
".",
"dumps",
"(",
"_store",
")",
"variables",
"=",
"str",
"(",
"Variable",
"(",
"'let'",
",",
"'store'",
",",
"store",
")",
")",
"functions",
"=",
"DATA_FINDER",
"+",
"GET_HANDLER",
"+",
"MODIFY_HANDLER",
"+",
"POST_HANDLER",
"endpoints",
"=",
"[",
"]",
"endpoint_uris",
"=",
"[",
"]",
"for",
"u",
"in",
"parser",
".",
"results",
":",
"endpoint",
"=",
"Endpoint",
"(",
")",
"if",
"u",
"[",
"'method'",
"]",
".",
"lower",
"(",
")",
"in",
"[",
"'get'",
",",
"'post'",
"]",
":",
"method",
"=",
"u",
"[",
"'method'",
"]",
".",
"lower",
"(",
")",
"else",
":",
"method",
"=",
"'modify'",
"response",
"=",
"str",
"(",
"ResponseBody",
"(",
"method",
")",
")",
"# Check in store if the base url has individual instances",
"u",
"[",
"'url'",
"]",
",",
"list_url",
"=",
"clean_url",
"(",
"u",
"[",
"'full_url'",
"]",
",",
"_store",
",",
"u",
"[",
"'method'",
"]",
".",
"lower",
"(",
")",
")",
"if",
"list_url",
"is",
"not",
"None",
"and",
"u",
"[",
"'method'",
"]",
".",
"lower",
"(",
")",
"==",
"'get'",
":",
"list_endpoint",
"=",
"Endpoint",
"(",
")",
"list_endpoint",
".",
"construct",
"(",
"'get'",
",",
"list_url",
",",
"response",
")",
"if",
"str",
"(",
"list_endpoint",
")",
"not",
"in",
"endpoints",
":",
"endpoints",
".",
"append",
"(",
"str",
"(",
"list_endpoint",
")",
")",
"if",
"list_endpoint",
".",
"uri",
"not",
"in",
"endpoint_uris",
":",
"endpoint_uris",
".",
"append",
"(",
"list_endpoint",
".",
"uri",
")",
"if",
"method",
"==",
"'modify'",
":",
"without_prefix",
"=",
"re",
".",
"sub",
"(",
"r'\\/(\\w+)\\_\\_'",
",",
"''",
",",
"u",
"[",
"'url'",
"]",
")",
"for",
"k",
",",
"v",
"in",
"_store",
".",
"items",
"(",
")",
":",
"if",
"without_prefix",
"in",
"k",
":",
"options",
"=",
"v",
".",
"get",
"(",
"'options'",
",",
"'{}'",
")",
"options",
"=",
"ast",
".",
"literal_eval",
"(",
"options",
")",
"modifiers",
"=",
"[",
"]",
"if",
"options",
"is",
"not",
"None",
":",
"modifiers",
"=",
"options",
".",
"get",
"(",
"'modifiers'",
",",
"[",
"]",
")",
"if",
"modifiers",
":",
"for",
"mod",
"in",
"modifiers",
":",
"if",
"u",
"[",
"'method'",
"]",
".",
"lower",
"(",
")",
"==",
"mod",
":",
"mod_endpoint",
"=",
"Endpoint",
"(",
")",
"uri",
"=",
"without_prefix",
"if",
"v",
".",
"get",
"(",
"'position'",
")",
"is",
"not",
"None",
"and",
"v",
"[",
"'position'",
"]",
"==",
"'url'",
":",
"uri",
"=",
"re",
".",
"sub",
"(",
"r'\\/?\\_\\_key'",
",",
"'/:id'",
",",
"u",
"[",
"'full_url'",
"]",
")",
"mod_endpoint",
".",
"construct",
"(",
"u",
"[",
"'method'",
"]",
".",
"lower",
"(",
")",
",",
"uri",
",",
"response",
")",
"if",
"str",
"(",
"mod_endpoint",
")",
"not",
"in",
"endpoints",
":",
"endpoints",
".",
"append",
"(",
"str",
"(",
"mod_endpoint",
")",
")",
"if",
"mod_endpoint",
".",
"uri",
"not",
"in",
"endpoint_uris",
":",
"endpoint_uris",
".",
"append",
"(",
"mod_endpoint",
".",
"uri",
")",
"else",
":",
"endpoint",
".",
"construct",
"(",
"u",
"[",
"'method'",
"]",
",",
"u",
"[",
"'url'",
"]",
",",
"response",
")",
"if",
"str",
"(",
"endpoint",
")",
"not",
"in",
"endpoints",
":",
"endpoints",
".",
"append",
"(",
"str",
"(",
"endpoint",
")",
")",
"if",
"endpoint",
".",
"uri",
"not",
"in",
"endpoint_uris",
":",
"endpoint_uris",
".",
"append",
"(",
"endpoint",
".",
"uri",
")",
"endpoints",
"=",
"''",
".",
"join",
"(",
"endpoints",
")",
"express",
"=",
"ExpressServer",
"(",
")",
"express",
".",
"construct",
"(",
"variables",
",",
"functions",
",",
"endpoints",
",",
"port",
")",
"return",
"express"
] |
Add an additional DATETIME column with standar datetime format . | def add_datetime ( dataframe , timestamp_key = 'UNIXTIME' ) : def convert_data ( timestamp ) : return datetime . fromtimestamp ( float ( timestamp ) / 1e3 , UTC_TZ ) try : log . debug ( "Adding DATETIME column to the data" ) converted = dataframe [ timestamp_key ] . apply ( convert_data ) dataframe [ 'DATETIME' ] = converted except KeyError : log . warning ( "Could not add DATETIME column" ) | 9,951 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L488-L502 | [
"def",
"clean",
"(",
"self",
")",
":",
"if",
"self",
".",
"_initialized",
":",
"logger",
".",
"info",
"(",
"\"brace yourselves, removing %r\"",
",",
"self",
".",
"path",
")",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"path",
")"
] |
Show AHRS calibration data for given clb_upi . | def show_ahrs_calibration ( clb_upi , version = '3' ) : db = DBManager ( ) ahrs_upi = clbupi2ahrsupi ( clb_upi ) print ( "AHRS UPI: {}" . format ( ahrs_upi ) ) content = db . _get_content ( "show_product_test.htm?upi={0}&" "testtype=AHRS-CALIBRATION-v{1}&n=1&out=xml" . format ( ahrs_upi , version ) ) . replace ( '\n' , '' ) import xml . etree . ElementTree as ET try : root = ET . parse ( io . StringIO ( content ) ) . getroot ( ) except ET . ParseError : print ( "No calibration data found" ) else : for child in root : print ( "{}: {}" . format ( child . tag , child . text ) ) names = [ c . text for c in root . findall ( ".//Name" ) ] values = [ [ i . text for i in c ] for c in root . findall ( ".//Values" ) ] for name , value in zip ( names , values ) : print ( "{}: {}" . format ( name , value ) ) | 9,952 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L826-L848 | [
"def",
"update_bookmark",
"(",
"self",
",",
"old",
",",
"new",
",",
"*",
",",
"max_retries",
"=",
"3",
")",
":",
"def",
"replace_bookmark",
"(",
"bookmarks",
",",
"old",
",",
"new",
")",
":",
"modified_bookmarks",
"=",
"list",
"(",
"bookmarks",
")",
"try",
":",
"i",
"=",
"bookmarks",
".",
"index",
"(",
"old",
")",
"modified_bookmarks",
"[",
"i",
"]",
"=",
"new",
"except",
"ValueError",
":",
"modified_bookmarks",
".",
"append",
"(",
"new",
")",
"return",
"modified_bookmarks",
"with",
"(",
"yield",
"from",
"self",
".",
"_lock",
")",
":",
"bookmarks",
"=",
"yield",
"from",
"self",
".",
"_get_bookmarks",
"(",
")",
"try",
":",
"yield",
"from",
"self",
".",
"_set_bookmarks",
"(",
"replace_bookmark",
"(",
"bookmarks",
",",
"old",
",",
"new",
")",
")",
"retries",
"=",
"0",
"bookmarks",
"=",
"yield",
"from",
"self",
".",
"_get_bookmarks",
"(",
")",
"while",
"retries",
"<",
"max_retries",
":",
"if",
"new",
"in",
"bookmarks",
":",
"break",
"yield",
"from",
"self",
".",
"_set_bookmarks",
"(",
"replace_bookmark",
"(",
"bookmarks",
",",
"old",
",",
"new",
")",
")",
"bookmarks",
"=",
"yield",
"from",
"self",
".",
"_get_bookmarks",
"(",
")",
"retries",
"+=",
"1",
"if",
"new",
"not",
"in",
"bookmarks",
":",
"raise",
"RuntimeError",
"(",
"\"Cold not update bookmark\"",
")",
"finally",
":",
"self",
".",
"_diff_emit_update",
"(",
"bookmarks",
")"
] |
Extract data from database | def _datalog ( self , parameter , run , maxrun , det_id ) : values = { 'parameter_name' : parameter , 'minrun' : run , 'maxrun' : maxrun , 'detid' : det_id , } data = urlencode ( values ) content = self . _get_content ( 'streamds/datalognumbers.txt?' + data ) if content . startswith ( 'ERROR' ) : log . error ( content ) return None try : dataframe = read_csv ( content ) except ValueError : log . warning ( "Empty dataset" ) # ...probably. Waiting for more info return make_empty_dataset ( ) else : add_datetime ( dataframe ) try : self . _add_converted_units ( dataframe , parameter ) except KeyError : log . warning ( "Could not add converted units for {0}" . format ( parameter ) ) return dataframe | 9,953 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L134-L162 | [
"def",
"method_file_check",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"curr_dir",
"=",
"ROOT",
".",
"gDirectory",
"if",
"isinstance",
"(",
"curr_dir",
",",
"ROOT",
".",
"TROOT",
")",
"or",
"not",
"curr_dir",
":",
"raise",
"RuntimeError",
"(",
"\"You must first create a File before calling {0}.{1}\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"_get_qualified_name",
"(",
"f",
")",
")",
")",
"if",
"not",
"curr_dir",
".",
"IsWritable",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Calling {0}.{1} requires that the \"",
"\"current File is writable\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"_get_qualified_name",
"(",
"f",
")",
")",
")",
"return",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] |
Add an additional DATA_VALUE column with converted VALUEs | def _add_converted_units ( self , dataframe , parameter , key = 'VALUE' ) : convert_unit = self . parameters . get_converter ( parameter ) try : log . debug ( "Adding unit converted DATA_VALUE to the data" ) dataframe [ key ] = dataframe [ 'DATA_VALUE' ] . apply ( convert_unit ) except KeyError : log . warning ( "Missing 'VALUE': no unit conversion." ) else : dataframe . unit = self . parameters . unit ( parameter ) | 9,954 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L178-L187 | [
"def",
"write_lockfile",
"(",
"self",
",",
"content",
")",
":",
"s",
"=",
"self",
".",
"_lockfile_encoder",
".",
"encode",
"(",
"content",
")",
"open_kwargs",
"=",
"{",
"\"newline\"",
":",
"self",
".",
"_lockfile_newlines",
",",
"\"encoding\"",
":",
"\"utf-8\"",
"}",
"with",
"vistir",
".",
"contextmanagers",
".",
"atomic_open_for_write",
"(",
"self",
".",
"lockfile_location",
",",
"*",
"*",
"open_kwargs",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"s",
")",
"# Write newline at end of document. GH-319.",
"# Only need '\\n' here; the file object handles the rest.",
"if",
"not",
"s",
".",
"endswith",
"(",
"u\"\\n\"",
")",
":",
"f",
".",
"write",
"(",
"u\"\\n\"",
")"
] |
Convert det ID or OID to det ID | def to_det_id ( self , det_id_or_det_oid ) : try : int ( det_id_or_det_oid ) except ValueError : return self . get_det_id ( det_id_or_det_oid ) else : return det_id_or_det_oid | 9,955 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L223-L230 | [
"def",
"_get_session",
"(",
"server",
")",
":",
"if",
"server",
"in",
"_sessions",
":",
"return",
"_sessions",
"[",
"server",
"]",
"config",
"=",
"_get_spacewalk_configuration",
"(",
"server",
")",
"if",
"not",
"config",
":",
"raise",
"Exception",
"(",
"'No config for \\'{0}\\' found on master'",
".",
"format",
"(",
"server",
")",
")",
"session",
"=",
"_get_client_and_key",
"(",
"config",
"[",
"'api_url'",
"]",
",",
"config",
"[",
"'username'",
"]",
",",
"config",
"[",
"'password'",
"]",
")",
"atexit",
".",
"register",
"(",
"_disconnect_session",
",",
"session",
")",
"client",
"=",
"session",
"[",
"'client'",
"]",
"key",
"=",
"session",
"[",
"'key'",
"]",
"_sessions",
"[",
"server",
"]",
"=",
"(",
"client",
",",
"key",
")",
"return",
"client",
",",
"key"
] |
Convert det OID or ID to det OID | def to_det_oid ( self , det_id_or_det_oid ) : try : int ( det_id_or_det_oid ) except ValueError : return det_id_or_det_oid else : return self . get_det_oid ( det_id_or_det_oid ) | 9,956 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L232-L239 | [
"def",
"_get_session",
"(",
"server",
")",
":",
"if",
"server",
"in",
"_sessions",
":",
"return",
"_sessions",
"[",
"server",
"]",
"config",
"=",
"_get_spacewalk_configuration",
"(",
"server",
")",
"if",
"not",
"config",
":",
"raise",
"Exception",
"(",
"'No config for \\'{0}\\' found on master'",
".",
"format",
"(",
"server",
")",
")",
"session",
"=",
"_get_client_and_key",
"(",
"config",
"[",
"'api_url'",
"]",
",",
"config",
"[",
"'username'",
"]",
",",
"config",
"[",
"'password'",
"]",
")",
"atexit",
".",
"register",
"(",
"_disconnect_session",
",",
"session",
")",
"client",
"=",
"session",
"[",
"'client'",
"]",
"key",
"=",
"session",
"[",
"'key'",
"]",
"_sessions",
"[",
"server",
"]",
"=",
"(",
"client",
",",
"key",
")",
"return",
"client",
",",
"key"
] |
Retrieve a list of available parameters from the database | def _load_parameters ( self ) : parameters = self . _get_json ( 'allparam/s' ) data = { } for parameter in parameters : # There is a case-chaos in the DB data [ parameter [ 'Name' ] . lower ( ) ] = parameter self . _parameters = ParametersContainer ( data ) | 9,957 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L248-L254 | [
"def",
"from_rep",
"(",
"u",
")",
":",
"if",
"isinstance",
"(",
"u",
",",
"pyversion",
".",
"string_types",
")",
":",
"return",
"uuid",
".",
"UUID",
"(",
"u",
")",
"# hack to remove signs",
"a",
"=",
"ctypes",
".",
"c_ulong",
"(",
"u",
"[",
"0",
"]",
")",
"b",
"=",
"ctypes",
".",
"c_ulong",
"(",
"u",
"[",
"1",
"]",
")",
"combined",
"=",
"a",
".",
"value",
"<<",
"64",
"|",
"b",
".",
"value",
"return",
"uuid",
".",
"UUID",
"(",
"int",
"=",
"combined",
")"
] |
Retrieve the trigger setup for a given runsetup OID | def trigger_setup ( self , runsetup_oid ) : r = self . _get_content ( "jsonds/rslite/s?rs_oid={}&upifilter=1.1.2.2.3/*" . format ( runsetup_oid ) ) data = json . loads ( r ) [ 'Data' ] if not data : log . error ( "Empty dataset." ) return raw_setup = data [ 0 ] det_id = raw_setup [ 'DetID' ] name = raw_setup [ 'Name' ] description = raw_setup [ 'Desc' ] _optical_df = raw_setup [ 'ConfGroups' ] [ 0 ] optical_df = { 'Name' : _optical_df [ 'Name' ] , 'Desc' : _optical_df [ 'Desc' ] } for param in _optical_df [ 'Params' ] : pname = self . parameters . oid2name ( param [ 'OID' ] ) . replace ( 'DAQ_' , '' ) try : dtype = float if '.' in param [ 'Val' ] else int val = dtype ( param [ 'Val' ] ) except ValueError : val = param [ 'Val' ] optical_df [ pname ] = val _acoustic_df = raw_setup [ 'ConfGroups' ] [ 1 ] acoustic_df = { 'Name' : _acoustic_df [ 'Name' ] , 'Desc' : _acoustic_df [ 'Desc' ] } for param in _acoustic_df [ 'Params' ] : pname = self . parameters . oid2name ( param [ 'OID' ] ) . replace ( 'DAQ_' , '' ) try : dtype = float if '.' in param [ 'Val' ] else int val = dtype ( param [ 'Val' ] ) except ValueError : val = param [ 'Val' ] acoustic_df [ pname ] = val return TriggerSetup ( runsetup_oid , name , det_id , description , optical_df , acoustic_df ) | 9,958 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L256-L298 | [
"def",
"read_data",
"(",
"self",
",",
"blocksize",
"=",
"4096",
")",
":",
"frames",
"=",
"ctypes",
".",
"c_uint",
"(",
"blocksize",
"//",
"self",
".",
"_client_fmt",
".",
"mBytesPerFrame",
")",
"buf",
"=",
"ctypes",
".",
"create_string_buffer",
"(",
"blocksize",
")",
"buflist",
"=",
"AudioBufferList",
"(",
")",
"buflist",
".",
"mNumberBuffers",
"=",
"1",
"buflist",
".",
"mBuffers",
"[",
"0",
"]",
".",
"mNumberChannels",
"=",
"self",
".",
"_client_fmt",
".",
"mChannelsPerFrame",
"buflist",
".",
"mBuffers",
"[",
"0",
"]",
".",
"mDataByteSize",
"=",
"blocksize",
"buflist",
".",
"mBuffers",
"[",
"0",
"]",
".",
"mData",
"=",
"ctypes",
".",
"cast",
"(",
"buf",
",",
"ctypes",
".",
"c_void_p",
")",
"while",
"True",
":",
"check",
"(",
"_coreaudio",
".",
"ExtAudioFileRead",
"(",
"self",
".",
"_obj",
",",
"ctypes",
".",
"byref",
"(",
"frames",
")",
",",
"ctypes",
".",
"byref",
"(",
"buflist",
")",
")",
")",
"assert",
"buflist",
".",
"mNumberBuffers",
"==",
"1",
"size",
"=",
"buflist",
".",
"mBuffers",
"[",
"0",
"]",
".",
"mDataByteSize",
"if",
"not",
"size",
":",
"break",
"data",
"=",
"ctypes",
".",
"cast",
"(",
"buflist",
".",
"mBuffers",
"[",
"0",
"]",
".",
"mData",
",",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char",
")",
")",
"blob",
"=",
"data",
"[",
":",
"size",
"]",
"yield",
"blob"
] |
Retrieve the detector file for given detector id | def detx ( self , det_id , t0set = None , calibration = None ) : url = 'detx/{0}?' . format ( det_id ) # '?' since it's ignored if no args if t0set is not None : url += '&t0set=' + t0set if calibration is not None : url += '&calibrid=' + calibration detx = self . _get_content ( url ) return detx | 9,959 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L311-L323 | [
"def",
"connection",
"(",
"self",
",",
"shareable",
"=",
"False",
")",
":",
"try",
":",
"con",
"=",
"self",
".",
"thread",
".",
"connection",
"except",
"AttributeError",
":",
"con",
"=",
"self",
".",
"steady_connection",
"(",
")",
"if",
"not",
"con",
".",
"threadsafety",
"(",
")",
":",
"raise",
"NotSupportedError",
"(",
"\"Database module is not thread-safe.\"",
")",
"self",
".",
"thread",
".",
"connection",
"=",
"con",
"con",
".",
"_ping_check",
"(",
")",
"return",
"con"
] |
Get JSON - type content | def _get_json ( self , url ) : content = self . _get_content ( 'jsonds/' + url ) try : json_content = json . loads ( content . decode ( ) ) except AttributeError : json_content = json . loads ( content ) if json_content [ 'Comment' ] : log . warning ( json_content [ 'Comment' ] ) if json_content [ 'Result' ] != 'OK' : raise ValueError ( 'Error while retrieving the parameter list.' ) return json_content [ 'Data' ] | 9,960 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L356-L367 | [
"def",
"catalogFactory",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"fn",
"=",
"lambda",
"member",
":",
"inspect",
".",
"isclass",
"(",
"member",
")",
"and",
"member",
".",
"__module__",
"==",
"__name__",
"catalogs",
"=",
"odict",
"(",
"inspect",
".",
"getmembers",
"(",
"sys",
".",
"modules",
"[",
"__name__",
"]",
",",
"fn",
")",
")",
"if",
"name",
"not",
"in",
"list",
"(",
"catalogs",
".",
"keys",
"(",
")",
")",
":",
"msg",
"=",
"\"%s not found in catalogs:\\n %s\"",
"%",
"(",
"name",
",",
"list",
"(",
"kernels",
".",
"keys",
"(",
")",
")",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"msg",
"=",
"\"Unrecognized catalog: %s\"",
"%",
"name",
"raise",
"Exception",
"(",
"msg",
")",
"return",
"catalogs",
"[",
"name",
"]",
"(",
"*",
"*",
"kwargs",
")"
] |
Get HTML content | def _get_content ( self , url ) : target_url = self . _db_url + '/' + unquote ( url ) # .encode('utf-8')) log . debug ( "Opening '{0}'" . format ( target_url ) ) try : f = self . opener . open ( target_url ) except HTTPError as e : log . error ( "HTTP error, your session may be expired." ) log . error ( e ) if input ( "Request new permanent session and retry? (y/n)" ) in 'yY' : self . request_permanent_session ( ) return self . _get_content ( url ) else : return None log . debug ( "Accessing '{0}'" . format ( target_url ) ) try : content = f . read ( ) except IncompleteRead as icread : log . critical ( "Incomplete data received from the DB, " + "the data could be corrupted." ) content = icread . partial log . debug ( "Got {0} bytes of data." . format ( len ( content ) ) ) return content . decode ( 'utf-8' ) | 9,961 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L369-L393 | [
"def",
"signalize_extensions",
"(",
")",
":",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.rownumber used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension connection.<exception> used\"",
",",
"SalesforceWarning",
")",
"# TODO",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.connection used\"",
",",
"SalesforceWarning",
")",
"# not implemented DB-API extension cursor.scroll(, SalesforceWarning)",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.messages used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension connection.messages used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.next(, SalesforceWarning) used\"",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.__iter__(, SalesforceWarning) used\"",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.lastrowid used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension .errorhandler used\"",
",",
"SalesforceWarning",
")"
] |
A reusable connection manager | def opener ( self ) : if self . _opener is None : log . debug ( "Creating connection handler" ) opener = build_opener ( ) if self . _cookies : log . debug ( "Appending cookies" ) else : log . debug ( "No cookies to append" ) for cookie in self . _cookies : cookie_str = cookie . name + '=' + cookie . value opener . addheaders . append ( ( 'Cookie' , cookie_str ) ) self . _opener = opener else : log . debug ( "Reusing connection manager" ) return self . _opener | 9,962 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L396-L411 | [
"def",
"populateFromDirectory",
"(",
"self",
",",
"vcfDirectory",
")",
":",
"pattern",
"=",
"os",
".",
"path",
".",
"join",
"(",
"vcfDirectory",
",",
"\"*.vcf.gz\"",
")",
"dataFiles",
"=",
"[",
"]",
"indexFiles",
"=",
"[",
"]",
"for",
"vcfFile",
"in",
"glob",
".",
"glob",
"(",
"pattern",
")",
":",
"dataFiles",
".",
"append",
"(",
"vcfFile",
")",
"indexFiles",
".",
"append",
"(",
"vcfFile",
"+",
"\".tbi\"",
")",
"self",
".",
"populateFromFile",
"(",
"dataFiles",
",",
"indexFiles",
")"
] |
Request cookie for permanent session token . | def request_sid_cookie ( self , username , password ) : log . debug ( "Requesting SID cookie" ) target_url = self . _login_url + '?usr={0}&pwd={1}&persist=y' . format ( username , password ) cookie = urlopen ( target_url ) . read ( ) return cookie | 9,963 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L413-L420 | [
"def",
"auto_decompress_stream",
"(",
"src",
")",
":",
"block",
"=",
"next",
"(",
"src",
")",
"compression",
"=",
"guess_compression",
"(",
"block",
")",
"if",
"compression",
"==",
"'bz2'",
":",
"src",
"=",
"bz2_decompress_stream",
"(",
"chain",
"(",
"[",
"block",
"]",
",",
"src",
")",
")",
"elif",
"compression",
"==",
"'xz'",
":",
"src",
"=",
"xz_decompress_stream",
"(",
"chain",
"(",
"[",
"block",
"]",
",",
"src",
")",
")",
"else",
":",
"src",
"=",
"chain",
"(",
"[",
"block",
"]",
",",
"src",
")",
"for",
"block",
"in",
"src",
":",
"yield",
"block"
] |
Establish databse connection using permanent session cookie | def restore_session ( self , cookie ) : log . debug ( "Restoring session from cookie: {}" . format ( cookie ) ) opener = build_opener ( ) opener . addheaders . append ( ( 'Cookie' , cookie ) ) self . _opener = opener | 9,964 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L422-L427 | [
"def",
"ttm",
"(",
"self",
",",
"V",
",",
"mode",
"=",
"None",
",",
"transp",
"=",
"False",
",",
"without",
"=",
"False",
")",
":",
"if",
"mode",
"is",
"None",
":",
"mode",
"=",
"range",
"(",
"self",
".",
"ndim",
")",
"if",
"isinstance",
"(",
"V",
",",
"np",
".",
"ndarray",
")",
":",
"Y",
"=",
"self",
".",
"_ttm_compute",
"(",
"V",
",",
"mode",
",",
"transp",
")",
"elif",
"is_sequence",
"(",
"V",
")",
":",
"dims",
",",
"vidx",
"=",
"check_multiplication_dims",
"(",
"mode",
",",
"self",
".",
"ndim",
",",
"len",
"(",
"V",
")",
",",
"vidx",
"=",
"True",
",",
"without",
"=",
"without",
")",
"Y",
"=",
"self",
".",
"_ttm_compute",
"(",
"V",
"[",
"vidx",
"[",
"0",
"]",
"]",
",",
"dims",
"[",
"0",
"]",
",",
"transp",
")",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"len",
"(",
"dims",
")",
")",
":",
"Y",
"=",
"Y",
".",
"_ttm_compute",
"(",
"V",
"[",
"vidx",
"[",
"i",
"]",
"]",
",",
"dims",
"[",
"i",
"]",
",",
"transp",
")",
"return",
"Y"
] |
Login to the database and store cookies for upcoming requests . | def login ( self , username , password ) : log . debug ( "Logging in to the DB" ) opener = self . _build_opener ( ) values = { 'usr' : username , 'pwd' : password } req = self . _make_request ( self . _login_url , values ) try : log . debug ( "Sending login request" ) f = opener . open ( req ) except URLError as e : log . error ( "Failed to connect to the database -> probably down!" ) log . error ( "Error from database server:\n {0}" . format ( e ) ) return False html = f . read ( ) failed_auth_message = 'Bad username or password' if failed_auth_message in str ( html ) : log . error ( failed_auth_message ) return False return True | 9,965 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L449-L467 | [
"def",
"assert_raises_regex",
"(",
"exception",
",",
"regex",
",",
"msg_fmt",
"=",
"\"{msg}\"",
")",
":",
"def",
"test",
"(",
"exc",
")",
":",
"compiled",
"=",
"re",
".",
"compile",
"(",
"regex",
")",
"if",
"not",
"exc",
".",
"args",
":",
"msg",
"=",
"\"{} without message\"",
".",
"format",
"(",
"exception",
".",
"__name__",
")",
"fail",
"(",
"msg_fmt",
".",
"format",
"(",
"msg",
"=",
"msg",
",",
"text",
"=",
"None",
",",
"pattern",
"=",
"compiled",
".",
"pattern",
",",
"exc_type",
"=",
"exception",
",",
"exc_name",
"=",
"exception",
".",
"__name__",
",",
")",
")",
"text",
"=",
"exc",
".",
"args",
"[",
"0",
"]",
"if",
"not",
"compiled",
".",
"search",
"(",
"text",
")",
":",
"msg",
"=",
"\"{!r} does not match {!r}\"",
".",
"format",
"(",
"text",
",",
"compiled",
".",
"pattern",
")",
"fail",
"(",
"msg_fmt",
".",
"format",
"(",
"msg",
"=",
"msg",
",",
"text",
"=",
"text",
",",
"pattern",
"=",
"compiled",
".",
"pattern",
",",
"exc_type",
"=",
"exception",
",",
"exc_name",
"=",
"exception",
".",
"__name__",
",",
")",
")",
"context",
"=",
"AssertRaisesRegexContext",
"(",
"exception",
",",
"regex",
",",
"msg_fmt",
")",
"context",
".",
"add_test",
"(",
"test",
")",
"return",
"context"
] |
Update the list of available straems | def _update_streams ( self ) : content = self . _db . _get_content ( "streamds" ) self . _stream_df = read_csv ( content ) . sort_values ( "STREAM" ) self . _streams = None for stream in self . streams : setattr ( self , stream , self . __getattr__ ( stream ) ) | 9,966 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L517-L523 | [
"def",
"start_processing_handler",
"(",
"self",
",",
"event",
")",
":",
"results_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"configuration",
"[",
"'results_folder'",
"]",
",",
"\"filesystem.json\"",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Event %s: start comparing %s with %s.\"",
",",
"event",
",",
"self",
".",
"checkpoints",
"[",
"0",
"]",
",",
"self",
".",
"checkpoints",
"[",
"1",
"]",
")",
"results",
"=",
"compare_disks",
"(",
"self",
".",
"checkpoints",
"[",
"0",
"]",
",",
"self",
".",
"checkpoints",
"[",
"1",
"]",
",",
"self",
".",
"configuration",
")",
"with",
"open",
"(",
"results_path",
",",
"'w'",
")",
"as",
"results_file",
":",
"json",
".",
"dump",
"(",
"results",
",",
"results_file",
")",
"self",
".",
"processing_done",
".",
"set",
"(",
")"
] |
A list of available streams | def streams ( self ) : if self . _streams is None : self . _streams = list ( self . _stream_df [ "STREAM" ] . values ) return self . _streams | 9,967 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L553-L557 | [
"def",
"_create_update_tracking_event",
"(",
"instance",
")",
":",
"event",
"=",
"_create_event",
"(",
"instance",
",",
"UPDATE",
")",
"for",
"field",
"in",
"instance",
".",
"_tracked_fields",
":",
"if",
"not",
"isinstance",
"(",
"instance",
".",
"_meta",
".",
"get_field",
"(",
"field",
")",
",",
"ManyToManyField",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"instance",
".",
"_meta",
".",
"get_field",
"(",
"field",
")",
",",
"ForeignKey",
")",
":",
"# Compare pk",
"value",
"=",
"getattr",
"(",
"instance",
",",
"'{0}_id'",
".",
"format",
"(",
"field",
")",
")",
"else",
":",
"value",
"=",
"getattr",
"(",
"instance",
",",
"field",
")",
"if",
"instance",
".",
"_original_fields",
"[",
"field",
"]",
"!=",
"value",
":",
"_create_tracked_field",
"(",
"event",
",",
"instance",
",",
"field",
")",
"except",
"TypeError",
":",
"# Can't compare old and new value, should be different.",
"_create_tracked_field",
"(",
"event",
",",
"instance",
",",
"field",
")"
] |
Show the help for a given stream . | def help ( self , stream ) : if stream not in self . streams : log . error ( "Stream '{}' not found in the database." . format ( stream ) ) params = self . _stream_df [ self . _stream_df [ 'STREAM' ] == stream ] . values [ 0 ] self . _print_stream_parameters ( params ) | 9,968 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L574-L579 | [
"def",
"_create_task",
"(",
"self",
",",
"task_info",
",",
"config_string",
",",
"sync_map_root_directory",
",",
"job_os_hierarchy_type",
")",
":",
"self",
".",
"log",
"(",
"u\"Converting config string to config dict\"",
")",
"parameters",
"=",
"gf",
".",
"config_string_to_dict",
"(",
"config_string",
")",
"self",
".",
"log",
"(",
"u\"Creating task\"",
")",
"task",
"=",
"Task",
"(",
"config_string",
",",
"logger",
"=",
"self",
".",
"logger",
")",
"task",
".",
"configuration",
"[",
"\"description\"",
"]",
"=",
"\"Task %s\"",
"%",
"task_info",
"[",
"0",
"]",
"self",
".",
"log",
"(",
"[",
"u\"Task description: %s\"",
",",
"task",
".",
"configuration",
"[",
"\"description\"",
"]",
"]",
")",
"try",
":",
"task",
".",
"configuration",
"[",
"\"language\"",
"]",
"=",
"parameters",
"[",
"gc",
".",
"PPN_TASK_LANGUAGE",
"]",
"self",
".",
"log",
"(",
"[",
"u\"Set language from task: '%s'\"",
",",
"task",
".",
"configuration",
"[",
"\"language\"",
"]",
"]",
")",
"except",
"KeyError",
":",
"task",
".",
"configuration",
"[",
"\"language\"",
"]",
"=",
"parameters",
"[",
"gc",
".",
"PPN_JOB_LANGUAGE",
"]",
"self",
".",
"log",
"(",
"[",
"u\"Set language from job: '%s'\"",
",",
"task",
".",
"configuration",
"[",
"\"language\"",
"]",
"]",
")",
"custom_id",
"=",
"task_info",
"[",
"0",
"]",
"task",
".",
"configuration",
"[",
"\"custom_id\"",
"]",
"=",
"custom_id",
"self",
".",
"log",
"(",
"[",
"u\"Task custom_id: %s\"",
",",
"task",
".",
"configuration",
"[",
"\"custom_id\"",
"]",
"]",
")",
"task",
".",
"text_file_path",
"=",
"task_info",
"[",
"1",
"]",
"self",
".",
"log",
"(",
"[",
"u\"Task text file path: %s\"",
",",
"task",
".",
"text_file_path",
"]",
")",
"task",
".",
"audio_file_path",
"=",
"task_info",
"[",
"2",
"]",
"self",
".",
"log",
"(",
"[",
"u\"Task audio file path: %s\"",
",",
"task",
".",
"audio_file_path",
"]",
")",
"task",
".",
"sync_map_file_path",
"=",
"self",
".",
"_compute_sync_map_file_path",
"(",
"sync_map_root_directory",
",",
"job_os_hierarchy_type",
",",
"custom_id",
",",
"task",
".",
"configuration",
"[",
"\"o_name\"",
"]",
")",
"self",
".",
"log",
"(",
"[",
"u\"Task sync map file path: %s\"",
",",
"task",
".",
"sync_map_file_path",
"]",
")",
"self",
".",
"log",
"(",
"u\"Replacing placeholder in os_file_smil_audio_ref\"",
")",
"task",
".",
"configuration",
"[",
"\"o_smil_audio_ref\"",
"]",
"=",
"self",
".",
"_replace_placeholder",
"(",
"task",
".",
"configuration",
"[",
"\"o_smil_audio_ref\"",
"]",
",",
"custom_id",
")",
"self",
".",
"log",
"(",
"u\"Replacing placeholder in os_file_smil_page_ref\"",
")",
"task",
".",
"configuration",
"[",
"\"o_smil_page_ref\"",
"]",
"=",
"self",
".",
"_replace_placeholder",
"(",
"task",
".",
"configuration",
"[",
"\"o_smil_page_ref\"",
"]",
",",
"custom_id",
")",
"self",
".",
"log",
"(",
"u\"Returning task\"",
")",
"return",
"task"
] |
Print a coloured help for a given tuple of stream parameters . | def _print_stream_parameters ( self , values ) : cprint ( "{0}" . format ( * values ) , "magenta" , attrs = [ "bold" ] ) print ( "{4}" . format ( * values ) ) cprint ( " available formats: {1}" . format ( * values ) , "blue" ) cprint ( " mandatory selectors: {2}" . format ( * values ) , "red" ) cprint ( " optional selectors: {3}" . format ( * values ) , "green" ) print ( ) | 9,969 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L586-L593 | [
"def",
"_file_nums_str",
"(",
"self",
",",
"n_all",
",",
"n_type",
",",
"n_ign",
")",
":",
"# 'other' is the difference between all and named",
"n_oth",
"=",
"n_all",
"-",
"np",
".",
"sum",
"(",
"n_type",
")",
"f_str",
"=",
"\"{} Files\"",
".",
"format",
"(",
"n_all",
")",
"+",
"\" (\"",
"if",
"len",
"(",
"n_type",
")",
":",
"f_str",
"+=",
"\", \"",
".",
"join",
"(",
"\"{} {}\"",
".",
"format",
"(",
"name",
",",
"num",
")",
"for",
"name",
",",
"num",
"in",
"zip",
"(",
"self",
".",
"_COUNT_FILE_TYPES",
",",
"n_type",
")",
")",
"f_str",
"+=",
"\", \"",
"f_str",
"+=",
"\"other {}; {} ignored)\"",
".",
"format",
"(",
"n_oth",
",",
"n_ign",
")",
"return",
"f_str"
] |
Get the data for a given stream manually | def get ( self , stream , fmt = 'txt' , * * kwargs ) : sel = '' . join ( [ "&{0}={1}" . format ( k , v ) for ( k , v ) in kwargs . items ( ) ] ) url = "streamds/{0}.{1}?{2}" . format ( stream , fmt , sel [ 1 : ] ) data = self . _db . _get_content ( url ) if not data : log . error ( "No data found at URL '%s'." % url ) return if ( data . startswith ( "ERROR" ) ) : log . error ( data ) return if fmt == "txt" : return read_csv ( data ) return data | 9,970 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L595-L608 | [
"def",
"actually_mount",
"(",
"self",
",",
"client",
")",
":",
"a_obj",
"=",
"self",
".",
"config",
".",
"copy",
"(",
")",
"if",
"'description'",
"in",
"a_obj",
":",
"del",
"a_obj",
"[",
"'description'",
"]",
"try",
":",
"m_fun",
"=",
"getattr",
"(",
"client",
",",
"self",
".",
"mount_fun",
")",
"if",
"self",
".",
"description",
"and",
"a_obj",
":",
"m_fun",
"(",
"self",
".",
"backend",
",",
"mount_point",
"=",
"self",
".",
"path",
",",
"description",
"=",
"self",
".",
"description",
",",
"config",
"=",
"a_obj",
")",
"elif",
"self",
".",
"description",
":",
"m_fun",
"(",
"self",
".",
"backend",
",",
"mount_point",
"=",
"self",
".",
"path",
",",
"description",
"=",
"self",
".",
"description",
")",
"elif",
"a_obj",
":",
"m_fun",
"(",
"self",
".",
"backend",
",",
"mount_point",
"=",
"self",
".",
"path",
",",
"config",
"=",
"a_obj",
")",
"else",
":",
"m_fun",
"(",
"self",
".",
"backend",
",",
"mount_point",
"=",
"self",
".",
"path",
")",
"except",
"hvac",
".",
"exceptions",
".",
"InvalidRequest",
"as",
"exception",
":",
"match",
"=",
"re",
".",
"match",
"(",
"'existing mount at (?P<path>.+)'",
",",
"str",
"(",
"exception",
")",
")",
"if",
"match",
":",
"e_msg",
"=",
"\"%s has a mountpoint conflict with %s\"",
"%",
"(",
"self",
".",
"path",
",",
"match",
".",
"group",
"(",
"'path'",
")",
")",
"raise",
"aomi_excep",
".",
"VaultConstraint",
"(",
"e_msg",
")",
"else",
":",
"raise"
] |
Return a dict for given parameter | def get_parameter ( self , parameter ) : parameter = self . _get_parameter_name ( parameter ) return self . _parameters [ parameter ] | 9,971 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L624-L627 | [
"def",
"face",
"(",
"sign",
",",
"lon",
")",
":",
"faces",
"=",
"FACES",
"[",
"sign",
"]",
"if",
"lon",
"<",
"10",
":",
"return",
"faces",
"[",
"0",
"]",
"elif",
"lon",
"<",
"20",
":",
"return",
"faces",
"[",
"1",
"]",
"else",
":",
"return",
"faces",
"[",
"2",
"]"
] |
Generate unit conversion function for given parameter | def get_converter ( self , parameter ) : if parameter not in self . _converters : param = self . get_parameter ( parameter ) try : scale = float ( param [ 'Scale' ] ) except KeyError : scale = 1 def convert ( value ) : # easy_scale = float(param['EasyScale']) # easy_scale_multiplier = float(param['EasyScaleMultiplier']) return value * scale return convert | 9,972 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L629-L643 | [
"def",
"_AlignDecryptedDataOffset",
"(",
"self",
",",
"decrypted_data_offset",
")",
":",
"self",
".",
"_file_object",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_SET",
")",
"self",
".",
"_decrypter",
"=",
"self",
".",
"_GetDecrypter",
"(",
")",
"self",
".",
"_decrypted_data",
"=",
"b''",
"encrypted_data_offset",
"=",
"0",
"encrypted_data_size",
"=",
"self",
".",
"_file_object",
".",
"get_size",
"(",
")",
"while",
"encrypted_data_offset",
"<",
"encrypted_data_size",
":",
"read_count",
"=",
"self",
".",
"_ReadEncryptedData",
"(",
"self",
".",
"_ENCRYPTED_DATA_BUFFER_SIZE",
")",
"if",
"read_count",
"==",
"0",
":",
"break",
"encrypted_data_offset",
"+=",
"read_count",
"if",
"decrypted_data_offset",
"<",
"self",
".",
"_decrypted_data_size",
":",
"self",
".",
"_decrypted_data_offset",
"=",
"decrypted_data_offset",
"break",
"decrypted_data_offset",
"-=",
"self",
".",
"_decrypted_data_size"
] |
Get the unit for given parameter | def unit ( self , parameter ) : parameter = self . _get_parameter_name ( parameter ) . lower ( ) return self . _parameters [ parameter ] [ 'Unit' ] | 9,973 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L645-L648 | [
"def",
"_series_imagej",
"(",
"self",
")",
":",
"# ImageJ's dimension order is always TZCYXS",
"# TODO: fix loading of color, composite, or palette images",
"pages",
"=",
"self",
".",
"pages",
"pages",
".",
"useframes",
"=",
"True",
"pages",
".",
"keyframe",
"=",
"0",
"page",
"=",
"pages",
"[",
"0",
"]",
"ij",
"=",
"self",
".",
"imagej_metadata",
"def",
"is_hyperstack",
"(",
")",
":",
"# ImageJ hyperstack store all image metadata in the first page and",
"# image data are stored contiguously before the second page, if any",
"if",
"not",
"page",
".",
"is_final",
":",
"return",
"False",
"images",
"=",
"ij",
".",
"get",
"(",
"'images'",
",",
"0",
")",
"if",
"images",
"<=",
"1",
":",
"return",
"False",
"offset",
",",
"count",
"=",
"page",
".",
"is_contiguous",
"if",
"(",
"count",
"!=",
"product",
"(",
"page",
".",
"shape",
")",
"*",
"page",
".",
"bitspersample",
"//",
"8",
"or",
"offset",
"+",
"count",
"*",
"images",
">",
"self",
".",
"filehandle",
".",
"size",
")",
":",
"raise",
"ValueError",
"(",
")",
"# check that next page is stored after data",
"if",
"len",
"(",
"pages",
")",
">",
"1",
"and",
"offset",
"+",
"count",
"*",
"images",
">",
"pages",
"[",
"1",
"]",
".",
"offset",
":",
"return",
"False",
"return",
"True",
"try",
":",
"hyperstack",
"=",
"is_hyperstack",
"(",
")",
"except",
"ValueError",
":",
"log",
".",
"warning",
"(",
"'ImageJ series: invalid metadata or corrupted file'",
")",
"return",
"None",
"if",
"hyperstack",
":",
"# no need to read other pages",
"pages",
"=",
"[",
"page",
"]",
"else",
":",
"pages",
"=",
"pages",
"[",
":",
"]",
"shape",
"=",
"[",
"]",
"axes",
"=",
"[",
"]",
"if",
"'frames'",
"in",
"ij",
":",
"shape",
".",
"append",
"(",
"ij",
"[",
"'frames'",
"]",
")",
"axes",
".",
"append",
"(",
"'T'",
")",
"if",
"'slices'",
"in",
"ij",
":",
"shape",
".",
"append",
"(",
"ij",
"[",
"'slices'",
"]",
")",
"axes",
".",
"append",
"(",
"'Z'",
")",
"if",
"'channels'",
"in",
"ij",
"and",
"not",
"(",
"page",
".",
"photometric",
"==",
"2",
"and",
"not",
"ij",
".",
"get",
"(",
"'hyperstack'",
",",
"False",
")",
")",
":",
"shape",
".",
"append",
"(",
"ij",
"[",
"'channels'",
"]",
")",
"axes",
".",
"append",
"(",
"'C'",
")",
"remain",
"=",
"ij",
".",
"get",
"(",
"'images'",
",",
"len",
"(",
"pages",
")",
")",
"//",
"(",
"product",
"(",
"shape",
")",
"if",
"shape",
"else",
"1",
")",
"if",
"remain",
">",
"1",
":",
"shape",
".",
"append",
"(",
"remain",
")",
"axes",
".",
"append",
"(",
"'I'",
")",
"if",
"page",
".",
"axes",
"[",
"0",
"]",
"==",
"'I'",
":",
"# contiguous multiple images",
"shape",
".",
"extend",
"(",
"page",
".",
"shape",
"[",
"1",
":",
"]",
")",
"axes",
".",
"extend",
"(",
"page",
".",
"axes",
"[",
"1",
":",
"]",
")",
"elif",
"page",
".",
"axes",
"[",
":",
"2",
"]",
"==",
"'SI'",
":",
"# color-mapped contiguous multiple images",
"shape",
"=",
"page",
".",
"shape",
"[",
"0",
":",
"1",
"]",
"+",
"tuple",
"(",
"shape",
")",
"+",
"page",
".",
"shape",
"[",
"2",
":",
"]",
"axes",
"=",
"list",
"(",
"page",
".",
"axes",
"[",
"0",
"]",
")",
"+",
"axes",
"+",
"list",
"(",
"page",
".",
"axes",
"[",
"2",
":",
"]",
")",
"else",
":",
"shape",
".",
"extend",
"(",
"page",
".",
"shape",
")",
"axes",
".",
"extend",
"(",
"page",
".",
"axes",
")",
"truncated",
"=",
"(",
"hyperstack",
"and",
"len",
"(",
"self",
".",
"pages",
")",
"==",
"1",
"and",
"page",
".",
"is_contiguous",
"[",
"1",
"]",
"!=",
"product",
"(",
"shape",
")",
"*",
"page",
".",
"bitspersample",
"//",
"8",
")",
"self",
".",
"is_uniform",
"=",
"True",
"return",
"[",
"TiffPageSeries",
"(",
"pages",
",",
"shape",
",",
"page",
".",
"dtype",
",",
"axes",
",",
"kind",
"=",
"'ImageJ'",
",",
"truncated",
"=",
"truncated",
")",
"]"
] |
Look up the parameter name for a given OID | def oid2name ( self , oid ) : if not self . _oid_lookup : for name , data in self . _parameters . items ( ) : self . _oid_lookup [ data [ 'OID' ] ] = data [ 'Name' ] return self . _oid_lookup [ oid ] | 9,974 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L650-L655 | [
"def",
"share",
"(",
"self",
",",
"group_id",
",",
"group_access",
",",
"expires_at",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"'/projects/%s/share'",
"%",
"self",
".",
"get_id",
"(",
")",
"data",
"=",
"{",
"'group_id'",
":",
"group_id",
",",
"'group_access'",
":",
"group_access",
",",
"'expires_at'",
":",
"expires_at",
"}",
"self",
".",
"manager",
".",
"gitlab",
".",
"http_post",
"(",
"path",
",",
"post_data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
] |
Return DOM for given dom_id | def via_dom_id ( self , dom_id , det_id ) : try : return DOM . from_json ( [ d for d in self . _json if d [ "DOMId" ] == dom_id and d [ "DetOID" ] == det_id ] [ 0 ] ) except IndexError : log . critical ( "No DOM found for DOM ID '{0}'" . format ( dom_id ) ) | 9,975 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L717-L725 | [
"def",
"read_thrift",
"(",
"file_obj",
",",
"ttype",
")",
":",
"from",
"thrift",
".",
"transport",
".",
"TTransport",
"import",
"TFileObjectTransport",
",",
"TBufferedTransport",
"starting_pos",
"=",
"file_obj",
".",
"tell",
"(",
")",
"# set up the protocol chain",
"ft",
"=",
"TFileObjectTransport",
"(",
"file_obj",
")",
"bufsize",
"=",
"2",
"**",
"16",
"# for accelerated reading ensure that we wrap this so that the CReadable transport can be used.",
"bt",
"=",
"TBufferedTransport",
"(",
"ft",
",",
"bufsize",
")",
"pin",
"=",
"TCompactProtocol",
"(",
"bt",
")",
"# read out type",
"obj",
"=",
"ttype",
"(",
")",
"obj",
".",
"read",
"(",
"pin",
")",
"# The read will actually overshoot due to the buffering that thrift does. Seek backwards to the correct spot,.",
"buffer_pos",
"=",
"bt",
".",
"cstringio_buf",
".",
"tell",
"(",
")",
"ending_pos",
"=",
"file_obj",
".",
"tell",
"(",
")",
"blocks",
"=",
"(",
"(",
"ending_pos",
"-",
"starting_pos",
")",
"//",
"bufsize",
")",
"-",
"1",
"if",
"blocks",
"<",
"0",
":",
"blocks",
"=",
"0",
"file_obj",
".",
"seek",
"(",
"starting_pos",
"+",
"blocks",
"*",
"bufsize",
"+",
"buffer_pos",
")",
"return",
"obj"
] |
return DOM for given CLB UPI | def via_clb_upi ( self , clb_upi , det_id ) : try : return DOM . from_json ( [ d for d in self . _json if d [ "CLBUPI" ] == clb_upi and d [ "DetOID" ] == det_id ] [ 0 ] ) except IndexError : log . critical ( "No DOM found for CLB UPI '{0}'" . format ( clb_upi ) ) | 9,976 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L727-L735 | [
"def",
"__remove_if_expired",
"(",
"self",
",",
"filename",
")",
":",
"if",
"not",
"self",
".",
"duration",
":",
"return",
"created",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"os",
".",
"path",
".",
"getctime",
"(",
"filename",
")",
")",
"expired",
"=",
"created",
"+",
"self",
".",
"duration",
"if",
"expired",
"<",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
":",
"os",
".",
"remove",
"(",
"filename",
")",
"log",
".",
"debug",
"(",
"\"%s expired, deleted\"",
",",
"filename",
")"
] |
A dict of CLBs with UPI as key | def upi ( self ) : parameter = 'UPI' if parameter not in self . _by : self . _populate ( by = parameter ) return self . _by [ parameter ] | 9,977 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L879-L884 | [
"def",
"dump_http",
"(",
"method",
",",
"url",
",",
"request_headers",
",",
"response",
",",
"output_stream",
")",
":",
"# Start header.",
"output_stream",
".",
"write",
"(",
"'---------START-HTTP---------\\n'",
")",
"# Get parsed url.",
"parsed_url",
"=",
"urlsplit",
"(",
"url",
")",
"# Dump all request headers recursively.",
"http_path",
"=",
"parsed_url",
".",
"path",
"if",
"parsed_url",
".",
"query",
":",
"http_path",
"=",
"http_path",
"+",
"'?'",
"+",
"parsed_url",
".",
"query",
"output_stream",
".",
"write",
"(",
"'{0} {1} HTTP/1.1\\n'",
".",
"format",
"(",
"method",
",",
"http_path",
")",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"request_headers",
".",
"items",
"(",
")",
")",
":",
"if",
"k",
"is",
"'authorization'",
":",
"# Redact signature header value from trace logs.",
"v",
"=",
"re",
".",
"sub",
"(",
"r'Signature=([[0-9a-f]+)'",
",",
"'Signature=*REDACTED*'",
",",
"v",
")",
"output_stream",
".",
"write",
"(",
"'{0}: {1}\\n'",
".",
"format",
"(",
"k",
".",
"title",
"(",
")",
",",
"v",
")",
")",
"# Write a new line.",
"output_stream",
".",
"write",
"(",
"'\\n'",
")",
"# Write response status code.",
"output_stream",
".",
"write",
"(",
"'HTTP/1.1 {0}\\n'",
".",
"format",
"(",
"response",
".",
"status",
")",
")",
"# Dump all response headers recursively.",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"response",
".",
"getheaders",
"(",
")",
".",
"items",
"(",
")",
")",
":",
"output_stream",
".",
"write",
"(",
"'{0}: {1}\\n'",
".",
"format",
"(",
"k",
".",
"title",
"(",
")",
",",
"v",
")",
")",
"# For all errors write all the available response body.",
"if",
"response",
".",
"status",
"!=",
"200",
"and",
"response",
".",
"status",
"!=",
"204",
"and",
"response",
".",
"status",
"!=",
"206",
":",
"output_stream",
".",
"write",
"(",
"'{0}'",
".",
"format",
"(",
"response",
".",
"read",
"(",
")",
")",
")",
"# End header.",
"output_stream",
".",
"write",
"(",
"'---------END-HTTP---------\\n'",
")"
] |
A dict of CLBs with DOM ID as key | def dom_id ( self ) : parameter = 'DOMID' if parameter not in self . _by : self . _populate ( by = parameter ) return self . _by [ parameter ] | 9,978 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L887-L892 | [
"def",
"map_eigenvalues",
"(",
"matrix",
":",
"np",
".",
"ndarray",
",",
"func",
":",
"Callable",
"[",
"[",
"complex",
"]",
",",
"complex",
"]",
",",
"*",
",",
"rtol",
":",
"float",
"=",
"1e-5",
",",
"atol",
":",
"float",
"=",
"1e-8",
")",
"->",
"np",
".",
"ndarray",
":",
"vals",
",",
"vecs",
"=",
"_perp_eigendecompose",
"(",
"matrix",
",",
"rtol",
"=",
"rtol",
",",
"atol",
"=",
"atol",
")",
"pieces",
"=",
"[",
"np",
".",
"outer",
"(",
"vec",
",",
"np",
".",
"conj",
"(",
"vec",
".",
"T",
")",
")",
"for",
"vec",
"in",
"vecs",
"]",
"out_vals",
"=",
"np",
".",
"vectorize",
"(",
"func",
")",
"(",
"vals",
".",
"astype",
"(",
"complex",
")",
")",
"total",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"matrix",
".",
"shape",
")",
"for",
"piece",
",",
"val",
"in",
"zip",
"(",
"pieces",
",",
"out_vals",
")",
":",
"total",
"=",
"np",
".",
"add",
"(",
"total",
",",
"piece",
"*",
"val",
")",
"return",
"total"
] |
Return the base CLB for a given DU | def base ( self , du ) : parameter = 'base' if parameter not in self . _by : self . _by [ parameter ] = { } for clb in self . upi . values ( ) : if clb . floor == 0 : self . _by [ parameter ] [ clb . du ] = clb return self . _by [ parameter ] [ du ] | 9,979 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L894-L902 | [
"def",
"_rescan",
"(",
"self",
",",
"skip_to_end",
"=",
"True",
")",
":",
"# Get listing of matching files.",
"paths",
"=",
"[",
"]",
"for",
"single_glob",
"in",
"self",
".",
"_globspec",
":",
"paths",
".",
"extend",
"(",
"glob",
".",
"glob",
"(",
"single_glob",
")",
")",
"# Remove files that don't appear in the new list.",
"for",
"path",
"in",
"self",
".",
"_tailedfiles",
".",
"keys",
"(",
")",
":",
"if",
"path",
"not",
"in",
"paths",
":",
"self",
".",
"_tailedfiles",
"[",
"path",
"]",
".",
"_close",
"(",
")",
"del",
"self",
".",
"_tailedfiles",
"[",
"path",
"]",
"# Add any files we don't have open yet.",
"for",
"path",
"in",
"paths",
":",
"try",
":",
"# If the file has been rotated, reopen it.",
"if",
"self",
".",
"_tailedfiles",
"[",
"path",
"]",
".",
"hasBeenRotated",
"(",
")",
":",
"# If it can't be reopened, close it.",
"if",
"not",
"self",
".",
"_tailedfiles",
"[",
"path",
"]",
".",
"reopen",
"(",
")",
":",
"del",
"self",
".",
"_tailedfiles",
"[",
"path",
"]",
"except",
"KeyError",
":",
"# Open a file that we haven't seen yet.",
"self",
".",
"_tailedfiles",
"[",
"path",
"]",
"=",
"TailedFile",
"(",
"path",
",",
"skip_to_end",
"=",
"skip_to_end",
",",
"offset",
"=",
"self",
".",
"_offsets",
".",
"get",
"(",
"path",
",",
"None",
")",
")"
] |
Get the results for a given stream | def get_results ( self , stream , time_interval ) : query = stream . stream_id . as_raw ( ) query [ 'datetime' ] = { '$gt' : time_interval . start , '$lte' : time_interval . end } with switch_db ( StreamInstanceModel , 'hyperstream' ) : for instance in StreamInstanceModel . objects ( __raw__ = query ) : yield StreamInstance ( timestamp = instance . datetime , value = instance . value ) | 9,980 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/database_channel.py#L57-L69 | [
"def",
"actually_mount",
"(",
"self",
",",
"client",
")",
":",
"a_obj",
"=",
"self",
".",
"config",
".",
"copy",
"(",
")",
"if",
"'description'",
"in",
"a_obj",
":",
"del",
"a_obj",
"[",
"'description'",
"]",
"try",
":",
"m_fun",
"=",
"getattr",
"(",
"client",
",",
"self",
".",
"mount_fun",
")",
"if",
"self",
".",
"description",
"and",
"a_obj",
":",
"m_fun",
"(",
"self",
".",
"backend",
",",
"mount_point",
"=",
"self",
".",
"path",
",",
"description",
"=",
"self",
".",
"description",
",",
"config",
"=",
"a_obj",
")",
"elif",
"self",
".",
"description",
":",
"m_fun",
"(",
"self",
".",
"backend",
",",
"mount_point",
"=",
"self",
".",
"path",
",",
"description",
"=",
"self",
".",
"description",
")",
"elif",
"a_obj",
":",
"m_fun",
"(",
"self",
".",
"backend",
",",
"mount_point",
"=",
"self",
".",
"path",
",",
"config",
"=",
"a_obj",
")",
"else",
":",
"m_fun",
"(",
"self",
".",
"backend",
",",
"mount_point",
"=",
"self",
".",
"path",
")",
"except",
"hvac",
".",
"exceptions",
".",
"InvalidRequest",
"as",
"exception",
":",
"match",
"=",
"re",
".",
"match",
"(",
"'existing mount at (?P<path>.+)'",
",",
"str",
"(",
"exception",
")",
")",
"if",
"match",
":",
"e_msg",
"=",
"\"%s has a mountpoint conflict with %s\"",
"%",
"(",
"self",
".",
"path",
",",
"match",
".",
"group",
"(",
"'path'",
")",
")",
"raise",
"aomi_excep",
".",
"VaultConstraint",
"(",
"e_msg",
")",
"else",
":",
"raise"
] |
Move file pointer to the packet with given index . | def seek_to_packet ( self , index ) : pointer_position = self . packet_positions [ index ] self . blob_file . seek ( pointer_position , 0 ) | 9,981 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/clb.py#L59-L62 | [
"def",
"update_sandbox_product",
"(",
"self",
",",
"product_id",
",",
"surge_multiplier",
"=",
"None",
",",
"drivers_available",
"=",
"None",
",",
")",
":",
"args",
"=",
"{",
"'surge_multiplier'",
":",
"surge_multiplier",
",",
"'drivers_available'",
":",
"drivers_available",
",",
"}",
"endpoint",
"=",
"'v1.2/sandbox/products/{}'",
".",
"format",
"(",
"product_id",
")",
"return",
"self",
".",
"_api_call",
"(",
"'PUT'",
",",
"endpoint",
",",
"args",
"=",
"args",
")"
] |
Generate next blob in file | def next_blob ( self ) : try : length = struct . unpack ( '<i' , self . blob_file . read ( 4 ) ) [ 0 ] except struct . error : raise StopIteration header = CLBHeader ( file_obj = self . blob_file ) blob = { 'CLBHeader' : header } remaining_length = length - header . size pmt_data = [ ] pmt_raw_data = self . blob_file . read ( remaining_length ) pmt_raw_data_io = BytesIO ( pmt_raw_data ) for _ in range ( int ( remaining_length / 6 ) ) : channel_id , time , tot = struct . unpack ( '>cic' , pmt_raw_data_io . read ( 6 ) ) pmt_data . append ( PMTData ( ord ( channel_id ) , time , ord ( tot ) ) ) blob [ 'PMTData' ] = pmt_data blob [ 'PMTRawData' ] = pmt_raw_data return blob | 9,982 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/clb.py#L64-L83 | [
"def",
"recv",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"timeout",
":",
"try",
":",
"testsock",
"=",
"self",
".",
"_zmq",
".",
"select",
"(",
"[",
"self",
".",
"socket",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"timeout",
")",
"[",
"0",
"]",
"except",
"zmq",
".",
"ZMQError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EINTR",
":",
"testsock",
"=",
"None",
"else",
":",
"raise",
"if",
"not",
"testsock",
":",
"return",
"rv",
"=",
"self",
".",
"socket",
".",
"recv",
"(",
"self",
".",
"_zmq",
".",
"NOBLOCK",
")",
"return",
"LogRecord",
".",
"from_dict",
"(",
"json",
".",
"loads",
"(",
"rv",
")",
")",
"else",
":",
"return",
"super",
"(",
"ZeroMQPullSubscriber",
",",
"self",
")",
".",
"recv",
"(",
"timeout",
")"
] |
Returns the Kendall Tau Score | def getKendallTauScore ( myResponse , otherResponse ) : # variables kt = 0 list1 = myResponse . values ( ) list2 = otherResponse . values ( ) if len ( list1 ) <= 1 : return kt # runs through list1 for itr1 in range ( 0 , len ( list1 ) - 1 ) : # runs through list2 for itr2 in range ( itr1 + 1 , len ( list2 ) ) : # checks if there is a discrepancy. If so, adds if ( ( list1 [ itr1 ] > list1 [ itr2 ] and list2 [ itr1 ] < list2 [ itr2 ] ) or ( list1 [ itr1 ] < list1 [ itr2 ] and list2 [ itr1 ] > list2 [ itr2 ] ) ) : kt += 1 # normalizes between 0 and 1 kt = ( kt * 2 ) / ( len ( list1 ) * ( len ( list1 ) - 1 ) ) # returns found value return kt | 9,983 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L580-L606 | [
"def",
"wave_infochunk",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"file",
":",
"if",
"file",
".",
"read",
"(",
"4",
")",
"!=",
"b\"RIFF\"",
":",
"return",
"None",
"data_size",
"=",
"file",
".",
"read",
"(",
"4",
")",
"# container size",
"if",
"file",
".",
"read",
"(",
"4",
")",
"!=",
"b\"WAVE\"",
":",
"return",
"None",
"while",
"True",
":",
"chunkid",
"=",
"file",
".",
"read",
"(",
"4",
")",
"sizebuf",
"=",
"file",
".",
"read",
"(",
"4",
")",
"if",
"len",
"(",
"sizebuf",
")",
"<",
"4",
"or",
"len",
"(",
"chunkid",
")",
"<",
"4",
":",
"return",
"None",
"size",
"=",
"struct",
".",
"unpack",
"(",
"b'<L'",
",",
"sizebuf",
")",
"[",
"0",
"]",
"if",
"chunkid",
"[",
"0",
":",
"3",
"]",
"!=",
"b\"fmt\"",
":",
"if",
"size",
"%",
"2",
"==",
"1",
":",
"seek",
"=",
"size",
"+",
"1",
"else",
":",
"seek",
"=",
"size",
"file",
".",
"seek",
"(",
"size",
",",
"1",
")",
"else",
":",
"return",
"bytearray",
"(",
"b\"RIFF\"",
"+",
"data_size",
"+",
"b\"WAVE\"",
"+",
"chunkid",
"+",
"sizebuf",
"+",
"file",
".",
"read",
"(",
"size",
")",
")"
] |
Returns a dictonary that associates the integer representation of each candidate with the score they recieved in the profile . | def getCandScoresMap ( self , profile ) : # Currently, we expect the profile to contain complete ordering over candidates. elecType = profile . getElecType ( ) if elecType != "soc" and elecType != "toc" : print ( "ERROR: unsupported election type" ) exit ( ) # Initialize our dictionary so that all candidates have a score of zero. candScoresMap = dict ( ) for cand in profile . candMap . keys ( ) : candScoresMap [ cand ] = 0.0 rankMaps = profile . getRankMaps ( ) rankMapCounts = profile . getPreferenceCounts ( ) scoringVector = self . getScoringVector ( profile ) # Go through the rankMaps of the profile and increment each candidates score appropriately. for i in range ( 0 , len ( rankMaps ) ) : rankMap = rankMaps [ i ] rankMapCount = rankMapCounts [ i ] for cand in rankMap . keys ( ) : candScoresMap [ cand ] += scoringVector [ rankMap [ cand ] - 1 ] * rankMapCount # print("candScoresMap=", candScoresMap) return candScoresMap | 9,984 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L128-L159 | [
"def",
"boolean_union",
"(",
"self",
",",
"mesh",
",",
"inplace",
"=",
"False",
")",
":",
"bfilter",
"=",
"vtk",
".",
"vtkBooleanOperationPolyDataFilter",
"(",
")",
"bfilter",
".",
"SetOperationToUnion",
"(",
")",
"bfilter",
".",
"SetInputData",
"(",
"1",
",",
"mesh",
")",
"bfilter",
".",
"SetInputData",
"(",
"0",
",",
"self",
")",
"bfilter",
".",
"ReorientDifferenceCellsOff",
"(",
")",
"bfilter",
".",
"Update",
"(",
")",
"mesh",
"=",
"_get_output",
"(",
"bfilter",
")",
"if",
"inplace",
":",
"self",
".",
"overwrite",
"(",
"mesh",
")",
"else",
":",
"return",
"mesh"
] |
Returns an integer that is equal to the margin of victory of the election profile . | def getMov ( self , profile ) : # from . import mov import mov return mov . MoVScoring ( profile , self . getScoringVector ( profile ) ) | 9,985 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L161-L169 | [
"def",
"SetConsoleTextAttribute",
"(",
"stream_id",
",",
"attrs",
")",
":",
"handle",
"=",
"handles",
"[",
"stream_id",
"]",
"return",
"windll",
".",
"kernel32",
".",
"SetConsoleTextAttribute",
"(",
"handle",
",",
"attrs",
")"
] |
Returns a dictionary that associates integer representations of each candidate with their Bucklin score . | def getCandScoresMap ( self , profile ) : # Currently, we expect the profile to contain complete ordering over candidates. elecType = profile . getElecType ( ) if elecType != "soc" and elecType != "toc" : print ( "ERROR: unsupported profile type" ) exit ( ) bucklinScores = dict ( ) rankMaps = profile . getRankMaps ( ) preferenceCounts = profile . getPreferenceCounts ( ) for cand in profile . candMap . keys ( ) : # We keep track of the number of times a candidate is ranked in the first t positions. numTimesRanked = 0 # We increase t in increments of 1 until we find t such that the candidate is ranked in the # first t positions in at least half the votes. for t in range ( 1 , profile . numCands + 1 ) : for i in range ( 0 , len ( rankMaps ) ) : if ( rankMaps [ i ] [ cand ] == t ) : numTimesRanked += preferenceCounts [ i ] if numTimesRanked >= math . ceil ( float ( profile . numVoters ) / 2 ) : bucklinScores [ cand ] = t break return bucklinScores | 9,986 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L335-L367 | [
"def",
"invalidate_metadata",
"(",
"self",
",",
"name",
"=",
"None",
",",
"database",
"=",
"None",
")",
":",
"stmt",
"=",
"'INVALIDATE METADATA'",
"if",
"name",
"is",
"not",
"None",
":",
"stmt",
"=",
"self",
".",
"_table_command",
"(",
"stmt",
",",
"name",
",",
"database",
"=",
"database",
")",
"self",
".",
"_execute",
"(",
"stmt",
")"
] |
Returns a dictionary that associates integer representations of each candidate with their maximin score . | def getCandScoresMap ( self , profile ) : # Currently, we expect the profile to contain complete ordering over candidates. Ties are # allowed however. elecType = profile . getElecType ( ) if elecType != "soc" and elecType != "toc" : print ( "ERROR: unsupported election type" ) exit ( ) wmg = profile . getWmg ( ) # Initialize the maximin score for each candidate as infinity. maximinScores = dict ( ) for cand in wmg . keys ( ) : maximinScores [ cand ] = float ( "inf" ) # For each pair of candidates, calculate the number of times each beats the other. for cand1 , cand2 in itertools . combinations ( wmg . keys ( ) , 2 ) : if cand2 in wmg [ cand1 ] . keys ( ) : maximinScores [ cand1 ] = min ( maximinScores [ cand1 ] , wmg [ cand1 ] [ cand2 ] ) maximinScores [ cand2 ] = min ( maximinScores [ cand2 ] , wmg [ cand2 ] [ cand1 ] ) return maximinScores | 9,987 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L436-L464 | [
"def",
"flush_devices",
"(",
"self",
")",
":",
"self",
".",
"rom",
".",
"program",
"(",
"[",
"0",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"rom",
".",
"size",
")",
"]",
")",
"self",
".",
"flash",
".",
"program",
"(",
"[",
"0",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"flash",
".",
"size",
")",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"ram",
".",
"size",
")",
":",
"self",
".",
"ram",
".",
"write",
"(",
"i",
",",
"0",
")"
] |
Returns a two - dimensional dictionary that associates every pair of candidates cand1 and cand2 with the strongest path from cand1 to cand2 . | def computeStrongestPaths ( self , profile , pairwisePreferences ) : cands = profile . candMap . keys ( ) numCands = len ( cands ) # Initialize the two-dimensional dictionary that will hold our strongest paths. strongestPaths = dict ( ) for cand in cands : strongestPaths [ cand ] = dict ( ) for i in range ( 1 , numCands + 1 ) : for j in range ( 1 , numCands + 1 ) : if ( i == j ) : continue if pairwisePreferences [ i ] [ j ] > pairwisePreferences [ j ] [ i ] : strongestPaths [ i ] [ j ] = pairwisePreferences [ i ] [ j ] else : strongestPaths [ i ] [ j ] = 0 for i in range ( 1 , numCands + 1 ) : for j in range ( 1 , numCands + 1 ) : if ( i == j ) : continue for k in range ( 1 , numCands + 1 ) : if ( i == k or j == k ) : continue strongestPaths [ j ] [ k ] = max ( strongestPaths [ j ] [ k ] , min ( strongestPaths [ j ] [ i ] , strongestPaths [ i ] [ k ] ) ) return strongestPaths | 9,988 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L475-L511 | [
"def",
"ulocalized_gmt0_time",
"(",
"self",
",",
"time",
",",
"context",
",",
"request",
")",
":",
"value",
"=",
"get_date",
"(",
"context",
",",
"time",
")",
"if",
"not",
"value",
":",
"return",
"\"\"",
"# DateTime is stored with TimeZone, but DateTimeWidget omits TZ",
"value",
"=",
"value",
".",
"toZone",
"(",
"\"GMT+0\"",
")",
"return",
"self",
".",
"ulocalized_time",
"(",
"value",
",",
"context",
",",
"request",
")"
] |
Returns a two - dimensional dictionary that associates every pair of candidates cand1 and cand2 with number of voters who prefer cand1 to cand2 . | def computePairwisePreferences ( self , profile ) : cands = profile . candMap . keys ( ) # Initialize the two-dimensional dictionary that will hold our pairwise preferences. pairwisePreferences = dict ( ) for cand in cands : pairwisePreferences [ cand ] = dict ( ) for cand1 in cands : for cand2 in cands : if cand1 != cand2 : pairwisePreferences [ cand1 ] [ cand2 ] = 0 for preference in profile . preferences : wmgMap = preference . wmgMap for cand1 , cand2 in itertools . combinations ( cands , 2 ) : # If either candidate was unranked, we assume that they are lower ranked than all # ranked candidates. if cand1 not in wmgMap . keys ( ) : if cand2 in wmgMap . keys ( ) : pairwisePreferences [ cand2 ] [ cand1 ] += 1 * preference . count elif cand2 not in wmgMap . keys ( ) : if cand1 in wmgMap . keys ( ) : pairwisePreferences [ cand1 ] [ cand2 ] += 1 * preference . count elif wmgMap [ cand1 ] [ cand2 ] == 1 : pairwisePreferences [ cand1 ] [ cand2 ] += 1 * preference . count elif wmgMap [ cand1 ] [ cand2 ] == - 1 : pairwisePreferences [ cand2 ] [ cand1 ] += 1 * preference . count return pairwisePreferences | 9,989 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L513-L550 | [
"def",
"mask_unphysical",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"self",
".",
"valid_range",
":",
"return",
"data",
"else",
":",
"return",
"np",
".",
"ma",
".",
"masked_outside",
"(",
"data",
",",
"np",
".",
"min",
"(",
"self",
".",
"valid_range",
")",
",",
"np",
".",
"max",
"(",
"self",
".",
"valid_range",
")",
")"
] |
Returns a dictionary that associates integer representations of each candidate with the number of other candidates for which her strongest path to the other candidate is greater than the other candidate s stronget path to her . | def getCandScoresMap ( self , profile ) : cands = profile . candMap . keys ( ) pairwisePreferences = self . computePairwisePreferences ( profile ) strongestPaths = self . computeStrongestPaths ( profile , pairwisePreferences ) # For each candidate, determine how many times p[E,X] >= p[X,E] using a variant of the # Floyd-Warshall algorithm. betterCount = dict ( ) for cand in cands : betterCount [ cand ] = 0 for cand1 in cands : for cand2 in cands : if cand1 == cand2 : continue if strongestPaths [ cand1 ] [ cand2 ] >= strongestPaths [ cand2 ] [ cand1 ] : betterCount [ cand1 ] += 1 return betterCount | 9,990 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L552-L577 | [
"def",
"calc_regenerated",
"(",
"self",
",",
"lastvotetime",
")",
":",
"delta",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"datetime",
".",
"strptime",
"(",
"lastvotetime",
",",
"'%Y-%m-%dT%H:%M:%S'",
")",
"td",
"=",
"delta",
".",
"days",
"ts",
"=",
"delta",
".",
"seconds",
"tt",
"=",
"(",
"td",
"*",
"86400",
")",
"+",
"ts",
"return",
"tt",
"*",
"10000",
"/",
"86400",
"/",
"5"
] |
Returns an integer list that represents all possible winners of a profile under STV rule . | def STVsocwinners ( self , profile ) : ordering = profile . getOrderVectors ( ) prefcounts = profile . getPreferenceCounts ( ) m = profile . numCands if min ( ordering [ 0 ] ) == 0 : startstate = set ( range ( m ) ) else : startstate = set ( range ( 1 , m + 1 ) ) ordering , startstate = self . preprocessing ( ordering , prefcounts , m , startstate ) m_star = len ( startstate ) known_winners = set ( ) # ----------Some statistics-------------- hashtable2 = set ( ) # push the node of start state into the priority queue root = Node ( value = startstate ) stackNode = [ ] stackNode . append ( root ) while stackNode : # ------------pop the current node----------------- node = stackNode . pop ( ) # ------------------------------------------------- state = node . value . copy ( ) # use heuristic to delete all the candidates which satisfy the following condition # goal state 1: if the state set contains only 1 candidate, then stop if len ( state ) == 1 and list ( state ) [ 0 ] not in known_winners : known_winners . add ( list ( state ) [ 0 ] ) continue # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop if state <= known_winners : continue # ----------Compute plurality score for the current remaining candidates-------------- plural_score = self . get_plurality_scores3 ( prefcounts , ordering , state , m_star ) minscore = min ( plural_score . values ( ) ) for to_be_deleted in state : if plural_score [ to_be_deleted ] == minscore : child_state = state . copy ( ) child_state . remove ( to_be_deleted ) tpc = tuple ( sorted ( child_state ) ) if tpc in hashtable2 : continue else : hashtable2 . add ( tpc ) child_node = Node ( value = child_state ) stackNode . append ( child_node ) return sorted ( known_winners ) | 9,991 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L624-L680 | [
"def",
"load_projects",
"(",
"self",
")",
":",
"server_config",
"=",
"Config",
".",
"instance",
"(",
")",
".",
"get_section_config",
"(",
"\"Server\"",
")",
"projects_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"server_config",
".",
"get",
"(",
"\"projects_path\"",
",",
"\"~/GNS3/projects\"",
")",
")",
"os",
".",
"makedirs",
"(",
"projects_path",
",",
"exist_ok",
"=",
"True",
")",
"try",
":",
"for",
"project_path",
"in",
"os",
".",
"listdir",
"(",
"projects_path",
")",
":",
"project_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"projects_path",
",",
"project_path",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"project_dir",
")",
":",
"for",
"file",
"in",
"os",
".",
"listdir",
"(",
"project_dir",
")",
":",
"if",
"file",
".",
"endswith",
"(",
"\".gns3\"",
")",
":",
"try",
":",
"yield",
"from",
"self",
".",
"load_project",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"file",
")",
",",
"load",
"=",
"False",
")",
"except",
"(",
"aiohttp",
".",
"web_exceptions",
".",
"HTTPConflict",
",",
"NotImplementedError",
")",
":",
"pass",
"# Skip not compatible projects",
"except",
"OSError",
"as",
"e",
":",
"log",
".",
"error",
"(",
"str",
"(",
"e",
")",
")"
] |
Returns an integer list that represents all possible winners of a profile under baldwin rule . | def baldwinsoc_winners ( self , profile ) : ordering = profile . getOrderVectors ( ) m = profile . numCands prefcounts = profile . getPreferenceCounts ( ) if min ( ordering [ 0 ] ) == 0 : startstate = set ( range ( m ) ) else : startstate = set ( range ( 1 , m + 1 ) ) wmg = self . getWmg2 ( prefcounts , ordering , startstate , normalize = False ) known_winners = set ( ) # ----------Some statistics-------------- hashtable2 = set ( ) # push the node of start state into the priority queue root = Node ( value = startstate ) stackNode = [ ] stackNode . append ( root ) while stackNode : # ------------pop the current node----------------- node = stackNode . pop ( ) # ------------------------------------------------- state = node . value . copy ( ) # goal state 1: if the state set contains only 1 candidate, then stop if len ( state ) == 1 and list ( state ) [ 0 ] not in known_winners : known_winners . add ( list ( state ) [ 0 ] ) continue # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop if state <= known_winners : continue # ----------Compute plurality score for the current remaining candidates-------------- plural_score = dict ( ) for cand in state : plural_score [ cand ] = 0 for cand1 , cand2 in itertools . permutations ( state , 2 ) : plural_score [ cand1 ] += wmg [ cand1 ] [ cand2 ] # if current state satisfies one of the 3 goal state, continue to the next loop # After using heuristics, generate children and push them into priority queue # frontier = [val for val in known_winners if val in state] + list(set(state) - set(known_winners)) minscore = min ( plural_score . values ( ) ) for to_be_deleted in state : if plural_score [ to_be_deleted ] == minscore : child_state = state . copy ( ) child_state . remove ( to_be_deleted ) tpc = tuple ( sorted ( child_state ) ) if tpc in hashtable2 : continue else : hashtable2 . 
add ( tpc ) child_node = Node ( value = child_state ) stackNode . append ( child_node ) return sorted ( known_winners ) | 9,992 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L801-L861 | [
"def",
"create_marker_index",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"es",
".",
"indices",
".",
"exists",
"(",
"index",
"=",
"self",
".",
"marker_index",
")",
":",
"self",
".",
"es",
".",
"indices",
".",
"create",
"(",
"index",
"=",
"self",
".",
"marker_index",
")"
] |
Generate a weighted majority graph that represents the whole profile . The function will return a two - dimensional dictionary that associates integer representations of each pair of candidates cand1 and cand2 with the number of times cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1 . | def getWmg2 ( self , prefcounts , ordering , state , normalize = False ) : # Initialize a new dictionary for our final weighted majority graph. wmgMap = dict ( ) for cand in state : wmgMap [ cand ] = dict ( ) for cand1 , cand2 in itertools . combinations ( state , 2 ) : wmgMap [ cand1 ] [ cand2 ] = 0 wmgMap [ cand2 ] [ cand1 ] = 0 # Go through the wmgMaps and increment the value of each edge in our final graph with the # edges in each of the wmgMaps. We take into account the number of times that the vote # occured. for i in range ( 0 , len ( prefcounts ) ) : for cand1 , cand2 in itertools . combinations ( ordering [ i ] , 2 ) : # -------------------------- wmgMap [ cand1 ] [ cand2 ] += prefcounts [ i ] # By default, we assume that the weighted majority graph should not be normalized. If # desired, we normalize by dividing each edge by the value of the largest edge. if normalize == True : maxEdge = float ( '-inf' ) for cand in wmgMap . keys ( ) : maxEdge = max ( maxEdge , max ( wmgMap [ cand ] . values ( ) ) ) for cand1 in wmgMap . keys ( ) : for cand2 in wmgMap [ cand1 ] . keys ( ) : wmgMap [ cand1 ] [ cand2 ] = float ( wmgMap [ cand1 ] [ cand2 ] ) / maxEdge return wmgMap | 9,993 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L927-L963 | [
"def",
"is_varchar",
"(",
"self",
")",
":",
"dt",
"=",
"DATA_TYPES",
"[",
"'varchar'",
"]",
"if",
"type",
"(",
"self",
".",
"data",
")",
"is",
"dt",
"[",
"'type'",
"]",
"and",
"len",
"(",
"self",
".",
"data",
")",
"<",
"dt",
"[",
"'max'",
"]",
":",
"self",
".",
"type",
"=",
"'VARCHAR'",
"self",
".",
"len",
"=",
"len",
"(",
"self",
".",
"data",
")",
"return",
"True"
] |
Returns a number that associates the winner of a profile under Plurality with Runoff rule . | def PluRunOff_single_winner ( self , profile ) : # Currently, we expect the profile to contain complete ordering over candidates. Ties are # allowed however. elecType = profile . getElecType ( ) if elecType != "soc" and elecType != "toc" and elecType != "csv" : print ( "ERROR: unsupported election type" ) exit ( ) # Initialization prefcounts = profile . getPreferenceCounts ( ) len_prefcounts = len ( prefcounts ) rankmaps = profile . getRankMaps ( ) ranking = MechanismPlurality ( ) . getRanking ( profile ) # 1st round: find the top 2 candidates in plurality scores # Compute the 1st-place candidate in plurality scores # print(ranking) max_cand = ranking [ 0 ] [ 0 ] [ 0 ] # Compute the 2nd-place candidate in plurality scores # Automatically using tie-breaking rule--numerically increasing order if len ( ranking [ 0 ] [ 0 ] ) > 1 : second_max_cand = ranking [ 0 ] [ 0 ] [ 1 ] else : second_max_cand = ranking [ 0 ] [ 1 ] [ 0 ] top_2 = [ max_cand , second_max_cand ] # 2nd round: find the candidate with maximum plurality score dict_top2 = { max_cand : 0 , second_max_cand : 0 } for i in range ( len_prefcounts ) : vote_top2 = { key : value for key , value in rankmaps [ i ] . items ( ) if key in top_2 } top_position = min ( vote_top2 . values ( ) ) keys = [ x for x in vote_top2 . keys ( ) if vote_top2 [ x ] == top_position ] for key in keys : dict_top2 [ key ] += prefcounts [ i ] # print(dict_top2) winner = max ( dict_top2 . items ( ) , key = lambda x : x [ 1 ] ) [ 0 ] return winner | 9,994 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L1780-L1825 | [
"def",
"clear_matplotlib_ticks",
"(",
"self",
",",
"axis",
"=",
"\"both\"",
")",
":",
"ax",
"=",
"self",
".",
"get_axes",
"(",
")",
"plotting",
".",
"clear_matplotlib_ticks",
"(",
"ax",
"=",
"ax",
",",
"axis",
"=",
"axis",
")"
] |
Returns a list that associates all the winners of a profile under Plurality with Runoff rule . | def PluRunOff_cowinners ( self , profile ) : # Currently, we expect the profile to contain complete ordering over candidates. Ties are # allowed however. elecType = profile . getElecType ( ) if elecType != "soc" and elecType != "toc" and elecType != "csv" : print ( "ERROR: unsupported election type" ) exit ( ) # Initialization prefcounts = profile . getPreferenceCounts ( ) len_prefcounts = len ( prefcounts ) rankmaps = profile . getRankMaps ( ) ranking = MechanismPlurality ( ) . getRanking ( profile ) known_winners = set ( ) # 1st round: find the top 2 candidates in plurality scores top_2_combinations = [ ] if len ( ranking [ 0 ] [ 0 ] ) > 1 : for cand1 , cand2 in itertools . combinations ( ranking [ 0 ] [ 0 ] , 2 ) : top_2_combinations . append ( [ cand1 , cand2 ] ) else : max_cand = ranking [ 0 ] [ 0 ] [ 0 ] if len ( ranking [ 0 ] [ 1 ] ) > 1 : for second_max_cand in ranking [ 0 ] [ 1 ] : top_2_combinations . append ( [ max_cand , second_max_cand ] ) else : second_max_cand = ranking [ 0 ] [ 1 ] [ 0 ] top_2_combinations . append ( [ max_cand , second_max_cand ] ) # 2nd round: find the candidate with maximum plurality score for top_2 in top_2_combinations : dict_top2 = { top_2 [ 0 ] : 0 , top_2 [ 1 ] : 0 } for i in range ( len_prefcounts ) : vote_top2 = { key : value for key , value in rankmaps [ i ] . items ( ) if key in top_2 } top_position = min ( vote_top2 . values ( ) ) keys = [ x for x in vote_top2 . keys ( ) if vote_top2 [ x ] == top_position ] for key in keys : dict_top2 [ key ] += prefcounts [ i ] max_value = max ( dict_top2 . values ( ) ) winners = [ y for y in dict_top2 . keys ( ) if dict_top2 [ y ] == max_value ] known_winners = known_winners | set ( winners ) return sorted ( known_winners ) | 9,995 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L1827-L1876 | [
"def",
"decode_coords",
"(",
"ds",
",",
"gridfile",
"=",
"None",
")",
":",
"def",
"add_attrs",
"(",
"obj",
")",
":",
"if",
"'coordinates'",
"in",
"obj",
".",
"attrs",
":",
"extra_coords",
".",
"update",
"(",
"obj",
".",
"attrs",
"[",
"'coordinates'",
"]",
".",
"split",
"(",
")",
")",
"obj",
".",
"encoding",
"[",
"'coordinates'",
"]",
"=",
"obj",
".",
"attrs",
".",
"pop",
"(",
"'coordinates'",
")",
"if",
"'bounds'",
"in",
"obj",
".",
"attrs",
":",
"extra_coords",
".",
"add",
"(",
"obj",
".",
"attrs",
"[",
"'bounds'",
"]",
")",
"if",
"gridfile",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"gridfile",
",",
"xr",
".",
"Dataset",
")",
":",
"gridfile",
"=",
"open_dataset",
"(",
"gridfile",
")",
"extra_coords",
"=",
"set",
"(",
"ds",
".",
"coords",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"ds",
".",
"variables",
")",
":",
"add_attrs",
"(",
"v",
")",
"add_attrs",
"(",
"ds",
")",
"if",
"gridfile",
"is",
"not",
"None",
":",
"ds",
".",
"update",
"(",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"gridfile",
".",
"variables",
")",
"if",
"k",
"in",
"extra_coords",
"}",
")",
"if",
"xr_version",
"<",
"(",
"0",
",",
"11",
")",
":",
"ds",
".",
"set_coords",
"(",
"extra_coords",
".",
"intersection",
"(",
"ds",
".",
"variables",
")",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"ds",
".",
"_coord_names",
".",
"update",
"(",
"extra_coords",
".",
"intersection",
"(",
"ds",
".",
"variables",
")",
")",
"return",
"ds"
] |
Returns a list that associates all the winners of a profile under Single non - transferable vote rule . | def SNTV_winners ( self , profile , K ) : # Currently, we expect the profile to contain complete ordering over candidates. Ties are # allowed however. elecType = profile . getElecType ( ) if elecType != "soc" and elecType != "toc" and elecType != "csv" : print ( "ERROR: unsupported election type" ) exit ( ) m = profile . numCands candScoresMap = MechanismPlurality ( ) . getCandScoresMap ( profile ) if K >= m : return list ( candScoresMap . keys ( ) ) # print(candScoresMap) sorted_items = sorted ( candScoresMap . items ( ) , key = lambda x : x [ 1 ] , reverse = True ) sorted_dict = { key : value for key , value in sorted_items } winners = list ( sorted_dict . keys ( ) ) [ 0 : K ] return winners | 9,996 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L1899-L1921 | [
"def",
"truncate_schema",
"(",
"self",
")",
":",
"assert",
"self",
".",
"server",
"==",
"'localhost'",
"con",
"=",
"self",
".",
"connection",
"or",
"self",
".",
"_connect",
"(",
")",
"self",
".",
"_initialize",
"(",
"con",
")",
"cur",
"=",
"con",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"'DELETE FROM publication;'",
")",
"cur",
".",
"execute",
"(",
"'TRUNCATE systems CASCADE;'",
")",
"con",
".",
"commit",
"(",
")",
"con",
".",
"close",
"(",
")",
"return"
] |
Returns a list that associates all the winners of a profile under The Borda - mean rule . | def Borda_mean_winners ( self , profile ) : n_candidates = profile . numCands prefcounts = profile . getPreferenceCounts ( ) len_prefcounts = len ( prefcounts ) rankmaps = profile . getRankMaps ( ) values = zeros ( [ len_prefcounts , n_candidates ] , dtype = int ) if min ( list ( rankmaps [ 0 ] . keys ( ) ) ) == 0 : delta = 0 else : delta = 1 for i in range ( len_prefcounts ) : for j in range ( delta , n_candidates + delta ) : values [ i ] [ j - delta ] = rankmaps [ i ] [ j ] # print("values=", values) mat0 = self . _build_mat ( values , n_candidates , prefcounts ) borda = [ 0 for i in range ( n_candidates ) ] for i in range ( n_candidates ) : borda [ i ] = sum ( [ mat0 [ i , j ] for j in range ( n_candidates ) ] ) borda_mean = mean ( borda ) bin_winners_list = [ int ( borda [ i ] >= borda_mean ) for i in range ( n_candidates ) ] return bin_winners_list | 9,997 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L2043-L2068 | [
"def",
"getComplexFileData",
"(",
"self",
",",
"fileInfo",
",",
"data",
")",
":",
"result",
"=",
"fileInfo",
"[",
"fileInfo",
".",
"find",
"(",
"data",
"+",
"\"</td>\"",
")",
"+",
"len",
"(",
"data",
"+",
"\"</td>\"",
")",
":",
"]",
"result",
"=",
"result",
"[",
":",
"result",
".",
"find",
"(",
"\"</td>\"",
")",
"]",
"result",
"=",
"result",
"[",
"result",
".",
"rfind",
"(",
"\">\"",
")",
"+",
"1",
":",
"]",
"return",
"result"
] |
Apply only t0s | def apply_t0 ( self , hits ) : if HAVE_NUMBA : apply_t0_nb ( hits . time , hits . dom_id , hits . channel_id , self . _lookup_tables ) else : n = len ( hits ) cal = np . empty ( n ) lookup = self . _calib_by_dom_and_channel for i in range ( n ) : calib = lookup [ hits [ 'dom_id' ] [ i ] ] [ hits [ 'channel_id' ] [ i ] ] cal [ i ] = calib [ 6 ] hits . time += cal return hits | 9,998 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/calib.py#L116-L130 | [
"def",
"run_vcfanno",
"(",
"vcf_file",
",",
"data",
",",
"decomposed",
"=",
"False",
")",
":",
"conf_files",
"=",
"dd",
".",
"get_vcfanno",
"(",
"data",
")",
"if",
"conf_files",
":",
"with_basepaths",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"gemini_basepath",
"=",
"_back_compatible_gemini",
"(",
"conf_files",
",",
"data",
")",
"for",
"f",
"in",
"conf_files",
":",
"name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
")",
"[",
"0",
"]",
"if",
"f",
".",
"endswith",
"(",
"\".lua\"",
")",
":",
"conf_file",
"=",
"None",
"lua_file",
"=",
"f",
"else",
":",
"conf_file",
"=",
"f",
"lua_file",
"=",
"\"%s.lua\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"conf_file",
")",
"[",
"0",
"]",
"if",
"lua_file",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"lua_file",
")",
":",
"lua_file",
"=",
"None",
"data_basepath",
"=",
"gemini_basepath",
"if",
"name",
"==",
"\"gemini\"",
"else",
"None",
"if",
"conf_file",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"conf_file",
")",
":",
"with_basepaths",
"[",
"(",
"data_basepath",
",",
"name",
")",
"]",
".",
"append",
"(",
"conf_file",
")",
"if",
"lua_file",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"lua_file",
")",
":",
"with_basepaths",
"[",
"(",
"data_basepath",
",",
"name",
")",
"]",
".",
"append",
"(",
"lua_file",
")",
"conf_files",
"=",
"with_basepaths",
".",
"items",
"(",
")",
"out_file",
"=",
"None",
"if",
"conf_files",
":",
"VcfannoIn",
"=",
"collections",
".",
"namedtuple",
"(",
"\"VcfannoIn\"",
",",
"[",
"\"conf\"",
",",
"\"lua\"",
"]",
")",
"bp_files",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"(",
"data_basepath",
",",
"name",
")",
",",
"anno_files",
"in",
"conf_files",
":",
"anno_files",
"=",
"list",
"(",
"set",
"(",
"anno_files",
")",
")",
"if",
"len",
"(",
"anno_files",
")",
"==",
"1",
":",
"cur",
"=",
"VcfannoIn",
"(",
"anno_files",
"[",
"0",
"]",
",",
"None",
")",
"elif",
"len",
"(",
"anno_files",
")",
"==",
"2",
":",
"lua_files",
"=",
"[",
"x",
"for",
"x",
"in",
"anno_files",
"if",
"x",
".",
"endswith",
"(",
"\".lua\"",
")",
"]",
"assert",
"len",
"(",
"lua_files",
")",
"==",
"1",
",",
"anno_files",
"lua_file",
"=",
"lua_files",
"[",
"0",
"]",
"anno_files",
".",
"remove",
"(",
"lua_file",
")",
"cur",
"=",
"VcfannoIn",
"(",
"anno_files",
"[",
"0",
"]",
",",
"lua_file",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected annotation group %s\"",
"%",
"anno_files",
")",
"bp_files",
"[",
"data_basepath",
"]",
".",
"append",
"(",
"cur",
")",
"for",
"data_basepath",
",",
"anno_files",
"in",
"bp_files",
".",
"items",
"(",
")",
":",
"ann_file",
"=",
"vcfanno",
".",
"run",
"(",
"vcf_file",
",",
"[",
"x",
".",
"conf",
"for",
"x",
"in",
"anno_files",
"]",
",",
"[",
"x",
".",
"lua",
"for",
"x",
"in",
"anno_files",
"]",
",",
"data",
",",
"basepath",
"=",
"data_basepath",
",",
"decomposed",
"=",
"decomposed",
")",
"if",
"ann_file",
":",
"out_file",
"=",
"ann_file",
"vcf_file",
"=",
"ann_file",
"return",
"out_file"
] |
Create a string out of the current file_index | def _get_file_index_str ( self ) : file_index = str ( self . file_index ) if self . n_digits is not None : file_index = file_index . zfill ( self . n_digits ) return file_index | 9,999 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L156-L161 | [
"def",
"shapes_match",
"(",
"a",
",",
"b",
")",
":",
"if",
"isinstance",
"(",
"a",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"isinstance",
"(",
"b",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"if",
"len",
"(",
"a",
")",
"!=",
"len",
"(",
"b",
")",
":",
"return",
"False",
"return",
"all",
"(",
"[",
"shapes_match",
"(",
"ia",
",",
"ib",
")",
"for",
"ia",
",",
"ib",
"in",
"zip",
"(",
"a",
",",
"b",
")",
"]",
")",
"elif",
"isinstance",
"(",
"a",
",",
"dict",
")",
"and",
"isinstance",
"(",
"b",
",",
"dict",
")",
":",
"if",
"len",
"(",
"a",
")",
"!=",
"len",
"(",
"b",
")",
":",
"return",
"False",
"match",
"=",
"True",
"for",
"(",
"ak",
",",
"av",
")",
",",
"(",
"bk",
",",
"bv",
")",
"in",
"zip",
"(",
"a",
".",
"items",
"(",
")",
",",
"b",
".",
"items",
"(",
")",
")",
":",
"match",
"=",
"match",
"and",
"all",
"(",
"[",
"ak",
"==",
"bk",
"and",
"shapes_match",
"(",
"av",
",",
"bv",
")",
"]",
")",
"return",
"match",
"else",
":",
"shape_checker",
"=",
"shape_checkers",
"[",
"(",
"type",
"(",
"a",
")",
",",
"type",
"(",
"b",
")",
")",
"]",
"return",
"shape_checker",
"(",
"a",
",",
"b",
")"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.