idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
19,900
def put_document(document_path: str, content_type: str, presigned_url: str) -> str:
    """Upload a local document to a presigned URL via HTTP PUT.

    Parameters
    ----------
    document_path : str
        Path of the local file whose bytes are uploaded.
    content_type : str
        Value sent as the ``Content-Type`` request header.
    presigned_url : str
        Destination presigned URL.

    Returns
    -------
    str
        The decoded body of the PUT response.

    Raises
    ------
    requests.HTTPError
        If the server answers with an error status.
    """
    payload = pathlib.Path(document_path).read_bytes()
    response = requests.put(
        presigned_url,
        data=payload,
        headers={'Content-Type': content_type},
    )
    response.raise_for_status()
    return response.content.decode()
Convenience method for putting a document to a presigned URL.
19,901
def get_expiration(self):
    """Return the expiration date as a local ``datetime``.

    Reads the integer ``'expiration'`` value (a Unix timestamp); returns
    ``None`` when no expiration is set.
    """
    timestamp = self._get_int('expiration')
    if timestamp is None:
        return None
    return datetime.datetime.fromtimestamp(timestamp)
Returns the expiration date .
19,902
def login(self, user_id, password):
    """Authenticate to ownCloud, creating a session on the server.

    Parameters
    ----------
    user_id : str
        User name (also embedded, quoted, in the DAV files path).
    password : str
        Password for the user.

    Raises
    ------
    HTTPResponseError
        If fetching the server capabilities fails; the session is
        closed and cleared before re-raising.
    """
    self._session = requests.session()
    self._session.verify = self._verify_certs
    self._session.auth = (user_id, password)
    try:
        self._update_capabilities()
        url_components = parse.urlparse(self.url)
        if self._dav_endpoint_version == 1:
            # v1 endpoint addresses files per-user.
            dav_suffix = 'remote.php/dav/files/' + parse.quote(user_id)
        else:
            dav_suffix = 'remote.php/webdav'
        self._davpath = url_components.path + dav_suffix
        self._webdav_url = self.url + dav_suffix
    except HTTPResponseError:
        self._session.close()
        self._session = None
        # Bare ``raise`` preserves the original traceback; the previous
        # ``raise e`` re-raised from this frame and lost it.
        raise
Authenticate to ownCloud . This will create a session on the server .
19,903
def file_info(self, path):
    """Return the file info for a remote file, or ``None`` if absent.

    Issues a depth-0 PROPFIND so only the entry itself is described.
    """
    entries = self._make_dav_request('PROPFIND', path, headers={'Depth': '0'})
    return entries[0] if entries else None
Returns the file info for the given remote file
19,904
def get_file_contents(self, path):
    """Return the raw bytes of a remote file.

    Returns ``False`` for a non-error, non-200 status.

    Raises
    ------
    HTTPResponseError
        When the server answers with a 4xx/5xx status.
    """
    normalized = self._normalize_path(path)
    response = self._session.get(
        self._webdav_url + parse.quote(self._encode_string(normalized)))
    if response.status_code == 200:
        return response.content
    if response.status_code >= 400:
        raise HTTPResponseError(response)
    return False
Returns the contents of a remote file
19,905
def get_directory_as_zip(self, remote_path, local_file):
    """Download a remote directory as a zip archive.

    Parameters
    ----------
    remote_path : str
        Remote directory to download.
    local_file : str or None
        Destination file name; defaults to the directory's base name.

    Returns
    -------
    bool
        ``True`` on success, ``False`` for unexpected non-error statuses.

    Raises
    ------
    HTTPResponseError
        When the server answers with a 4xx/5xx status.
    """
    remote_path = self._normalize_path(remote_path)
    url = (self.url + 'index.php/apps/files/ajax/download.php?dir='
           + parse.quote(remote_path))
    res = self._session.get(url, stream=True)
    if res.status_code == 200:
        if local_file is None:
            local_file = os.path.basename(remote_path)
        # ``with`` guarantees the handle is closed even when a chunk
        # read/write raises (the previous code leaked it on error).
        with open(local_file, 'wb', 8192) as file_handle:
            for chunk in res.iter_content(8192):
                file_handle.write(chunk)
        return True
    elif res.status_code >= 400:
        raise HTTPResponseError(res)
    return False
Downloads a remote directory as zip
19,906
def put_directory(self, target_path, local_directory, **kwargs):
    """Upload a local directory with all of its contents.

    Extra keyword arguments are forwarded to ``put_file``.

    Returns
    -------
    bool
        ``True`` when every file uploaded successfully, ``False`` on
        the first failure.
    """
    target_path = self._normalize_path(target_path)
    if not target_path.endswith('/'):
        target_path += '/'
    if not local_directory.endswith('/'):
        local_directory += '/'
    # Remote paths are rooted at the local directory's base name.
    basedir = os.path.basename(local_directory[:-1]) + '/'
    gathered = [
        (walk_path, basedir + walk_path[len(local_directory):], names)
        for walk_path, _, names in os.walk(local_directory)
    ]
    for walk_path, remote_sub, names in gathered:
        self.mkdir(target_path + remote_sub + '/')
        for name in names:
            uploaded = self.put_file(
                target_path + remote_sub + '/',
                walk_path + '/' + name,
                **kwargs)
            if not uploaded:
                return False
    return True
Upload a directory with all its contents
19,907
def _put_file_chunked(self, remote_path, local_source_file, **kwargs):
    """Upload a file in chunks.

    Files smaller than ``chunk_size`` are uploaded in a single request.

    Keyword Args
    ------------
    chunk_size : int
        Chunk size in bytes (default 10 MiB).
    keep_mtime : bool
        Preserve the local modification time remotely (default True).

    Returns
    -------
    The final DAV response (truthy) on success, ``False`` when a chunk
    upload fails.
    """
    chunk_size = kwargs.get('chunk_size', 10 * 1024 * 1024)
    result = True
    transfer_id = int(time.time())
    remote_path = self._normalize_path(remote_path)
    if remote_path.endswith('/'):
        remote_path += os.path.basename(local_source_file)
    stat_result = os.stat(local_source_file)
    headers = {}
    if kwargs.get('keep_mtime', True):
        headers['X-OC-MTIME'] = str(int(stat_result.st_mtime))
    # ``with`` closes the handle on every path; the previous code leaked
    # it both on the empty-file early return and on exceptions.
    with open(local_source_file, 'rb', 8192) as file_handle:
        file_handle.seek(0, os.SEEK_END)
        size = file_handle.tell()
        file_handle.seek(0)
        if size == 0:
            return self._make_dav_request(
                'PUT', remote_path, data='', headers=headers)
        chunk_count = int(math.ceil(float(size) / float(chunk_size)))
        if chunk_count > 1:
            headers['OC-CHUNKED'] = '1'
        for chunk_index in range(0, int(chunk_count)):
            data = file_handle.read(chunk_size)
            if chunk_count > 1:
                chunk_name = '%s-chunking-%s-%i-%i' % (
                    remote_path, transfer_id, chunk_count, chunk_index)
            else:
                chunk_name = remote_path
            if not self._make_dav_request(
                    'PUT', chunk_name, data=data, headers=headers):
                result = False
                break
    return result
Uploads a file using chunks . If the file is smaller than chunk_size it will be uploaded directly .
19,908
def list_open_remote_share(self):
    """List all pending remote shares.

    Returns
    -------
    list of dict
        One attribute dict per pending share.

    Raises
    ------
    HTTPResponseError
        When the OCS request does not succeed.
    """
    res = self._make_ocs_request(
        'GET', self.OCS_SERVICE_SHARE, 'remote_shares/pending')
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    return [
        {child.tag: child.text for child in element}
        for element in tree.find('data').iter('element')
    ]
List all pending remote shares
19,909
def accept_remote_share(self, share_id):
    """Accept a pending remote share by id.

    Returns ``False`` for a non-integer id, the raw response on success.
    """
    if not isinstance(share_id, int):
        return False
    res = self._make_ocs_request(
        'POST', self.OCS_SERVICE_SHARE,
        'remote_shares/pending/' + str(share_id))
    if res.status_code != 200:
        raise HTTPResponseError(res)
    return res
Accepts a remote share
19,910
def update_share(self, share_id, **kwargs):
    """Update permissions, password or public upload of a share.

    Keyword Args
    ------------
    perms : int
        New permission bitmask (dropped if above ``OCS_PERMISSION_ALL``).
    password : str
        New link password.
    public_upload : bool
        Enable/disable anonymous uploads.

    Returns
    -------
    bool
        ``True`` on success, ``False`` for invalid arguments.
    """
    perms = kwargs.get('perms')
    password = kwargs.get('password')
    public_upload = kwargs.get('public_upload')
    if isinstance(perms, int) and perms > self.OCS_PERMISSION_ALL:
        # Out-of-range permissions are silently ignored.
        perms = None
    if not (perms or password or (public_upload is not None)):
        return False
    if not isinstance(share_id, int):
        return False
    payload = {}
    if perms:
        payload['permissions'] = perms
    if isinstance(password, six.string_types):
        payload['password'] = password
    if (public_upload is not None) and isinstance(public_upload, bool):
        payload['publicUpload'] = str(public_upload).lower()
    res = self._make_ocs_request(
        'PUT', self.OCS_SERVICE_SHARE, 'shares/' + str(share_id),
        data=payload)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    return True
Updates a given share
19,911
def share_file_with_link(self, path, **kwargs):
    """Share a remote file via a public link.

    Keyword Args
    ------------
    perms : int
        Permission bitmask for the link.
    public_upload : bool
        Allow anonymous uploads (only honoured when passed as a bool).
    password : str
        Password protecting the link.

    Returns
    -------
    ShareInfo
        Share metadata: id, path, url and token.
    """
    perms = kwargs.get('perms')
    # NOTE: the default is the *string* 'false', which fails the bool
    # check below, so publicUpload is only sent for a real bool.
    public_upload = kwargs.get('public_upload', 'false')
    password = kwargs.get('password')
    path = self._normalize_path(path)
    post_data = {
        'shareType': self.OCS_SHARE_TYPE_LINK,
        'path': self._encode_string(path),
    }
    if (public_upload is not None) and isinstance(public_upload, bool):
        post_data['publicUpload'] = str(public_upload).lower()
    if isinstance(password, six.string_types):
        post_data['password'] = password
    if perms:
        post_data['permissions'] = perms
    res = self._make_ocs_request(
        'POST', self.OCS_SERVICE_SHARE, 'shares', data=post_data)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    data_el = tree.find('data')
    return ShareInfo({
        'id': data_el.find('id').text,
        'path': path,
        'url': data_el.find('url').text,
        'token': data_el.find('token').text,
    })
Shares a remote file with link
19,912
def is_shared(self, path):
    """Return whether *path* is already shared.

    A 404 from the shares endpoint means "not shared"; any other OCS
    error propagates.  Raises first if the path itself does not exist.
    """
    self.file_info(path)
    try:
        shares = self.get_shares(path)
    except OCSResponseError as error:
        if error.status_code != 404:
            raise error
        return False
    return bool(shares)
Checks whether a path is already shared
19,913
def get_share(self, share_id):
    """Return share information for a known share id.

    Returns ``None`` when the id is missing or not an integer.
    """
    if not isinstance(share_id, int):
        # Covers both None and any non-int value.
        return None
    res = self._make_ocs_request(
        'GET', self.OCS_SERVICE_SHARE, 'shares/' + str(share_id))
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    return self._get_shareinfo(tree.find('data').find('element'))
Returns share information about known share
19,914
def get_shares(self, path='', **kwargs):
    """Return a list of shares for *path*.

    Keyword Args
    ------------
    reshares : bool
        Also list reshares.
    subfiles : bool
        List shares for all files in the folder.
    shared_with_me : bool
        List shares received by the user (the path filter is dropped).

    Returns
    -------
    list of ShareInfo, or None for a non-string path.
    """
    if not isinstance(path, six.string_types):
        return None
    action = 'shares'
    if path != '':
        encoded = self._encode_string(self._normalize_path(path))
        query = {'path': encoded}
        if kwargs.get('reshares', False) is True:
            query['reshares'] = True
        if kwargs.get('subfiles', False) is True:
            query['subfiles'] = 'true'
        if kwargs.get('shared_with_me', False) is True:
            query['shared_with_me'] = 'true'
            # The server ignores path when listing received shares.
            del query['path']
        action += '?' + parse.urlencode(query)
    res = self._make_ocs_request('GET', self.OCS_SERVICE_SHARE, action)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    return [self._get_shareinfo(element)
            for element in tree.find('data').iter('element')]
Returns array of shares
19,915
def create_user(self, user_name, initial_password):
    """Create a user with an initial password (provisioning API).

    Creating an already-existing user is not an error.  An OCS error
    999 means the provisioning API is disabled.
    """
    res = self._make_ocs_request(
        'POST', self.OCS_SERVICE_CLOUD, 'users',
        data={'password': initial_password, 'userid': user_name})
    if res.status_code != 200:
        raise HTTPResponseError(res)
    self._check_ocs_status(ET.fromstring(res.content), [100])
    return True
Create a new user with an initial password via provisioning API . It is not an error if the user already existed before . If you get back an error 999 then the provisioning API is not enabled .
19,916
def delete_user(self, user_name):
    """Delete a user via the provisioning API.

    An OCS error 999 means the provisioning API is disabled.
    """
    res = self._make_ocs_request(
        'DELETE', self.OCS_SERVICE_CLOUD, 'users/' + user_name)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    return True
Deletes a user via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
19,917
def search_users(self, user_name):
    """Search for users via the provisioning API.

    An empty *user_name* lists all users.  An OCS error 999 means the
    provisioning API is disabled.
    """
    action_path = 'users'
    if user_name:
        action_path += '?search={}'.format(user_name)
    res = self._make_ocs_request('GET', self.OCS_SERVICE_CLOUD, action_path)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    return [element.text for element in tree.findall('data/users/element')]
Searches for users via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
19,918
def set_user_attribute(self, user_name, key, value):
    """Set a single attribute of a user via the provisioning API."""
    res = self._make_ocs_request(
        'PUT', self.OCS_SERVICE_CLOUD,
        'users/' + parse.quote(user_name),
        data={'key': self._encode_string(key),
              'value': self._encode_string(value)})
    if res.status_code != 200:
        raise HTTPResponseError(res)
    self._check_ocs_status(ET.fromstring(res.content), [100])
    return True
Sets a user attribute
19,919
def add_user_to_group(self, user_name, group_name):
    """Add a user to a group via the provisioning API."""
    res = self._make_ocs_request(
        'POST', self.OCS_SERVICE_CLOUD,
        'users/' + user_name + '/groups',
        data={'groupid': group_name})
    if res.status_code != 200:
        raise HTTPResponseError(res)
    self._check_ocs_status(ET.fromstring(res.content), [100])
    return True
Adds a user to a group .
19,920
def get_user_groups(self, user_name):
    """Return the list of group names a user belongs to."""
    res = self._make_ocs_request(
        'GET', self.OCS_SERVICE_CLOUD,
        'users/' + user_name + '/groups',
    )
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree, [100])
    return [group.text for group in tree.find('data/groups')]
Get a list of groups associated to a user .
19,921
def get_user(self, user_name):
    """Return a dict of information about a user.

    Note: unlike the other provisioning calls, the HTTP status is not
    checked here; OCS status validation is relied on instead.
    """
    res = self._make_ocs_request(
        'GET', self.OCS_SERVICE_CLOUD,
        'users/' + parse.quote(user_name), data={})
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    return self._xml_to_dict(tree.find('data'))
Retrieves information about a user
19,922
def get_user_subadmin_groups(self, user_name):
    """Return the ``data`` element listing a user's subadmin groups."""
    res = self._make_ocs_request(
        'GET', self.OCS_SERVICE_CLOUD,
        'users/' + user_name + '/subadmins',
    )
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree, [100])
    return tree.find('data')
Get a list of subadmin groups associated to a user .
19,923
def share_file_with_user(self, path, user, **kwargs):
    """Share a remote file with a (possibly remote) user.

    Returns ``False`` for invalid permissions or an empty user name,
    otherwise a ShareInfo describing the new share.
    """
    remote_user = kwargs.get('remote_user', False)
    perms = kwargs.get('perms', self.OCS_PERMISSION_READ)
    if (((not isinstance(perms, int)) or (perms > self.OCS_PERMISSION_ALL))
            or ((not isinstance(user, six.string_types)) or (user == ''))):
        return False
    if remote_user and not user.endswith('/'):
        # Remote federated users are addressed with a trailing slash.
        user += '/'
    path = self._normalize_path(path)
    share_type = (self.OCS_SHARE_TYPE_REMOTE if remote_user
                  else self.OCS_SHARE_TYPE_USER)
    post_data = {
        'shareType': share_type,
        'shareWith': user,
        'path': self._encode_string(path),
        'permissions': perms,
    }
    res = self._make_ocs_request(
        'POST', self.OCS_SERVICE_SHARE, 'shares', data=post_data)
    if self._debug:
        print('OCS share_file request for file %s with permissions %i '
              'returned: %i' % (path, perms, res.status_code))
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    data_el = tree.find('data')
    return ShareInfo({
        'id': data_el.find('id').text,
        'path': path,
        'permissions': perms,
    })
Shares a remote file with specified user
19,924
def delete_group(self, group_name):
    """Delete a group via the provisioning API.

    An OCS error 999 means the provisioning API is disabled.
    """
    res = self._make_ocs_request(
        'DELETE', self.OCS_SERVICE_CLOUD, 'groups/' + group_name)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    return True
Delete a group via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
19,925
def get_groups(self):
    """Return all group names via the provisioning API.

    An OCS error 999 means the provisioning API is disabled.
    """
    res = self._make_ocs_request('GET', self.OCS_SERVICE_CLOUD, 'groups')
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    return [element.text for element in tree.findall('data/groups/element')]
Get groups via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
19,926
def get_group_members(self, group_name):
    """Return the user names belonging to *group_name*.

    An OCS error 999 means the provisioning API is disabled.
    """
    res = self._make_ocs_request(
        'GET', self.OCS_SERVICE_CLOUD, 'groups/' + group_name)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree, [100])
    return [user.text for user in tree.find('data/users')]
Get group members via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
19,927
def group_exists(self, group_name):
    """Check whether a group exists via the provisioning API.

    An OCS error 999 means the provisioning API is disabled.
    """
    res = self._make_ocs_request(
        'GET', self.OCS_SERVICE_CLOUD, 'groups?search=' + group_name)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    return any(element is not None and element.text == group_name
               for element in tree.findall('data/groups/element'))
Checks a group via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
19,928
def share_file_with_group(self, path, group, **kwargs):
    """Share a remote file with a group.

    Returns ``False`` for invalid permissions or an empty group name,
    otherwise a ShareInfo describing the new share.
    """
    perms = kwargs.get('perms', self.OCS_PERMISSION_READ)
    if (((not isinstance(perms, int)) or (perms > self.OCS_PERMISSION_ALL))
            or ((not isinstance(group, six.string_types)) or (group == ''))):
        return False
    path = self._normalize_path(path)
    post_data = {
        'shareType': self.OCS_SHARE_TYPE_GROUP,
        'shareWith': group,
        'path': path,
        'permissions': perms,
    }
    res = self._make_ocs_request(
        'POST', self.OCS_SERVICE_SHARE, 'shares', data=post_data)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    data_el = tree.find('data')
    return ShareInfo({
        'id': data_el.find('id').text,
        'path': path,
        'permissions': perms,
    })
Shares a remote file with specified group
19,929
def get_attribute(self, app=None, key=None):
    """Return application attribute(s) from the privatedata service.

    With both *app* and *key*: the first matching value (or ``None``
    when absent).  With only *app*: list of ``(key, value)`` tuples.
    With neither: list of ``(app, key, value)`` tuples.
    """
    path = 'getattribute'
    if app is not None:
        path += '/' + parse.quote(app, '')
    if key is not None:
        path += '/' + parse.quote(self._encode_string(key), '')
    res = self._make_ocs_request('GET', self.OCS_SERVICE_PRIVATEDATA, path)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    values = []
    for element in tree.find('data').iter('element'):
        app_text = element.find('app').text
        key_text = element.find('key').text
        value_text = element.find('value').text or ''
        if key is not None:
            # A specific key was requested: first match wins.
            return value_text
        if app is None:
            values.append((app_text, key_text, value_text))
        else:
            values.append((key_text, value_text))
    if len(values) == 0 and key is not None:
        return None
    return values
Returns an application attribute
19,930
def set_attribute(self, app, key, value):
    """Set an application attribute in the privatedata service."""
    path = ('setattribute/' + parse.quote(app, '') + '/'
            + parse.quote(self._encode_string(key), ''))
    res = self._make_ocs_request(
        'POST', self.OCS_SERVICE_PRIVATEDATA, path,
        data={'value': self._encode_string(value)})
    if res.status_code != 200:
        raise HTTPResponseError(res)
    self._check_ocs_status(ET.fromstring(res.content))
    return True
Sets an application attribute
19,931
def get_apps(self):
    """Return a dict mapping app name -> enabled flag (provisioning API)."""
    def _fetch(action):
        # One OCS round-trip returning the listed app names.
        res = self._make_ocs_request('GET', self.OCS_SERVICE_CLOUD, action)
        if res.status_code != 200:
            raise HTTPResponseError(res)
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        return [el.text for el in tree.findall('data/apps/element')]

    enabled_state = {name: False for name in _fetch('apps')}
    for name in _fetch('apps?filter=enabled'):
        enabled_state[name] = True
    return enabled_state
List all enabled apps through the provisioning api .
19,932
def enable_app(self, appname):
    """Enable an app through the provisioning API."""
    res = self._make_ocs_request(
        'POST', self.OCS_SERVICE_CLOUD, 'apps/' + appname)
    if res.status_code != 200:
        raise HTTPResponseError(res)
    return True
Enable an app through provisioning_api
19,933
def _encode_string(s):
    """Encode a ``unicode`` instance to utf-8 under Python 2.

    A ``str`` (or any other value) is returned unchanged; on Python 3
    this is a no-op.
    """
    if not six.PY2:
        return s
    return s.encode('utf-8') if isinstance(s, unicode) else s
Encodes a unicode instance to utf - 8 . If a str is passed it will simply be returned
19,934
def _check_ocs_status ( tree , accepted_codes = [ 100 ] ) : code_el = tree . find ( 'meta/statuscode' ) if code_el is not None and int ( code_el . text ) not in accepted_codes : r = requests . Response ( ) msg_el = tree . find ( 'meta/message' ) if msg_el is None : msg_el = tree r . _content = ET . tostring ( msg_el ) r . status_code = int ( code_el . text ) raise OCSResponseError ( r )
Checks the status code of an OCS request
19,935
def make_ocs_request(self, method, service, action, **kwargs):
    """Issue an OCS API request and validate its OCS status.

    Keyword Args
    ------------
    accepted_codes : list of int
        OCS status codes treated as success (default ``[100]``).
    """
    accepted_codes = kwargs.pop('accepted_codes', [100])
    res = self._make_ocs_request(method, service, action, **kwargs)
    if res.status_code != 200:
        raise OCSResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree, accepted_codes=accepted_codes)
    return res
Makes a OCS API request and analyses the response
19,936
def _make_ocs_request ( self , method , service , action , ** kwargs ) : slash = '' if service : slash = '/' path = self . OCS_BASEPATH + service + slash + action attributes = kwargs . copy ( ) if 'headers' not in attributes : attributes [ 'headers' ] = { } attributes [ 'headers' ] [ 'OCS-APIREQUEST' ] = 'true' if self . _debug : print ( 'OCS request: %s %s %s' % ( method , self . url + path , attributes ) ) res = self . _session . request ( method , self . url + path , ** attributes ) return res
Makes a OCS API request
19,937
def _make_dav_request ( self , method , path , ** kwargs ) : if self . _debug : print ( 'DAV request: %s %s' % ( method , path ) ) if kwargs . get ( 'headers' ) : print ( 'Headers: ' , kwargs . get ( 'headers' ) ) path = self . _normalize_path ( path ) res = self . _session . request ( method , self . _webdav_url + parse . quote ( self . _encode_string ( path ) ) , ** kwargs ) if self . _debug : print ( 'DAV status: %i' % res . status_code ) if res . status_code in [ 200 , 207 ] : return self . _parse_dav_response ( res ) if res . status_code in [ 204 , 201 ] : return True raise HTTPResponseError ( res )
Makes a WebDAV request
19,938
def _parse_dav_response ( self , res ) : if res . status_code == 207 : tree = ET . fromstring ( res . content ) items = [ ] for child in tree : items . append ( self . _parse_dav_element ( child ) ) return items return False
Parses the DAV responses from a multi - status response
19,939
def _parse_dav_element(self, dav_response):
    """Build a FileInfo from a single DAV ``response`` element."""
    href = parse.unquote(
        self._strip_dav_path(dav_response.find('{DAV:}href').text))
    if six.PY2:
        href = href.decode('utf-8')
    # A trailing slash marks a collection (directory).
    file_type = 'dir' if href[-1] == '/' else 'file'
    prop = dav_response.find('{DAV:}propstat').find('{DAV:}prop')
    attributes = {attr.tag: attr.text for attr in prop}
    return FileInfo(href, file_type, attributes)
Parses a single DAV element
19,940
def _webdav_move_copy ( self , remote_path_source , remote_path_target , operation ) : if operation != "MOVE" and operation != "COPY" : return False if remote_path_target [ - 1 ] == '/' : remote_path_target += os . path . basename ( remote_path_source ) if not ( remote_path_target [ 0 ] == '/' ) : remote_path_target = '/' + remote_path_target remote_path_source = self . _normalize_path ( remote_path_source ) headers = { 'Destination' : self . _webdav_url + parse . quote ( self . _encode_string ( remote_path_target ) ) } return self . _make_dav_request ( operation , remote_path_source , headers = headers )
Copies or moves a remote file or directory
19,941
def _xml_to_dict ( self , element ) : return_dict = { } for el in element : return_dict [ el . tag ] = None children = el . getchildren ( ) if children : return_dict [ el . tag ] = self . _xml_to_dict ( children ) else : return_dict [ el . tag ] = el . text return return_dict
Take an XML element iterate over it and build a dict
19,942
def _get_shareinfo ( self , data_el ) : if ( data_el is None ) or not ( isinstance ( data_el , ET . Element ) ) : return None return ShareInfo ( self . _xml_to_dict ( data_el ) )
Simple helper which returns instance of ShareInfo class
19,943
def emit(self, *args, **kwargs):
    """Call every connected slot with the given arguments.

    Does nothing while the signal is blocked.  Dead weak references
    and falsy slots are skipped.
    """
    if self._block:
        return
    for slot in self._slots:
        if not slot:
            continue
        if isinstance(slot, partial):
            # Partials were stored with their arguments baked in.
            slot()
        elif isinstance(slot, weakref.WeakKeyDictionary):
            # Bound methods are stored as {instance: unbound function}.
            for obj, method in slot.items():
                method(obj, *args, **kwargs)
        elif isinstance(slot, weakref.ref):
            target = slot()
            if target is not None:
                target(*args, **kwargs)
        else:
            slot(*args, **kwargs)
Calls all the connected slots with the provided args and kwargs unless block is activated
19,944
def connect(self, slot):
    """Connect the signal to any callable.

    Bound methods are stored weakly (WeakKeyDictionary of
    instance -> function), plain functions as weak references, and
    partials/lambdas strongly (they have no stable weak identity).

    Raises
    ------
    ValueError
        If *slot* is not callable.
    """
    if not callable(slot):
        raise ValueError(
            "Connection to non-callable '%s' object failed"
            % slot.__class__.__name__)
    if isinstance(slot, partial) or '<' in slot.__name__:
        if slot not in self._slots:
            self._slots.append(slot)
        return
    if inspect.ismethod(slot):
        owner = slot.__self__
        slot_dict = weakref.WeakKeyDictionary()
        slot_dict[owner] = slot.__func__
        if slot_dict not in self._slots:
            self._slots.append(slot_dict)
        return
    ref = weakref.ref(slot)
    if ref not in self._slots:
        self._slots.append(ref)
Connects the signal to any callable object
19,945
def disconnect(self, slot):
    """Disconnect *slot* from the signal; unknown slots are ignored."""
    if not callable(slot):
        return
    if inspect.ismethod(slot):
        owner = slot.__self__
        for stored in self._slots:
            if (isinstance(stored, weakref.WeakKeyDictionary)
                    and owner in stored
                    and stored[owner] is slot.__func__):
                self._slots.remove(stored)
                break
        return
    if isinstance(slot, partial) or '<' in slot.__name__:
        # Stored strongly; remove the object itself.
        target = slot
    else:
        # Stored as a weak reference; equal refs compare equal.
        target = weakref.ref(slot)
    try:
        self._slots.remove(target)
    except ValueError:
        pass
Disconnects the slot from the signal
19,946
def block(self, signals=None, isBlocked=True):
    """Block (or unblock) the named signals, or all of them.

    Raises
    ------
    RuntimeError
        If a named signal is unknown.
    """
    if signals:
        # ``basestring`` only exists on Python 2; fall back to str.
        try:
            string_types = basestring
        except NameError:
            string_types = str
        if isinstance(signals, string_types):
            signals = [signals]
    signals = signals or self.keys()
    for signal in signals:
        if signal not in self:
            raise RuntimeError("Could not find signal matching %s" % signal)
        self[signal].block(isBlocked)
Sets the block on any provided signals or to all signals
19,947
def _open ( file_or_str , ** kwargs ) : if hasattr ( file_or_str , 'read' ) : yield file_or_str elif isinstance ( file_or_str , six . string_types ) : with open ( file_or_str , ** kwargs ) as file_desc : yield file_desc else : raise IOError ( 'Invalid file-or-str object: {}' . format ( file_or_str ) )
Either open a file handle or use an existing file - like object .
19,948
def load_delimited(filename, converters, delimiter=r'\s+'):
    """Load data from a delimited annotation file.

    The number of columns is inferred from ``len(converters)``.

    Parameters
    ----------
    filename : str or file-like
        Path (or open handle) of the annotation file.
    converters : list of callable
        One converter per column, applied to each field.
    delimiter : str
        Regular expression separating the columns.

    Returns
    -------
    list, or tuple of lists
        The single column when one converter is given, otherwise one
        list per column.

    Raises
    ------
    ValueError
        On a row with the wrong column count or an unconvertible value.
    """
    n_columns = len(converters)
    columns = tuple(list() for _ in range(n_columns))
    splitter = re.compile(delimiter)
    with _open(filename, mode='r') as input_file:
        for row, line in enumerate(input_file, 1):
            # Split at most n_columns-1 times so trailing fields keep
            # embedded delimiters.
            data = splitter.split(line.strip(), n_columns - 1)
            if n_columns != len(data):
                raise ValueError(
                    'Expected {} columns, got {} at '
                    '{}:{:d}:\n\t{}'.format(
                        n_columns, len(data), filename, row, line))
            for value, column, converter in zip(data, columns, converters):
                # Catch Exception (not a bare except) so that e.g.
                # KeyboardInterrupt/SystemExit are not swallowed.
                try:
                    converted_value = converter(value)
                except Exception:
                    raise ValueError(
                        "Couldn't convert value {} using {} "
                        "found at {}:{:d}:\n\t{}".format(
                            value, converter.__name__, filename, row, line))
                column.append(converted_value)
    if n_columns == 1:
        return columns[0]
    return columns
Utility function for loading in data from an annotation file where columns are delimited. The number of columns is inferred from the length of the provided converters list.
19,949
def load_events(filename, delimiter=r'\s+'):
    """Import time-stamp events from an annotation file.

    The file should consist of a single column of numeric values
    corresponding to event times — useful for duration-less events
    such as beats or onsets.  (The original body began with a stray
    ``r`` left over from a stripped raw docstring, which would raise
    NameError at call time; this restores a proper docstring.)

    Parameters
    ----------
    filename : str or file-like
        Path (or open handle) of the annotation file.
    delimiter : str
        Regular expression separating columns.

    Returns
    -------
    np.ndarray
        Array of event times; invalid event arrays only warn.
    """
    events = load_delimited(filename, [float], delimiter)
    events = np.array(events)
    try:
        util.validate_events(events)
    except ValueError as error:
        # Invalid events are reported but not fatal.
        warnings.warn(error.args[0])
    return events
Import time-stamp events from an annotation file. The file should consist of a single column of numeric values corresponding to the event times. This is primarily useful for processing events which lack duration, such as beats or onsets.
19,950
def load_labeled_events(filename, delimiter=r'\s+'):
    """Import labeled time-stamp events from an annotation file.

    Column 0 holds numeric event times; column 1 holds a string label
    per event (e.g. beats with metric beat number).  (A stray ``r``
    from a stripped raw docstring, which would raise NameError, has
    been replaced by this docstring.)

    Parameters
    ----------
    filename : str or file-like
        Path (or open handle) of the annotation file.
    delimiter : str
        Regular expression separating columns.

    Returns
    -------
    (np.ndarray, list of str)
        Event times and their labels; invalid event arrays only warn.
    """
    events, labels = load_delimited(filename, [float, str], delimiter)
    events = np.array(events)
    try:
        util.validate_events(events)
    except ValueError as error:
        # Invalid events are reported but not fatal.
        warnings.warn(error.args[0])
    return events, labels
Import labeled time-stamp events from an annotation file. The file should consist of two columns; the first having numeric values corresponding to the event times and the second having string labels for each event. This is primarily useful for processing labeled events which lack duration, such as beats with metric beat number or onsets with an instrument label.
19,951
def load_time_series(filename, delimiter=r'\s+'):
    """Import a time series from a two-column annotation file.

    Column 0 holds sample times, column 1 the sampled values.  (A
    stray ``r`` from a stripped raw docstring, which would raise
    NameError, has been replaced by this docstring.)

    Parameters
    ----------
    filename : str or file-like
        Path (or open handle) of the annotation file.
    delimiter : str
        Regular expression separating columns.

    Returns
    -------
    (np.ndarray, np.ndarray)
        Times and values.
    """
    times, values = load_delimited(filename, [float, float], delimiter)
    return np.array(times), np.array(values)
Import a time series from an annotation file. The file should consist of two columns of numeric values corresponding to the time and value of each sample of the time series.
19,952
def load_wav(path, mono=True):
    """Load a .wav file as a numpy array using scipy.io.wavfile.

    Integer PCM data is rescaled to floats; multi-channel audio is
    averaged to mono when *mono* is True.

    Returns
    -------
    (np.ndarray, int)
        Audio samples and the sample rate.

    Raises
    ------
    ValueError
        For unsupported sample dtypes.
    """
    fs, audio_data = scipy.io.wavfile.read(path)
    # Scale factors mirror the original implementation exactly
    # (including 2**24 for int32 data).
    scales = {'int8': 2. ** 8, 'int16': 2. ** 16, 'int32': 2. ** 24}
    dtype_name = str(audio_data.dtype)
    if dtype_name not in scales:
        raise ValueError(
            'Got unexpected .wav data type {}'.format(audio_data.dtype))
    audio_data = audio_data / scales[dtype_name]
    if mono and audio_data.ndim != 1:
        audio_data = audio_data.mean(axis=1)
    return audio_data, fs
Loads a . wav file as a numpy array using scipy . io . wavfile .
19,953
def load_ragged_time_series(filename, dtype=float, delimiter=r'\s+',
                            header=False):
    """Load a ragged time-series annotation file.

    Column 0 holds time stamps; columns 1..n hold values, where n may
    vary from row to row.

    Parameters
    ----------
    filename : str or file-like
        Path (or open handle) of the annotation file.
    dtype : callable
        Converter applied to the value columns.
    delimiter : str
        Regular expression separating the columns.
    header : bool
        If True, the first line is a header and is skipped.

    Returns
    -------
    (np.ndarray, list of np.ndarray)
        Times, and one value array per row.
    """
    times = []
    values = []
    splitter = re.compile(delimiter)
    start_row = 1 if header else 0
    with _open(filename, mode='r') as input_file:
        for row, line in enumerate(input_file, start_row):
            # Bug fix: previously only the row numbering changed with
            # ``header`` while the header line itself was still parsed
            # (and failed float conversion); skip it instead.
            if header and row == start_row:
                continue
            data = splitter.split(line.strip())
            try:
                converted_time = float(data[0])
            except (TypeError, ValueError) as exe:
                six.raise_from(ValueError(
                    "Couldn't convert value {} using {} "
                    "found at {}:{:d}:\n\t{}".format(
                        data[0], float.__name__, filename, row, line)), exe)
            times.append(converted_time)
            try:
                converted_value = np.array(data[1:], dtype=dtype)
            except (TypeError, ValueError) as exe:
                six.raise_from(ValueError(
                    "Couldn't convert value {} using {} "
                    "found at {}:{:d}:\n\t{}".format(
                        data[1:], dtype.__name__, filename, row, line)), exe)
            values.append(converted_value)
    return np.array(times), values
Utility function for loading in data from a delimited time series annotation file with a variable number of columns. Assumes that column 0 contains time stamps and columns 1 through n contain values. n may be variable from time stamp to time stamp.
19,954
def pitch_class_to_semitone(pitch_class):
    """Convert a pitch class (e.g. 'C#', 'Gbb') to a semitone number.

    The first character indexes ``PITCH_CLASSES``; each subsequent
    '#'/'b' raises/lowers by one semitone.  The result is reduced
    modulo 12.  (A stray ``r`` from a stripped raw docstring, which
    would raise NameError, has been replaced by this docstring.)

    Raises
    ------
    InvalidChordException
        If the pitch class is malformed.
    """
    semitone = 0
    for idx, char in enumerate(pitch_class):
        if char == '#' and idx > 0:
            semitone += 1
        elif char == 'b' and idx > 0:
            semitone -= 1
        elif idx == 0:
            # NOTE(review): a base letter missing from PITCH_CLASSES
            # yields None here and fails on the final modulo — confirm
            # callers validate membership upstream.
            semitone = PITCH_CLASSES.get(char)
        else:
            raise InvalidChordException(
                "Pitch class improperly formed: %s" % pitch_class)
    return semitone % 12
Convert a pitch class to a semitone.
19,955
def scale_degree_to_semitone(scale_degree):
    """Convert a scale degree (e.g. 'b7', '#5') to a semitone offset.

    Leading '#'/'b' characters shift the base degree up/down by one
    semitone each.  (A stray ``r`` from a stripped raw docstring,
    which would raise NameError, has been replaced by this docstring.)

    Raises
    ------
    InvalidChordException
        If the stripped degree is not in ``SCALE_DEGREES``.
    """
    offset = 0
    if scale_degree.startswith("#"):
        offset = scale_degree.count("#")
        scale_degree = scale_degree.strip("#")
    elif scale_degree.startswith('b'):
        offset = -1 * scale_degree.count("b")
        scale_degree = scale_degree.strip("b")
    semitone = SCALE_DEGREES.get(scale_degree, None)
    if semitone is None:
        raise InvalidChordException(
            "Scale degree improperly formed: {}, expected one of "
            "{}.".format(scale_degree, list(SCALE_DEGREES.keys())))
    return semitone + offset
Convert a scale degree to a semitone.
19,956
def scale_degree_to_bitmap(scale_degree, modulo=False, length=BITMAP_LENGTH):
    """Create a bitmap representation of one scale degree.

    A leading '*' marks the degree for removal (value -1 instead of +1).
    Degrees beyond ``length`` are dropped unless ``modulo`` wraps them.
    """
    sign = -1 if scale_degree.startswith("*") else 1
    if sign < 0:
        scale_degree = scale_degree.strip("*")
    bitmap = [0] * length
    degree_idx = scale_degree_to_semitone(scale_degree)
    if modulo or degree_idx < length:
        bitmap[degree_idx % length] = sign
    return np.array(bitmap)
Create a bitmap representation of a scale degree .
19,957
def quality_to_bitmap(quality):
    """Return a fresh bitmap array for a chord-quality shorthand.

    Raises
    ------
    InvalidChordException
        If the shorthand is not in ``QUALITIES``.
    """
    try:
        bitmap = QUALITIES[quality]
    except KeyError:
        raise InvalidChordException(
            "Unsupported chord quality shorthand: '%s' "
            "Did you mean to reduce extended chords?" % quality)
    return np.array(bitmap)
Return the bitmap for a given quality .
19,958
def validate_chord_label(chord_label):
    """Test for well-formedness of a chord label.

    Raises
    ------
    InvalidChordException
        If ``chord_label`` does not match the Harte et al. chord syntax.
    """
    # NOTE(review): the original regex literal was lost (``re.compile(r)``
    # is not valid); this pattern is reconstructed from the Harte et al.
    # chord-label syntax (root : quality ( degree list ) / bass) —
    # confirm against upstream before relying on edge cases.
    pattern = re.compile(
        r'''^((N|X)|(([A-G](b*|#*))((:(maj|min|dim|aug|1|5|sus2|sus4|maj6'''
        r'''|min6|7|maj7|min7|dim7|hdim7|minmaj7|maj9|min9|9|b9|sus4(b9)?'''
        r'''|11|min11|13|maj13|min13)(\((\*?(b*|#*)([1-9]|1[0-3]?))'''
        r'''(,\*?(b*|#*)([1-9]|1[0-3]?))*\))?)|(:\((\*?(b*|#*)'''
        r'''([1-9]|1[0-3]?))(,\*?(b*|#*)([1-9]|1[0-3]?))*\)))?'''
        r'''((/((b*|#*)([1-9]|1[0-3]?)))?)?))$''')
    if not pattern.match(chord_label):
        raise InvalidChordException('Invalid chord label: '
                                    '{}'.format(chord_label))
Test for well - formedness of a chord label .
19,959
def join(chord_root, quality='', extensions=None, bass=''):
    r"""Assemble a chord label from its parts and validate the result.

    A bass of ``'1'`` (the root) is omitted, as is an empty quality with
    no extensions.
    """
    parts = [chord_root]
    if quality or extensions:
        parts.append(":%s" % quality)
    if extensions:
        parts.append("(%s)" % ",".join(extensions))
    if bass and bass != '1':
        parts.append("/%s" % bass)
    chord_label = "".join(parts)
    validate_chord_label(chord_label)
    return chord_label
r Join the parts of a chord into a complete chord label .
19,960
def encode(chord_label, reduce_extended_chords=False,
           strict_bass_intervals=False):
    """Translate a chord label to (root, semitone bitmap, bass) numbers.

    Parameters
    ----------
    chord_label : str
        Chord label to encode.
    reduce_extended_chords : bool
        Map extended degrees back into the octave (passed through to
        ``split`` and ``scale_degree_to_bitmap``).
    strict_bass_intervals : bool
        Raise if the bass degree is absent from the chord.
    """
    if chord_label == NO_CHORD:
        return NO_CHORD_ENCODED
    if chord_label == X_CHORD:
        return X_CHORD_ENCODED
    chord_root, quality, scale_degrees, bass = split(
        chord_label, reduce_extended_chords=reduce_extended_chords)
    root_number = pitch_class_to_semitone(chord_root)
    bass_number = scale_degree_to_semitone(bass) % 12
    semitone_bitmap = quality_to_bitmap(quality)
    semitone_bitmap[0] = 1  # the root is always active
    for scale_degree in scale_degrees:
        semitone_bitmap += scale_degree_to_bitmap(scale_degree,
                                                  reduce_extended_chords)
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement and is behaviorally identical here.
    semitone_bitmap = (semitone_bitmap > 0).astype(int)
    if not semitone_bitmap[bass_number] and strict_bass_intervals:
        raise InvalidChordException(
            "Given bass scale degree is absent from this chord: "
            "%s" % chord_label, chord_label)
    else:
        semitone_bitmap[bass_number] = 1
    return root_number, semitone_bitmap, bass_number
Translate a chord label to numerical representations for evaluation .
19,961
def encode_many(chord_labels, reduce_extended_chords=False):
    """Encode a list of chord labels, caching repeated labels.

    Returns
    -------
    roots, semitones, basses : np.ndarray
        Per-label root numbers, (n, 12) semitone bitmaps, and bass numbers.
    """
    num_items = len(chord_labels)
    # BUG FIX: np.int was removed in NumPy 1.24; builtin int is equivalent.
    roots, basses = np.zeros([2, num_items], dtype=int)
    semitones = np.zeros([num_items, 12], dtype=int)
    # Memoize per-label encodings — chord sequences repeat labels heavily.
    local_cache = dict()
    for i, label in enumerate(chord_labels):
        result = local_cache.get(label, None)
        if result is None:
            result = encode(label, reduce_extended_chords)
            local_cache[label] = result
        roots[i], semitones[i], basses[i] = result
    return roots, semitones, basses
Translate a set of chord labels to numerical representations for sane evaluation .
19,962
def rotate_bitmap_to_root(bitmap, chord_root):
    """Circularly shift a relative bitmap to absolute pitch classes.

    Each active index is moved ``chord_root`` positions (mod 12).
    """
    bitmap = np.asarray(bitmap)
    assert bitmap.ndim == 1, "Currently only 1D bitmaps are supported."
    active = list(np.nonzero(bitmap))
    active[-1] = (active[-1] + chord_root) % 12
    shifted = np.zeros_like(bitmap)
    shifted[tuple(active)] = 1
    return shifted
Circularly shift a relative bitmap to its absolute pitch classes.
19,963
def rotate_bitmaps_to_roots(bitmaps, roots):
    """Rotate each relative bitmap by its corresponding root."""
    return np.asarray([rotate_bitmap_to_root(bitmap, chord_root)
                       for bitmap, chord_root in zip(bitmaps, roots)])
Circularly shift relative bitmaps to absolute pitch classes.
19,964
def validate(reference_labels, estimated_labels):
    """Check that both label lists look like valid chord annotations.

    Raises on length mismatch or malformed labels; warns on empty lists.
    """
    n_ref = len(reference_labels)
    n_est = len(estimated_labels)
    if n_ref != n_est:
        raise ValueError("Chord comparison received different length lists: "
                         "len(reference)=%d\tlen(estimates)=%d"
                         % (n_ref, n_est))
    for chord_label in list(reference_labels) + list(estimated_labels):
        validate_chord_label(chord_label)
    if n_ref == 0:
        warnings.warn('Reference labels are empty')
    if n_est == 0:
        warnings.warn('Estimated labels are empty')
Checks that the input annotations to a comparison function look like valid chord labels .
19,965
def weighted_accuracy(comparisons, weights):
    """Weighted mean of comparison scores, skipping entries marked < 0.

    Scores of -1 flag non-comparable reference chords and are excluded
    from both the sum and the normalization.
    """
    num_scores = len(comparisons)
    if weights.shape[0] != num_scores:
        raise ValueError('weights and comparisons should be of the same'
                         ' length. len(weights) = {} but len(comparisons)'
                         ' = {}'.format(weights.shape[0], num_scores))
    if (weights < 0).any():
        raise ValueError('Weights should all be positive.')
    if np.sum(weights) == 0:
        warnings.warn('No nonzero weights, returning 0')
        return 0
    comparable = comparisons >= 0
    if comparable.sum() == 0:
        warnings.warn("No reference chords were comparable "
                      "to estimated chords, returning 0.")
        return 0
    kept_scores = comparisons[comparable]
    kept_weights = weights[comparable]
    normalized = np.asarray(kept_weights, dtype=float) / float(
        np.sum(kept_weights))
    return np.sum(kept_scores * normalized)
Compute the weighted accuracy of a list of chord comparisons .
19,966
def thirds(reference_labels, estimated_labels):
    """Compare chords along root and third relationships.

    Returns a score array with 1 for matches, 0 for mismatches, and -1
    for non-comparable (unknown) reference chords.
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
    est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
    eq_roots = ref_roots == est_roots
    eq_thirds = ref_semitones[:, 3] == est_semitones[:, 3]
    # BUG FIX: np.float was removed in NumPy 1.24; builtin float is the
    # documented replacement.
    comparison_scores = (eq_roots * eq_thirds).astype(float)
    # Chords with any negative semitone (e.g. X chords) are not comparable.
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
Compare chords along root & third relationships .
19,967
def thirds_inv(reference_labels, estimated_labels):
    """Score chords along root, third, and bass relationships.

    Returns 1/0 comparison scores, with -1 for non-comparable references.
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
    est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)
    eq_root = ref_roots == est_roots
    eq_bass = ref_bass == est_bass
    eq_third = ref_semitones[:, 3] == est_semitones[:, 3]
    # BUG FIX: np.float was removed in NumPy 1.24; builtin float is the
    # documented replacement.
    comparison_scores = (eq_root * eq_third * eq_bass).astype(float)
    # Chords with any negative semitone (e.g. X chords) are not comparable.
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
Score chords along root third & bass relationships .
19,968
def root(reference_labels, estimated_labels):
    """Compare chords according to roots only.

    Returns 1/0 comparison scores, with -1 for non-comparable references.
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
    est_roots = encode_many(estimated_labels, False)[0]
    # BUG FIX: np.float was removed in NumPy 1.24; builtin float is the
    # documented replacement.
    comparison_scores = (ref_roots == est_roots).astype(float)
    # Chords with any negative semitone (e.g. X chords) are not comparable.
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
Compare chords according to roots .
19,969
def mirex(reference_labels, estimated_labels):
    """Compare chords along MIREX rules.

    An estimate is correct when it shares at least three pitch classes
    with the reference chord (in absolute chroma space).
    """
    validate(reference_labels, estimated_labels)
    min_intersection = 3
    ref_data = encode_many(reference_labels, False)
    ref_chroma = rotate_bitmaps_to_roots(ref_data[1], ref_data[0])
    est_data = encode_many(estimated_labels, False)
    est_chroma = rotate_bitmaps_to_roots(est_data[1], est_data[0])
    eq_chroma = (ref_chroma * est_chroma).sum(axis=-1)
    # BUG FIX: np.float was removed in NumPy 1.24; builtin float is the
    # documented replacement.
    comparison_scores = (eq_chroma >= min_intersection).astype(float)
    # A no-chord in both reference and estimate counts as correct.
    no_root = np.logical_and(ref_data[0] == -1, est_data[0] == -1)
    comparison_scores[no_root] = 1.0
    # References with too few active semitones (or unknown/negative
    # bitmaps) are not comparable.
    ref_semitone_count = (ref_data[1] > 0).sum(axis=1)
    skip_idx = np.logical_and(ref_semitone_count > 0,
                              ref_semitone_count < min_intersection)
    np.logical_or(skip_idx, np.any(ref_data[1] < 0, axis=1), skip_idx)
    comparison_scores[skip_idx] = -1.0
    return comparison_scores
Compare chords along MIREX rules .
19,970
def seg(reference_intervals, estimated_intervals):
    """MIREX MeanSeg score: the worse of under- and over-segmentation."""
    under = underseg(reference_intervals, estimated_intervals)
    over = overseg(reference_intervals, estimated_intervals)
    return min(under, over)
Compute the MIREX MeanSeg score .
19,971
def merge_chord_intervals(intervals, labels):
    """Merge consecutive intervals whose labels encode to the same chord."""
    roots, semitones, basses = encode_many(labels, True)
    merged = []
    previous = None  # (root, semitone bitmap, bass) of the last interval
    for start, end, rt, st, ba in zip(intervals[:, 0], intervals[:, 1],
                                      roots, semitones, basses):
        if (previous is None or rt != previous[0]
                or (st != previous[1]).any() or ba != previous[2]):
            previous = (rt, st, ba)
            merged.append([start, end])
        else:
            # Same chord as before: extend the current merged interval.
            merged[-1][-1] = end
    return np.array(merged)
Merge consecutive chord intervals if they represent the same chord .
19,972
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
    """Compute weighted accuracy for every chord comparison function,
    plus the segmentation scores, for one reference/estimate pair.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        Reference chord intervals.
    ref_labels : list of str
        Reference chord labels.
    est_intervals : np.ndarray, shape=(m, 2)
        Estimated chord intervals.
    est_labels : list of str
        Estimated chord labels.
    kwargs
        Accepted for API uniformity; currently unused.

    Returns
    -------
    scores : collections.OrderedDict
        Metric name -> score.
    """
    # Pad/trim the estimate so it spans exactly the reference time range,
    # filling gaps with NO_CHORD.
    est_intervals, est_labels = util.adjust_intervals(
        est_intervals, est_labels, ref_intervals.min(), ref_intervals.max(),
        NO_CHORD, NO_CHORD)
    # Merged intervals are used only for the segmentation scores below.
    merged_ref_intervals = merge_chord_intervals(ref_intervals, ref_labels)
    merged_est_intervals = merge_chord_intervals(est_intervals, est_labels)
    # Align reference and estimate onto a common interval grid so that the
    # label lists are sample-to-sample comparable.
    intervals, ref_labels, est_labels = util.merge_labeled_intervals(
        ref_intervals, ref_labels, est_intervals, est_labels)
    # Interval durations serve as comparison weights.
    durations = util.intervals_to_durations(intervals)
    scores = collections.OrderedDict()
    scores['thirds'] = weighted_accuracy(thirds(ref_labels, est_labels),
                                         durations)
    scores['thirds_inv'] = weighted_accuracy(thirds_inv(ref_labels,
                                                        est_labels),
                                             durations)
    scores['triads'] = weighted_accuracy(triads(ref_labels, est_labels),
                                         durations)
    scores['triads_inv'] = weighted_accuracy(triads_inv(ref_labels,
                                                        est_labels),
                                             durations)
    scores['tetrads'] = weighted_accuracy(tetrads(ref_labels, est_labels),
                                          durations)
    scores['tetrads_inv'] = weighted_accuracy(tetrads_inv(ref_labels,
                                                          est_labels),
                                              durations)
    scores['root'] = weighted_accuracy(root(ref_labels, est_labels),
                                       durations)
    scores['mirex'] = weighted_accuracy(mirex(ref_labels, est_labels),
                                        durations)
    scores['majmin'] = weighted_accuracy(majmin(ref_labels, est_labels),
                                         durations)
    scores['majmin_inv'] = weighted_accuracy(majmin_inv(ref_labels,
                                                        est_labels),
                                             durations)
    scores['sevenths'] = weighted_accuracy(sevenths(ref_labels, est_labels),
                                           durations)
    scores['sevenths_inv'] = weighted_accuracy(sevenths_inv(ref_labels,
                                                            est_labels),
                                               durations)
    # Segmentation metrics operate on the merged interval boundaries.
    scores['underseg'] = underseg(merged_ref_intervals, merged_est_intervals)
    scores['overseg'] = overseg(merged_ref_intervals, merged_est_intervals)
    scores['seg'] = min(scores['overseg'], scores['underseg'])
    return scores
Computes weighted accuracy for all comparison functions for the given reference and estimated annotations .
19,973
def _n_onset_midi ( patterns ) : return len ( [ o_m for pat in patterns for occ in pat for o_m in occ ] )
Computes the number of onset_midi objects in a pattern
19,974
def validate(reference_patterns, estimated_patterns):
    """Check that both inputs look like valid pattern lists.

    Warns on empty inputs; raises on patterns without occurrences or on
    onset/midi tuples that are not pairs.
    """
    if _n_onset_midi(reference_patterns) == 0:
        warnings.warn('Reference patterns are empty.')
    if _n_onset_midi(estimated_patterns) == 0:
        warnings.warn('Estimated patterns are empty.')
    for patterns in (reference_patterns, estimated_patterns):
        for pattern in patterns:
            if not pattern:
                raise ValueError("Each pattern must contain at least one "
                                 "occurrence.")
            for occurrence in pattern:
                for onset_midi in occurrence:
                    if len(onset_midi) != 2:
                        raise ValueError("The (onset, midi) tuple must "
                                         "contain exactly 2 elements.")
Checks that the input annotations to a metric look like valid pattern lists and throws helpful errors if not .
19,975
def _occurrence_intersection ( occ_P , occ_Q ) : set_P = set ( [ tuple ( onset_midi ) for onset_midi in occ_P ] ) set_Q = set ( [ tuple ( onset_midi ) for onset_midi in occ_Q ] ) return set_P & set_Q
Computes the intersection between two occurrences .
19,976
def _compute_score_matrix(P, Q, similarity_metric="cardinality_score"):
    """Compute the pairwise score matrix between occurrence lists P and Q.

    Parameters
    ----------
    P, Q : list
        Lists of occurrences (each a list of (onset, midi) pairs).
    similarity_metric : str
        Only "cardinality_score" is supported.
    """
    sm = np.zeros((len(P), len(Q)))
    for iP, occ_P in enumerate(P):
        for iQ, occ_Q in enumerate(Q):
            if similarity_metric == "cardinality_score":
                denom = float(np.max([len(occ_P), len(occ_Q)]))
                sm[iP, iQ] = len(_occurrence_intersection(occ_P, occ_Q)) \
                    / denom
            else:
                # BUG FIX: the "%s" placeholder was never filled in, so the
                # error message printed a literal "%s" instead of the metric.
                raise ValueError("The similarity metric (%s) can only be: "
                                 "'cardinality_score'." % similarity_metric)
    return sm
Computes the score matrix between the patterns P and Q .
19,977
def standard_FPR(reference_patterns, estimated_patterns, tol=1e-5):
    """Standard F1, Precision and Recall over pattern prototypes.

    A reference pattern is found when some estimated prototype matches its
    first occurrence up to a constant time shift (within ``tol``).
    """
    validate(reference_patterns, estimated_patterns)
    nP = len(reference_patterns)
    nQ = len(estimated_patterns)
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.
    n_found = 0
    for ref_pattern in reference_patterns:
        P = np.asarray(ref_pattern[0])
        for est_pattern in estimated_patterns:
            Q = np.asarray(est_pattern[0])
            if len(P) != len(Q):
                continue
            # Equal-length prototypes match when their pointwise difference
            # is (numerically) constant, i.e. a pure translation.
            single = len(P) == len(Q) == 1
            if single or np.max(np.abs(np.diff(P - Q, axis=0))) < tol:
                n_found += 1
                break
    precision = n_found / float(nQ)
    recall = n_found / float(nP)
    return util.f_measure(precision, recall), precision, recall
Standard F1 Score Precision and Recall .
19,978
def three_layer_FPR(reference_patterns, estimated_patterns):
    """Three-layer F1, Precision, and Recall, as described by Meredith.

    Layer 1 compares occurrences by note intersection; layer 2 compares
    patterns via the best-match F1 of their occurrences; layer 3 (the
    returned values) aggregates across patterns the same way.

    Returns
    -------
    f_measure, precision, recall : float
    """
    validate(reference_patterns, estimated_patterns)

    def compute_first_layer_PR(ref_occs, est_occs):
        # Layer 1: fraction of shared (onset, midi) pairs in each direction.
        s = len(_occurrence_intersection(ref_occs, est_occs))
        precision = s / float(len(ref_occs))
        recall = s / float(len(est_occs))
        return precision, recall

    def compute_second_layer_PR(ref_pattern, est_pattern):
        # Layer 2: best-match aggregation of the layer-1 F1 matrix.
        F_1 = compute_layer(ref_pattern, est_pattern)
        precision = np.mean(np.max(F_1, axis=0))
        recall = np.mean(np.max(F_1, axis=1))
        return precision, recall

    def compute_layer(ref_elements, est_elements, layer=1):
        # Build the pairwise F1 matrix at the requested layer; layer 2
        # recurses into layer 1 via compute_second_layer_PR.
        if layer != 1 and layer != 2:
            raise ValueError("Layer (%d) must be an integer between 1 and 2"
                             % layer)
        nP = len(ref_elements)
        nQ = len(est_elements)
        F = np.zeros((nP, nQ))
        for iP in range(nP):
            for iQ in range(nQ):
                if layer == 1:
                    func = compute_first_layer_PR
                elif layer == 2:
                    func = compute_second_layer_PR
                precision, recall = func(ref_elements[iP], est_elements[iQ])
                F[iP, iQ] = util.f_measure(precision, recall)
        return F

    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.
    # Layer 3: best-match aggregation over the layer-2 pattern matrix.
    F_2 = compute_layer(reference_patterns, estimated_patterns, layer=2)
    precision_3 = np.mean(np.max(F_2, axis=0))
    recall_3 = np.mean(np.max(F_2, axis=1))
    f_measure_3 = util.f_measure(precision_3, recall_3)
    return f_measure_3, precision_3, recall_3
Three-layer F1 score, precision, and recall, as described by Meredith.
19,979
def first_n_three_layer_P(reference_patterns, estimated_patterns, n=5):
    """Three-layer precision restricted to the first ``n`` estimates."""
    validate(reference_patterns, estimated_patterns)
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.
    top_n = estimated_patterns[:min(len(estimated_patterns), n)]
    # three_layer_FPR returns (F, P, R); only precision is reported here.
    return three_layer_FPR(reference_patterns, top_n)[1]
First n three - layer precision .
19,980
def first_n_target_proportion_R(reference_patterns, estimated_patterns, n=5):
    """Establishment recall restricted to the first ``n`` estimates."""
    validate(reference_patterns, estimated_patterns)
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.
    top_n = estimated_patterns[:min(len(estimated_patterns), n)]
    # establishment_FPR returns (F, P, R); only recall is reported here.
    return establishment_FPR(reference_patterns, top_n)[2]
First n target proportion establishment recall metric .
19,981
def evaluate(ref_patterns, est_patterns, **kwargs):
    """Run every pattern-discovery metric for one reference/estimate pair.

    Parameters
    ----------
    ref_patterns, est_patterns : list
        Pattern lists (pattern -> occurrences -> (onset, midi) pairs).
    kwargs
        Forwarded to the individual metrics via ``util.filter_kwargs``.

    Returns
    -------
    scores : collections.OrderedDict
        Metric name -> score.
    """
    scores = collections.OrderedDict()
    scores['F'], scores['P'], scores['R'] = util.filter_kwargs(
        standard_FPR, ref_patterns, est_patterns, **kwargs)
    scores['F_est'], scores['P_est'], scores['R_est'] = util.filter_kwargs(
        establishment_FPR, ref_patterns, est_patterns, **kwargs)
    # Occurrence metrics are run at the two standard thresholds.
    kwargs['thresh'] = .5
    scores['F_occ.5'], scores['P_occ.5'], scores['R_occ.5'] = \
        util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
                           **kwargs)
    kwargs['thresh'] = .75
    scores['F_occ.75'], scores['P_occ.75'], scores['R_occ.75'] = \
        util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
                           **kwargs)
    scores['F_3'], scores['P_3'], scores['R_3'] = util.filter_kwargs(
        three_layer_FPR, ref_patterns, est_patterns, **kwargs)
    # Default the first-n metrics to n=5 unless the caller chose otherwise.
    if 'n' not in kwargs:
        kwargs['n'] = 5
    scores['FFP'] = util.filter_kwargs(first_n_three_layer_P,
                                       ref_patterns, est_patterns, **kwargs)
    scores['FFTP_est'] = util.filter_kwargs(first_n_target_proportion_R,
                                            ref_patterns, est_patterns,
                                            **kwargs)
    return scores
Load data and perform the evaluation .
19,982
def validate(ref_intervals, ref_pitches, ref_velocities,
             est_intervals, est_pitches, est_velocities):
    """Check intervals, pitches and velocities for well-formedness.

    Delegates interval/pitch checks to ``transcription.validate``, then
    verifies that velocity arrays align with their pitch arrays and are
    non-negative.
    """
    transcription.validate(ref_intervals, ref_pitches,
                           est_intervals, est_pitches)
    checks = (('Reference', ref_velocities, ref_pitches),
              ('Estimated', est_velocities, est_pitches))
    for which, velocities, pitches in checks:
        if velocities.shape[0] != pitches.shape[0]:
            raise ValueError('%s velocities must have the same length as '
                             'pitches and intervals.' % which)
    for which, velocities, _ in checks:
        if velocities.size > 0 and np.min(velocities) < 0:
            raise ValueError('%s velocities must be positive.' % which)
Checks that the input annotations have valid time intervals pitches and velocities and throws helpful errors if not .
19,983
def match_notes(ref_intervals, ref_pitches, ref_velocities,
                est_intervals, est_pitches, est_velocities,
                onset_tolerance=0.05, pitch_tolerance=50.0, offset_ratio=0.2,
                offset_min_tolerance=0.05, strict=False,
                velocity_tolerance=0.1):
    """Match notes as in ``transcription.match_notes``, additionally
    requiring matched velocities to agree within ``velocity_tolerance``.

    Reference velocities are rescaled to [0, 1]; estimated velocities are
    mapped onto that range by a least-squares linear fit before comparison.

    Returns
    -------
    matching : list of (int, int)
        (reference index, estimate index) pairs surviving all tolerances.
    """
    matching = transcription.match_notes(
        ref_intervals, ref_pitches, est_intervals, est_pitches,
        onset_tolerance, pitch_tolerance, offset_ratio,
        offset_min_tolerance, strict)
    # Normalize reference velocities to [0, 1].
    min_velocity, max_velocity = np.min(ref_velocities), np.max(ref_velocities)
    velocity_range = max(1, max_velocity - min_velocity)
    ref_velocities = (ref_velocities - min_velocity) / float(velocity_range)
    matching = np.array(matching)
    if matching.size == 0:
        return []
    ref_matched_velocities = ref_velocities[matching[:, 0]]
    est_matched_velocities = est_velocities[matching[:, 1]]
    # Fit est = slope * est + intercept onto the normalized reference scale.
    # BUG FIX: pass rcond=None explicitly — omitting it triggers a
    # FutureWarning and version-dependent behavior in np.linalg.lstsq.
    slope, intercept = np.linalg.lstsq(
        np.vstack([est_matched_velocities,
                   np.ones(len(est_matched_velocities))]).T,
        ref_matched_velocities, rcond=None)[0]
    est_matched_velocities = slope * est_matched_velocities + intercept
    velocity_diff = np.abs(est_matched_velocities - ref_matched_velocities)
    velocity_within_tolerance = (velocity_diff < velocity_tolerance)
    matching = matching[velocity_within_tolerance]
    matching = [tuple(_) for _ in matching]
    return matching
Match notes taking note velocity into consideration .
19,984
def validate(reference_beats, estimated_beats):
    """Check that both inputs look like valid beat-time arrays.

    Warns on empty arrays; delegates event validation (monotonicity,
    maximum time) to ``util.validate_events``.
    """
    if reference_beats.size == 0:
        warnings.warn("Reference beats are empty.")
    if estimated_beats.size == 0:
        warnings.warn("Estimated beats are empty.")
    util.validate_events(reference_beats, MAX_TIME)
    util.validate_events(estimated_beats, MAX_TIME)
Checks that the input annotations to a metric look like valid beat time arrays and throws helpful errors if not .
19,985
def _get_reference_beat_variations ( reference_beats ) : interpolated_indices = np . arange ( 0 , reference_beats . shape [ 0 ] - .5 , .5 ) original_indices = np . arange ( 0 , reference_beats . shape [ 0 ] ) double_reference_beats = np . interp ( interpolated_indices , original_indices , reference_beats ) return ( reference_beats , double_reference_beats [ 1 : : 2 ] , double_reference_beats , reference_beats [ : : 2 ] , reference_beats [ 1 : : 2 ] )
Return metric variations of the reference beats
19,986
def f_measure(reference_beats, estimated_beats, f_measure_threshold=0.07):
    """F-measure of correctly predicted beats within a small window."""
    validate(reference_beats, estimated_beats)
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0.
    matching = util.match_events(reference_beats, estimated_beats,
                                 f_measure_threshold)
    n_matched = float(len(matching))
    precision = n_matched / len(estimated_beats)
    recall = n_matched / len(reference_beats)
    return util.f_measure(precision, recall)
Compute the F - measure of correct vs incorrectly predicted beats . Correctness is determined over a small window .
19,987
def cemgil(reference_beats, estimated_beats, cemgil_sigma=0.04):
    """Cemgil's score: Gaussian error of each estimated beat.

    Returns
    -------
    (float, float)
        Score against the original beats, and the best score over all
        metric variations of the reference.
    """
    validate(reference_beats, estimated_beats)
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0., 0.
    accuracies = []
    for variation in _get_reference_beat_variations(reference_beats):
        accuracy = 0
        for beat in variation:
            # Distance to the nearest estimated beat, scored by a Gaussian.
            nearest = np.min(np.abs(beat - estimated_beats))
            accuracy += np.exp(-(nearest ** 2) / (2.0 * cemgil_sigma ** 2))
        accuracy /= .5 * (estimated_beats.shape[0] + variation.shape[0])
        accuracies.append(accuracy)
    # accuracies[0] corresponds to the unmodified reference.
    return accuracies[0], np.max(accuracies)
Cemgil's score computes a Gaussian error for each estimated beat. Compares against the original beat times and all metrical variations.
19,988
def goto(reference_beats, estimated_beats, goto_threshold=0.35,
         goto_mu=0.2, goto_sigma=0.2):
    """Calculate Goto's score: binary 1.0/0.0 by heuristic criteria.

    An estimate passes when a sufficiently long run of reference beats is
    tracked with small, low-variance normalized error.

    Parameters
    ----------
    goto_threshold : float
        Normalized error above which a beat counts as incorrect.
    goto_mu, goto_sigma : float
        Maximum mean and standard deviation of the error over the
        candidate run.
    """
    validate(reference_beats, estimated_beats)
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0.
    # Normalized error per reference beat (1 = worst), and whether the
    # beat was paired with exactly one estimate.
    beat_error = np.ones(reference_beats.shape[0])
    paired = np.zeros(reference_beats.shape[0])
    goto_criteria = 0
    for n in range(1, reference_beats.shape[0] - 1):
        # Window spans half the inter-beat interval on each side.
        previous_interval = 0.5 * (reference_beats[n] - reference_beats[n - 1])
        window_min = reference_beats[n] - previous_interval
        next_interval = 0.5 * (reference_beats[n + 1] - reference_beats[n])
        window_max = reference_beats[n] + next_interval
        beats_in_window = np.logical_and((estimated_beats >= window_min),
                                         (estimated_beats < window_max))
        if beats_in_window.sum() == 0 or beats_in_window.sum() > 1:
            # Zero or multiple estimates: maximal error, unpaired.
            paired[n] = 0
            beat_error[n] = 1
        else:
            paired[n] = 1
            offset = estimated_beats[beats_in_window] - reference_beats[n]
            # Normalize by the half-interval on the side of the offset.
            if offset < 0:
                beat_error[n] = offset / previous_interval
            else:
                beat_error[n] = offset / next_interval
    incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold)
    if incorrect_beats.shape[0] < 3:
        # Nearly all beats correct: evaluate the span between the first
        # and last incorrect beat.
        track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1]
        goto_criteria = 1
    else:
        # Longest run of consecutive correct beats between incorrect ones.
        track_len = np.max(np.diff(incorrect_beats))
        track_start = np.flatnonzero(np.diff(incorrect_beats)
                                     == track_len)[0]
        # Run must cover at least 25% of the (interior) reference beats.
        if track_len - 1 > .25 * (reference_beats.shape[0] - 2):
            goto_criteria = 1
            start_beat = incorrect_beats[track_start]
            end_beat = incorrect_beats[track_start + 1]
            track = beat_error[start_beat:end_beat + 1]
    if goto_criteria:
        # Final check: mean and spread of the run's error must be small.
        if np.mean(np.abs(track)) < goto_mu \
                and np.std(track, ddof=1) < goto_sigma:
            goto_criteria = 3
    return 1.0 * (goto_criteria == 3)
Calculate Goto's score: a binary 1 or 0 depending on specific heuristic criteria.
19,989
def p_score(reference_beats, estimated_beats, p_score_threshold=0.2):
    """McKinney's P-score from the cross-correlation of beat impulse trains.

    Both beat sequences are rasterized to 10 ms impulse trains, and the
    correlation is summed over lags within ``p_score_threshold`` of the
    median reference inter-beat interval.
    """
    validate(reference_beats, estimated_beats)
    if reference_beats.size == 1:
        warnings.warn("Only one reference beat was provided, so beat intervals"
                      " cannot be computed.")
    if estimated_beats.size == 1:
        warnings.warn("Only one estimated beat was provided, so beat intervals"
                      " cannot be computed.")
    if estimated_beats.size <= 1 or reference_beats.size <= 1:
        return 0.
    sampling_rate = int(1.0 / 0.010)  # 10 ms resolution
    # Shift both sequences so the earliest beat is at time zero.
    offset = min(estimated_beats.min(), reference_beats.min())
    estimated_beats = np.array(estimated_beats - offset)
    reference_beats = np.array(reference_beats - offset)
    # BUG FIX: np.int was removed in NumPy 1.24; builtin int is the
    # documented replacement (two occurrences below).
    end_point = int(np.ceil(np.max([np.max(estimated_beats),
                                    np.max(reference_beats)])))
    reference_train = np.zeros(end_point * sampling_rate + 1)
    beat_indices = np.ceil(reference_beats * sampling_rate).astype(int)
    reference_train[beat_indices] = 1.0
    estimated_train = np.zeros(end_point * sampling_rate + 1)
    beat_indices = np.ceil(estimated_beats * sampling_rate).astype(int)
    estimated_train[beat_indices] = 1.0
    # Allowed lag window: fraction of the median inter-beat interval.
    annotation_intervals = np.diff(np.flatnonzero(reference_train))
    win_size = int(np.round(p_score_threshold
                            * np.median(annotation_intervals)))
    train_correlation = np.correlate(reference_train, estimated_train, 'full')
    # Keep only lags within +/- win_size of zero lag.
    middle_lag = train_correlation.shape[0] // 2
    start = middle_lag - win_size
    end = middle_lag + win_size + 1
    train_correlation = train_correlation[start:end]
    # Normalize by the longer of the two beat sequences.
    n_beats = np.max([estimated_beats.shape[0], reference_beats.shape[0]])
    return np.sum(train_correlation) / n_beats
Get McKinney's P-score, based on the cross-correlation of the reference and estimated beats.
19,990
def information_gain(reference_beats, estimated_beats, bins=41):
    """Information gain: K-L divergence of the beat-error histogram from
    a uniform histogram, normalized by log2(bins).

    The larger of the forward and backward entropies is used.
    """
    validate(reference_beats, estimated_beats)
    if not bins % 2:
        warnings.warn("bins parameter is even, "
                      "so there will not be a bin centered at zero.")
    if reference_beats.size == 1:
        warnings.warn("Only one reference beat was provided, so beat intervals"
                      " cannot be computed.")
    if estimated_beats.size == 1:
        warnings.warn("Only one estimated beat was provided, so beat intervals"
                      " cannot be computed.")
    if estimated_beats.size <= 1 or reference_beats.size <= 1:
        return 0.
    forward_entropy = _get_entropy(reference_beats, estimated_beats, bins)
    backward_entropy = _get_entropy(estimated_beats, reference_beats, bins)
    norm = np.log2(bins)
    # Score with whichever direction is worse (higher entropy).
    return (norm - max(forward_entropy, backward_entropy)) / norm
Get the information gain - K - L divergence of the beat error histogram to a uniform histogram
19,991
def index_labels(labels, case_sensitive=False):
    """Convert string identifiers to numerical indices (sorted order).

    Returns
    -------
    indices : list of int
        Index of each input label.
    index_to_label : dict
        Inverse mapping from index back to label.
    """
    if not case_sensitive:
        labels = [str(s).lower() for s in labels]
    unique_sorted = sorted(set(labels))
    label_to_index = {lab: idx for idx, lab in enumerate(unique_sorted)}
    index_to_label = {idx: lab for idx, lab in enumerate(unique_sorted)}
    indices = [label_to_index[s] for s in labels]
    return indices, index_to_label
Convert a list of string identifiers into numerical indices .
19,992
def intervals_to_samples(intervals, labels, offset=0, sample_size=0.1,
                         fill_value=None):
    """Convert labeled intervals to a uniformly sampled label sequence.

    Samples are taken every ``sample_size`` seconds starting at ``offset``;
    times outside all intervals receive ``fill_value``.
    """
    n_samples = int(np.floor(intervals.max() / sample_size))
    # Keep float32 arithmetic for the sample grid, matching the
    # historical output values exactly.
    sample_grid = np.arange(n_samples, dtype=np.float32)
    sample_times = (sample_grid * sample_size + offset).tolist()
    sampled_labels = interpolate_intervals(intervals, labels, sample_times,
                                           fill_value)
    return sample_times, sampled_labels
Convert an array of labeled time intervals to annotated samples .
19,993
def interpolate_intervals(intervals, labels, time_points, fill_value=None):
    """Assign interval labels to time points; uncovered points get
    ``fill_value``.

    Raises
    ------
    ValueError
        If ``time_points`` is not in non-decreasing order.
    """
    time_points = np.asarray(time_points)
    if np.any(time_points[1:] < time_points[:-1]):
        raise ValueError('time_points must be in non-decreasing order')
    result = [fill_value] * len(time_points)
    # For each interval, locate the contiguous slice of covered points.
    slice_starts = np.searchsorted(time_points, intervals[:, 0], side='left')
    slice_ends = np.searchsorted(time_points, intervals[:, 1], side='right')
    for lo, hi, label in zip(slice_starts, slice_ends, labels):
        result[lo:hi] = [label] * (hi - lo)
    return result
Assign labels to a set of points in time given a set of intervals .
19,994
def sort_labeled_intervals(intervals, labels=None):
    """Sort intervals (and optionally their labels) by start time."""
    order = np.argsort(intervals[:, 0])
    sorted_intervals = intervals[order]
    if labels is None:
        return sorted_intervals
    return sorted_intervals, [labels[i] for i in order]
Sort intervals and optionally their corresponding labels according to start time .
19,995
def f_measure(precision, recall, beta=1.0):
    """Weighted harmonic mean (F-beta) of precision and recall.

    Returns 0.0 when both inputs are zero (avoiding division by zero).
    """
    if precision == 0 and recall == 0:
        return 0.0
    beta_sq = beta ** 2
    return (1 + beta_sq) * precision * recall \
        / (beta_sq * precision + recall)
Compute the f - measure from precision and recall scores .
19,996
def intervals_to_boundaries(intervals, q=5):
    """Unique boundary times of an interval array, rounded to ``q`` decimals."""
    rounded = np.round(intervals, decimals=q)
    return np.unique(np.ravel(rounded))
Convert interval times into boundaries .
19,997
def boundaries_to_intervals(boundaries):
    """Convert sorted unique boundary times into adjacent intervals.

    Raises
    ------
    ValueError
        If the boundaries are not unique and ascending.
    """
    if not np.allclose(boundaries, np.unique(boundaries)):
        raise ValueError('Boundary times are not unique or not ascending.')
    pairs = zip(boundaries[:-1], boundaries[1:])
    return np.asarray(list(pairs))
Convert an array of event times into intervals
19,998
def merge_labeled_intervals(x_intervals, x_labels, y_intervals, y_labels):
    r"""Merge the time intervals of two labeled sequences onto a common
    boundary grid, so the two label lists become index-aligned.

    Parameters
    ----------
    x_intervals, y_intervals : np.ndarray, shape=(n, 2)
        Interval arrays; both must span the same overall time range.
    x_labels, y_labels : list
        Labels for each interval.

    Returns
    -------
    output_intervals : np.ndarray
        Intervals of the merged boundary grid.
    x_labels_out, y_labels_out : list
        Labels of each sequence resampled onto the merged grid.

    Raises
    ------
    ValueError
        If the sequences do not start and end at the same times.
    """
    # Both sequences must cover the same span for the merge to make sense.
    align_check = [x_intervals[0, 0] == y_intervals[0, 0],
                   x_intervals[-1, 1] == y_intervals[-1, 1]]
    if False in align_check:
        raise ValueError("Time intervals do not align; did you mean to call "
                         "'adjust_intervals()' first?")
    # Union of all boundary times defines the merged grid.
    time_boundaries = np.unique(np.concatenate([x_intervals, y_intervals],
                                               axis=0))
    output_intervals = np.array([time_boundaries[:-1],
                                 time_boundaries[1:]]).T
    x_labels_out, y_labels_out = [], []
    x_label_range = np.arange(len(x_labels))
    y_label_range = np.arange(len(y_labels))
    for t0, _ in output_intervals:
        # The active label is the last interval starting at or before t0.
        x_idx = x_label_range[(t0 >= x_intervals[:, 0])]
        x_labels_out.append(x_labels[x_idx[-1]])
        y_idx = y_label_range[(t0 >= y_intervals[:, 0])]
        y_labels_out.append(y_labels[y_idx[-1]])
    return output_intervals, x_labels_out, y_labels_out
r Merge the time intervals of two sequences .
19,999
def match_events(ref, est, window, distance=None):
    """Maximum bipartite matching between reference and estimated events
    subject to a window constraint.

    With the default ``distance=None``, candidate pairs are found with the
    fast absolute-difference windowing; otherwise ``distance(ref, est)``
    is thresholded against ``window``.
    """
    if distance is None:
        hits = _fast_hit_windows(ref, est, window)
    else:
        hits = np.where(distance(ref, est) <= window)
    # Build estimate -> candidate-references adjacency for the matcher.
    graph = {}
    for ref_i, est_i in zip(*hits):
        graph.setdefault(est_i, []).append(ref_i)
    return sorted(_bipartite_match(graph).items())
Compute a maximum matching between reference and estimated event times subject to a window constraint .