idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
19,800
def expand ( template , variables = None ) : if variables is None : variables = { } return patterns . sub ( functools . partial ( _replace , variables ) , template )
Expand a URL template string using the passed variables
38
10
19,801
def _format_pair_no_equals ( explode , separator , escape , key , value ) : if not value : return key return _format_pair ( explode , separator , escape , key , value )
Format a key value pair but don t include the equals sign when there is no value
46
17
19,802
def _format_pair_with_equals ( explode , separator , escape , key , value ) : if not value : return key + '=' return _format_pair ( explode , separator , escape , key , value )
Format a key value pair including the equals sign when there is no value
49
14
19,803
def _replace ( variables , match ) : expression = match . group ( 1 ) # Look-up chars and functions for the specified operator ( prefix_char , separator_char , split_fn , escape_fn , format_fn ) = operator_map . get ( expression [ 0 ] , defaults ) replacements = [ ] for key , modify_fn , explode in split_fn ( expression ) : if key in variables : variable = modify_fn ( variables [ key ] ) replacement = format_fn ( explode , separator_char , escape_fn , key , variable ) replacements . append ( replacement ) if not replacements : return '' return prefix_char + separator_char . join ( replacements )
Return the appropriate replacement for match using the passed variables
147
10
19,804
def predict ( self , document_path : str , model_name : str , consent_id : str = None ) -> Prediction : content_type = self . _get_content_type ( document_path ) consent_id = consent_id or str ( uuid4 ( ) ) document_id = self . _upload_document ( document_path , content_type , consent_id ) prediction_response = self . post_predictions ( document_id , model_name ) return Prediction ( document_id , consent_id , model_name , prediction_response )
Run inference and create prediction on document . This method takes care of creating and uploading a document specified by document_path . as well as running inference using model specified by model_name to create prediction on the document .
122
43
19,805
def send_feedback ( self , document_id : str , feedback : List [ Field ] ) -> dict : return self . post_document_id ( document_id , feedback )
Send feedback to the model . This method takes care of sending feedback related to document specified by document_id . Feedback consists of ground truth values for the document specified as a list of Field instances .
39
39
19,806
def extra_what ( file , h = None ) : tests = [ ] def test_pdf ( h , f ) : if b'PDF' in h [ 0 : 10 ] : return 'pdf' tests . append ( test_pdf ) f = None try : if h is None : if isinstance ( file , ( str , PathLike ) ) : f = open ( file , 'rb' ) h = f . read ( 32 ) else : location = file . tell ( ) h = file . read ( 32 ) file . seek ( location ) for tf in tests : res = tf ( h , f ) if res : return res finally : if f : f . close ( ) return None
Code mostly copied from imghdr . what
146
9
19,807
def put_document ( document_path : str , content_type : str , presigned_url : str ) -> str : body = pathlib . Path ( document_path ) . read_bytes ( ) headers = { 'Content-Type' : content_type } put_document_response = requests . put ( presigned_url , data = body , headers = headers ) put_document_response . raise_for_status ( ) return put_document_response . content . decode ( )
Convenience method for putting a document to presigned url .
105
13
19,808
def get_expiration ( self ) : exp = self . _get_int ( 'expiration' ) if exp is not None : return datetime . datetime . fromtimestamp ( exp ) return None
Returns the expiration date .
44
5
19,809
def login ( self , user_id , password ) : self . _session = requests . session ( ) self . _session . verify = self . _verify_certs self . _session . auth = ( user_id , password ) try : self . _update_capabilities ( ) url_components = parse . urlparse ( self . url ) if self . _dav_endpoint_version == 1 : self . _davpath = url_components . path + 'remote.php/dav/files/' + parse . quote ( user_id ) self . _webdav_url = self . url + 'remote.php/dav/files/' + parse . quote ( user_id ) else : self . _davpath = url_components . path + 'remote.php/webdav' self . _webdav_url = self . url + 'remote.php/webdav' except HTTPResponseError as e : self . _session . close ( ) self . _session = None raise e
Authenticate to ownCloud . This will create a session on the server .
230
15
19,810
def file_info ( self , path ) : res = self . _make_dav_request ( 'PROPFIND' , path , headers = { 'Depth' : '0' } ) if res : return res [ 0 ] return None
Returns the file info for the given remote file
53
9
19,811
def get_file_contents ( self , path ) : path = self . _normalize_path ( path ) res = self . _session . get ( self . _webdav_url + parse . quote ( self . _encode_string ( path ) ) ) if res . status_code == 200 : return res . content elif res . status_code >= 400 : raise HTTPResponseError ( res ) return False
Returns the contents of a remote file
94
7
19,812
def get_directory_as_zip ( self , remote_path , local_file ) : remote_path = self . _normalize_path ( remote_path ) url = self . url + 'index.php/apps/files/ajax/download.php?dir=' + parse . quote ( remote_path ) res = self . _session . get ( url , stream = True ) if res . status_code == 200 : if local_file is None : # use downloaded file name from Content-Disposition # targetFile = res.headers['content-disposition'] local_file = os . path . basename ( remote_path ) file_handle = open ( local_file , 'wb' , 8192 ) for chunk in res . iter_content ( 8192 ) : file_handle . write ( chunk ) file_handle . close ( ) return True elif res . status_code >= 400 : raise HTTPResponseError ( res ) return False
Downloads a remote directory as zip
207
7
19,813
def put_directory ( self , target_path , local_directory , * * kwargs ) : target_path = self . _normalize_path ( target_path ) if not target_path . endswith ( '/' ) : target_path += '/' gathered_files = [ ] if not local_directory . endswith ( '/' ) : local_directory += '/' basedir = os . path . basename ( local_directory [ 0 : - 1 ] ) + '/' # gather files to upload for path , _ , files in os . walk ( local_directory ) : gathered_files . append ( ( path , basedir + path [ len ( local_directory ) : ] , files ) ) for path , remote_path , files in gathered_files : self . mkdir ( target_path + remote_path + '/' ) for name in files : if not self . put_file ( target_path + remote_path + '/' , path + '/' + name , * * kwargs ) : return False return True
Upload a directory with all its contents
225
7
19,814
def _put_file_chunked ( self , remote_path , local_source_file , * * kwargs ) : chunk_size = kwargs . get ( 'chunk_size' , 10 * 1024 * 1024 ) result = True transfer_id = int ( time . time ( ) ) remote_path = self . _normalize_path ( remote_path ) if remote_path . endswith ( '/' ) : remote_path += os . path . basename ( local_source_file ) stat_result = os . stat ( local_source_file ) file_handle = open ( local_source_file , 'rb' , 8192 ) file_handle . seek ( 0 , os . SEEK_END ) size = file_handle . tell ( ) file_handle . seek ( 0 ) headers = { } if kwargs . get ( 'keep_mtime' , True ) : headers [ 'X-OC-MTIME' ] = str ( int ( stat_result . st_mtime ) ) if size == 0 : return self . _make_dav_request ( 'PUT' , remote_path , data = '' , headers = headers ) chunk_count = int ( math . ceil ( float ( size ) / float ( chunk_size ) ) ) if chunk_count > 1 : headers [ 'OC-CHUNKED' ] = '1' for chunk_index in range ( 0 , int ( chunk_count ) ) : data = file_handle . read ( chunk_size ) if chunk_count > 1 : chunk_name = '%s-chunking-%s-%i-%i' % ( remote_path , transfer_id , chunk_count , chunk_index ) else : chunk_name = remote_path if not self . _make_dav_request ( 'PUT' , chunk_name , data = data , headers = headers ) : result = False break file_handle . close ( ) return result
Uploads a file using chunks . If the file is smaller than chunk_size it will be uploaded directly .
429
22
19,815
def list_open_remote_share ( self ) : res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_SHARE , 'remote_shares/pending' ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) shares = [ ] for element in tree . find ( 'data' ) . iter ( 'element' ) : share_attr = { } for child in element : key = child . tag value = child . text share_attr [ key ] = value shares . append ( share_attr ) return shares raise HTTPResponseError ( res )
List all pending remote shares
152
5
19,816
def accept_remote_share ( self , share_id ) : if not isinstance ( share_id , int ) : return False res = self . _make_ocs_request ( 'POST' , self . OCS_SERVICE_SHARE , 'remote_shares/pending/' + str ( share_id ) ) if res . status_code == 200 : return res raise HTTPResponseError ( res )
Accepts a remote share
93
5
19,817
def update_share ( self , share_id , * * kwargs ) : perms = kwargs . get ( 'perms' , None ) password = kwargs . get ( 'password' , None ) public_upload = kwargs . get ( 'public_upload' , None ) if ( isinstance ( perms , int ) ) and ( perms > self . OCS_PERMISSION_ALL ) : perms = None if not ( perms or password or ( public_upload is not None ) ) : return False if not isinstance ( share_id , int ) : return False data = { } if perms : data [ 'permissions' ] = perms if isinstance ( password , six . string_types ) : data [ 'password' ] = password if ( public_upload is not None ) and ( isinstance ( public_upload , bool ) ) : data [ 'publicUpload' ] = str ( public_upload ) . lower ( ) res = self . _make_ocs_request ( 'PUT' , self . OCS_SERVICE_SHARE , 'shares/' + str ( share_id ) , data = data ) if res . status_code == 200 : return True raise HTTPResponseError ( res )
Updates a given share
275
5
19,818
def share_file_with_link ( self , path , * * kwargs ) : perms = kwargs . get ( 'perms' , None ) public_upload = kwargs . get ( 'public_upload' , 'false' ) password = kwargs . get ( 'password' , None ) path = self . _normalize_path ( path ) post_data = { 'shareType' : self . OCS_SHARE_TYPE_LINK , 'path' : self . _encode_string ( path ) , } if ( public_upload is not None ) and ( isinstance ( public_upload , bool ) ) : post_data [ 'publicUpload' ] = str ( public_upload ) . lower ( ) if isinstance ( password , six . string_types ) : post_data [ 'password' ] = password if perms : post_data [ 'permissions' ] = perms res = self . _make_ocs_request ( 'POST' , self . OCS_SERVICE_SHARE , 'shares' , data = post_data ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) data_el = tree . find ( 'data' ) return ShareInfo ( { 'id' : data_el . find ( 'id' ) . text , 'path' : path , 'url' : data_el . find ( 'url' ) . text , 'token' : data_el . find ( 'token' ) . text } ) raise HTTPResponseError ( res )
Shares a remote file with link
355
6
19,819
def is_shared ( self , path ) : # make sure that the path exist - if not, raise HTTPResponseError self . file_info ( path ) try : result = self . get_shares ( path ) if result : return len ( result ) > 0 except OCSResponseError as e : if e . status_code != 404 : raise e return False return False
Checks whether a path is already shared
82
8
19,820
def get_share ( self , share_id ) : if ( share_id is None ) or not ( isinstance ( share_id , int ) ) : return None res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_SHARE , 'shares/' + str ( share_id ) ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) return self . _get_shareinfo ( tree . find ( 'data' ) . find ( 'element' ) ) raise HTTPResponseError ( res )
Returns share information about known share
142
6
19,821
def get_shares ( self , path = '' , * * kwargs ) : if not ( isinstance ( path , six . string_types ) ) : return None data = 'shares' if path != '' : data += '?' path = self . _encode_string ( self . _normalize_path ( path ) ) args = { 'path' : path } reshares = kwargs . get ( 'reshares' , False ) if isinstance ( reshares , bool ) and reshares : args [ 'reshares' ] = reshares subfiles = kwargs . get ( 'subfiles' , False ) if isinstance ( subfiles , bool ) and subfiles : args [ 'subfiles' ] = str ( subfiles ) . lower ( ) shared_with_me = kwargs . get ( 'shared_with_me' , False ) if isinstance ( shared_with_me , bool ) and shared_with_me : args [ 'shared_with_me' ] = "true" del args [ 'path' ] data += parse . urlencode ( args ) res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_SHARE , data ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) shares = [ ] for element in tree . find ( 'data' ) . iter ( 'element' ) : '''share_attr = {} for child in element: key = child.tag value = child.text share_attr[key] = value shares.append(share_attr)''' shares . append ( self . _get_shareinfo ( element ) ) return shares raise HTTPResponseError ( res )
Returns array of shares
395
4
19,822
def create_user ( self , user_name , initial_password ) : res = self . _make_ocs_request ( 'POST' , self . OCS_SERVICE_CLOUD , 'users' , data = { 'password' : initial_password , 'userid' : user_name } ) # We get 200 when the user was just created. if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree , [ 100 ] ) return True raise HTTPResponseError ( res )
Create a new user with an initial password via provisioning API . It is not an error if the user already existed before . If you get back an error 999 then the provisioning API is not enabled .
128
41
19,823
def delete_user ( self , user_name ) : res = self . _make_ocs_request ( 'DELETE' , self . OCS_SERVICE_CLOUD , 'users/' + user_name ) # We get 200 when the user was deleted. if res . status_code == 200 : return True raise HTTPResponseError ( res )
Deletes a user via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
82
25
19,824
def search_users ( self , user_name ) : action_path = 'users' if user_name : action_path += '?search={}' . format ( user_name ) res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_CLOUD , action_path ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) users = [ x . text for x in tree . findall ( 'data/users/element' ) ] return users raise HTTPResponseError ( res )
Searches for users via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
128
26
19,825
def set_user_attribute ( self , user_name , key , value ) : res = self . _make_ocs_request ( 'PUT' , self . OCS_SERVICE_CLOUD , 'users/' + parse . quote ( user_name ) , data = { 'key' : self . _encode_string ( key ) , 'value' : self . _encode_string ( value ) } ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree , [ 100 ] ) return True raise HTTPResponseError ( res )
Sets a user attribute
142
5
19,826
def add_user_to_group ( self , user_name , group_name ) : res = self . _make_ocs_request ( 'POST' , self . OCS_SERVICE_CLOUD , 'users/' + user_name + '/groups' , data = { 'groupid' : group_name } ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree , [ 100 ] ) return True raise HTTPResponseError ( res )
Adds a user to a group .
122
7
19,827
def get_user_groups ( self , user_name ) : res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_CLOUD , 'users/' + user_name + '/groups' , ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree , [ 100 ] ) return [ group . text for group in tree . find ( 'data/groups' ) ] raise HTTPResponseError ( res )
Get a list of groups associated to a user .
121
10
19,828
def get_user ( self , user_name ) : res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_CLOUD , 'users/' + parse . quote ( user_name ) , data = { } ) tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) # <ocs><meta><statuscode>100</statuscode><status>ok</status></meta> # <data> # <email>frank@example.org</email><quota>0</quota><enabled>true</enabled> # </data> # </ocs> data_element = tree . find ( 'data' ) return self . _xml_to_dict ( data_element )
Retrieves information about a user
171
7
19,829
def get_user_subadmin_groups ( self , user_name ) : res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_CLOUD , 'users/' + user_name + '/subadmins' , ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree , [ 100 ] ) groups = tree . find ( 'data' ) return groups raise HTTPResponseError ( res )
Get a list of subadmin groups associated to a user .
119
12
19,830
def share_file_with_user ( self , path , user , * * kwargs ) : remote_user = kwargs . get ( 'remote_user' , False ) perms = kwargs . get ( 'perms' , self . OCS_PERMISSION_READ ) if ( ( ( not isinstance ( perms , int ) ) or ( perms > self . OCS_PERMISSION_ALL ) ) or ( ( not isinstance ( user , six . string_types ) ) or ( user == '' ) ) ) : return False if remote_user and ( not user . endswith ( '/' ) ) : user = user + '/' path = self . _normalize_path ( path ) post_data = { 'shareType' : self . OCS_SHARE_TYPE_REMOTE if remote_user else self . OCS_SHARE_TYPE_USER , 'shareWith' : user , 'path' : self . _encode_string ( path ) , 'permissions' : perms } res = self . _make_ocs_request ( 'POST' , self . OCS_SERVICE_SHARE , 'shares' , data = post_data ) if self . _debug : print ( 'OCS share_file request for file %s with permissions %i ' 'returned: %i' % ( path , perms , res . status_code ) ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) data_el = tree . find ( 'data' ) return ShareInfo ( { 'id' : data_el . find ( 'id' ) . text , 'path' : path , 'permissions' : perms } ) raise HTTPResponseError ( res )
Shares a remote file with specified user
399
7
19,831
def delete_group ( self , group_name ) : res = self . _make_ocs_request ( 'DELETE' , self . OCS_SERVICE_CLOUD , 'groups/' + group_name ) # We get 200 when the group was just deleted. if res . status_code == 200 : return True raise HTTPResponseError ( res )
Delete a group via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
83
24
19,832
def get_groups ( self ) : res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_CLOUD , 'groups' ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) groups = [ x . text for x in tree . findall ( 'data/groups/element' ) ] return groups raise HTTPResponseError ( res )
Get groups via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
95
23
19,833
def get_group_members ( self , group_name ) : res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_CLOUD , 'groups/' + group_name ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree , [ 100 ] ) return [ group . text for group in tree . find ( 'data/users' ) ] raise HTTPResponseError ( res )
Get group members via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
116
24
19,834
def group_exists ( self , group_name ) : res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_CLOUD , 'groups?search=' + group_name ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) for code_el in tree . findall ( 'data/groups/element' ) : if code_el is not None and code_el . text == group_name : return True return False raise HTTPResponseError ( res )
Checks a group via provisioning API . If you get back an error 999 then the provisioning API is not enabled .
122
25
19,835
def share_file_with_group ( self , path , group , * * kwargs ) : perms = kwargs . get ( 'perms' , self . OCS_PERMISSION_READ ) if ( ( ( not isinstance ( perms , int ) ) or ( perms > self . OCS_PERMISSION_ALL ) ) or ( ( not isinstance ( group , six . string_types ) ) or ( group == '' ) ) ) : return False path = self . _normalize_path ( path ) post_data = { 'shareType' : self . OCS_SHARE_TYPE_GROUP , 'shareWith' : group , 'path' : path , 'permissions' : perms } res = self . _make_ocs_request ( 'POST' , self . OCS_SERVICE_SHARE , 'shares' , data = post_data ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) data_el = tree . find ( 'data' ) return ShareInfo ( { 'id' : data_el . find ( 'id' ) . text , 'path' : path , 'permissions' : perms } ) raise HTTPResponseError ( res )
Shares a remote file with specified group
287
7
19,836
def get_attribute ( self , app = None , key = None ) : path = 'getattribute' if app is not None : path += '/' + parse . quote ( app , '' ) if key is not None : path += '/' + parse . quote ( self . _encode_string ( key ) , '' ) res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_PRIVATEDATA , path ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) values = [ ] for element in tree . find ( 'data' ) . iter ( 'element' ) : app_text = element . find ( 'app' ) . text key_text = element . find ( 'key' ) . text value_text = element . find ( 'value' ) . text or '' if key is None : if app is None : values . append ( ( app_text , key_text , value_text ) ) else : values . append ( ( key_text , value_text ) ) else : return value_text if len ( values ) == 0 and key is not None : return None return values raise HTTPResponseError ( res )
Returns an application attribute
276
4
19,837
def set_attribute ( self , app , key , value ) : path = 'setattribute/' + parse . quote ( app , '' ) + '/' + parse . quote ( self . _encode_string ( key ) , '' ) res = self . _make_ocs_request ( 'POST' , self . OCS_SERVICE_PRIVATEDATA , path , data = { 'value' : self . _encode_string ( value ) } ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) return True raise HTTPResponseError ( res )
Sets an application attribute
145
5
19,838
def get_apps ( self ) : ena_apps = { } res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_CLOUD , 'apps' ) if res . status_code != 200 : raise HTTPResponseError ( res ) tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) # <data><apps><element>files</element><element>activity</element> ... for el in tree . findall ( 'data/apps/element' ) : ena_apps [ el . text ] = False res = self . _make_ocs_request ( 'GET' , self . OCS_SERVICE_CLOUD , 'apps?filter=enabled' ) if res . status_code != 200 : raise HTTPResponseError ( res ) tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree ) for el in tree . findall ( 'data/apps/element' ) : ena_apps [ el . text ] = True return ena_apps
List all enabled apps through the provisioning api .
247
10
19,839
def enable_app ( self , appname ) : res = self . _make_ocs_request ( 'POST' , self . OCS_SERVICE_CLOUD , 'apps/' + appname ) if res . status_code == 200 : return True raise HTTPResponseError ( res )
Enable an app through provisioning_api
68
8
19,840
def _encode_string ( s ) : if six . PY2 and isinstance ( s , unicode ) : return s . encode ( 'utf-8' ) return s
Encodes a unicode instance to utf - 8 . If a str is passed it will simply be returned
39
22
19,841
def _check_ocs_status ( tree , accepted_codes = [ 100 ] ) : code_el = tree . find ( 'meta/statuscode' ) if code_el is not None and int ( code_el . text ) not in accepted_codes : r = requests . Response ( ) msg_el = tree . find ( 'meta/message' ) if msg_el is None : msg_el = tree # fallback to the entire ocs response, if we find no message. r . _content = ET . tostring ( msg_el ) r . status_code = int ( code_el . text ) raise OCSResponseError ( r )
Checks the status code of an OCS request
142
10
19,842
def make_ocs_request ( self , method , service , action , * * kwargs ) : accepted_codes = kwargs . pop ( 'accepted_codes' , [ 100 ] ) res = self . _make_ocs_request ( method , service , action , * * kwargs ) if res . status_code == 200 : tree = ET . fromstring ( res . content ) self . _check_ocs_status ( tree , accepted_codes = accepted_codes ) return res raise OCSResponseError ( res )
Makes a OCS API request and analyses the response
118
11
19,843
def _make_ocs_request ( self , method , service , action , * * kwargs ) : slash = '' if service : slash = '/' path = self . OCS_BASEPATH + service + slash + action attributes = kwargs . copy ( ) if 'headers' not in attributes : attributes [ 'headers' ] = { } attributes [ 'headers' ] [ 'OCS-APIREQUEST' ] = 'true' if self . _debug : print ( 'OCS request: %s %s %s' % ( method , self . url + path , attributes ) ) res = self . _session . request ( method , self . url + path , * * attributes ) return res
Makes a OCS API request
153
7
19,844
def _make_dav_request ( self , method , path , * * kwargs ) : if self . _debug : print ( 'DAV request: %s %s' % ( method , path ) ) if kwargs . get ( 'headers' ) : print ( 'Headers: ' , kwargs . get ( 'headers' ) ) path = self . _normalize_path ( path ) res = self . _session . request ( method , self . _webdav_url + parse . quote ( self . _encode_string ( path ) ) , * * kwargs ) if self . _debug : print ( 'DAV status: %i' % res . status_code ) if res . status_code in [ 200 , 207 ] : return self . _parse_dav_response ( res ) if res . status_code in [ 204 , 201 ] : return True raise HTTPResponseError ( res )
Makes a WebDAV request
206
7
19,845
def _parse_dav_response ( self , res ) : if res . status_code == 207 : tree = ET . fromstring ( res . content ) items = [ ] for child in tree : items . append ( self . _parse_dav_element ( child ) ) return items return False
Parses the DAV responses from a multi - status response
64
12
19,846
def _parse_dav_element ( self , dav_response ) : href = parse . unquote ( self . _strip_dav_path ( dav_response . find ( '{DAV:}href' ) . text ) ) if six . PY2 : href = href . decode ( 'utf-8' ) file_type = 'file' if href [ - 1 ] == '/' : file_type = 'dir' file_attrs = { } attrs = dav_response . find ( '{DAV:}propstat' ) attrs = attrs . find ( '{DAV:}prop' ) for attr in attrs : file_attrs [ attr . tag ] = attr . text return FileInfo ( href , file_type , file_attrs )
Parses a single DAV element
178
7
19,847
def _webdav_move_copy ( self , remote_path_source , remote_path_target , operation ) : if operation != "MOVE" and operation != "COPY" : return False if remote_path_target [ - 1 ] == '/' : remote_path_target += os . path . basename ( remote_path_source ) if not ( remote_path_target [ 0 ] == '/' ) : remote_path_target = '/' + remote_path_target remote_path_source = self . _normalize_path ( remote_path_source ) headers = { 'Destination' : self . _webdav_url + parse . quote ( self . _encode_string ( remote_path_target ) ) } return self . _make_dav_request ( operation , remote_path_source , headers = headers )
Copies or moves a remote file or directory
188
9
19,848
def _xml_to_dict ( self , element ) : return_dict = { } for el in element : return_dict [ el . tag ] = None children = el . getchildren ( ) if children : return_dict [ el . tag ] = self . _xml_to_dict ( children ) else : return_dict [ el . tag ] = el . text return return_dict
Take an XML element iterate over it and build a dict
83
12
19,849
def _get_shareinfo ( self , data_el ) : if ( data_el is None ) or not ( isinstance ( data_el , ET . Element ) ) : return None return ShareInfo ( self . _xml_to_dict ( data_el ) )
Simple helper which returns instance of ShareInfo class
58
9
19,850
def emit ( self , * args , * * kwargs ) : if self . _block : return for slot in self . _slots : if not slot : continue elif isinstance ( slot , partial ) : slot ( ) elif isinstance ( slot , weakref . WeakKeyDictionary ) : # For class methods, get the class object and call the method accordingly. for obj , method in slot . items ( ) : method ( obj , * args , * * kwargs ) elif isinstance ( slot , weakref . ref ) : # If it's a weakref, call the ref to get the instance and then call the func # Don't wrap in try/except so we don't risk masking exceptions from the actual func call if ( slot ( ) is not None ) : slot ( ) ( * args , * * kwargs ) else : # Else call it in a standard way. Should be just lambdas at this point slot ( * args , * * kwargs )
Calls all the connected slots with the provided args and kwargs unless block is activated
212
18
19,851
def connect ( self , slot ) : if not callable ( slot ) : raise ValueError ( "Connection to non-callable '%s' object failed" % slot . __class__ . __name__ ) if ( isinstance ( slot , partial ) or '<' in slot . __name__ ) : # If it's a partial or a lambda. The '<' check is the only py2 and py3 compatible way I could find if slot not in self . _slots : self . _slots . append ( slot ) elif inspect . ismethod ( slot ) : # Check if it's an instance method and store it with the instance as the key slotSelf = slot . __self__ slotDict = weakref . WeakKeyDictionary ( ) slotDict [ slotSelf ] = slot . __func__ if slotDict not in self . _slots : self . _slots . append ( slotDict ) else : # If it's just a function then just store it as a weakref. newSlotRef = weakref . ref ( slot ) if newSlotRef not in self . _slots : self . _slots . append ( newSlotRef )
Connects the signal to any callable object
252
9
19,852
def disconnect ( self , slot ) : if not callable ( slot ) : return if inspect . ismethod ( slot ) : # If it's a method, then find it by its instance slotSelf = slot . __self__ for s in self . _slots : if isinstance ( s , weakref . WeakKeyDictionary ) and ( slotSelf in s ) and ( s [ slotSelf ] is slot . __func__ ) : self . _slots . remove ( s ) break elif isinstance ( slot , partial ) or '<' in slot . __name__ : # If it's a partial or lambda, try to remove directly try : self . _slots . remove ( slot ) except ValueError : pass else : # It's probably a function, so try to remove by weakref try : self . _slots . remove ( weakref . ref ( slot ) ) except ValueError : pass
Disconnects the slot from the signal
192
8
19,853
def block ( self , signals = None , isBlocked = True ) : if signals : try : if isinstance ( signals , basestring ) : signals = [ signals ] except NameError : if isinstance ( signals , str ) : signals = [ signals ] signals = signals or self . keys ( ) for signal in signals : if signal not in self : raise RuntimeError ( "Could not find signal matching %s" % signal ) self [ signal ] . block ( isBlocked )
Sets the block on any provided signals or to all signals
102
12
19,854
def _open ( file_or_str , * * kwargs ) : if hasattr ( file_or_str , 'read' ) : yield file_or_str elif isinstance ( file_or_str , six . string_types ) : with open ( file_or_str , * * kwargs ) as file_desc : yield file_desc else : raise IOError ( 'Invalid file-or-str object: {}' . format ( file_or_str ) )
Either open a file handle or use an existing file - like object .
107
14
19,855
def load_delimited(filename, converters, delimiter=r'\s+'):
    r"""Load columnar data from an annotation file.

    The number of columns is inferred from the length of ``converters``;
    each converter is applied to its corresponding column.

    Parameters
    ----------
    filename : str or file-like
        Path or open file to read.
    converters : list of callable
        One converter per column, e.g. ``[float, str]``.
    delimiter : str
        Regular expression used to split columns. (Default value = r'\s+')

    Returns
    -------
    columns : list, or tuple of list
        A single list when there is one column, otherwise one list per
        column.

    Raises
    ------
    ValueError
        If a row has an unexpected number of columns, or a value cannot
        be converted.
    """
    # Initialize list of empty lists
    n_columns = len(converters)
    columns = tuple(list() for _ in range(n_columns))
    # Create re object for splitting lines
    splitter = re.compile(delimiter)
    # Note: we do io manually here for two reasons.
    #   1. The csv module has difficulties with unicode, which may lead
    #      to failures on certain annotation strings
    #   2. numpy's text loader does not handle non-numeric data
    with _open(filename, mode='r') as input_file:
        for row, line in enumerate(input_file, 1):
            # Split each line using the supplied delimiter
            data = splitter.split(line.strip(), n_columns - 1)
            # Throw a helpful error if we got an unexpected # of columns
            if n_columns != len(data):
                raise ValueError('Expected {} columns, got {} at '
                                 '{}:{:d}:\n\t{}'.format(
                                     n_columns, len(data), filename, row,
                                     line))
            for value, column, converter in zip(data, columns, converters):
                # Try converting the value, throw a helpful error on failure.
                # Catch Exception rather than using a bare `except:`, which
                # would also swallow KeyboardInterrupt/SystemExit.
                try:
                    converted_value = converter(value)
                except Exception:
                    raise ValueError("Couldn't convert value {} using {} "
                                     "found at {}:{:d}:\n\t{}".format(
                                         value, converter.__name__, filename,
                                         row, line))
                column.append(converted_value)
    # Sane output
    if n_columns == 1:
        return columns[0]
    else:
        return columns
r Utility function for loading in data from an annotation file where columns are delimited . The number of columns is inferred from the length of the provided converters list .
377
33
19,856
def load_events(filename, delimiter=r'\s+'):
    r"""Import time-stamp events from an annotation file.

    The file must hold a single column of numeric event times (useful for
    duration-less events such as beats or onsets).

    Parameters
    ----------
    filename : str or file-like
        Annotation file to read.
    delimiter : str
        Column-splitting regular expression. (Default value = r'\s+')

    Returns
    -------
    event_times : np.ndarray
        Array of event times.
    """
    # Delegate parsing to the generic single-column loader
    event_times = np.array(load_delimited(filename, [float], delimiter))
    # Validation problems are surfaced as warnings, not errors
    try:
        util.validate_events(event_times)
    except ValueError as exc:
        warnings.warn(exc.args[0])
    return event_times
r Import time - stamp events from an annotation file . The file should consist of a single column of numeric values corresponding to the event times . This is primarily useful for processing events which lack duration such as beats or onsets .
96
45
19,857
def load_labeled_events(filename, delimiter=r'\s+'):
    r"""Import labeled time-stamp events from an annotation file.

    The file must hold two columns: numeric event times and a string
    label per event (e.g. beats with metric position).

    Parameters
    ----------
    filename : str or file-like
        Annotation file to read.
    delimiter : str
        Column-splitting regular expression. (Default value = r'\s+')

    Returns
    -------
    event_times : np.ndarray
        Array of event times.
    labels : list of str
        Label for each event.
    """
    event_times, labels = load_delimited(filename, [float, str], delimiter)
    event_times = np.array(event_times)
    # Validation problems are surfaced as warnings, not errors
    try:
        util.validate_events(event_times)
    except ValueError as exc:
        warnings.warn(exc.args[0])
    return event_times, labels
r Import labeled time - stamp events from an annotation file . The file should consist of two columns ; the first having numeric values corresponding to the event times and the second having string labels for each event . This is primarily useful for processing labeled events which lack duration such as beats with metric beat number or onsets with an instrument label .
105
66
19,858
def load_time_series(filename, delimiter=r'\s+'):
    r"""Import a sampled time series from an annotation file.

    The file must hold two numeric columns: sample times and values.

    Parameters
    ----------
    filename : str or file-like
        Annotation file to read.
    delimiter : str
        Column-splitting regular expression. (Default value = r'\s+')

    Returns
    -------
    times : np.ndarray
        Sample times.
    values : np.ndarray
        Sample values.
    """
    sample_times, sample_values = load_delimited(
        filename, [float, float], delimiter)
    return np.array(sample_times), np.array(sample_values)
r Import a time series from an annotation file . The file should consist of two columns of numeric values corresponding to the time and value of each sample of the time series .
71
34
19,859
def load_wav(path, mono=True):
    """Load a .wav file as a numpy array using scipy.io.wavfile.

    Integer PCM data is scaled to floats in [-1, 1). Note: a signed
    N-bit integer spans [-2**(N-1), 2**(N-1)), so the correct divisor is
    2**(N-1) — the previous divisors (2**8, 2**16, 2**24) produced
    [-0.5, 0.5) for 8/16-bit audio and [-128, 128) for 32-bit audio.

    Parameters
    ----------
    path : str
        Path to the .wav file.
    mono : bool
        If True, average multi-channel data down to one channel.
        (Default value = True)

    Returns
    -------
    audio_data : np.ndarray
        Audio samples in [-1, 1).
    fs : int
        Sampling rate.

    Raises
    ------
    ValueError
        If the file's sample dtype is not int8/int16/int32.
    """
    fs, audio_data = scipy.io.wavfile.read(path)
    # Scale by 2**(bits - 1) so full-scale integers map to [-1, 1)
    if audio_data.dtype == 'int8':
        audio_data = audio_data / float(2**7)
    elif audio_data.dtype == 'int16':
        audio_data = audio_data / float(2**15)
    elif audio_data.dtype == 'int32':
        audio_data = audio_data / float(2**31)
    else:
        raise ValueError('Got unexpected .wav data type '
                         '{}'.format(audio_data.dtype))
    # Optionally convert to mono
    if mono and audio_data.ndim != 1:
        audio_data = audio_data.mean(axis=1)
    return audio_data, fs
Loads a . wav file as a numpy array using scipy . io . wavfile .
192
23
19,860
def load_ragged_time_series(filename, dtype=float, delimiter=r'\s+',
                            header=False):
    r"""Load a ragged time series from a delimited annotation file.

    Column 0 holds time stamps; columns 1 through n hold a variable
    number of values per row (possibly zero).

    Parameters
    ----------
    filename : str or file-like
        Annotation file to read.
    dtype : type
        Data type for the value columns. (Default value = float)
    delimiter : str
        Column-splitting regular expression. (Default value = r'\s+')
    header : bool
        If True, the first line is a header and is skipped.
        (Default value = False)

    Returns
    -------
    times : np.ndarray
        Time stamps.
    values : list of np.ndarray
        One (possibly empty) value array per time stamp.
    """
    # Initialize empty lists
    times = []
    values = []
    # Create re object for splitting lines
    splitter = re.compile(delimiter)
    if header:
        start_row = 1
    else:
        start_row = 0
    with _open(filename, mode='r') as input_file:
        if header:
            # BUG FIX: actually consume the header line; previously it was
            # parsed as data, making every header'd file fail to load.
            next(input_file, None)
        for row, line in enumerate(input_file, start_row):
            # Split each line using the supplied delimiter
            data = splitter.split(line.strip())
            try:
                converted_time = float(data[0])
            except (TypeError, ValueError) as exe:
                six.raise_from(ValueError("Couldn't convert value {} using {} "
                                          "found at {}:{:d}:\n\t{}".format(
                                              data[0], float.__name__,
                                              filename, row, line)), exe)
            times.append(converted_time)
            # cast values to a numpy array. time stamps with no values are
            # cast to an empty array.
            try:
                converted_value = np.array(data[1:], dtype=dtype)
            except (TypeError, ValueError) as exe:
                six.raise_from(ValueError("Couldn't convert value {} using {} "
                                          "found at {}:{:d}:\n\t{}".format(
                                              data[1:], dtype.__name__,
                                              filename, row, line)), exe)
            values.append(converted_value)
    return np.array(times), values
r Utility function for loading in data from a delimited time series annotation file with a variable number of columns . Assumes that column 0 contains time stamps and columns 1 through n contain values . n may be variable from time stamp to time stamp .
357
49
19,861
def pitch_class_to_semitone(pitch_class):
    r"""Convert a pitch class (e.g. 'C', 'Gbb', 'F#') to its semitone number.

    The first character must be a root letter in ``PITCH_CLASSES``;
    subsequent '#'/'b' characters raise/lower by one semitone each.

    Parameters
    ----------
    pitch_class : str
        Pitch class to convert.

    Returns
    -------
    int
        Semitone number in [0, 12).

    Raises
    ------
    InvalidChordException
        If the pitch class is malformed or the root letter is unknown.
    """
    semitone = 0
    for idx, char in enumerate(pitch_class):
        if char == '#' and idx > 0:
            semitone += 1
        elif char == 'b' and idx > 0:
            semitone -= 1
        elif idx == 0:
            semitone = PITCH_CLASSES.get(char)
            if semitone is None:
                # BUG FIX: an unknown root letter used to slip through as
                # None and crash with a TypeError at the final modulo.
                raise InvalidChordException(
                    "Pitch class improperly formed: %s" % pitch_class)
        else:
            raise InvalidChordException(
                "Pitch class improperly formed: %s" % pitch_class)
    return semitone % 12
r Convert a pitch class to semitone .
115
9
19,862
def scale_degree_to_semitone(scale_degree):
    r"""Convert a scale degree (e.g. '7', 'b3', '#5') to a semitone offset.

    Leading '#'/'b' characters shift the base degree up/down by one
    semitone each.

    Parameters
    ----------
    scale_degree : str
        Scale degree to convert.

    Returns
    -------
    int
        Semitone offset of the degree.

    Raises
    ------
    InvalidChordException
        If the degree (after stripping accidentals) is not recognized.
    """
    offset = 0
    if scale_degree.startswith("#"):
        offset = scale_degree.count("#")
        scale_degree = scale_degree.strip("#")
    elif scale_degree.startswith('b'):
        offset = -scale_degree.count("b")
        scale_degree = scale_degree.strip("b")
    semitone = SCALE_DEGREES.get(scale_degree, None)
    if semitone is None:
        raise InvalidChordException(
            "Scale degree improperly formed: {}, expected one of {}."
            .format(scale_degree, list(SCALE_DEGREES.keys())))
    return semitone + offset
r Convert a scale degree to semitone .
176
9
19,863
def scale_degree_to_bitmap(scale_degree, modulo=False, length=BITMAP_LENGTH):
    """Create a bitmap representation of a scale degree.

    A leading '*' marks the degree as omitted (encoded as -1). Degrees at
    or beyond ``length`` are dropped unless ``modulo`` wraps them.

    Parameters
    ----------
    scale_degree : str
        Scale degree, optionally prefixed with '*'.
    modulo : bool
        Wrap out-of-range degrees back into the bitmap.
        (Default value = False)
    length : int
        Bitmap length. (Default value = BITMAP_LENGTH)

    Returns
    -------
    np.ndarray
        Length-``length`` vector with one +1/-1 entry (or all zeros).
    """
    if scale_degree.startswith("*"):
        sign = -1
        scale_degree = scale_degree.strip("*")
    else:
        sign = 1
    bitmap = [0] * length
    semitone_index = scale_degree_to_semitone(scale_degree)
    if modulo or semitone_index < length:
        bitmap[semitone_index % length] = sign
    return np.array(bitmap)
Create a bitmap representation of a scale degree .
124
10
19,864
def quality_to_bitmap(quality):
    """Return the semitone bitmap for a given chord quality shorthand.

    Parameters
    ----------
    quality : str
        Quality shorthand, e.g. 'maj', 'min7'.

    Returns
    -------
    np.ndarray
        Semitone bitmap for the quality.

    Raises
    ------
    InvalidChordException
        If the shorthand is not in ``QUALITIES``.
    """
    try:
        return np.array(QUALITIES[quality])
    except KeyError:
        raise InvalidChordException(
            "Unsupported chord quality shorthand: '%s' "
            "Did you mean to reduce extended chords?" % quality)
Return the bitmap for a given quality .
61
9
19,865
def validate_chord_label(chord_label):
    """Test for well-formedness of a chord label.

    Returns silently on success; raises on failure. (Removed the dead
    trailing ``pass`` statement after the conditional.)

    Parameters
    ----------
    chord_label : str
        Chord label to validate, e.g. 'C', 'N', 'G:min7/b3'.

    Raises
    ------
    InvalidChordException
        If the label does not match the chord grammar.
    """
    # This monster regexp is pulled from the JAMS chord namespace,
    # which is in turn derived from the context-free grammar of
    # Harte et al., 2005.
    pattern = re.compile(r'''^((N|X)|(([A-G](b*|#*))((:(maj|min|dim|aug|1|5|sus2|sus4|maj6|min6|7|maj7|min7|dim7|hdim7|minmaj7|aug7|9|maj9|min9|11|maj11|min11|13|maj13|min13)(\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\))?)|(:\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\)))?((/((b*|#*)([1-9]|1[0-3]?)))?)?))$''')  # nopep8
    if not pattern.match(chord_label):
        raise InvalidChordException('Invalid chord label: '
                                    '{}'.format(chord_label))
Test for well - formedness of a chord label .
341
11
19,866
def join(chord_root, quality='', extensions=None, bass=''):
    r"""Join the parts of a chord into a complete chord label.

    Parameters
    ----------
    chord_root : str
        Root pitch class, e.g. 'C'.
    quality : str
        Quality shorthand, e.g. 'maj7'. (Default value = '')
    extensions : list of str, optional
        Scale-degree extensions, e.g. ['b9'].
    bass : str
        Bass scale degree; '1' and '' are omitted. (Default value = '')

    Returns
    -------
    str
        The validated chord label.

    Raises
    ------
    InvalidChordException
        If the assembled label is malformed.
    """
    parts = [chord_root]
    if quality or extensions:
        parts.append(":%s" % quality)
    if extensions:
        parts.append("(%s)" % ",".join(extensions))
    if bass and bass != '1':
        parts.append("/%s" % bass)
    chord_label = "".join(parts)
    # Raise if the assembled label violates the chord grammar
    validate_chord_label(chord_label)
    return chord_label
r Join the parts of a chord into a complete chord label .
96
13
19,867
def encode(chord_label, reduce_extended_chords=False,
           strict_bass_intervals=False):
    """Translate a chord label to numerical representations for evaluation.

    Fix: ``np.int`` was removed in NumPy 1.24; use the builtin ``int``
    (they were always identical).

    Parameters
    ----------
    chord_label : str
        Chord label to encode.
    reduce_extended_chords : bool
        Map extended chords to their base triad/seventh.
        (Default value = False)
    strict_bass_intervals : bool
        Raise if the bass interval is absent from the chord.
        (Default value = False)

    Returns
    -------
    root_number : int
        Absolute semitone of the chord's root.
    semitone_bitmap : np.ndarray
        12-dim binary vector of active semitones, relative to the root.
    bass_number : int
        Relative semitone of the chord's bass note.

    Raises
    ------
    InvalidChordException
        If the bass is absent and ``strict_bass_intervals`` is set.
    """
    if chord_label == NO_CHORD:
        return NO_CHORD_ENCODED
    if chord_label == X_CHORD:
        return X_CHORD_ENCODED
    chord_root, quality, scale_degrees, bass = split(
        chord_label, reduce_extended_chords=reduce_extended_chords)
    root_number = pitch_class_to_semitone(chord_root)
    bass_number = scale_degree_to_semitone(bass) % 12
    semitone_bitmap = quality_to_bitmap(quality)
    # The root is always active
    semitone_bitmap[0] = 1
    for scale_degree in scale_degrees:
        semitone_bitmap += scale_degree_to_bitmap(scale_degree,
                                                  reduce_extended_chords)
    # Collapse to a binary mask (np.int removed in NumPy 1.24)
    semitone_bitmap = (semitone_bitmap > 0).astype(int)
    if not semitone_bitmap[bass_number] and strict_bass_intervals:
        raise InvalidChordException(
            "Given bass scale degree is absent from this chord: "
            "%s" % chord_label, chord_label)
    else:
        semitone_bitmap[bass_number] = 1
    return root_number, semitone_bitmap, bass_number
Translate a chord label to numerical representations for evaluation .
299
11
19,868
def encode_many(chord_labels, reduce_extended_chords=False):
    """Translate a set of chord labels to numerical representations.

    Results are memoized per distinct label, so repeated labels are only
    encoded once. Fix: ``np.int`` was removed in NumPy 1.24; the builtin
    ``int`` dtype is identical.

    Parameters
    ----------
    chord_labels : list of str
        Chord labels to encode.
    reduce_extended_chords : bool
        Forwarded to ``encode``. (Default value = False)

    Returns
    -------
    roots : np.ndarray
        Absolute root semitone per label.
    semitones : np.ndarray
        (N, 12) binary semitone bitmaps.
    basses : np.ndarray
        Relative bass semitone per label.
    """
    num_items = len(chord_labels)
    roots, basses = np.zeros([2, num_items], dtype=int)
    semitones = np.zeros([num_items, 12], dtype=int)
    # Memoize encodings of repeated labels
    local_cache = dict()
    for i, label in enumerate(chord_labels):
        result = local_cache.get(label, None)
        if result is None:
            result = encode(label, reduce_extended_chords)
            local_cache[label] = result
        roots[i], semitones[i], basses[i] = result
    return roots, semitones, basses
Translate a set of chord labels to numerical representations for sane evaluation .
169
14
19,869
def rotate_bitmap_to_root(bitmap, chord_root):
    """Circularly shift a relative bitmap to its absolute pitch classes.

    Each active bit is moved ``chord_root`` semitones up (mod 12) and set
    to 1 in the output.

    Parameters
    ----------
    bitmap : array-like
        1D relative semitone bitmap.
    chord_root : int
        Absolute semitone of the root.

    Returns
    -------
    np.ndarray
        Absolute pitch-class bitmap.
    """
    bitmap = np.asarray(bitmap)
    assert bitmap.ndim == 1, "Currently only 1D bitmaps are supported."
    shifted = np.zeros_like(bitmap)
    active_bits = np.nonzero(bitmap)[0]
    shifted[(active_bits + chord_root) % 12] = 1
    return shifted
Circularly shift a relative bitmap to its absolute pitch classes .
116
16
19,870
def rotate_bitmaps_to_roots(bitmaps, roots):
    """Circularly shift relative bitmaps to absolute pitch classes.

    Parameters
    ----------
    bitmaps : array-like
        Sequence of 1D relative semitone bitmaps.
    roots : array-like
        Absolute root semitone per bitmap.

    Returns
    -------
    np.ndarray
        Stacked absolute pitch-class bitmaps.
    """
    return np.asarray([rotate_bitmap_to_root(bitmap, chord_root)
                       for bitmap, chord_root in zip(bitmaps, roots)])
Circularly shift relative bitmaps to absolute pitch classes .
74
15
19,871
def validate(reference_labels, estimated_labels):
    """Check that the inputs look like valid chord label lists.

    Parameters
    ----------
    reference_labels : list of str
        Reference chord labels.
    estimated_labels : list of str
        Estimated chord labels.

    Raises
    ------
    ValueError
        If the lists differ in length.
    InvalidChordException
        If any label is malformed.
    """
    n_ref = len(reference_labels)
    n_est = len(estimated_labels)
    if n_ref != n_est:
        raise ValueError("Chord comparison received different length lists: "
                         "len(reference)=%d\tlen(estimates)=%d" %
                         (n_ref, n_est))
    for label_list in (reference_labels, estimated_labels):
        for chord_label in label_list:
            validate_chord_label(chord_label)
    # When either label list is empty, warn the user
    if n_ref == 0:
        warnings.warn('Reference labels are empty')
    if n_est == 0:
        warnings.warn('Estimated labels are empty')
Checks that the input annotations to a comparison function look like valid chord labels .
164
16
19,872
def weighted_accuracy(comparisons, weights):
    """Compute the weighted accuracy of a list of chord comparisons.

    Comparison scores of -1 mark uncomparable chords and are excluded;
    the remaining scores are averaged with (re-normalized) weights.

    Parameters
    ----------
    comparisons : np.ndarray
        Per-frame comparison scores; -1 marks uncomparable frames.
    weights : np.ndarray
        Non-negative per-frame weights (typically durations).

    Returns
    -------
    float
        Weighted accuracy, or 0 when nothing is comparable/weighted.

    Raises
    ------
    ValueError
        If the lengths differ or any weight is negative.
    """
    num_frames = len(comparisons)
    if weights.shape[0] != num_frames:
        raise ValueError('weights and comparisons should be of the same'
                         ' length. len(weights) = {} but len(comparisons)'
                         ' = {}'.format(weights.shape[0], num_frames))
    if (weights < 0).any():
        raise ValueError('Weights should all be positive.')
    if np.sum(weights) == 0:
        warnings.warn('No nonzero weights, returning 0')
        return 0
    # Keep only frames whose chords could actually be compared
    valid_idx = comparisons >= 0
    if not valid_idx.any():
        warnings.warn("No reference chords were comparable "
                      "to estimated chords, returning 0.")
        return 0
    valid_scores = comparisons[valid_idx]
    valid_weights = np.asarray(weights[valid_idx], dtype=float)
    # Normalize and accumulate
    valid_weights = valid_weights / float(np.sum(valid_weights))
    return np.sum(valid_scores * valid_weights)
Compute the weighted accuracy of a list of chord comparisons .
279
12
19,873
def thirds(reference_labels, estimated_labels):
    """Compare chords along root & third relationships.

    Fix: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    dtype is identical.

    Parameters
    ----------
    reference_labels : list of str
        Reference chord labels.
    estimated_labels : list of str
        Estimated chord labels.

    Returns
    -------
    comparison_scores : np.ndarray
        1.0 where root and third agree, 0.0 otherwise; -1.0 for
        uncomparable ('X') reference chords.
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
    est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
    eq_roots = ref_roots == est_roots
    eq_thirds = ref_semitones[:, 3] == est_semitones[:, 3]
    comparison_scores = (eq_roots * eq_thirds).astype(float)
    # Ignore 'X' chords
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
Compare chords along root & third relationships .
172
8
19,874
def thirds_inv(reference_labels, estimated_labels):
    """Score chords along root, third, & bass relationships.

    Fix: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    dtype is identical.

    Parameters
    ----------
    reference_labels : list of str
        Reference chord labels.
    estimated_labels : list of str
        Estimated chord labels.

    Returns
    -------
    comparison_scores : np.ndarray
        1.0 where root, third, and bass agree, 0.0 otherwise; -1.0 for
        uncomparable ('X') reference chords.
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
    est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)
    eq_root = ref_roots == est_roots
    eq_bass = ref_bass == est_bass
    eq_third = ref_semitones[:, 3] == est_semitones[:, 3]
    comparison_scores = (eq_root * eq_third * eq_bass).astype(float)
    # Ignore 'X' chords
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
Score chords along root third & bass relationships .
189
9
19,875
def root(reference_labels, estimated_labels):
    """Compare chords according to roots.

    Fix: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    dtype is identical.

    Parameters
    ----------
    reference_labels : list of str
        Reference chord labels.
    estimated_labels : list of str
        Estimated chord labels.

    Returns
    -------
    comparison_scores : np.ndarray
        1.0 where roots agree, 0.0 otherwise; -1.0 for uncomparable
        ('X') reference chords.
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
    est_roots = encode_many(estimated_labels, False)[0]
    comparison_scores = (ref_roots == est_roots).astype(float)
    # Ignore 'X' chords
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
Compare chords according to roots .
129
6
19,876
def mirex(reference_labels, estimated_labels):
    """Compare chords along MIREX rules.

    A estimated chord is considered correct when it shares at least three
    active pitch classes with the reference. Fix: ``np.float`` was
    removed in NumPy 1.24; the builtin ``float`` dtype is identical.

    Parameters
    ----------
    reference_labels : list of str
        Reference chord labels.
    estimated_labels : list of str
        Estimated chord labels.

    Returns
    -------
    comparison_scores : np.ndarray
        1.0 / 0.0 per frame; -1.0 for skipped frames.
    """
    validate(reference_labels, estimated_labels)
    # TODO(?): Should this be an argument?
    min_intersection = 3
    ref_data = encode_many(reference_labels, False)
    ref_chroma = rotate_bitmaps_to_roots(ref_data[1], ref_data[0])
    est_data = encode_many(estimated_labels, False)
    est_chroma = rotate_bitmaps_to_roots(est_data[1], est_data[0])
    eq_chroma = (ref_chroma * est_chroma).sum(axis=-1)
    # Chroma matching for set bits
    comparison_scores = (eq_chroma >= min_intersection).astype(float)
    # No-chord matching; match -1 roots, SKIP_CHORDS dropped next
    no_root = np.logical_and(ref_data[0] == -1, est_data[0] == -1)
    comparison_scores[no_root] = 1.0
    # Skip chords where the number of active semitones `n` is
    # 0 < n < `min_intersection`.
    ref_semitone_count = (ref_data[1] > 0).sum(axis=1)
    skip_idx = np.logical_and(ref_semitone_count > 0,
                              ref_semitone_count < min_intersection)
    # Also ignore 'X' chords.
    np.logical_or(skip_idx, np.any(ref_data[1] < 0, axis=1), skip_idx)
    comparison_scores[skip_idx] = -1.0
    return comparison_scores
Compare chords along MIREX rules .
397
8
19,877
def seg(reference_intervals, estimated_intervals):
    """Compute the MIREX 'MeanSeg' score.

    Defined as the minimum of the under- and over-segmentation scores.

    Parameters
    ----------
    reference_intervals : np.ndarray
        Reference chord intervals.
    estimated_intervals : np.ndarray
        Estimated chord intervals.

    Returns
    -------
    float
        MeanSeg score in [0, 1].
    """
    under_score = underseg(reference_intervals, estimated_intervals)
    over_score = overseg(reference_intervals, estimated_intervals)
    return min(under_score, over_score)
Compute the MIREX MeanSeg score .
47
10
19,878
def merge_chord_intervals(intervals, labels):
    """Merge consecutive chord intervals if they represent the same chord.

    Labels are compared by their encoded (root, semitone bitmap, bass)
    triple, so enharmonically equivalent spellings merge.

    Parameters
    ----------
    intervals : np.ndarray
        (N, 2) array of [start, end] times.
    labels : list of str
        Chord label per interval.

    Returns
    -------
    np.ndarray
        Merged [start, end] intervals.
    """
    roots, semitones, basses = encode_many(labels, True)
    merged_ivs = []
    prev_rt = None
    prev_st = None
    prev_ba = None
    for s, e, rt, st, ba in zip(intervals[:, 0], intervals[:, 1],
                                roots, semitones, basses):
        # On the first iteration prev_st is None, so (st != prev_st) is an
        # all-True array and a new interval is always opened.
        if rt != prev_rt or (st != prev_st).any() or ba != prev_ba:
            prev_rt, prev_st, prev_ba = rt, st, ba
            merged_ivs.append([s, e])
        else:
            # Same chord continues: extend the previous interval's end time
            merged_ivs[-1][-1] = e
    return np.array(merged_ivs)
Merge consecutive chord intervals if they represent the same chord .
174
12
19,879
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
    """Compute weighted accuracy for all chord comparison functions.

    Parameters
    ----------
    ref_intervals : np.ndarray
        Reference chord intervals.
    ref_labels : list of str
        Reference chord labels.
    est_intervals : np.ndarray
        Estimated chord intervals.
    est_labels : list of str
        Estimated chord labels.
    **kwargs
        Currently unused; accepted for interface uniformity.

    Returns
    -------
    scores : collections.OrderedDict
        Metric name -> score for every comparison and segmentation metric.
    """
    # Append or crop estimated intervals so their span is the same as reference
    est_intervals, est_labels = util.adjust_intervals(
        est_intervals, est_labels, ref_intervals.min(), ref_intervals.max(),
        NO_CHORD, NO_CHORD)
    # use merged intervals for segmentation evaluation
    merged_ref_intervals = merge_chord_intervals(ref_intervals, ref_labels)
    merged_est_intervals = merge_chord_intervals(est_intervals, est_labels)
    # Adjust the labels so that they span the same intervals
    intervals, ref_labels, est_labels = util.merge_labeled_intervals(
        ref_intervals, ref_labels, est_intervals, est_labels)
    # Convert intervals to durations (used as weights)
    durations = util.intervals_to_durations(intervals)
    # Store scores for each comparison function
    scores = collections.OrderedDict()
    scores['thirds'] = weighted_accuracy(thirds(ref_labels, est_labels),
                                         durations)
    scores['thirds_inv'] = weighted_accuracy(thirds_inv(ref_labels,
                                                        est_labels),
                                             durations)
    scores['triads'] = weighted_accuracy(triads(ref_labels, est_labels),
                                         durations)
    scores['triads_inv'] = weighted_accuracy(triads_inv(ref_labels,
                                                        est_labels),
                                             durations)
    scores['tetrads'] = weighted_accuracy(tetrads(ref_labels, est_labels),
                                          durations)
    scores['tetrads_inv'] = weighted_accuracy(tetrads_inv(ref_labels,
                                                          est_labels),
                                              durations)
    scores['root'] = weighted_accuracy(root(ref_labels, est_labels),
                                       durations)
    scores['mirex'] = weighted_accuracy(mirex(ref_labels, est_labels),
                                        durations)
    scores['majmin'] = weighted_accuracy(majmin(ref_labels, est_labels),
                                         durations)
    scores['majmin_inv'] = weighted_accuracy(majmin_inv(ref_labels,
                                                        est_labels),
                                             durations)
    scores['sevenths'] = weighted_accuracy(sevenths(ref_labels, est_labels),
                                           durations)
    scores['sevenths_inv'] = weighted_accuracy(sevenths_inv(ref_labels,
                                                            est_labels),
                                               durations)
    # Segmentation scores use the merged (chord-change-only) intervals
    scores['underseg'] = underseg(merged_ref_intervals, merged_est_intervals)
    scores['overseg'] = overseg(merged_ref_intervals, merged_est_intervals)
    scores['seg'] = min(scores['overseg'], scores['underseg'])
    return scores
Computes weighted accuracy for all comparison functions for the given reference and estimated annotations .
719
16
19,880
def _n_onset_midi ( patterns ) : return len ( [ o_m for pat in patterns for occ in pat for o_m in occ ] )
Computes the number of onset_midi objects in a pattern
36
13
19,881
def validate(reference_patterns, estimated_patterns):
    """Check that the inputs look like valid pattern lists.

    Parameters
    ----------
    reference_patterns : list
        Reference patterns.
    estimated_patterns : list
        Estimated patterns.

    Raises
    ------
    ValueError
        If a pattern has no occurrences, or an element is not an
        (onset, midi) pair.
    """
    # Warn if pattern lists are empty
    if _n_onset_midi(reference_patterns) == 0:
        warnings.warn('Reference patterns are empty.')
    if _n_onset_midi(estimated_patterns) == 0:
        warnings.warn('Estimated patterns are empty.')
    for pattern_list in (reference_patterns, estimated_patterns):
        for pattern in pattern_list:
            if not pattern:
                raise ValueError("Each pattern must contain at least one "
                                 "occurrence.")
            for occurrence in pattern:
                for onset_midi in occurrence:
                    if len(onset_midi) != 2:
                        raise ValueError("The (onset, midi) tuple must "
                                         "contain exactly 2 elements.")
Checks that the input annotations to a metric look like valid pattern lists and throws helpful errors if not .
174
21
19,882
def _occurrence_intersection ( occ_P , occ_Q ) : set_P = set ( [ tuple ( onset_midi ) for onset_midi in occ_P ] ) set_Q = set ( [ tuple ( onset_midi ) for onset_midi in occ_Q ] ) return set_P & set_Q
Computes the intersection between two occurrences .
75
8
19,883
def _compute_score_matrix(P, Q, similarity_metric="cardinality_score"):
    """Compute the score matrix between the patterns P and Q.

    Fix: the "unsupported metric" error message contained a bare ``%s``
    with no formatting argument, so the raised message showed a literal
    '%s'; the metric name is now interpolated.

    Parameters
    ----------
    P : list
        Pattern as a list of occurrences.
    Q : list
        Pattern as a list of occurrences.
    similarity_metric : str
        Only "cardinality_score" is supported.
        (Default value = "cardinality_score")

    Returns
    -------
    np.ndarray
        (len(P), len(Q)) matrix of pairwise occurrence similarities.

    Raises
    ------
    ValueError
        If ``similarity_metric`` is not supported.
    """
    sm = np.zeros((len(P), len(Q)))     # The score matrix
    for iP, occ_P in enumerate(P):
        for iQ, occ_Q in enumerate(Q):
            if similarity_metric == "cardinality_score":
                denom = float(np.max([len(occ_P), len(occ_Q)]))
                # Compute the score
                sm[iP, iQ] = len(_occurrence_intersection(occ_P,
                                                          occ_Q)) / denom
            # TODO: More scores: 'normalised matching score'
            else:
                raise ValueError("The similarity metric (%s) can only be: "
                                 "'cardinality_score'." % similarity_metric)
    return sm
Computes the score matrix between the patterns P and Q .
194
12
19,884
def standard_FPR(reference_patterns, estimated_patterns, tol=1e-5):
    """Standard F1 Score, Precision and Recall.

    A reference pattern counts as found when its prototype (first
    occurrence) has the same length as some estimated prototype and is a
    transposition of it, up to ``tol``.

    Parameters
    ----------
    reference_patterns : list
        Reference patterns (lists of occurrences of (onset, midi) pairs).
    estimated_patterns : list
        Estimated patterns in the same format.
    tol : float
        Tolerance for the transposition check. (Default value = 1e-5)

    Returns
    -------
    f_measure : float
    precision : float
    recall : float
    """
    validate(reference_patterns, estimated_patterns)
    nP = len(reference_patterns)    # Number of patterns in the reference
    nQ = len(estimated_patterns)    # Number of patterns in the estimation
    k = 0                           # Number of patterns that match
    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.
    # Find matches of the prototype patterns
    for ref_pattern in reference_patterns:
        P = np.asarray(ref_pattern[0])  # Get reference prototype
        for est_pattern in estimated_patterns:
            Q = np.asarray(est_pattern[0])  # Get estimation prototype
            if len(P) != len(Q):
                continue
            # Check transposition given a certain tolerance: constant
            # per-row difference means Q is a transposed copy of P
            if (len(P) == len(Q) == 1 or
                    np.max(np.abs(np.diff(P - Q, axis=0))) < tol):
                k += 1
                break
    # Compute the standard measures
    precision = k / float(nQ)
    recall = k / float(nP)
    f_measure = util.f_measure(precision, recall)
    return f_measure, precision, recall
Standard F1 Score Precision and Recall .
311
8
19,885
def three_layer_FPR(reference_patterns, estimated_patterns):
    """Three Layer F1 Score, Precision and Recall, as described by Meredith.

    Layer 1 scores pairs of occurrences, layer 2 aggregates those into
    pattern-pair scores, and layer 3 aggregates pattern pairs into the
    final triple.

    Parameters
    ----------
    reference_patterns : list
        Reference patterns (lists of occurrences of (onset, midi) pairs).
    estimated_patterns : list
        Estimated patterns in the same format.

    Returns
    -------
    f_measure : float
    precision : float
    recall : float
    """
    validate(reference_patterns, estimated_patterns)

    def compute_first_layer_PR(ref_occs, est_occs):
        """Compute the first-layer precision/recall for one pair of
        occurrences, based on the size of their intersection.

        Parameters
        ----------
        ref_occs : list
            Reference occurrence of (onset, midi) pairs.
        est_occs : list
            Estimated occurrence of (onset, midi) pairs.

        Returns
        -------
        precision, recall : float
        """
        # Find the length of the intersection between reference and estimation
        s = len(_occurrence_intersection(ref_occs, est_occs))
        # Compute the first layer scores
        precision = s / float(len(ref_occs))
        recall = s / float(len(est_occs))
        return precision, recall

    def compute_second_layer_PR(ref_pattern, est_pattern):
        """Compute the second-layer precision/recall for one pair of
        patterns, by aggregating the layer-1 F-measure matrix of their
        occurrences.

        Parameters
        ----------
        ref_pattern : list
            Reference pattern (list of occurrences).
        est_pattern : list
            Estimated pattern (list of occurrences).

        Returns
        -------
        precision, recall : float
        """
        # Compute the first layer scores
        F_1 = compute_layer(ref_pattern, est_pattern)
        # Compute the second layer scores
        precision = np.mean(np.max(F_1, axis=0))
        recall = np.mean(np.max(F_1, axis=1))
        return precision, recall

    def compute_layer(ref_elements, est_elements, layer=1):
        """Compute the F-measure matrix for a given layer. The reference
        and estimated elements can be either patterns or occurrences,
        depending on the layer: occurrences for layer 1, patterns for
        layer 2.

        Parameters
        ----------
        ref_elements : list
            Reference occurrences (layer 1) or patterns (layer 2).
        est_elements : list
            Estimated occurrences (layer 1) or patterns (layer 2).
        layer : int
            Which layer to compute. (Default value = 1)

        Returns
        -------
        np.ndarray
            (len(ref_elements), len(est_elements)) F-measure matrix.
        """
        if layer != 1 and layer != 2:
            raise ValueError("Layer (%d) must be an integer between "
                             "1 and 2" % layer)
        nP = len(ref_elements)      # Number of elements in reference
        nQ = len(est_elements)      # Number of elements in estimation
        F = np.zeros((nP, nQ))      # F-measure matrix for the given layer
        for iP in range(nP):
            for iQ in range(nQ):
                if layer == 1:
                    func = compute_first_layer_PR
                elif layer == 2:
                    func = compute_second_layer_PR
                # Compute layer scores
                precision, recall = func(ref_elements[iP], est_elements[iQ])
                F[iP, iQ] = util.f_measure(precision, recall)
        return F

    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.
    # Compute the second layer (it includes the first layer)
    F_2 = compute_layer(reference_patterns, estimated_patterns, layer=2)
    # Compute the final scores (third layer)
    precision_3 = np.mean(np.max(F_2, axis=0))
    recall_3 = np.mean(np.max(F_2, axis=1))
    f_measure_3 = util.f_measure(precision_3, recall_3)
    return f_measure_3, precision_3, recall_3
Three Layer F1 Score Precision and Recall . As described by Meridith .
755
16
19,886
def first_n_three_layer_P(reference_patterns, estimated_patterns, n=5):
    """First-n three-layer precision.

    Three-layer precision computed over only the first ``n`` estimated
    patterns. Fix: the empty-input branch used to return the tuple
    ``(0., 0., 0.)`` although the normal path returns a scalar precision;
    it now consistently returns ``0.``.

    Parameters
    ----------
    reference_patterns : list
        Reference patterns (lists of occurrences of (onset, midi) pairs).
    estimated_patterns : list
        Estimated patterns in the same format.
    n : int
        Number of leading estimated patterns to keep. (Default value = 5)

    Returns
    -------
    precision : float
        Three-layer precision of the first ``n`` estimated patterns.
    """
    validate(reference_patterns, estimated_patterns)
    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0.
    # Get only the first n patterns from the estimated results
    fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
    # Compute the three-layer scores for the first n estimated patterns
    F, P, R = three_layer_FPR(reference_patterns, fn_est_patterns)
    return P
First n three - layer precision .
170
7
19,887
def first_n_target_proportion_R(reference_patterns, estimated_patterns, n=5):
    """First-n target proportion establishment recall metric.

    Establishment recall computed over only the first ``n`` estimated
    patterns. Fix: the empty-input branch used to return the tuple
    ``(0., 0., 0.)`` although the normal path returns a scalar recall;
    it now consistently returns ``0.``.

    Parameters
    ----------
    reference_patterns : list
        Reference patterns (lists of occurrences of (onset, midi) pairs).
    estimated_patterns : list
        Estimated patterns in the same format.
    n : int
        Number of leading estimated patterns to keep. (Default value = 5)

    Returns
    -------
    recall : float
        Establishment recall of the first ``n`` estimated patterns.
    """
    validate(reference_patterns, estimated_patterns)
    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0.
    # Get only the first n patterns from the estimated results
    fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
    F, P, R = establishment_FPR(reference_patterns, fn_est_patterns)
    return R
First n target proportion establishment recall metric .
155
8
19,888
def evaluate(ref_patterns, est_patterns, **kwargs):
    """Load data and perform the evaluation.

    Runs every pattern-discovery metric and collects the results.

    Parameters
    ----------
    ref_patterns : list
        Reference patterns (lists of occurrences of (onset, midi) pairs).
    est_patterns : list
        Estimated patterns in the same format.
    **kwargs
        Forwarded to each metric via ``util.filter_kwargs``; 'thresh' is
        overridden internally and 'n' defaults to 5.

    Returns
    -------
    scores : collections.OrderedDict
        Metric name -> score.
    """
    # Compute all the metrics
    scores = collections.OrderedDict()
    # Standard scores
    scores['F'], scores['P'], scores['R'] = \
        util.filter_kwargs(standard_FPR, ref_patterns, est_patterns, **kwargs)
    # Establishment scores
    scores['F_est'], scores['P_est'], scores['R_est'] = \
        util.filter_kwargs(establishment_FPR, ref_patterns, est_patterns,
                           **kwargs)
    # Occurrence scores
    # Force these values for thresh
    kwargs['thresh'] = .5
    scores['F_occ.5'], scores['P_occ.5'], scores['R_occ.5'] = \
        util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
                           **kwargs)
    kwargs['thresh'] = .75
    scores['F_occ.75'], scores['P_occ.75'], scores['R_occ.75'] = \
        util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
                           **kwargs)
    # Three-layer scores
    scores['F_3'], scores['P_3'], scores['R_3'] = \
        util.filter_kwargs(three_layer_FPR, ref_patterns, est_patterns,
                           **kwargs)
    # First Five Patterns scores
    # Set default value of n
    if 'n' not in kwargs:
        kwargs['n'] = 5
    scores['FFP'] = util.filter_kwargs(first_n_three_layer_P, ref_patterns,
                                       est_patterns, **kwargs)
    scores['FFTP_est'] = util.filter_kwargs(first_n_target_proportion_R,
                                            ref_patterns, est_patterns,
                                            **kwargs)
    return scores
Load data and perform the evaluation .
483
7
19,889
def validate(ref_intervals, ref_pitches, ref_velocities, est_intervals,
             est_pitches, est_velocities):
    """Check that the annotations have valid intervals, pitches and
    velocities.

    Parameters
    ----------
    ref_intervals, est_intervals : np.ndarray
        Note intervals.
    ref_pitches, est_pitches : np.ndarray
        Note pitches.
    ref_velocities, est_velocities : np.ndarray
        Note velocities.

    Raises
    ------
    ValueError
        If velocity arrays mismatch in length or contain negatives.
    """
    # Interval/pitch validation is delegated to the transcription module
    transcription.validate(ref_intervals, ref_pitches, est_intervals,
                           est_pitches)
    # Check that velocities have the same length as intervals/pitches
    for name, velocities, pitches in (
            ('Reference', ref_velocities, ref_pitches),
            ('Estimated', est_velocities, est_pitches)):
        if velocities.shape[0] != pitches.shape[0]:
            raise ValueError('%s velocities must have the same length as '
                             'pitches and intervals.' % name)
    # Check that the velocities are positive
    for name, velocities in (('Reference', ref_velocities),
                             ('Estimated', est_velocities)):
        if velocities.size > 0 and np.min(velocities) < 0:
            raise ValueError('%s velocities must be positive.' % name)
Checks that the input annotations have valid time intervals pitches and velocities and throws helpful errors if not .
254
22
19,890
def match_notes(ref_intervals, ref_pitches, ref_velocities, est_intervals,
                est_pitches, est_velocities, onset_tolerance=0.05,
                pitch_tolerance=50.0, offset_ratio=0.2,
                offset_min_tolerance=0.05, strict=False,
                velocity_tolerance=0.1):
    """Match notes, taking note velocity into consideration.

    Notes are first matched with the standard transcription matcher; the
    matches are then filtered by velocity agreement after rescaling the
    estimated velocities with a least-squares linear fit to the
    normalized reference velocities. Fix: pass ``rcond=None`` to
    ``np.linalg.lstsq`` — the legacy default is deprecated (FutureWarning)
    and ``rcond=None`` is the NumPy-recommended setting.

    Parameters
    ----------
    ref_intervals, est_intervals : np.ndarray
        Note intervals.
    ref_pitches, est_pitches : np.ndarray
        Note pitches.
    ref_velocities, est_velocities : np.ndarray
        Note velocities.
    onset_tolerance : float
        Onset matching tolerance in seconds. (Default value = 0.05)
    pitch_tolerance : float
        Pitch matching tolerance in cents. (Default value = 50.0)
    offset_ratio : float
        Offset tolerance as a fraction of duration. (Default value = 0.2)
    offset_min_tolerance : float
        Minimum offset tolerance in seconds. (Default value = 0.05)
    strict : bool
        Use strict (<) instead of lenient (<=) comparisons.
        (Default value = False)
    velocity_tolerance : float
        Maximum allowed normalized velocity error. (Default value = 0.1)

    Returns
    -------
    matching : list of tuple
        (ref_index, est_index) pairs of matched notes.
    """
    # Compute note matching as usual using standard transcription function
    matching = transcription.match_notes(ref_intervals, ref_pitches,
                                         est_intervals, est_pitches,
                                         onset_tolerance, pitch_tolerance,
                                         offset_ratio, offset_min_tolerance,
                                         strict)
    # Rescale reference velocities to the range [0, 1]
    min_velocity, max_velocity = np.min(ref_velocities), np.max(ref_velocities)
    # Make the smallest possible range 1 to avoid divide by zero
    velocity_range = max(1, max_velocity - min_velocity)
    ref_velocities = (ref_velocities - min_velocity) / float(velocity_range)
    # Convert matching list-of-tuples to array for fancy indexing
    matching = np.array(matching)
    # When there is no matching, return an empty list
    if matching.size == 0:
        return []
    # Grab velocities for matched notes
    ref_matched_velocities = ref_velocities[matching[:, 0]]
    est_matched_velocities = est_velocities[matching[:, 1]]
    # Find slope and intercept of line which produces best least-squares fit
    # between matched est and ref velocities
    slope, intercept = np.linalg.lstsq(
        np.vstack([est_matched_velocities,
                   np.ones(len(est_matched_velocities))]).T,
        ref_matched_velocities, rcond=None)[0]
    # Re-scale est velocities to match ref
    est_matched_velocities = slope * est_matched_velocities + intercept
    # Compute the absolute error of (rescaled) estimated velocities vs.
    # normalized reference velocities. Error will be in [0, 1]
    velocity_diff = np.abs(est_matched_velocities - ref_matched_velocities)
    # Check whether each error is within the provided tolerance
    velocity_within_tolerance = (velocity_diff < velocity_tolerance)
    # Only keep matches whose velocity was within the provided tolerance
    matching = matching[velocity_within_tolerance]
    # Convert back to list-of-tuple format
    matching = [tuple(_) for _ in matching]
    return matching
Match notes taking note velocity into consideration .
592
8
19,891
def validate(reference_beats, estimated_beats):
    """Check that the inputs look like valid beat time arrays.

    Parameters
    ----------
    reference_beats : np.ndarray
        Reference beat times, in seconds.
    estimated_beats : np.ndarray
        Estimated beat times, in seconds.
    """
    # Empty inputs make every metric 0, so warn rather than fail
    if reference_beats.size == 0:
        warnings.warn("Reference beats are empty.")
    if estimated_beats.size == 0:
        warnings.warn("Estimated beats are empty.")
    for beats in (reference_beats, estimated_beats):
        util.validate_events(beats, MAX_TIME)
Checks that the input annotations to a metric look like valid beat time arrays and throws helpful errors if not .
100
22
19,892
def _get_reference_beat_variations ( reference_beats ) : # Create annotations at twice the metric level interpolated_indices = np . arange ( 0 , reference_beats . shape [ 0 ] - .5 , .5 ) original_indices = np . arange ( 0 , reference_beats . shape [ 0 ] ) double_reference_beats = np . interp ( interpolated_indices , original_indices , reference_beats ) # Return metric variations: # True, off-beat, double tempo, half tempo odd, and half tempo even return ( reference_beats , double_reference_beats [ 1 : : 2 ] , double_reference_beats , reference_beats [ : : 2 ] , reference_beats [ 1 : : 2 ] )
Return metric variations of the reference beats
175
7
19,893
def f_measure ( reference_beats , estimated_beats , f_measure_threshold = 0.07 ) : validate ( reference_beats , estimated_beats ) # When estimated beats are empty, no beats are correct; metric is 0 if estimated_beats . size == 0 or reference_beats . size == 0 : return 0. # Compute the best-case matching between reference and estimated locations matching = util . match_events ( reference_beats , estimated_beats , f_measure_threshold ) precision = float ( len ( matching ) ) / len ( estimated_beats ) recall = float ( len ( matching ) ) / len ( reference_beats ) return util . f_measure ( precision , recall )
Compute the F - measure of correct vs incorrectly predicted beats . Correctness is determined over a small window .
163
22
19,894
def cemgil ( reference_beats , estimated_beats , cemgil_sigma = 0.04 ) : validate ( reference_beats , estimated_beats ) # When estimated beats are empty, no beats are correct; metric is 0 if estimated_beats . size == 0 or reference_beats . size == 0 : return 0. , 0. # We'll compute Cemgil's accuracy for each variation accuracies = [ ] for reference_beats in _get_reference_beat_variations ( reference_beats ) : accuracy = 0 # Cycle through beats for beat in reference_beats : # Find the error for the closest beat to the reference beat beat_diff = np . min ( np . abs ( beat - estimated_beats ) ) # Add gaussian error into the accuracy accuracy += np . exp ( - ( beat_diff ** 2 ) / ( 2.0 * cemgil_sigma ** 2 ) ) # Normalize the accuracy accuracy /= .5 * ( estimated_beats . shape [ 0 ] + reference_beats . shape [ 0 ] ) # Add it to our list of accuracy scores accuracies . append ( accuracy ) # Return raw accuracy with non-varied annotations # and maximal accuracy across all variations return accuracies [ 0 ] , np . max ( accuracies )
Cemgil's score computes a Gaussian error of each estimated beat. Compares against the original beat times and all metrical variations.
282
29
19,895
def goto(reference_beats, estimated_beats, goto_threshold=0.35,
         goto_mu=0.2, goto_sigma=0.2):
    """Calculate Goto's score: a binary 1 or 0 depending on some specific
    heuristic criteria.

    Parameters
    ----------
    reference_beats : np.ndarray
        Reference beat times, in seconds.
    estimated_beats : np.ndarray
        Estimated beat times, in seconds.
    goto_threshold : float
        Maximum normalized beat error for a beat to count as correct.
    goto_mu : float
        Threshold on the mean absolute beat error of the longest
        correctly-tracked section.
    goto_sigma : float
        Threshold on the standard deviation of the beat error of the
        longest correctly-tracked section.

    Returns
    -------
    float
        1.0 if all of Goto's criteria are met, otherwise 0.0.
    """
    validate(reference_beats, estimated_beats)
    # When estimated beats are empty, no beats are correct; metric is 0
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0.
    # Error for each beat, initialized to 1 (the worst possible error)
    beat_error = np.ones(reference_beats.shape[0])
    # Flag for whether the reference and estimated beats are paired
    paired = np.zeros(reference_beats.shape[0])
    # Keep track of Goto's three criteria
    goto_criteria = 0
    # Only interior beats are scored; the endpoints have no full window
    for n in range(1, reference_beats.shape[0] - 1):
        # Half the previous inter-reference-beat interval
        previous_interval = 0.5 * (reference_beats[n] - reference_beats[n - 1])
        # Window start - midway between the current beat and the previous
        window_min = reference_beats[n] - previous_interval
        # Half the next inter-reference-beat interval
        next_interval = 0.5 * (reference_beats[n + 1] - reference_beats[n])
        # Window end - midway between the current beat and the next
        window_max = reference_beats[n] + next_interval
        # Get estimated beats falling inside the window
        beats_in_window = np.logical_and((estimated_beats >= window_min),
                                         (estimated_beats < window_max))
        # Zero or multiple estimates in the window: false negative/positive
        if beats_in_window.sum() == 0 or beats_in_window.sum() > 1:
            paired[n] = 0
            beat_error[n] = 1
        else:
            # Single beat is paired!
            paired[n] = 1
            # Get offset of the estimated beat from the reference beat
            offset = estimated_beats[beats_in_window] - reference_beats[n]
            # Normalize by the previous or next half-interval, so the error
            # is signed and scale-free
            if offset < 0:
                beat_error[n] = offset / previous_interval
            else:
                beat_error[n] = offset / next_interval
    # Get indices of incorrect beats
    incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold)
    # NOTE(review): beat_error[0] and beat_error[-1] are never updated in
    # the loop above, so they keep their initial value of 1 and always
    # exceed goto_threshold — incorrect_beats is therefore never empty, and
    # fewer than 3 entries means at most one true error besides the two
    # endpoints. (The original comment claimed they "will be 0"; the code
    # shows otherwise.)
    if incorrect_beats.shape[0] < 3:
        # Track of correct beats between the endpoint pseudo-errors
        track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1]
        goto_criteria = 1
    else:
        # Length of the longest gap between consecutive incorrect beats
        track_len = np.max(np.diff(incorrect_beats))
        track_start = np.flatnonzero(np.diff(incorrect_beats) == track_len)[0]
        # Is the track length at least 25% of the song?
        if track_len - 1 > .25 * (reference_beats.shape[0] - 2):
            goto_criteria = 1
            start_beat = incorrect_beats[track_start]
            end_beat = incorrect_beats[track_start + 1]
            track = beat_error[start_beat:end_beat + 1]
    # If we have a sufficiently long correct track
    if goto_criteria:
        # Are mean and std of the track less than the required thresholds?
        if np.mean(np.abs(track)) < goto_mu and \
           np.std(track, ddof=1) < goto_sigma:
            goto_criteria = 3
    # If all criteria are met, score is 100%!
    return 1.0 * (goto_criteria == 3)
Calculate Goto's score, a binary 1 or 0 depending on some specific heuristic criteria.
815
19
19,896
def p_score(reference_beats, estimated_beats, p_score_threshold=0.2):
    """Get McKinney's P-score, based on the cross-correlation of the
    reference and estimated beat impulse trains.

    Fix: ``np.int`` (deprecated in NumPy 1.20, removed in 1.24) is replaced
    with the builtin ``int``, which it was an alias for — behavior is
    unchanged on old NumPy and no longer raises AttributeError on new.

    Parameters
    ----------
    reference_beats : np.ndarray
        Reference beat times, in seconds.
    estimated_beats : np.ndarray
        Estimated beat times, in seconds.
    p_score_threshold : float
        Correlation window size, as a fraction of the median
        inter-annotation interval.

    Returns
    -------
    float
        The P-score.
    """
    validate(reference_beats, estimated_beats)
    # Beat intervals are undefined with a single beat, so warn
    if reference_beats.size == 1:
        warnings.warn("Only one reference beat was provided, so beat intervals"
                      " cannot be computed.")
    if estimated_beats.size == 1:
        warnings.warn("Only one estimated beat was provided, so beat intervals"
                      " cannot be computed.")
    # The metric cannot be computed with <= 1 beats on either side
    if estimated_beats.size <= 1 or reference_beats.size <= 1:
        return 0.
    # Quantize beats to 10ms
    sampling_rate = int(1.0 / 0.010)
    # Shift beats so that the minimum in either sequence is zero
    offset = min(estimated_beats.min(), reference_beats.min())
    estimated_beats = np.array(estimated_beats - offset)
    reference_beats = np.array(reference_beats - offset)
    # Largest time index needed for the impulse trains
    # (builtin int replaces the removed np.int alias)
    end_point = int(np.ceil(np.max([np.max(estimated_beats),
                                    np.max(reference_beats)])))
    # Make impulse trains with impulses at beat locations
    reference_train = np.zeros(end_point * sampling_rate + 1)
    beat_indices = np.ceil(reference_beats * sampling_rate).astype(int)
    reference_train[beat_indices] = 1.0
    estimated_train = np.zeros(end_point * sampling_rate + 1)
    beat_indices = np.ceil(estimated_beats * sampling_rate).astype(int)
    estimated_train[beat_indices] = 1.0
    # Window size to take the correlation over, defined as
    # p_score_threshold * median(inter-annotation intervals)
    annotation_intervals = np.diff(np.flatnonzero(reference_train))
    win_size = int(np.round(p_score_threshold *
                            np.median(annotation_intervals)))
    # Get full correlation
    train_correlation = np.correlate(reference_train, estimated_train, 'full')
    # Get the middle element - note we are rounding down on purpose here
    middle_lag = train_correlation.shape[0] // 2
    # Truncate to only valid lags (those corresponding to the window)
    start = middle_lag - win_size
    end = middle_lag + win_size + 1
    train_correlation = train_correlation[start:end]
    # Compute and return the P-score
    n_beats = np.max([estimated_beats.shape[0], reference_beats.shape[0]])
    return np.sum(train_correlation) / n_beats
Get McKinney's P-score, based on the autocorrelation of the reference and estimated beats.
657
21
19,897
def information_gain ( reference_beats , estimated_beats , bins = 41 ) : validate ( reference_beats , estimated_beats ) # If an even number of bins is provided, # there will be no bin centered at zero, so warn the user. if not bins % 2 : warnings . warn ( "bins parameter is even, " "so there will not be a bin centered at zero." ) # Warn when only one beat is provided for either estimated or reference, # report a warning if reference_beats . size == 1 : warnings . warn ( "Only one reference beat was provided, so beat intervals" " cannot be computed." ) if estimated_beats . size == 1 : warnings . warn ( "Only one estimated beat was provided, so beat intervals" " cannot be computed." ) # When estimated or reference beats have <= 1 beats, can't compute the # metric, so return 0 if estimated_beats . size <= 1 or reference_beats . size <= 1 : return 0. # Get entropy for reference beats->estimated beats # and estimated beats->reference beats forward_entropy = _get_entropy ( reference_beats , estimated_beats , bins ) backward_entropy = _get_entropy ( estimated_beats , reference_beats , bins ) # Pick the larger of the entropies norm = np . log2 ( bins ) if forward_entropy > backward_entropy : # Note that the beat evaluation toolbox does not normalize information_gain_score = ( norm - forward_entropy ) / norm else : information_gain_score = ( norm - backward_entropy ) / norm return information_gain_score
Get the information gain - K - L divergence of the beat error histogram to a uniform histogram
357
20
19,898
def index_labels ( labels , case_sensitive = False ) : label_to_index = { } index_to_label = { } # If we're not case-sensitive, if not case_sensitive : labels = [ str ( s ) . lower ( ) for s in labels ] # First, build the unique label mapping for index , s in enumerate ( sorted ( set ( labels ) ) ) : label_to_index [ s ] = index index_to_label [ index ] = s # Remap the labels to indices indices = [ label_to_index [ s ] for s in labels ] # Return the converted labels, and the inverse mapping return indices , index_to_label
Convert a list of string identifiers into numerical indices .
148
11
19,899
def intervals_to_samples ( intervals , labels , offset = 0 , sample_size = 0.1 , fill_value = None ) : # Round intervals to the sample size num_samples = int ( np . floor ( intervals . max ( ) / sample_size ) ) sample_indices = np . arange ( num_samples , dtype = np . float32 ) sample_times = ( sample_indices * sample_size + offset ) . tolist ( ) sampled_labels = interpolate_intervals ( intervals , labels , sample_times , fill_value ) return sample_times , sampled_labels
Convert an array of labeled time intervals to annotated samples .
136
13