idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
26,000 | def load_config ( self , config ) : self . config = copy_config ( config , self . mutable_config_keys ) if 'cookiejar_cookies' in config [ 'state' ] : self . cookies = CookieManager . from_cookie_list ( config [ 'state' ] [ 'cookiejar_cookies' ] ) | Configure grab instance with external config object . |
26,001 | def setup ( self , ** kwargs ) : for key in kwargs : if key not in self . config . keys ( ) : raise error . GrabMisuseError ( 'Unknown option: %s' % key ) if 'url' in kwargs : if self . config . get ( 'url' ) : kwargs [ 'url' ] = self . make_url_absolute ( kwargs [ 'url' ] ) self . config . update ( kwargs ) | Setting up Grab instance configuration . |
26,002 | def download ( self , url , location , ** kwargs ) : doc = self . go ( url , ** kwargs ) with open ( location , 'wb' ) as out : out . write ( doc . body ) return len ( doc . body ) | Fetch document located at url and save to to location . |
26,003 | def prepare_request ( self , ** kwargs ) : if self . transport is None : self . setup_transport ( self . transport_param ) self . reset ( ) self . request_counter = next ( REQUEST_COUNTER ) if kwargs : self . setup ( ** kwargs ) if self . proxylist . size ( ) and self . config [ 'proxy_auto_change' ] : self . change_proxy ( ) self . request_method = self . detect_request_method ( ) self . transport . process_config ( self ) | Configure all things to make real network request . This method is called before doing real request via transport extension . |
26,004 | def log_request ( self , extra = '' ) : thread_name = threading . currentThread ( ) . getName ( ) . lower ( ) if thread_name == 'mainthread' : thread_name = '' else : thread_name = '-%s' % thread_name if self . config [ 'proxy' ] : if self . config [ 'proxy_userpwd' ] : auth = ' with authorization' else : auth = '' proxy_info = ' via %s proxy of type %s%s' % ( self . config [ 'proxy' ] , self . config [ 'proxy_type' ] , auth ) else : proxy_info = '' if extra : extra = '[%s] ' % extra logger_network . debug ( '[%s%s] %s%s %s%s' , ( '%02d' % self . request_counter if self . request_counter is not None else 'NA' ) , thread_name , extra , self . request_method or 'GET' , self . config [ 'url' ] , proxy_info ) | Send request details to logging system . |
26,005 | def request ( self , ** kwargs ) : self . prepare_request ( ** kwargs ) refresh_count = 0 while True : self . log_request ( ) try : self . transport . request ( ) except error . GrabError as ex : self . exception = ex self . reset_temporary_options ( ) if self . config [ 'log_dir' ] : self . save_failed_dump ( ) raise else : doc = self . process_request_result ( ) if self . config [ 'follow_location' ] : if doc . code in ( 301 , 302 , 303 , 307 , 308 ) : if doc . headers . get ( 'Location' ) : refresh_count += 1 if refresh_count > self . config [ 'redirect_limit' ] : raise error . GrabTooManyRedirectsError ( ) else : url = doc . headers . get ( 'Location' ) self . prepare_request ( url = self . make_url_absolute ( url ) , referer = None ) continue if self . config [ 'follow_refresh' ] : refresh_url = self . doc . get_meta_refresh_url ( ) if refresh_url is not None : refresh_count += 1 if refresh_count > self . config [ 'redirect_limit' ] : raise error . GrabTooManyRedirectsError ( ) else : self . prepare_request ( url = self . make_url_absolute ( refresh_url ) , referer = None ) continue return doc | Perform network request . |
26,006 | def submit ( self , make_request = True , ** kwargs ) : result = self . doc . get_form_request ( ** kwargs ) if result [ 'multipart_post' ] : self . setup ( multipart_post = result [ 'multipart_post' ] ) if result [ 'post' ] : self . setup ( post = result [ 'post' ] ) if result [ 'url' ] : self . setup ( url = result [ 'url' ] ) if make_request : return self . request ( ) else : return None | Submit current form . |
26,007 | def process_request_result ( self , prepare_response_func = None ) : now = datetime . utcnow ( ) if self . config [ 'debug_post' ] : post = self . config [ 'post' ] or self . config [ 'multipart_post' ] if isinstance ( post , dict ) : post = list ( post . items ( ) ) if post : if isinstance ( post , six . string_types ) : post = make_str ( post [ : self . config [ 'debug_post_limit' ] ] , errors = 'ignore' ) + b'...' else : items = normalize_http_values ( post , charset = self . config [ 'charset' ] ) new_items = [ ] for key , value in items : if len ( value ) > self . config [ 'debug_post_limit' ] : value = value [ : self . config [ 'debug_post_limit' ] ] + b'...' else : value = value new_items . append ( ( key , value ) ) post = '\n' . join ( '%-25s: %s' % x for x in new_items ) if post : logger_network . debug ( '[%02d] POST request:\n%s\n' , self . request_counter , post ) self . reset_temporary_options ( ) if prepare_response_func : self . doc = prepare_response_func ( self . transport , self ) else : self . doc = self . transport . prepare_response ( self ) self . doc . process_grab ( self ) if self . config [ 'reuse_cookies' ] : self . cookies . update ( self . doc . cookies ) self . doc . timestamp = now self . config [ 'charset' ] = self . doc . charset if self . config [ 'log_file' ] : with open ( self . config [ 'log_file' ] , 'wb' ) as out : out . write ( self . doc . body ) if self . config [ 'cookiefile' ] : self . cookies . save_to_file ( self . config [ 'cookiefile' ] ) if self . config [ 'reuse_referer' ] : self . config [ 'referer' ] = self . doc . url self . copy_request_data ( ) if self . config [ 'log_dir' ] : self . save_dumps ( ) return self . doc | Process result of real request performed via transport extension . |
26,008 | def save_failed_dump ( self ) : try : if ( self . transport . __class__ . __name__ == 'Urllib3Transport' and not getattr ( self . transport , '_response' , None ) ) : self . doc = None else : self . doc = self . transport . prepare_response ( self ) self . copy_request_data ( ) self . save_dumps ( ) except Exception as ex : logger . error ( '' , exc_info = ex ) | Save dump of failed request for debugging . |
26,009 | def setup_document ( self , content , ** kwargs ) : self . reset ( ) if isinstance ( content , six . text_type ) : raise error . GrabMisuseError ( 'Method `setup_document` accepts only ' 'byte string in `content` argument.' ) doc = Document ( grab = self ) doc . body = content doc . status = '' doc . head = b'HTTP/1.1 200 OK\r\n\r\n' doc . parse ( charset = kwargs . get ( 'document_charset' ) ) doc . code = 200 doc . total_time = 0 doc . connect_time = 0 doc . name_lookup_time = 0 doc . url = '' for key , value in kwargs . items ( ) : setattr ( doc , key , value ) self . doc = doc | Setup response object without real network requests . |
26,010 | def change_proxy ( self , random = True ) : if self . proxylist . size ( ) : if random : proxy = self . proxylist . get_random_proxy ( ) else : proxy = self . proxylist . get_next_proxy ( ) self . setup ( proxy = proxy . get_address ( ) , proxy_userpwd = proxy . get_userpwd ( ) , proxy_type = proxy . proxy_type ) else : logger . debug ( 'Proxy list is empty' ) | Set random proxy from proxylist . |
26,011 | def make_url_absolute ( self , url , resolve_base = False ) : if self . config [ 'url' ] : if resolve_base : ubody = self . doc . unicode_body ( ) base_url = find_base_url ( ubody ) if base_url : return urljoin ( base_url , url ) return urljoin ( self . config [ 'url' ] , url ) else : return url | Make url absolute using previous request url as base url . |
26,012 | def detect_request_method ( self ) : method = self . config [ 'method' ] if method : method = method . upper ( ) else : if self . config [ 'post' ] or self . config [ 'multipart_post' ] : method = 'POST' else : method = 'GET' return method | Analyze request config and find which request method will be used . |
26,013 | def create_cookie ( name , value , domain , httponly = None , ** kwargs ) : if domain == 'localhost' : domain = '' config = dict ( name = name , value = value , version = 0 , port = None , domain = domain , path = '/' , secure = False , expires = None , discard = True , comment = None , comment_url = None , rfc2109 = False , rest = { 'HttpOnly' : httponly } , ) for key in kwargs : if key not in config : raise GrabMisuseError ( 'Function `create_cookie` does not accept ' '`%s` argument' % key ) config . update ( ** kwargs ) config [ 'rest' ] [ 'HttpOnly' ] = httponly config [ 'port_specified' ] = bool ( config [ 'port' ] ) config [ 'domain_specified' ] = bool ( config [ 'domain' ] ) config [ 'domain_initial_dot' ] = ( config [ 'domain' ] or '' ) . startswith ( '.' ) config [ 'path_specified' ] = bool ( config [ 'path' ] ) return Cookie ( ** config ) | Creates cookielib . Cookie instance |
26,014 | def set ( self , name , value , domain , ** kwargs ) : if domain == 'localhost' : domain = '' self . cookiejar . set_cookie ( create_cookie ( name , value , domain , ** kwargs ) ) | Add new cookie or replace existing cookie with same parameters . |
26,015 | def load_from_file ( self , path ) : with open ( path ) as inf : data = inf . read ( ) if data : items = json . loads ( data ) else : items = { } for item in items : extra = dict ( ( x , y ) for x , y in item . items ( ) if x not in [ 'name' , 'value' , 'domain' ] ) self . set ( item [ 'name' ] , item [ 'value' ] , item [ 'domain' ] , ** extra ) | Load cookies from the file . |
26,016 | def save_to_file ( self , path ) : with open ( path , 'w' ) as out : out . write ( json . dumps ( self . get_dict ( ) ) ) | Dump all cookies to file . |
26,017 | def process_service_result ( self , result , task , meta = None ) : if meta is None : meta = { } if isinstance ( result , Task ) : if meta . get ( 'source' ) == 'cache_reader' : self . spider . add_task ( result , queue = self . spider . task_queue ) else : self . spider . add_task ( result ) elif result is None : pass elif isinstance ( result , ResponseNotValid ) : self . spider . add_task ( task . clone ( refresh_cache = True ) ) error_code = result . __class__ . __name__ . replace ( '_' , '-' ) self . spider . stat . inc ( 'integrity:%s' % error_code ) elif isinstance ( result , Exception ) : if task : handler = self . spider . find_task_handler ( task ) handler_name = getattr ( handler , '__name__' , 'NONE' ) else : handler_name = 'NA' self . spider . process_parser_error ( handler_name , task , meta [ 'exc_info' ] , ) if isinstance ( result , FatalError ) : self . spider . fatal_error_queue . put ( meta [ 'exc_info' ] ) elif isinstance ( result , dict ) and 'grab' in result : if ( self . spider . cache_writer_service and not result . get ( 'from_cache' ) and result [ 'ok' ] ) : self . spider . cache_writer_service . input_queue . put ( ( task , result [ 'grab' ] ) ) self . spider . log_network_result_stats ( result , task ) is_valid = False if task . get ( 'raw' ) : is_valid = True elif result [ 'ok' ] : res_code = result [ 'grab' ] . doc . code is_valid = self . spider . is_valid_network_response_code ( res_code , task ) if is_valid : self . spider . parser_service . input_queue . put ( ( result , task ) ) else : self . spider . log_failed_network_result ( result ) if self . spider . network_try_limit > 0 : task . refresh_cache = True task . setup_grab_config ( result [ 'grab_config_backup' ] ) self . spider . add_task ( task ) if result . get ( 'from_cache' ) : self . spider . stat . inc ( 'spider:task-%s-cache' % task . name ) self . spider . stat . inc ( 'spider:request' ) else : raise SpiderError ( 'Unknown result received from a service: %s' % result ) | Process result submitted from any service to task dispatcher service . |
26,018 | def find_link ( self , href_pattern , make_absolute = True ) : if make_absolute : self . tree . make_links_absolute ( self . doc . url ) if isinstance ( href_pattern , six . text_type ) : raise GrabMisuseError ( 'Method `find_link` accepts only ' 'byte-string argument' ) href_pattern = make_unicode ( href_pattern ) for elem , _ , link , _ in self . tree . iterlinks ( ) : if elem . tag == 'a' and href_pattern in link : return link return None | Find link in response body which href value matches href_pattern . |
26,019 | def find_link_rex ( self , rex , make_absolute = True ) : if make_absolute : self . tree . make_links_absolute ( self . doc . url ) for elem , _ , link , _ in self . tree . iterlinks ( ) : if elem . tag == 'a' : match = rex . search ( link ) if match : return link return None | Find link matched the given regular expression in response body . |
26,020 | def css_one ( self , path , default = NULL ) : try : return self . css_list ( path ) [ 0 ] except IndexError : if default is NULL : raise DataNotFound ( 'CSS path not found: %s' % path ) else : return default | Get first element which matches the given css path or raise DataNotFound . |
26,021 | def css_text ( self , path , default = NULL , smart = False , normalize_space = True ) : try : return get_node_text ( self . css_one ( path ) , smart = smart , normalize_space = normalize_space ) except IndexError : if default is NULL : raise else : return default | Get normalized text of node which matches the css path . |
26,022 | def css_number ( self , path , default = NULL , ignore_spaces = False , smart = False , make_int = True ) : try : text = self . css_text ( path , smart = smart ) return find_number ( text , ignore_spaces = ignore_spaces , make_int = make_int ) except IndexError : if default is NULL : raise else : return default | Find number in normalized text of node which matches the given css path . |
26,023 | def strip_tags ( self , content , smart = False ) : from lxml . html import fromstring return get_node_text ( fromstring ( content ) , smart = smart ) | Strip tags from the HTML content . |
26,024 | def camel_case_to_underscore ( name ) : res = RE_TOKEN1 . sub ( r'\1_\2' , name ) res = RE_TOKEN2 . sub ( r'\1_\2' , res ) return res . lower ( ) | Converts camel_case into CamelCase |
26,025 | def read_bom ( data ) : if data and data [ 0 ] in _FIRST_CHARS : for bom , encoding in _BOM_TABLE : if data . startswith ( bom ) : return encoding , bom return None , None | Read the byte order mark in the text if present and return the encoding represented by the BOM and the BOM . |
26,026 | def parse ( self , charset = None , headers = None ) : if headers : self . headers = headers else : if self . head : responses = self . head . rsplit ( b'\nHTTP/' , 1 ) _ , response = responses [ - 1 ] . split ( b'\n' , 1 ) response = response . decode ( 'utf-8' , 'ignore' ) else : response = u'' if six . PY2 : response = response . encode ( 'utf-8' ) self . headers = email . message_from_string ( response ) if charset is None : if isinstance ( self . body , six . text_type ) : self . charset = 'utf-8' else : self . detect_charset ( ) else : self . charset = charset . lower ( ) self . _unicode_body = None | Parse headers . |
26,027 | def detect_charset ( self ) : charset = None body_chunk = self . get_body_chunk ( ) if body_chunk : match_charset = RE_META_CHARSET . search ( body_chunk ) if match_charset : charset = match_charset . group ( 1 ) else : match_charset_html5 = RE_META_CHARSET_HTML5 . search ( body_chunk ) if match_charset_html5 : charset = match_charset_html5 . group ( 1 ) bom_enc , bom = read_bom ( body_chunk ) if bom_enc : charset = bom_enc self . bom = bom if not charset : if body_chunk . startswith ( b'<?xml' ) : match = RE_XML_DECLARATION . search ( body_chunk ) if match : enc_match = RE_DECLARATION_ENCODING . search ( match . group ( 0 ) ) if enc_match : charset = enc_match . group ( 1 ) if not charset : if 'Content-Type' in self . headers : pos = self . headers [ 'Content-Type' ] . find ( 'charset=' ) if pos > - 1 : charset = self . headers [ 'Content-Type' ] [ ( pos + 8 ) : ] if charset : charset = charset . lower ( ) if not isinstance ( charset , str ) : charset = charset . decode ( 'utf-8' ) try : codecs . lookup ( charset ) except LookupError : logger . debug ( 'Unknown charset found: %s.' ' Using utf-8 istead.' , charset ) self . charset = 'utf-8' else : self . charset = charset | Detect charset of the response . |
26,028 | def copy ( self , new_grab = None ) : obj = self . __class__ ( ) obj . process_grab ( new_grab if new_grab else self . grab ) copy_keys = ( 'status' , 'code' , 'head' , 'body' , 'total_time' , 'connect_time' , 'name_lookup_time' , 'url' , 'charset' , '_unicode_body' , '_grab_config' ) for key in copy_keys : setattr ( obj , key , getattr ( self , key ) ) obj . headers = copy ( self . headers ) obj . cookies = copy ( self . cookies ) return obj | Clone the Response object . |
26,029 | def save ( self , path ) : path_dir = os . path . split ( path ) [ 0 ] if not os . path . exists ( path_dir ) : try : os . makedirs ( path_dir ) except OSError : pass with open ( path , 'wb' ) as out : out . write ( self . _bytes_body if self . _bytes_body is not None else b'' ) | Save response body to file . |
26,030 | def save_hash ( self , location , basedir , ext = None ) : if isinstance ( location , six . text_type ) : location = location . encode ( 'utf-8' ) rel_path = hashed_path ( location , ext = ext ) path = os . path . join ( basedir , rel_path ) if not os . path . exists ( path ) : path_dir , _ = os . path . split ( path ) try : os . makedirs ( path_dir ) except OSError : pass with open ( path , 'wb' ) as out : out . write ( self . _bytes_body ) return rel_path | Save response body into file with special path builded from hash . That allows to lower number of files per directory . |
26,031 | def json ( self ) : if six . PY3 : return json . loads ( self . body . decode ( self . charset ) ) else : return json . loads ( self . body ) | Return response body deserialized into JSON object . |
26,032 | def browse ( self ) : _ , path = tempfile . mkstemp ( ) self . save ( path ) webbrowser . open ( 'file://' + path ) | Save response in temporary file and open it in GUI browser . |
26,033 | def text_search ( self , anchor , byte = False ) : if isinstance ( anchor , six . text_type ) : if byte : raise GrabMisuseError ( 'The anchor should be bytes string in ' 'byte mode' ) else : return anchor in self . unicode_body ( ) if not isinstance ( anchor , six . text_type ) : if byte : return anchor in self . body else : raise GrabMisuseError ( 'The anchor should be byte string in ' 'non-byte mode' ) | Search the substring in response body . |
26,034 | def text_assert ( self , anchor , byte = False ) : if not self . text_search ( anchor , byte = byte ) : raise DataNotFound ( u'Substring not found: %s' % anchor ) | If anchor is not found then raise DataNotFound exception . |
26,035 | def text_assert_any ( self , anchors , byte = False ) : found = False for anchor in anchors : if self . text_search ( anchor , byte = byte ) : found = True break if not found : raise DataNotFound ( u'Substrings not found: %s' % ', ' . join ( anchors ) ) | If no anchors were found then raise DataNotFound exception . |
26,036 | def rex_text ( self , regexp , flags = 0 , byte = False , default = NULL ) : try : match = self . rex_search ( regexp , flags = flags , byte = byte ) except DataNotFound : if default is NULL : raise DataNotFound ( 'Regexp not found' ) else : return default else : return normalize_space ( decode_entities ( match . group ( 1 ) ) ) | Search regular expression in response body and return content of first matching group . |
26,037 | def rex_search ( self , regexp , flags = 0 , byte = False , default = NULL ) : regexp = normalize_regexp ( regexp , flags ) match = None if byte : if not isinstance ( regexp . pattern , six . text_type ) or not six . PY3 : match = regexp . search ( self . body ) else : if isinstance ( regexp . pattern , six . text_type ) or not six . PY3 : ubody = self . unicode_body ( ) match = regexp . search ( ubody ) if match : return match else : if default is NULL : raise DataNotFound ( 'Could not find regexp: %s' % regexp ) else : return default | Search the regular expression in response body . |
26,038 | def rex_assert ( self , rex , byte = False ) : self . rex_search ( rex , byte = byte ) | If rex expression is not found then raise DataNotFound exception . |
26,039 | def pyquery ( self ) : if not self . _pyquery : from pyquery import PyQuery self . _pyquery = PyQuery ( self . tree ) return self . _pyquery | Returns pyquery handler . |
26,040 | def unicode_body ( self , ignore_errors = True , fix_special_entities = True ) : if not self . _unicode_body : self . _unicode_body = self . convert_body_to_unicode ( body = self . body , bom = self . bom , charset = self . charset , ignore_errors = ignore_errors , fix_special_entities = fix_special_entities , ) return self . _unicode_body | Return response body as unicode string . |
26,041 | def choose_form ( self , number = None , xpath = None , name = None , ** kwargs ) : id_ = kwargs . pop ( 'id' , None ) if id_ is not None : try : self . _lxml_form = self . select ( '//form[@id="%s"]' % id_ ) . node ( ) except IndexError : raise DataNotFound ( "There is no form with id: %s" % id_ ) elif name is not None : try : self . _lxml_form = self . select ( '//form[@name="%s"]' % name ) . node ( ) except IndexError : raise DataNotFound ( 'There is no form with name: %s' % name ) elif number is not None : try : self . _lxml_form = self . tree . forms [ number ] except IndexError : raise DataNotFound ( 'There is no form with number: %s' % number ) elif xpath is not None : try : self . _lxml_form = self . select ( xpath ) . node ( ) except IndexError : raise DataNotFound ( 'Could not find form with xpath: %s' % xpath ) else : raise GrabMisuseError ( 'choose_form methods requires one of ' '[number, id, name, xpath] arguments' ) | Set the default form . |
26,042 | def form ( self ) : if self . _lxml_form is None : forms = [ ( idx , len ( list ( x . fields ) ) ) for idx , x in enumerate ( self . tree . forms ) ] if forms : idx = sorted ( forms , key = lambda x : x [ 1 ] , reverse = True ) [ 0 ] [ 0 ] self . choose_form ( idx ) else : raise DataNotFound ( 'Response does not contains any form' ) return self . _lxml_form | This attribute points to default form . |
26,043 | def set_input ( self , name , value ) : if self . _lxml_form is None : self . choose_form_by_element ( './/*[@name="%s"]' % name ) elem = self . form . inputs [ name ] processed = False if getattr ( elem , 'type' , None ) == 'checkbox' : if isinstance ( value , bool ) : elem . checked = value processed = True if not processed : if getattr ( elem , 'type' , '' ) . lower ( ) == 'file' : self . _file_fields [ name ] = value elem . value = '' else : elem . value = value | Set the value of form element by its name attribute . |
26,044 | def set_input_by_id ( self , _id , value ) : xpath = './/*[@id="%s"]' % _id if self . _lxml_form is None : self . choose_form_by_element ( xpath ) sel = XpathSelector ( self . form ) elem = sel . select ( xpath ) . node ( ) return self . set_input ( elem . get ( 'name' ) , value ) | Set the value of form element by its id attribute . |
26,045 | def set_input_by_number ( self , number , value ) : sel = XpathSelector ( self . form ) elem = sel . select ( './/input[@type="text"]' ) [ number ] . node ( ) return self . set_input ( elem . get ( 'name' ) , value ) | Set the value of form element by its number in the form |
26,046 | def set_input_by_xpath ( self , xpath , value ) : elem = self . select ( xpath ) . node ( ) if self . _lxml_form is None : parent = elem while True : parent = parent . getparent ( ) if parent . tag == 'form' : self . _lxml_form = parent break return self . set_input ( elem . get ( 'name' ) , value ) | Set the value of form element by xpath |
26,047 | def get_form_request ( self , submit_name = None , url = None , extra_post = None , remove_from_post = None ) : post = self . form_fields ( ) submit_controls = { } for elem in self . form . inputs : if ( elem . tag == 'input' and elem . type == 'submit' and elem . get ( 'name' ) is not None ) : submit_controls [ elem . name ] = elem if submit_controls : if submit_name is None or submit_name not in submit_controls : controls = sorted ( submit_controls . values ( ) , key = lambda x : x . name ) submit_name = controls [ 0 ] . name for name in submit_controls : if name != submit_name : if name in post : del post [ name ] if url : action_url = urljoin ( self . url , url ) else : action_url = urljoin ( self . url , self . form . action ) if self . form . method == 'POST' : if 'multipart' in self . form . get ( 'enctype' , '' ) : for key , obj in self . _file_fields . items ( ) : post [ key ] = obj post_items = list ( post . items ( ) ) del post if extra_post : if isinstance ( extra_post , dict ) : extra_post_items = extra_post . items ( ) else : extra_post_items = extra_post keys_to_drop = set ( [ x for x , y in extra_post_items ] ) for key in keys_to_drop : post_items = [ ( x , y ) for x , y in post_items if x != key ] for key , value in extra_post_items : post_items . append ( ( key , value ) ) if remove_from_post : post_items = [ ( x , y ) for x , y in post_items if x not in remove_from_post ] result = { 'multipart_post' : None , 'post' : None , 'url' : None , } if self . form . method == 'POST' : if 'multipart' in self . form . get ( 'enctype' , '' ) : result [ 'multipart_post' ] = post_items else : result [ 'post' ] = post_items result [ 'url' ] = action_url else : url = action_url . split ( '?' ) [ 0 ] + '?' + smart_urlencode ( post_items ) result [ 'url' ] = url return result | Submit default form . |
26,048 | def form_fields ( self ) : fields = dict ( self . form . fields ) fields_to_remove = set ( ) for key , val in list ( fields . items ( ) ) : if isinstance ( val , CheckboxValues ) : if not len ( val ) : del fields [ key ] elif len ( val ) == 1 : fields [ key ] = val . pop ( ) else : fields [ key ] = list ( val ) if isinstance ( val , MultipleSelectOptions ) : if not len ( val ) : del fields [ key ] elif len ( val ) == 1 : fields [ key ] = val . pop ( ) else : fields [ key ] = list ( val ) for elem in self . form . inputs : if not elem . get ( 'name' ) : continue if elem . get ( 'disabled' ) : if elem . name in fields : fields_to_remove . add ( elem . name ) elif getattr ( elem , 'type' , None ) == 'checkbox' : if not elem . checked : if elem . name is not None : if elem . name in fields and fields [ elem . name ] is None : fields_to_remove . add ( elem . name ) else : if elem . name in fields_to_remove : fields_to_remove . remove ( elem . name ) if elem . tag == 'select' : if elem . name in fields and fields [ elem . name ] is None : if elem . value_options : fields [ elem . name ] = elem . value_options [ 0 ] elif getattr ( elem , 'type' , None ) == 'radio' : if fields [ elem . name ] is None : fields [ elem . name ] = elem . get ( 'value' ) for fname in fields_to_remove : del fields [ fname ] return fields | Return fields of default form . |
26,049 | def add_params ( param_list_left , param_list_right ) : res = [ ] for x , y in zip ( param_list_left , param_list_right ) : res . append ( x + y ) return res | Add two lists of parameters one by one |
26,050 | def subtract_params ( param_list_left , param_list_right ) : res = [ ] for x , y in zip ( param_list_left , param_list_right ) : res . append ( x - y ) return res | Subtract two lists of parameters |
26,051 | def get_neutral ( array_list ) : res = [ ] for x in array_list : res . append ( np . zeros_like ( x ) ) return res | Get list of zero - valued numpy arrays for specified list of numpy arrays |
26,052 | def divide_by ( array_list , num_workers ) : for i , x in enumerate ( array_list ) : array_list [ i ] /= num_workers return array_list | Divide a list of parameters by an integer num_workers . |
26,053 | def start_flask_service ( self ) : app = Flask ( __name__ ) self . app = app @ app . route ( '/' ) def home ( ) : return 'Elephas' @ app . route ( '/parameters' , methods = [ 'GET' ] ) def handle_get_parameters ( ) : if self . mode == 'asynchronous' : self . lock . acquire_read ( ) self . pickled_weights = pickle . dumps ( self . weights , - 1 ) pickled_weights = self . pickled_weights if self . mode == 'asynchronous' : self . lock . release ( ) return pickled_weights @ app . route ( '/update' , methods = [ 'POST' ] ) def handle_update_parameters ( ) : delta = pickle . loads ( request . data ) if self . mode == 'asynchronous' : self . lock . acquire_write ( ) if not self . master_network . built : self . master_network . build ( ) weights_before = self . weights self . weights = subtract_params ( weights_before , delta ) if self . mode == 'asynchronous' : self . lock . release ( ) return 'Update done' master_url = determine_master ( self . port ) host = master_url . split ( ':' ) [ 0 ] self . app . run ( host = host , debug = self . debug , port = self . port , threaded = self . threaded , use_reloader = self . use_reloader ) | Define Flask parameter server service . |
26,054 | def to_matrix ( np_array ) : if len ( np_array . shape ) == 2 : return Matrices . dense ( np_array . shape [ 0 ] , np_array . shape [ 1 ] , np_array . ravel ( ) ) else : raise Exception ( "An MLLib Matrix can only be created from a two-dimensional " + "numpy array, got {}" . format ( len ( np_array . shape ) ) ) | Convert numpy array to MLlib Matrix |
26,055 | def to_vector ( np_array ) : if len ( np_array . shape ) == 1 : return Vectors . dense ( np_array ) else : raise Exception ( "An MLLib Vector can only be created from a one-dimensional " + "numpy array, got {}" . format ( len ( np_array . shape ) ) ) | Convert numpy array to MLlib Vector |
26,056 | def retrieve_keras_weights ( java_model ) : weights = [ ] layers = java_model . getLayers ( ) for layer in layers : params = layer . paramTable ( ) keys = params . keySet ( ) key_list = java_classes . ArrayList ( keys ) for key in key_list : weight = params . get ( key ) np_weight = np . squeeze ( to_numpy ( weight ) ) weights . append ( np_weight ) return weights | For a previously imported Keras model after training it with DL4J Spark we want to set the resulting weights back to the original Keras model . |
26,057 | def _fit ( self , df ) : simple_rdd = df_to_simple_rdd ( df , categorical = self . get_categorical_labels ( ) , nb_classes = self . get_nb_classes ( ) , features_col = self . getFeaturesCol ( ) , label_col = self . getLabelCol ( ) ) simple_rdd = simple_rdd . repartition ( self . get_num_workers ( ) ) keras_model = model_from_yaml ( self . get_keras_model_config ( ) ) metrics = self . get_metrics ( ) loss = self . get_loss ( ) optimizer = get_optimizer ( self . get_optimizer_config ( ) ) keras_model . compile ( loss = loss , optimizer = optimizer , metrics = metrics ) spark_model = SparkModel ( model = keras_model , mode = self . get_mode ( ) , frequency = self . get_frequency ( ) , num_workers = self . get_num_workers ( ) ) spark_model . fit ( simple_rdd , epochs = self . get_epochs ( ) , batch_size = self . get_batch_size ( ) , verbose = self . get_verbosity ( ) , validation_split = self . get_validation_split ( ) ) model_weights = spark_model . master_network . get_weights ( ) weights = simple_rdd . ctx . broadcast ( model_weights ) return ElephasTransformer ( labelCol = self . getLabelCol ( ) , outputCol = 'prediction' , keras_model_config = spark_model . master_network . to_yaml ( ) , weights = weights ) | Private fit method of the Estimator which trains the model . |
26,058 | def _transform ( self , df ) : output_col = self . getOutputCol ( ) label_col = self . getLabelCol ( ) new_schema = copy . deepcopy ( df . schema ) new_schema . add ( StructField ( output_col , StringType ( ) , True ) ) rdd = df . rdd . coalesce ( 1 ) features = np . asarray ( rdd . map ( lambda x : from_vector ( x . features ) ) . collect ( ) ) model = model_from_yaml ( self . get_keras_model_config ( ) ) model . set_weights ( self . weights . value ) predictions = rdd . ctx . parallelize ( model . predict_classes ( features ) ) . coalesce ( 1 ) predictions = predictions . map ( lambda x : tuple ( str ( x ) ) ) results_rdd = rdd . zip ( predictions ) . map ( lambda x : x [ 0 ] + x [ 1 ] ) results_df = df . sql_ctx . createDataFrame ( results_rdd , new_schema ) results_df = results_df . withColumn ( output_col , results_df [ output_col ] . cast ( DoubleType ( ) ) ) results_df = results_df . withColumn ( label_col , results_df [ label_col ] . cast ( DoubleType ( ) ) ) return results_df | Private transform method of a Transformer . This serves as batch - prediction method for our purposes . |
26,059 | def _from_numpy ( np_array ) : required_dtype = get_np_dtype ( get_context_dtype ( ) ) if np_array . dtype != required_dtype : raise Exception ( "{} is required, got {}" . format ( repr ( required_dtype ) , repr ( np_array . dtype ) ) ) if np_array . ndim == 1 : np_array = np . expand_dims ( np_array , 0 ) pointer_address , _ = np_array . __array_interface__ [ 'data' ] _refs . append ( np_array ) pointer = native_ops . pointerForAddress ( pointer_address ) size = np_array . size mapping = { np . float64 : DoublePointer , np . float32 : FloatPointer , } pointer = mapping [ required_dtype ] ( pointer ) buff = Nd4j . createBuffer ( pointer , size ) assert buff . address ( ) == pointer_address _refs . append ( buff ) elem_size = buff . getElementSize ( ) assert elem_size == np_array . dtype . itemsize strides = np_array . strides strides = [ dim / elem_size for dim in strides ] shape = np_array . shape nd4j_array = Nd4j . create ( buff , shape , strides , 0 ) assert buff . address ( ) == nd4j_array . data ( ) . address ( ) return nd4j_array | Convert numpy array to nd4j array |
26,060 | def _to_numpy ( nd4j_array ) : buff = nd4j_array . data ( ) address = buff . pointer ( ) . address ( ) dtype = get_context_dtype ( ) mapping = { 'double' : ctypes . c_double , 'float' : ctypes . c_float } Pointer = ctypes . POINTER ( mapping [ dtype ] ) pointer = ctypes . cast ( address , Pointer ) np_array = np . ctypeslib . as_array ( pointer , tuple ( nd4j_array . shape ( ) ) ) return np_array | Convert nd4j array to numpy array |
26,061 | def acquire_read ( self ) : self . monitor . acquire ( ) while self . rwlock < 0 or self . writers_waiting : self . readers_ok . wait ( ) self . rwlock += 1 self . monitor . release ( ) | Acquire a read lock . Several threads can hold this typeof lock . It is exclusive with write locks . |
26,062 | def acquire_write ( self ) : self . monitor . acquire ( ) while self . rwlock != 0 : self . writers_waiting += 1 self . writers_ok . wait ( ) self . writers_waiting -= 1 self . rwlock = - 1 self . monitor . release ( ) | Acquire a write lock . Only one thread can hold this lock and only when no read locks are also held . |
26,063 | def release ( self ) : self . monitor . acquire ( ) if self . rwlock < 0 : self . rwlock = 0 else : self . rwlock -= 1 wake_writers = self . writers_waiting and self . rwlock == 0 wake_readers = self . writers_waiting == 0 self . monitor . release ( ) if wake_writers : self . writers_ok . acquire ( ) self . writers_ok . notify ( ) self . writers_ok . release ( ) elif wake_readers : self . readers_ok . acquire ( ) self . readers_ok . notifyAll ( ) self . readers_ok . release ( ) | Release a lock whether read or write . |
26,064 | def to_data_frame ( sc , features , labels , categorical = False ) : lp_rdd = to_labeled_point ( sc , features , labels , categorical ) sql_context = SQLContext ( sc ) df = sql_context . createDataFrame ( lp_rdd ) return df | Convert numpy arrays of features and labels into Spark DataFrame |
26,065 | def from_data_frame ( df , categorical = False , nb_classes = None ) : lp_rdd = df . rdd . map ( lambda row : LabeledPoint ( row . label , row . features ) ) features , labels = from_labeled_point ( lp_rdd , categorical , nb_classes ) return features , labels | Convert DataFrame back to pair of numpy arrays |
26,066 | def df_to_simple_rdd ( df , categorical = False , nb_classes = None , features_col = 'features' , label_col = 'label' ) : sql_context = df . sql_ctx sql_context . registerDataFrameAsTable ( df , "temp_table" ) selected_df = sql_context . sql ( "SELECT {0} AS features, {1} as label from temp_table" . format ( features_col , label_col ) ) if isinstance ( selected_df . first ( ) . features , MLLibVector ) : lp_rdd = selected_df . rdd . map ( lambda row : LabeledPoint ( row . label , row . features ) ) else : lp_rdd = selected_df . rdd . map ( lambda row : LabeledPoint ( row . label , MLLibVectors . fromML ( row . features ) ) ) rdd = lp_to_simple_rdd ( lp_rdd , categorical , nb_classes ) return rdd | Convert DataFrame into RDD of pairs |
26,067 | def fit ( self , rdd , epochs = 10 , batch_size = 32 , verbose = 0 , validation_split = 0.1 ) : print ( '>>> Fit model' ) if self . num_workers : rdd = rdd . repartition ( self . num_workers ) if self . mode in [ 'asynchronous' , 'synchronous' , 'hogwild' ] : self . _fit ( rdd , epochs , batch_size , verbose , validation_split ) else : raise ValueError ( "Choose from one of the modes: asynchronous, synchronous or hogwild" ) | Train an elephas model on an RDD . The Keras model configuration as specified in the elephas model is sent to Spark workers abd each worker will be trained on their data partition . |
26,068 | def _fit ( self , rdd , epochs , batch_size , verbose , validation_split ) : self . _master_network . compile ( optimizer = self . master_optimizer , loss = self . master_loss , metrics = self . master_metrics ) if self . mode in [ 'asynchronous' , 'hogwild' ] : self . start_server ( ) train_config = self . get_train_config ( epochs , batch_size , verbose , validation_split ) mode = self . parameter_server_mode freq = self . frequency optimizer = self . master_optimizer loss = self . master_loss metrics = self . master_metrics custom = self . custom_objects yaml = self . _master_network . to_yaml ( ) init = self . _master_network . get_weights ( ) parameters = rdd . context . broadcast ( init ) if self . mode in [ 'asynchronous' , 'hogwild' ] : print ( '>>> Initialize workers' ) worker = AsynchronousSparkWorker ( yaml , parameters , mode , train_config , freq , optimizer , loss , metrics , custom ) print ( '>>> Distribute load' ) rdd . mapPartitions ( worker . train ) . collect ( ) print ( '>>> Async training complete.' ) new_parameters = self . client . get_parameters ( ) elif self . mode == 'synchronous' : worker = SparkWorker ( yaml , parameters , train_config , optimizer , loss , metrics , custom ) gradients = rdd . mapPartitions ( worker . train ) . collect ( ) new_parameters = self . _master_network . get_weights ( ) for grad in gradients : new_parameters = subtract_params ( new_parameters , grad ) print ( '>>> Synchronous training complete.' ) else : raise ValueError ( "Unsupported mode {}" . format ( self . mode ) ) self . _master_network . set_weights ( new_parameters ) if self . mode in [ 'asynchronous' , 'hogwild' ] : self . stop_server ( ) | Protected train method to make wrapping of modes easier |
26,069 | def fit ( self , labeled_points , epochs = 10 , batch_size = 32 , verbose = 0 , validation_split = 0.1 , categorical = False , nb_classes = None ) : rdd = lp_to_simple_rdd ( labeled_points , categorical , nb_classes ) rdd = rdd . repartition ( self . num_workers ) self . _fit ( rdd = rdd , epochs = epochs , batch_size = batch_size , verbose = verbose , validation_split = validation_split ) | Train an elephas model on an RDD of LabeledPoints |
26,070 | def predict ( self , mllib_data ) : if isinstance ( mllib_data , pyspark . mllib . linalg . Matrix ) : return to_matrix ( self . _master_network . predict ( from_matrix ( mllib_data ) ) ) elif isinstance ( mllib_data , pyspark . mllib . linalg . Vector ) : return to_vector ( self . _master_network . predict ( from_vector ( mllib_data ) ) ) else : raise ValueError ( 'Provide either an MLLib matrix or vector, got {}' . format ( mllib_data . __name__ ) ) | Predict probabilities for an RDD of features |
26,071 | def train ( self , data_iterator ) : optimizer = get_optimizer ( self . master_optimizer ) self . model = model_from_yaml ( self . yaml , self . custom_objects ) self . model . compile ( optimizer = optimizer , loss = self . master_loss , metrics = self . master_metrics ) self . model . set_weights ( self . parameters . value ) feature_iterator , label_iterator = tee ( data_iterator , 2 ) x_train = np . asarray ( [ x for x , y in feature_iterator ] ) y_train = np . asarray ( [ y for x , y in label_iterator ] ) self . model . compile ( optimizer = self . master_optimizer , loss = self . master_loss , metrics = self . master_metrics ) weights_before_training = self . model . get_weights ( ) if x_train . shape [ 0 ] > self . train_config . get ( 'batch_size' ) : self . model . fit ( x_train , y_train , ** self . train_config ) weights_after_training = self . model . get_weights ( ) deltas = subtract_params ( weights_before_training , weights_after_training ) yield deltas | Train a keras model on a worker |
26,072 | def train ( self , data_iterator ) : feature_iterator , label_iterator = tee ( data_iterator , 2 ) x_train = np . asarray ( [ x for x , y in feature_iterator ] ) y_train = np . asarray ( [ y for x , y in label_iterator ] ) if x_train . size == 0 : return optimizer = get_optimizer ( self . master_optimizer ) self . model = model_from_yaml ( self . yaml , self . custom_objects ) self . model . compile ( optimizer = optimizer , loss = self . master_loss , metrics = self . master_metrics ) self . model . set_weights ( self . parameters . value ) epochs = self . train_config [ 'epochs' ] batch_size = self . train_config . get ( 'batch_size' ) nb_train_sample = x_train . shape [ 0 ] nb_batch = int ( np . ceil ( nb_train_sample / float ( batch_size ) ) ) index_array = np . arange ( nb_train_sample ) batches = [ ( i * batch_size , min ( nb_train_sample , ( i + 1 ) * batch_size ) ) for i in range ( 0 , nb_batch ) ] if self . frequency == 'epoch' : for epoch in range ( epochs ) : weights_before_training = self . client . get_parameters ( ) self . model . set_weights ( weights_before_training ) self . train_config [ 'epochs' ] = 1 if x_train . shape [ 0 ] > batch_size : self . model . fit ( x_train , y_train , ** self . train_config ) self . train_config [ 'epochs' ] = epochs weights_after_training = self . model . get_weights ( ) deltas = subtract_params ( weights_before_training , weights_after_training ) self . client . update_parameters ( deltas ) elif self . frequency == 'batch' : for epoch in range ( epochs ) : if x_train . shape [ 0 ] > batch_size : for ( batch_start , batch_end ) in batches : weights_before_training = self . client . get_parameters ( ) self . model . set_weights ( weights_before_training ) batch_ids = index_array [ batch_start : batch_end ] x = slice_arrays ( x_train , batch_ids ) y = slice_arrays ( y_train , batch_ids ) self . model . train_on_batch ( x , y ) weights_after_training = self . model . get_weights ( ) deltas = subtract_params ( weights_before_training , weights_after_training ) self . client . update_parameters ( deltas ) else : raise ValueError ( 'frequency parameter can be `epoch` or `batch, got {}' . format ( self . frequency ) ) yield [ ] | Train a keras model on a worker and send asynchronous updates to parameter server |
26,073 | def determine_master ( port = 4000 ) : if os . environ . get ( 'SPARK_LOCAL_IP' ) : return os . environ [ 'SPARK_LOCAL_IP' ] + ":" + str ( port ) else : return gethostbyname ( gethostname ( ) ) + ":" + str ( port ) | Determine address of master so that workers can connect to it . If the environment variable SPARK_LOCAL_IP is set that address will be used . |
26,074 | def _receive_all ( socket , num_bytes ) : buffer = '' buffer_size = 0 bytes_left = num_bytes while buffer_size < num_bytes : data = socket . recv ( bytes_left ) delta = len ( data ) buffer_size += delta bytes_left -= delta buffer += data return buffer | Reads num_bytes bytes from the specified socket . |
26,075 | def receive ( socket , num_bytes = 20 ) : length = int ( _receive_all ( socket , num_bytes ) . decode ( ) ) serialized_data = _receive_all ( socket , length ) return pickle . loads ( serialized_data ) | Receive data frame from open socket . |
26,076 | def send ( socket , data , num_bytes = 20 ) : pickled_data = pickle . dumps ( data , - 1 ) length = str ( len ( pickled_data ) ) . zfill ( num_bytes ) socket . sendall ( length . encode ( ) ) socket . sendall ( pickled_data ) | Send data to specified socket . |
26,077 | def to_java_rdd ( jsc , features , labels , batch_size ) : data_sets = java_classes . ArrayList ( ) num_batches = int ( len ( features ) / batch_size ) for i in range ( num_batches ) : xi = ndarray ( features [ : batch_size ] . copy ( ) ) yi = ndarray ( labels [ : batch_size ] . copy ( ) ) data_set = java_classes . DataSet ( xi . array , yi . array ) data_sets . add ( data_set ) features = features [ batch_size : ] labels = labels [ batch_size : ] return jsc . parallelize ( data_sets ) | Convert numpy features and labels into a JavaRDD of DL4J DataSet type . |
26,078 | def to_simple_rdd ( sc , features , labels ) : pairs = [ ( x , y ) for x , y in zip ( features , labels ) ] return sc . parallelize ( pairs ) | Convert numpy arrays of features and labels into an RDD of pairs . |
26,079 | def to_labeled_point ( sc , features , labels , categorical = False ) : labeled_points = [ ] for x , y in zip ( features , labels ) : if categorical : lp = LabeledPoint ( np . argmax ( y ) , to_vector ( x ) ) else : lp = LabeledPoint ( y , to_vector ( x ) ) labeled_points . append ( lp ) return sc . parallelize ( labeled_points ) | Convert numpy arrays of features and labels into a LabeledPoint RDD for MLlib and ML integration . |
26,080 | def from_labeled_point ( rdd , categorical = False , nb_classes = None ) : features = np . asarray ( rdd . map ( lambda lp : from_vector ( lp . features ) ) . collect ( ) ) labels = np . asarray ( rdd . map ( lambda lp : lp . label ) . collect ( ) , dtype = 'int32' ) if categorical : if not nb_classes : nb_classes = np . max ( labels ) + 1 temp = np . zeros ( ( len ( labels ) , nb_classes ) ) for i , label in enumerate ( labels ) : temp [ i , label ] = 1. labels = temp return features , labels | Convert a LabeledPoint RDD back to a pair of numpy arrays |
26,081 | def encode_label ( label , nb_classes ) : encoded = np . zeros ( nb_classes ) encoded [ int ( label ) ] = 1. return encoded | One - hot encoding of a single label |
26,082 | def lp_to_simple_rdd ( lp_rdd , categorical = False , nb_classes = None ) : if categorical : if not nb_classes : labels = np . asarray ( lp_rdd . map ( lambda lp : lp . label ) . collect ( ) , dtype = 'int32' ) nb_classes = np . max ( labels ) + 1 rdd = lp_rdd . map ( lambda lp : ( from_vector ( lp . features ) , encode_label ( lp . label , nb_classes ) ) ) else : rdd = lp_rdd . map ( lambda lp : ( from_vector ( lp . features ) , lp . label ) ) return rdd | Convert a LabeledPoint RDD into an RDD of feature - label pairs |
26,083 | def follow ( the_file ) : with open ( the_file ) as f : f . seek ( 0 , 2 ) while True : line = f . readline ( ) if not line : time . sleep ( 0.1 ) continue yield line | Follow a given file and yield new lines when they are available like tail - f . |
26,084 | def map_field ( field , func , dict_sequence ) : for item in dict_sequence : try : item [ field ] = func ( item . get ( field , None ) ) yield item except ValueError : pass | Apply given function to value of given key in every dictionary in sequence and set the result as new value for that key . |
26,085 | def add_field ( field , func , dict_sequence ) : for item in dict_sequence : if field not in item : item [ field ] = func ( item ) yield item | Apply given function to the record and store result in given field of current record . Do nothing if record already contains given field . |
26,086 | def getCanonicalRep ( record_cluster ) : canonical_rep = { } keys = record_cluster [ 0 ] . keys ( ) for key in keys : key_values = [ ] for record in record_cluster : if record [ key ] : key_values . append ( record [ key ] ) if key_values : canonical_rep [ key ] = getCentroid ( key_values , comparator ) else : canonical_rep [ key ] = '' return canonical_rep | Given a list of records within a duplicate cluster constructs a canonical representation of the cluster by finding canonical values for each field |
26,087 | def nearIntegersPredicate ( field ) : ints = integers ( field ) near_ints = set ( ) for char in ints : num = int ( char ) near_ints . add ( str ( num - 1 ) ) near_ints . add ( str ( num ) ) near_ints . add ( str ( num + 1 ) ) return near_ints | return any integers N N + 1 and N - 1 |
26,088 | def randomPairsMatch ( n_records_A , n_records_B , sample_size ) : n = int ( n_records_A * n_records_B ) if sample_size >= n : random_pairs = numpy . arange ( n ) else : random_pairs = numpy . array ( random . sample ( range ( n ) , sample_size ) , dtype = int ) i , j = numpy . unravel_index ( random_pairs , ( n_records_A , n_records_B ) ) return zip ( i , j ) | Return random combinations of indices for record list A and B |
26,089 | def thresholdBlocks ( self , blocks , recall_weight = 1.5 ) : candidate_records = itertools . chain . from_iterable ( self . _blockedPairs ( blocks ) ) probability = core . scoreDuplicates ( candidate_records , self . data_model , self . classifier , self . num_cores ) [ 'score' ] probability = probability . copy ( ) probability . sort ( ) probability = probability [ : : - 1 ] expected_dupes = numpy . cumsum ( probability ) recall = expected_dupes / expected_dupes [ - 1 ] precision = expected_dupes / numpy . arange ( 1 , len ( expected_dupes ) + 1 ) score = recall * precision / ( recall + recall_weight ** 2 * precision ) i = numpy . argmax ( score ) logger . info ( 'Maximum expected recall and precision' ) logger . info ( 'recall: %2.3f' , recall [ i ] ) logger . info ( 'precision: %2.3f' , precision [ i ] ) logger . info ( 'With threshold: %2.3f' , probability [ i ] ) return probability [ i ] | Returns the threshold that maximizes the expected F score a weighted average of precision and recall for a sample of blocked data . |
26,090 | def match ( self , data , threshold = 0.5 , generator = False ) : blocked_pairs = self . _blockData ( data ) clusters = self . matchBlocks ( blocked_pairs , threshold ) if generator : return clusters else : return list ( clusters ) | Identifies records that all refer to the same entity returns tuples |
26,091 | def readTraining ( self , training_file ) : logger . info ( 'reading training from file' ) training_pairs = json . load ( training_file , cls = serializer . dedupe_decoder ) self . markPairs ( training_pairs ) | Read training from previously built training data file object |
26,092 | def writeTraining ( self , file_obj ) : json . dump ( self . training_pairs , file_obj , default = serializer . _to_json , tuple_as_array = False , ensure_ascii = True ) | Write to a json file that contains labeled examples |
26,093 | def sample ( self , data_1 , data_2 , sample_size = 15000 , blocked_proportion = .5 , original_length_1 = None , original_length_2 = None ) : self . _checkData ( data_1 , data_2 ) self . active_learner = self . ActiveLearner ( self . data_model ) self . active_learner . sample_product ( data_1 , data_2 , blocked_proportion , sample_size , original_length_1 , original_length_2 ) | Draws a random sample of combinations of records from the first and second datasets and initializes active learning with this sample |
26,094 | def condensedDistance ( dupes ) : candidate_set = numpy . unique ( dupes [ 'pairs' ] ) i_to_id = dict ( enumerate ( candidate_set ) ) ids = candidate_set . searchsorted ( dupes [ 'pairs' ] ) row = ids [ : , 0 ] col = ids [ : , 1 ] N = len ( candidate_set ) matrix_length = N * ( N - 1 ) / 2 row_step = ( N - row ) * ( N - row - 1 ) / 2 index = matrix_length - row_step + col - row - 1 condensed_distances = numpy . ones ( int ( matrix_length ) , 'f4' ) condensed_distances [ index . astype ( int ) ] = 1 - dupes [ 'score' ] return i_to_id , condensed_distances , N | Convert the pairwise list of distances in dupes to condensed distance matrix required by the hierarchical clustering algorithms . Also return a dictionary that maps the distance matrix to the record_ids . |
26,095 | def cluster ( dupes , threshold = .5 , max_components = 30000 ) : distance_threshold = 1 - threshold dupe_sub_graphs = connected_components ( dupes , max_components ) for sub_graph in dupe_sub_graphs : if len ( sub_graph ) > 1 : i_to_id , condensed_distances , N = condensedDistance ( sub_graph ) linkage = fastcluster . linkage ( condensed_distances , method = 'centroid' , preserve_input = True ) partition = hcluster . fcluster ( linkage , distance_threshold , criterion = 'distance' ) clusters = defaultdict ( list ) for i , cluster_id in enumerate ( partition ) : clusters [ cluster_id ] . append ( i ) for cluster in viewvalues ( clusters ) : if len ( cluster ) > 1 : scores = confidences ( cluster , condensed_distances , N ) yield tuple ( i_to_id [ i ] for i in cluster ) , scores else : ( ids , score ) , = sub_graph if score > threshold : yield tuple ( ids ) , ( score , ) * 2 | Takes in a list of duplicate pairs and clusters them in to a list records that all refer to the same entity based on a given threshold |
26,096 | def confidences ( cluster , condensed_distances , d ) : scores = dict . fromkeys ( cluster , 0.0 ) squared_distances = condensed_distances ** 2 for i , j in itertools . combinations ( cluster , 2 ) : index = d * ( d - 1 ) / 2 - ( d - i ) * ( d - i - 1 ) / 2 + j - i - 1 squared_dist = squared_distances [ int ( index ) ] scores [ i ] += squared_dist scores [ j ] += squared_dist scores = numpy . array ( [ score for _ , score in sorted ( scores . items ( ) ) ] ) scores /= len ( cluster ) - 1 scores = numpy . sqrt ( scores ) scores = 1 - scores return scores | We calculate a per record score that is similar to a standard deviation . The main reason is that these record scores can be used to calculate the standard deviation of an entire cluster which is a reasonable metric for clusters . |
26,097 | def consoleLabel ( deduper ) : finished = False use_previous = False fields = unique ( field . field for field in deduper . data_model . primary_fields ) buffer_len = 1 examples_buffer = [ ] uncertain_pairs = [ ] while not finished : if use_previous : record_pair , _ = examples_buffer . pop ( 0 ) use_previous = False else : if not uncertain_pairs : uncertain_pairs = deduper . uncertainPairs ( ) try : record_pair = uncertain_pairs . pop ( ) except IndexError : break n_match = ( len ( deduper . training_pairs [ 'match' ] ) + sum ( label == 'match' for _ , label in examples_buffer ) ) n_distinct = ( len ( deduper . training_pairs [ 'distinct' ] ) + sum ( label == 'distinct' for _ , label in examples_buffer ) ) for pair in record_pair : for field in fields : line = "%s : %s" % ( field , pair [ field ] ) print ( line , file = sys . stderr ) print ( file = sys . stderr ) print ( "{0}/10 positive, {1}/10 negative" . format ( n_match , n_distinct ) , file = sys . stderr ) print ( 'Do these records refer to the same thing?' , file = sys . stderr ) valid_response = False user_input = '' while not valid_response : if examples_buffer : prompt = '(y)es / (n)o / (u)nsure / (f)inished / (p)revious' valid_responses = { 'y' , 'n' , 'u' , 'f' , 'p' } else : prompt = '(y)es / (n)o / (u)nsure / (f)inished' valid_responses = { 'y' , 'n' , 'u' , 'f' } print ( prompt , file = sys . stderr ) user_input = input ( ) if user_input in valid_responses : valid_response = True if user_input == 'y' : examples_buffer . insert ( 0 , ( record_pair , 'match' ) ) elif user_input == 'n' : examples_buffer . insert ( 0 , ( record_pair , 'distinct' ) ) elif user_input == 'u' : examples_buffer . insert ( 0 , ( record_pair , 'uncertain' ) ) elif user_input == 'f' : print ( 'Finished labeling' , file = sys . stderr ) finished = True elif user_input == 'p' : use_previous = True uncertain_pairs . append ( record_pair ) if len ( examples_buffer ) > buffer_len : record_pair , label = examples_buffer . pop ( ) if label in [ 'distinct' , 'match' ] : examples = { 'distinct' : [ ] , 'match' : [ ] } examples [ label ] . append ( record_pair ) deduper . markPairs ( examples ) for record_pair , label in examples_buffer : if label in [ 'distinct' , 'match' ] : examples = { 'distinct' : [ ] , 'match' : [ ] } examples [ label ] . append ( record_pair ) deduper . markPairs ( examples ) | Command line interface for presenting and labeling training pairs by the user |
26,098 | def trainingDataLink ( data_1 , data_2 , common_key , training_size = 50000 ) : identified_records = collections . defaultdict ( lambda : [ [ ] , [ ] ] ) matched_pairs = set ( ) distinct_pairs = set ( ) for record_id , record in data_1 . items ( ) : identified_records [ record [ common_key ] ] [ 0 ] . append ( record_id ) for record_id , record in data_2 . items ( ) : identified_records [ record [ common_key ] ] [ 1 ] . append ( record_id ) for keys_1 , keys_2 in identified_records . values ( ) : if keys_1 and keys_2 : matched_pairs . update ( itertools . product ( keys_1 , keys_2 ) ) keys_1 = list ( data_1 . keys ( ) ) keys_2 = list ( data_2 . keys ( ) ) random_pairs = [ ( keys_1 [ i ] , keys_2 [ j ] ) for i , j in randomPairsMatch ( len ( data_1 ) , len ( data_2 ) , training_size ) ] distinct_pairs = ( pair for pair in random_pairs if pair not in matched_pairs ) matched_records = [ ( data_1 [ key_1 ] , data_2 [ key_2 ] ) for key_1 , key_2 in matched_pairs ] distinct_records = [ ( data_1 [ key_1 ] , data_2 [ key_2 ] ) for key_1 , key_2 in distinct_pairs ] training_pairs = { 'match' : matched_records , 'distinct' : distinct_records } return training_pairs | Construct training data for consumption by the ActiveLearning markPairs method from already linked datasets . |
26,099 | def trainingDataDedupe ( data , common_key , training_size = 50000 ) : identified_records = collections . defaultdict ( list ) matched_pairs = set ( ) distinct_pairs = set ( ) unique_record_ids = set ( ) for record_id , record in data . items ( ) : unique_record_ids . add ( record_id ) identified_records [ record [ common_key ] ] . append ( record_id ) for record_ids in identified_records . values ( ) : if len ( record_ids ) > 1 : matched_pairs . update ( itertools . combinations ( sorted ( record_ids ) , 2 ) ) unique_record_ids = list ( unique_record_ids ) pair_indices = randomPairs ( len ( unique_record_ids ) , training_size ) distinct_pairs = set ( ) for i , j in pair_indices : distinct_pairs . add ( ( unique_record_ids [ i ] , unique_record_ids [ j ] ) ) distinct_pairs -= matched_pairs matched_records = [ ( data [ key_1 ] , data [ key_2 ] ) for key_1 , key_2 in matched_pairs ] distinct_records = [ ( data [ key_1 ] , data [ key_2 ] ) for key_1 , key_2 in distinct_pairs ] training_pairs = { 'match' : matched_records , 'distinct' : distinct_records } return training_pairs | Construct training data for consumption by the ActiveLearning markPairs method from an already deduplicated dataset . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.