idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
233,100
def delete_many(self, query):
    """Remove all documents matching the mongo-style query.

    :param query: mongo-style query dict; ``{}`` matches every document.
    :returns: a ``DeleteResult`` wrapping the per-document remove results.
    """
    items = self.find(query)
    # Remove each matched document by its _id, collecting TinyDB's results.
    result = [self.table.remove(where(u'_id') == item[u'_id'])
              for item in items]
    if query == {}:
        # need to reset TinyDB's index for docs order consistency
        # NOTE(review): touches TinyDB's private _last_id — confirm this
        # still exists in the pinned TinyDB version.
        self.table._last_id = 0
    return DeleteResult(raw_result=result)
Removes all items matching the mongo query
88
9
233,101
def paginate(self, skip, limit):
    """Paginate the list of records held in ``self.cursordat`` in place.

    :param skip: number of records to skip (falsy means 0).
    :param limit: page size; falsy disables pagination entirely.
    """
    if not self.count() or not limit:
        return
    skip = skip or 0
    # Number of pages needed to cover all records.
    pages = int(ceil(self.count() / float(limit)))
    # Map each page's start offset to its end offset.
    limits = {}
    last = 0
    for i in range(pages):
        current = limit * i
        limits[last] = current
        last = current
    # example with count == 62
    # {0: 20, 20: 40, 40: 60, 60: 62}
    # NOTE(review): the loop actually produces {0: 20, 20: 40, 40: 60} for
    # count == 62 — the final 60: 62 entry from the example is never added,
    # so skip == 60 falls back to self.count() via .get below. Confirm
    # this fallback is the intended behavior.
    if limit and limit < self.count():
        limit = limits.get(skip, self.count())
    self.cursordat = self.cursordat[skip:limit]
Paginate list of records
141
6
233,102
def should_stream(proxy_response):
    """Decide whether the proxied response should be streamed.

    HTML is never streamed (it may need transformation). Anything else is
    streamed when its Content-Length is unknown, zero, or larger than
    ``MIN_STREAMING_LENGTH``.
    """
    headers = proxy_response.headers
    if is_html_content_type(headers.get('Content-Type')):
        return False
    try:
        length = int(headers.get('Content-Length', 0))
    except ValueError:
        # A malformed Content-Length is treated as unknown.
        length = 0
    return not length or length > MIN_STREAMING_LENGTH
Function to verify if the proxy_response must be converted into a stream . This will be done by checking the proxy_response content - length and verify if its length is bigger than one stipulated by MIN_STREAMING_LENGTH .
101
49
233,103
def get_charset(content_type):
    """Return the charset named in a Content-Type header value.

    Falls back to ``DEFAULT_CHARSET`` when the header is missing or names
    no charset.
    """
    if not content_type:
        return DEFAULT_CHARSET
    matched = _get_charset_re.search(content_type)
    if not matched:
        return DEFAULT_CHARSET
    # Extract the charset and strip its double quotes
    return matched.group('charset').replace('"', '')
Function used to retrieve the charset from a content - type . If there is no charset in the content type then the charset defined on DEFAULT_CHARSET will be returned
81
38
233,104
def required_header(header):
    """Return True when ``header`` is an essential WSGI header to forward.

    Headers listed in ``IGNORE_HEADERS`` are always rejected; otherwise the
    ``HTTP_``-prefixed headers plus ``CONTENT_TYPE`` are kept.
    """
    if header in IGNORE_HEADERS:
        return False
    return header.startswith('HTTP_') or header == 'CONTENT_TYPE'
Function that verify if the header parameter is a essential header
45
11
233,105
def normalize_request_headers(request):
    """Build normalized headers from ``request.META``.

    Keeps only headers accepted by ``required_header``, strips the
    ``HTTP_`` prefix, title-cases the name and converts ``_`` to ``-``.
    """
    return {
        header.replace('HTTP_', '').title().replace('_', '-'): value
        for header, value in request.META.items()
        if required_header(header)
    }
Function used to transform headers, removing the HTTP_ prefix and replacing _ with -
82
16
233,106
def encode_items(items):
    """UTF-8 encode every (key, value) pair in a multi-valued item list.

    :param items: iterable of ``(key, [value, ...])`` pairs.
    :returns: flat list of ``(key_bytes, value_bytes)`` tuples.
    """
    return [
        (key.encode('utf-8'), value.encode('utf-8'))
        for key, values in items
        for value in values
    ]
Function that encode all elements in the list of items passed as a parameter
54
14
233,107
def cookie_from_string(cookie_string, strict_cookies=False):
    """Parse an HTTP Set-Cookie header value into a dict.

    The returned dict is used as keyword arguments for Django's
    ``response.set_cookie``; since ``set_cookie`` has no ``comment``
    parameter, that attribute is ignored.

    :param cookie_string: raw Set-Cookie header value.
    :param strict_cookies: when True, delegate parsing to ``SimpleCookie``
        and reject anything it cannot parse.
    :returns: dict with at least ``key`` and ``value``, or None when the
        cookie is invalid.
    """
    if strict_cookies:
        # Strict mode: let the stdlib cookie parser do the validation.
        cookies = SimpleCookie(COOKIE_PREFIX + cookie_string)
        if not cookies.keys():
            return None
        cookie_name, = cookies.keys()
        cookie_dict = {k: v for k, v in cookies[cookie_name].items()
                       if v and k != 'comment'}
        cookie_dict['key'] = cookie_name
        cookie_dict['value'] = cookies[cookie_name].value
        return cookie_dict
    else:
        # Lenient mode: hand-parse "name=value; attr=val; flag" parts.
        valid_attrs = ('path', 'domain', 'comment', 'expires',
                       'max_age', 'httponly', 'secure')
        cookie_dict = {}
        cookie_parts = cookie_string.split(';')
        try:
            # First part must be "name=value".
            key, value = cookie_parts[0].split('=', 1)
            cookie_dict['key'], cookie_dict['value'] = key, unquote(value)
        except ValueError:
            logger.warning('Invalid cookie: `%s`', cookie_string)
            return None
        if cookie_dict['value'].startswith('='):
            # "name==value" style — reject as malformed.
            logger.warning('Invalid cookie: `%s`', cookie_string)
            return None
        for part in cookie_parts[1:]:
            if '=' in part:
                attr, value = part.split('=', 1)
                value = value.strip()
            else:
                # Bare flags such as "HttpOnly" / "Secure".
                attr = part
                value = ''
            attr = attr.strip().lower()
            if not attr:
                continue
            if attr in valid_attrs:
                if attr in ('httponly', 'secure'):
                    cookie_dict[attr] = True
                elif attr in 'comment':
                    # ignoring comment attr as explained in the
                    # function docstring
                    continue
                else:
                    cookie_dict[attr] = unquote(value)
            else:
                logger.warning('Unknown cookie attribute %s', attr)
        return cookie_dict
Parser for HTTP header set - cookie The return from this function will be used as parameters for django s response . set_cookie method . Because set_cookie doesn t have parameter comment this cookie attribute will be ignored .
453
44
233,108
def unquote(value):
    """Remove wrapping double quotes from a string.

    Also unescapes embedded ``\\"`` sequences inside the quoted value.
    A string that is not fully quoted (or is a single character) is
    returned unchanged.
    """
    is_quoted = len(value) > 1 and value[0] == '"' and value[-1] == '"'
    if not is_quoted:
        return value
    return value[1:-1].replace(r'\"', '"')
Remove wrapping quotes from a string .
54
7
233,109
def asbool(value):
    """Convert well-known truthy/falsy strings to a boolean.

    Non-string values are passed through Python's built-in ``bool``.
    Raises ``ValueError`` for an unrecognized string.
    """
    if not isinstance(value, string_types):
        return bool(value)
    value = value.strip().lower()
    if value in ('true', 'yes', 'on', 'y', 't', '1', ):
        return True
    if value in ('false', 'no', 'off', 'n', 'f', '0'):
        return False
    raise ValueError("String is not true/false: %r" % value)
Function used to convert certain string values into an appropriated boolean value . If value is not a string the built - in python bool function will be used to convert the passed parameter
127
34
233,110
def should_transform(self):
    """Determine if we should run the Diazo transform on this response.

    Returns False as soon as any disqualifying condition is met and logs
    the reason; otherwise returns True.
    """
    if not HAS_DIAZO:
        # Diazo library not importable at all.
        self.log.info("HAS_DIAZO: false")
        return False
    if asbool(self.request.META.get(DIAZO_OFF_REQUEST_HEADER)):
        # Caller explicitly opted out via request header.
        self.log.info("DIAZO_OFF_REQUEST_HEADER in request.META: off")
        return False
    if asbool(self.response.get(DIAZO_OFF_RESPONSE_HEADER)):
        # Upstream explicitly opted out via response header.
        self.log.info("DIAZO_OFF_RESPONSE_HEADER in response.get: off")
        return False
    if self.request.is_ajax():
        self.log.info("Request is AJAX")
        return False
    if self.response.streaming:
        # Streaming bodies cannot be buffered for transformation.
        self.log.info("Response has streaming")
        return False
    content_type = self.response.get('Content-Type')
    if not is_html_content_type(content_type):
        self.log.info("Content-type: false")
        return False
    content_encoding = self.response.get('Content-Encoding')
    if content_encoding in ('zip', 'compress'):
        # Compressed payloads would need decompressing first.
        self.log.info("Content encode is %s", content_encoding)
        return False
    status_code = str(self.response.status_code)
    # Skip redirects (3xx), No Content and Unauthorized responses.
    if status_code.startswith('3') or status_code == '204' or status_code == '401':
        self.log.info("Status code: %s", status_code)
        return False
    if len(self.response.content) == 0:
        self.log.info("Response Content is EMPTY")
        return False
    self.log.info("Transform")
    return True
Determine if we should transform the response
408
9
233,111
def transform(self, rules, theme_template, is_html5, context_data=None):
    """Transform the HTTP response content using Diazo rules and a theme.

    :param rules: path to the Diazo rules file.
    :param theme_template: Django template rendered as the theme.
    :param is_html5: when True, force an HTML5 doctype on the result.
    :param context_data: optional context for the theme template.
    :returns: the (possibly transformed) Django response.
    """
    if not self.should_transform():
        self.log.info("Don't need to be transformed")
        return self.response
    # Render the theme and compile rules + theme into an XSLT transform.
    theme = loader.render_to_string(theme_template, context=context_data,
                                    request=self.request)
    output_xslt = compile_theme(
        rules=rules,
        theme=StringIO(theme),
    )
    transform = etree.XSLT(output_xslt)
    self.log.debug("Transform: %s", transform)
    charset = get_charset(self.response.get('Content-Type'))
    try:
        decoded_response = self.response.content.decode(charset)
    except UnicodeDecodeError:
        # Fall back to lossy decoding so the transform can still run.
        decoded_response = self.response.content.decode(charset, 'ignore')
        self.log.warning("Charset is {} and type of encode used in file is\
 different. Some unknown characteres might be\
 ignored.".format(charset))
    content_doc = etree.fromstring(decoded_response,
                                   parser=etree.HTMLParser())
    self.response.content = transform(content_doc)
    if is_html5:
        self.set_html5_doctype()
    self.reset_headers()
    self.log.debug("Response transformer: %s", self.response)
    return self.response
Method used to make a transformation on the content of the http response based on the rules and theme_templates passed as paremters
319
28
233,112
def set_html5_doctype(self):
    """Rewrite the response's doctype declaration as the HTML5 doctype."""
    html5_doctype = b'<!DOCTYPE html>\n'
    # subn returns (new_content, substitutions); only the first doctype
    # occurrence is replaced.
    self.response.content = doctype_re.subn(
        html5_doctype, self.response.content, 1)[0]
Method used to transform a doctype in to a properly html5 doctype
60
15
233,113
def _output ( self , s ) : if s . lower ( ) . startswith ( b'host: ' ) : self . _buffer . insert ( 1 , s ) else : self . _buffer . append ( s )
Host header should always be first
49
6
233,114
def get_django_response(proxy_response, strict_cookies=False):
    """Build a Django response from an upstream proxy response.

    Produces a ``StreamingHttpResponse`` when ``should_stream`` says the
    body is large (bigger than MIN_STREAMING_LENGTH) or non-HTML, and a
    plain ``HttpResponse`` otherwise. Headers and cookies are copied over,
    dropping cookies that fail to parse.

    :param proxy_response: upstream (urllib3-style) response object.
    :param strict_cookies: passed through to ``cookie_from_string``.
    """
    status = proxy_response.status
    headers = proxy_response.headers
    logger.debug('Proxy response headers: %s', headers)
    content_type = headers.get('Content-Type')
    logger.debug('Content-Type: %s', content_type)
    if should_stream(proxy_response):
        logger.info('Content-Length is bigger than %s', DEFAULT_AMT)
        response = StreamingHttpResponse(proxy_response.stream(DEFAULT_AMT),
                                         status=status,
                                         content_type=content_type)
    else:
        content = proxy_response.data or b''
        response = HttpResponse(content, status=status,
                                content_type=content_type)
    logger.info('Normalizing response headers')
    set_response_headers(response, headers)
    # NOTE(review): _headers is a private Django attribute removed in
    # Django 3.2+ — confirm the supported Django versions.
    logger.debug('Response headers: %s', getattr(response, '_headers'))
    cookies = proxy_response.headers.getlist('set-cookie')
    logger.info('Checking for invalid cookies')
    for cookie_string in cookies:
        cookie_dict = cookie_from_string(cookie_string,
                                         strict_cookies=strict_cookies)
        # if cookie is invalid cookie_dict will be None
        if cookie_dict:
            response.set_cookie(**cookie_dict)
    logger.debug('Response cookies: %s', response.cookies)
    return response
This method is used to create an appropriate response based on the Content - Length of the proxy_response . If the content is bigger than MIN_STREAMING_LENGTH which is found on utils . py than django . http . StreamingHttpResponse will be created else a django . http . HTTPResponse will be created instead
324
71
233,115
def get_request_headers(self):
    """Return the headers to send upstream, adding REMOTE_USER if enabled.

    REMOTE_USER is only added when the feature is switched on and the
    current request carries an active user.
    """
    headers = self.get_proxy_request_headers(self.request)
    should_add_user = (self.add_remote_user and
                       hasattr(self.request, 'user') and
                       self.request.user.is_active)
    if should_add_user:
        headers['REMOTE_USER'] = self.request.user.get_username()
        self.log.info("REMOTE_USER set")
    return headers
Return request headers that will be sent to upstream .
102
10
233,116
def get_encoded_query_params(self):
    """Return the url-encoded query string for the proxied request."""
    # Multi-valued GET params are flattened and UTF-8 encoded first.
    return urlencode(encode_items(self.request.GET.lists()))
Return encoded query params to be used in proxied request
40
11
233,117
def stream_download(url, target_path, verbose=False):
    """Download a large file without loading it into memory.

    :param url: source URL, fetched with ``requests`` in streaming mode.
    :param target_path: local filesystem path the content is written to.
    :param verbose: when True, print progress/timing information.
    """
    response = requests.get(url, stream=True)
    if verbose:
        print("Beginning streaming download of %s" % url)
        start = datetime.now()
        try:
            content_length = int(response.headers['Content-Length'])
            content_MB = content_length / 1048576.0
            print("Total file size: %.2f MB" % content_MB)
        except KeyError:
            pass  # allow Content-Length to be missing
    # Fix: the original opened the file without ever closing it, leaking
    # the handle (and possibly buffered data) — especially on errors.
    with open(target_path, "wb") as handle:
        for chunk in response.iter_content(chunk_size=512):
            if chunk:  # filter out keep-alive new chunks
                handle.write(chunk)
    if verbose:
        print("Download completed to %s in %s"
              % (target_path, datetime.now() - start))
Download a large file without loading it into memory .
195
10
233,118
def validate_object_id(object_id):
    """Validate that ``object_id`` matches the expected 990 object_id format.

    Returns the id unchanged on success; raises ``RuntimeError`` otherwise.
    """
    if not re.match(OBJECT_ID_RE, str(object_id)):
        print("'%s' appears not to be a valid 990 object_id" % object_id)
        raise RuntimeError(OBJECT_ID_MSG)
    return object_id
It's easy to make a mistake entering these; validate the format.
75
12
233,119
def _get_table_start(self):
    """Prefill the columns we need for all tables.

    When ``self.documentation`` is on, each column is a dict carrying
    metadata (ordering, description, db_type); otherwise columns map
    straight to their values. ``documentId`` is added only when present.
    """
    if self.documentation:
        standardized_table_start = {
            'object_id': {
                'value': self.object_id,
                'ordering': -1,
                'line_number': 'NA',
                'description': 'IRS-assigned object id',
                'db_type': 'String(18)'
            },
            'ein': {
                'value': self.ein,
                'ordering': -2,
                'line_number': 'NA',
                'description': 'IRS employer id number',
                'db_type': 'String(9)'
            }
        }
        if self.documentId:
            standardized_table_start['documentId'] = {
                'value': self.documentId,
                'description': 'Document ID',
                'ordering': 0
            }
    else:
        # Plain value mapping when documentation metadata is not wanted.
        standardized_table_start = {
            'object_id': self.object_id,
            'ein': self.ein
        }
        if self.documentId:
            standardized_table_start['documentId'] = self.documentId
    return standardized_table_start
prefill the columns we need for all tables
243
9
233,120
def debracket(string):
    """Eliminate bracketed variable names in doc line strings.

    Bracketed spans become ``;`` separators; leading separators and
    doubled-up ``; ;`` sequences are then cleaned away.
    """
    cleaned = re.sub(BRACKET_RE, ';', str(string))
    cleaned = cleaned.lstrip(';').lstrip(' ')
    return cleaned.replace('; ;', ';')
Eliminate the bracketed var names in doc line strings
67
12
233,121
def _set_schedules(self):
    """Attach the known and unknown schedules found in the return data."""
    self.schedules = ['ReturnHeader990x', ]
    self.otherforms = []
    for sked in self.raw_irs_dict['Return']['ReturnData'].keys():
        if sked.startswith("@"):
            # Skip XML attributes promoted into the dict.
            continue
        if sked in KNOWN_SCHEDULES:
            self.schedules.append(sked)
        else:
            self.otherforms.append(sked)
Attach the known and unknown schedules
105
6
233,122
def get_parsed_sked(self, skedname):
    """Return every parsed copy of the named schedule.

    Returns a list because multiple schedule K's are allowed; an unknown
    schedule name yields an empty list.
    """
    if not self.processed:
        raise Exception("Filing must be processed to return parsed sked")
    if skedname not in self.schedules:
        return []
    return [sked for sked in self.result
            if sked['schedule_name'] == skedname]
Returns an array because multiple sked K s are allowed
99
11
233,123
def headers(self):
    """Return this uploader's headers, merged over the client's headers.

    Client headers (when present) override the defaults.
    """
    merged = dict(self.DEFAULT_HEADERS)
    merged.update(getattr(self.client, 'headers', {}))
    return merged
Return headers of the uploader instance . This would include the headers of the client instance .
41
18
233,124
def headers_as_list(self):
    """Same as ``headers`` but rendered as a list of 'key: value' strings."""
    return ['{}: {}'.format(key, value)
            for key, value in iteritems(self.headers)]
Does the same as headers except it is returned as a list .
47
13
233,125
def get_offset(self):
    """Ask the tus server for the current upload offset.

    Issues a HEAD request and reads the ``upload-offset`` header; raises
    ``TusCommunicationError`` when the server does not provide one.
    """
    response = requests.head(self.url, headers=self.headers)
    offset = response.headers.get('upload-offset')
    if offset is not None:
        return int(offset)
    msg = 'Attempt to retrieve offset fails with status {}'.format(
        response.status_code)
    raise TusCommunicationError(msg, response.status_code, response.content)
Return offset from tus server .
87
7
233,126
def encode_metadata(self):
    """Return the upload metadata encoded per the tus protocol.

    Each entry becomes ``"<key> <base64(value)>"``. Keys must be
    non-empty and free of whitespace and commas.
    """
    pairs = []
    for raw_key, raw_value in iteritems(self.metadata):
        key = str(raw_key)  # dict keys may be of any object type.
        # Reject empty keys and keys containing whitespace or commas.
        if re.search(r'^$|[\s,]+', key):
            msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
            raise ValueError(msg.format(key))
        value_bytes = b(raw_value)  # python 3 only encodes bytes
        pairs.append('{} {}'.format(
            key, b64encode(value_bytes).decode('ascii')))
    return pairs
Return list of encoded metadata as defined by the Tus protocol .
167
12
233,127
def get_url(self):
    """Return the tus upload url, reusing a stored one when possible.

    With url storage enabled, the file's fingerprint keys a previously
    created url; a miss creates and stores a new one.
    """
    if not (self.store_url and self.url_storage):
        return self.create_url()
    key = self.fingerprinter.get_fingerprint(self.get_file_stream())
    url = self.url_storage.get_item(key)
    if not url:
        url = self.create_url()
        self.url_storage.set_item(key, url)
    return url
Return the tus upload url .
97
7
233,128
def create_url(self):
    """Create the upload resource on the tus server and return its url.

    Raises ``TusCommunicationError`` when the server returns no
    ``location`` header.
    """
    request_headers = self.headers
    request_headers['upload-length'] = str(self.file_size)
    request_headers['upload-metadata'] = ','.join(self.encode_metadata())
    response = requests.post(self.client.url, headers=request_headers)
    created_url = response.headers.get("location")
    if created_url is None:
        msg = 'Attempt to retrieve create file url with status {}'.format(
            response.status_code)
        raise TusCommunicationError(msg, response.status_code,
                                    response.content)
    # The location header may be relative; resolve against the client url.
    return urljoin(self.client.url, created_url)
Return upload url .
137
4
233,129
def request_length(self):
    """Return the byte length of the next chunk upload.

    The final chunk may be shorter than the configured chunk size.
    """
    remaining = self.stop_at - self.offset
    return min(self.chunk_size, remaining)
Return length of next chunk upload .
35
7
233,130
def verify_upload(self):
    """Confirm the last upload request succeeded (HTTP 204).

    Raises ``TusUploadFailed`` for any other status code.
    """
    if self.request.status_code != 204:
        raise TusUploadFailed('', self.request.status_code,
                              self.request.response_content)
    return True
Confirm that the last upload was successful . Raises TusUploadFailed exception if the upload was not successful .
47
27
233,131
def get_file_stream(self):
    """Return a readable stream for the upload, rewound to the start.

    Prefers an explicitly provided stream; otherwise opens the file path.
    Raises ``ValueError`` when neither is usable.
    """
    if self.file_stream:
        self.file_stream.seek(0)
        return self.file_stream
    if os.path.isfile(self.file_path):
        return open(self.file_path, 'rb')
    raise ValueError("invalid file {}".format(self.file_path))
Return a file stream instance of the upload .
84
9
233,132
def file_size(self):
    """Return the total size in bytes of the upload's file stream."""
    fs = self.get_file_stream()
    # Seeking to the end makes tell() report the total size.
    fs.seek(0, os.SEEK_END)
    return fs.tell()
Return size of the file .
38
6
233,133
def upload(self, stop_at=None):
    """Upload the file chunk by chunk until ``stop_at`` bytes are sent.

    :param stop_at: byte position to stop at; defaults to the full file
        size.
    """
    self.stop_at = stop_at or self.file_size
    while self.offset < self.stop_at:
        self.upload_chunk()
    # Loop exhausted normally (there is no break), so the limit is reached.
    if self.log_func:
        self.log_func(
            "maximum upload specified({} bytes) has been reached".format(
                self.stop_at))
Perform file upload .
83
5
233,134
def upload_chunk(self):
    """Upload a single chunk and advance the stored offset.

    The new offset is taken from the server's ``upload-offset`` header.
    """
    self._retried = 0
    self._do_request()
    self.offset = int(self.request.response_headers.get('upload-offset'))
    if self.log_func:
        self.log_func('{} bytes uploaded ...'.format(self.offset))
Upload chunk of file .
78
5
233,135
def get_item(self, key):
    """Return the stored tus url for ``key``, or None when absent."""
    matches = self._db.search(self._urls.key == key)
    if not matches:
        return None
    return matches[0].get('url')
Return the tus url of a file identified by the key specified .
45
14
233,136
def set_item(self, key, url):
    """Insert or update the url stored under the unique ``key``."""
    already_stored = self._db.search(self._urls.key == key)
    if already_stored:
        self._db.update({'url': url}, self._urls.key == key)
    else:
        self._db.insert({'key': key, 'url': url})
Store the url value under the unique key .
79
9
233,137
def perform(self):
    """Perform the actual PATCH request uploading one chunk.

    Stores the response, status code and lower-cased response headers on
    the instance. Connection-level failures are wrapped as
    ``TusUploadFailed`` so callers can retry.
    """
    try:
        host = '{}://{}'.format(self._url.scheme, self._url.netloc)
        # Path relative to the host, as required by http.client.request.
        path = self._url.geturl().replace(host, '', 1)
        chunk = self.file.read(self._content_length)
        if self._upload_checksum:
            # "algorithm base64(digest)" per the tus checksum extension.
            self._request_headers["upload-checksum"] = \
                " ".join((
                    self._checksum_algorithm_name,
                    base64.b64encode(
                        self._checksum_algorithm(chunk).digest()
                    ).decode("ascii"),
                ))
        self.handle.request("PATCH", path, chunk, self._request_headers)
        self._response = self.handle.getresponse()
        self.status_code = self._response.status
        self.response_headers = {
            k.lower(): v for k, v in self._response.getheaders()}
    except http.client.HTTPException as e:
        raise TusUploadFailed(e)
    # wrap connection related errors not raised by the http.client.HTTP(S)Connection
    # as TusUploadFailed exceptions to enable retries
    except OSError as e:
        if e.errno in (errno.EPIPE, errno.ESHUTDOWN, errno.ECONNABORTED,
                       errno.ECONNREFUSED, errno.ECONNRESET):
            raise TusUploadFailed(e)
        raise e
Perform actual request .
340
5
233,138
def get_fingerprint(self, fs):
    """Return a unique fingerprint string for the given file stream.

    The fingerprint is the MD5 of the stream's content, prefixed with
    ``md5:``.
    """
    digest = hashlib.md5()
    # we encode the content to avoid python 3 unicode errors
    chunk = self._encode_data(fs.read(self.BLOCK_SIZE))
    while chunk:
        digest.update(chunk)
        chunk = fs.read(self.BLOCK_SIZE)
    return 'md5:' + digest.hexdigest()
Return a unique fingerprint string value based on the file stream received
98
14
233,139
def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
    """Initialize the HTTPAdapter's pool manager (no proxy), forcing TLS.

    Uses ``PROTOCOL_TLS`` when the ssl module provides it, otherwise the
    legacy ``PROTOCOL_SSLv23`` alias.
    """
    pool_kwargs['ssl_version'] = getattr(
        ssl, 'PROTOCOL_TLS', ssl.PROTOCOL_SSLv23)
    return super(SSLAdapter, self).init_poolmanager(
        connections, maxsize, block, **pool_kwargs)
Called to initialize the HTTPAdapter when no proxy is used .
103
13
233,140
def proxy_manager_for(self, proxy, **proxy_kwargs):
    """Initialize the HTTPAdapter when a proxy is used, forcing TLS.

    Uses ``PROTOCOL_TLS`` when the ssl module provides it, otherwise the
    legacy ``PROTOCOL_SSLv23`` alias.
    """
    proxy_kwargs['ssl_version'] = getattr(
        ssl, 'PROTOCOL_TLS', ssl.PROTOCOL_SSLv23)
    return super(SSLAdapter, self).proxy_manager_for(proxy, **proxy_kwargs)
Called to initialize the HTTPAdapter when a proxy is used .
93
13
233,141
def make_calls(self, num_calls=1):
    """Block until ``num_calls`` more calls fit under the per-second limit.

    Busy-waits (yielding the CPU via sleep(0)) while the one-second
    sliding window is full, then records the new calls.

    :param num_calls: how many calls are about to be made.
    """
    self._cull()
    while self._outstanding_calls + num_calls > self._max_calls_per_second:
        time.sleep(0)  # yield
        self._cull()
    self._call_times.append(self.CallRecord(time=time.time(),
                                            num_calls=num_calls))
    self._outstanding_calls += num_calls
Adds appropriate sleep to avoid making too many calls .
111
10
233,142
def _cull ( self ) : right_now = time . time ( ) cull_from = - 1 for index in range ( len ( self . _call_times ) ) : if right_now - self . _call_times [ index ] . time >= 1.0 : cull_from = index self . _outstanding_calls -= self . _call_times [ index ] . num_calls else : break if cull_from > - 1 : self . _call_times = self . _call_times [ cull_from + 1 : ]
Remove calls more than 1 second old from the queue .
120
11
233,143
def map_with_retries(self, requests, responses_for_requests):
    """Issue the given requests, retrying failures session-wide.

    Successful responses are written into ``responses_for_requests``
    (keyed by request). Requests that raise ``RequestException`` are
    retried recursively while ``self.total_retries`` lasts. A 403
    response raises ``InvalidRequestError``, which is NOT caught here
    and propagates to the caller.
    """
    retries = []
    response_futures = [preq.callable() for preq in requests]
    for request, response_future in zip(requests, response_futures):
        try:
            response = response_future.result()
            if response is not None and response.status_code == 403:
                logging.warning('Request to {} caused a 403 response status code.'.format(request.url))
                raise InvalidRequestError('Access forbidden')
            if response is not None:
                responses_for_requests[request] = response
        except RequestException as re:
            logging.error('An exception was raised for {}: {}'.format(request.url, re))
            if self.total_retries > 0:
                # Consume one retry credit and queue the request again.
                self.total_retries -= 1
                retries.append(request)
    # Recursively retry failed requests with the modified total retry count
    if retries:
        self.map_with_retries(retries, responses_for_requests)
Provides session - based retry functionality
231
8
233,144
def multi_get(self, urls, query_params=None, to_json=True):
    """Issue multiple GET requests.

    Thin wrapper over ``_multi_request`` with the GET verb and no body.
    """
    return self._multi_request(
        MultiRequest._VERB_GET, urls, query_params,
        data=None, to_json=to_json,
    )
Issue multiple GET requests .
60
5
233,145
def multi_post(self, urls, query_params=None, data=None, to_json=True,
               send_as_file=False):
    """Issue multiple POST requests.

    Thin wrapper over ``_multi_request`` with the POST verb.
    """
    return self._multi_request(
        MultiRequest._VERB_POST, urls, query_params, data,
        to_json=to_json, send_as_file=send_as_file,
    )
Issue multiple POST requests .
82
5
233,146
def _zip_request_params ( self , urls , query_params , data ) : # Everybody gets to be a list if not isinstance ( urls , list ) : urls = [ urls ] if not isinstance ( query_params , list ) : query_params = [ query_params ] if not isinstance ( data , list ) : data = [ data ] # Counts must not mismatch url_count = len ( urls ) query_param_count = len ( query_params ) data_count = len ( data ) max_count = max ( url_count , query_param_count , data_count ) if ( max_count > url_count > 1 or max_count > query_param_count > 1 or max_count > data_count > 1 ) : raise InvalidRequestError ( 'Mismatched parameter count url_count:{0} query_param_count:{1} data_count:{2} max_count:{3}' , url_count , query_param_count , data_count , max_count , ) # Pad out lists if url_count < max_count : urls = urls * max_count if query_param_count < max_count : query_params = query_params * max_count if data_count < max_count : data = data * max_count return list ( zip ( urls , query_params , data ) )
Massages inputs and returns a list of 3 - tuples zipping them up .
304
17
233,147
def _wait_for_response(self, requests):
    """Issue a batch of requests and wait for responses, with retries.

    Failed requests (empty/None responses) are retried up to
    ``self._max_retry`` times. 404s can be silently dropped when
    ``self._drop_404s`` is set. ``InvalidRequestError`` aborts
    immediately; other exceptions are logged and the whole attempt is
    retried.

    :returns: list of responses aligned with the original request order
        (failed entries remain None).
    """
    failed_requests = []
    # OrderedDict preserves the caller's request order in the output.
    responses_for_requests = OrderedDict.fromkeys(requests)
    for retry in range(self._max_retry):
        try:
            logging.debug('Try #{0}'.format(retry + 1))
            self._availability_limiter.map_with_retries(requests, responses_for_requests)
            failed_requests = []
            for request, response in responses_for_requests.items():
                if self._drop_404s and response is not None and response.status_code == 404:
                    logging.warning('Request to {0} failed with status code 404, dropping.'.format(request.url))
                elif not response:
                    failed_requests.append((request, response))
            if not failed_requests:
                break
            logging.warning('Try #{0}. Expected {1} successful response(s) but only got {2}.'.format(
                retry + 1, len(requests), len(requests) - len(failed_requests),
            ))
            # retry only for the failed requests
            requests = [fr[0] for fr in failed_requests]
        except InvalidRequestError:
            raise
        except Exception as e:
            # log the exception for the informative purposes and pass to the next iteration
            logging.exception('Try #{0}. Exception occured: {1}. Retrying.'.format(retry + 1, e))
            pass
    if failed_requests:
        logging.warning('Still {0} failed request(s) after {1} retries:'.format(
            len(failed_requests), self._max_retry,
        ))
        for failed_request, failed_response in failed_requests:
            if failed_response is not None:
                # in case response text does contain some non-ascii characters
                failed_response_text = failed_response.text.encode('ascii', 'xmlcharrefreplace')
                logging.warning('Request to {0} failed with status code {1}. Response text: {2}'.format(
                    failed_request.url, failed_response.status_code, failed_response_text,
                ))
            else:
                logging.warning('Request to {0} failed with None response.'.format(failed_request.url))
    return list(responses_for_requests.values())
Issues a batch of requests and waits for the responses . If some of the requests fail it will retry the failed ones up to _max_retry times .
531
34
233,148
def _convert_to_json ( self , response ) : try : return response . json ( ) except ValueError : logging . warning ( 'Expected response in JSON format from {0} but the actual response text is: {1}' . format ( response . request . url , response . text , ) ) return None
Converts response to JSON . If the response cannot be converted to JSON then None is returned .
69
19
233,149
def _multi_request(self, verb, urls, query_params, data, to_json=True,
                   send_as_file=False):
    """Issue batches of simultaneous HTTP requests and gather responses.

    :param verb: one of the ``MultiRequest._VERB_*`` constants.
    :param to_json: when True, each response is converted to JSON.
    :returns: list aligned with the input order; failed slots are None.
    :raises InvalidRequestError: when no URL is supplied.
    """
    if not urls:
        raise InvalidRequestError('No URL supplied')
    # Break the params into batches of request_params
    request_params = self._zip_request_params(urls, query_params, data)
    batch_of_params = [
        request_params[pos:pos + self._max_requests]
        for pos in range(0, len(request_params), self._max_requests)
    ]
    # Iteratively issue each batch, applying the rate limiter if necessary
    all_responses = []
    for param_batch in batch_of_params:
        if self._rate_limiter:
            self._rate_limiter.make_calls(num_calls=len(param_batch))
        prepared_requests = [
            self._create_request(
                verb, url, query_params=query_param, data=datum,
                send_as_file=send_as_file,
            ) for url, query_param, datum in param_batch
        ]
        responses = self._wait_for_response(prepared_requests)
        for response in responses:
            if response:
                all_responses.append(
                    self._convert_to_json(response) if to_json else response)
            else:
                # Preserve positional alignment with the requested urls.
                all_responses.append(None)
    return all_responses
Issues multiple batches of simultaneous HTTP requests and waits for responses .
317
13
233,150
def error_handling(cls, fn):
    """Decorator that logs InvalidRequestError details before re-raising.

    Request/response attributes attached to the error, when present, are
    written out via the error-message helpers.
    """
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except InvalidRequestError as e:
            write_exception(e)
            if hasattr(e, 'request'):
                write_error_message('request {0}'.format(repr(e.request)))
            if hasattr(e, 'response'):
                write_error_message('response {0}'.format(repr(e.response)))
            raise e
    return wrapper
Decorator to handle errors
128
6
233,151
def acquire_node(self, node):
    """Try to take the lock on a single redis node.

    Uses SET NX with the configured TTL; an unreachable node simply
    counts as "not acquired".
    """
    try:
        return node.set(self.resource, self.lock_key,
                        nx=True, px=self.ttl)
    except (redis.exceptions.ConnectionError,
            redis.exceptions.TimeoutError):
        return False
acquire a single redis node
62
7
233,152
def release_node(self, node):
    """Release the lock on a single redis node, best-effort.

    Connection problems are ignored — the lock will expire via its TTL.
    """
    # use the lua script to release the lock in a safe way
    try:
        node._release_script(keys=[self.resource], args=[self.lock_key])
    except (redis.exceptions.ConnectionError,
            redis.exceptions.TimeoutError):
        pass
release a single redis node
72
6
233,153
def _extract_response_xml(self, domain, response):
    """Extract Alexa attributes from an HTTP response's XML body.

    :param domain: the domain the lookup was made for (always included
        in the result).
    :param response: response whose ``_content`` holds the Alexa XML.
    :returns: ``{'attributes': {...}}`` with lower-cased Alexa keys;
        malformed XML yields only the ``domain`` attribute.
    """
    attributes = {}
    # Maps Alexa element tags to the attribute that carries their value.
    alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'}
    try:
        xml_root = ET.fromstring(response._content)
        # NOTE(review): the 'SD//' path (descendants of SD) relies on
        # legacy ElementTree path handling — confirm it still parses on
        # the targeted Python versions.
        for xml_child in xml_root.findall('SD//'):
            if xml_child.tag in alexa_keys and alexa_keys[xml_child.tag] in xml_child.attrib:
                attributes[xml_child.tag.lower()] = xml_child.attrib[alexa_keys[xml_child.tag]]
    except ParseError:
        # Skip ill-formatted XML and return no Alexa attributes
        pass
    attributes['domain'] = domain
    return {'attributes': attributes}
Extract XML content of an HTTP response into dictionary format .
194
12
233,154
def _bulk_cache_lookup ( self , api_name , keys ) : if self . _cache : responses = self . _cache . bulk_lookup ( api_name , keys ) missing_keys = [ key for key in keys if key not in responses . keys ( ) ] return ( responses , missing_keys ) return ( { } , keys )
Performs a bulk cache lookup and returns a tuple with the results found and the keys missing in the cache . If the cache is not configured it will return an empty dictionary of found results and the initial list of keys .
78
44
233,155
def _write_cache_to_file(self):
    """Serialize the in-memory cache to its JSON file on disk."""
    serialized = simplejson.dumps(self._cache)
    with open(self._cache_file_name, 'w') as fp:
        fp.write(serialized)
Write the contents of the cache to a file on disk .
51
12
233,156
def _read_cache_from_file(self):
    """Load the cache dict from its file on disk.

    Returns an empty dict when the file is missing or unparseable.
    """
    try:
        with open(self._cache_file_name, 'r') as fp:
            return simplejson.loads(fp.read())
    except (IOError, JSONDecodeError):
        # The file could not be read. This is not a problem if the file does not exist.
        return {}
Read the contents of the cache from a file on disk .
92
12
233,157
def bulk_lookup(self, api_name, keys):
    """Look up every key in the cache, returning only the hits.

    :returns: dict of ``key -> cached value`` for keys found in cache.
    """
    lookups = ((key, self.lookup_value(api_name, key)) for key in keys)
    return {key: value for key, value in lookups if value is not None}
Perform lookup on an enumerable of keys .
58
10
233,158
def _cached_by_domain(api_name):
    """A caching wrapper for functions that take a list of domains.

    The decorated function is only called for domains missing from the
    cache; fresh results are cached and merged with the cached ones.

    :param api_name: cache namespace for this endpoint.
    :raises ResponseError: when the wrapped call returns nothing for the
        uncached domains.
    """
    def wrapped(func):
        def decorated(self, domains):
            if not self._cache:
                return func(self, domains)
            all_responses = self._cache.bulk_lookup(api_name, domains)
            # Only query the API for domains the cache didn't have.
            domains = list(set(domains) - set(all_responses))
            if domains:
                response = func(self, domains)
                if not response:
                    raise ResponseError("No response for uncached domains")
                for domain in response:
                    self._cache.cache_value(api_name, domain, response[domain])
                    all_responses[domain] = response[domain]
            return all_responses
        return decorated
    return wrapped
A caching wrapper for functions that take a list of domains as parameters .
154
14
233,159
def domain_score(self, domains):
    """Call the (deprecated) OpenDNS domain scores endpoint."""
    warn(
        'OpenDNS Domain Scores endpoint is deprecated. Use '
        'InvestigateApi.categorization() instead', DeprecationWarning,
    )
    return self._multi_post('domains/score/', domains)
Calls domain scores endpoint .
68
6
233,160
def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None):
    """Makes multiple GETs to an OpenDNS endpoint.

    Cached responses are used where available; only the parameters that
    miss the cache are fetched over the network, and fresh responses are
    written back to the cache.
    """
    all_responses = {}
    if self._cache:
        all_responses = self._cache.bulk_lookup(cache_api_name, url_params)
        # Only fetch what the cache did not already answer.
        url_params = [key for key in url_params if key not in all_responses.keys()]
    if len(url_params):
        urls = self._to_urls(fmt_url_path, url_params)
        responses = self._requests.multi_get(urls, query_params)
        for url_param, response in zip(url_params, responses):
            if self._cache:
                self._cache.cache_value(cache_api_name, url_param, response)
            all_responses[url_param] = response
    return all_responses
Makes multiple GETs to an OpenDNS endpoint .
201
12
233,161
def security(self, domains):
    """Calls the security end point for each domain.

    NOTE(review): the upstream description claims an ``is_suspicious`` key
    is added to each response, but nothing here sets it -- presumably a
    caller adds it; verify before relying on that key.
    """
    api_name = 'opendns-security'
    fmt_url_path = u'security/name/{0}.json'
    return self._multi_get(api_name, fmt_url_path, domains)
Calls security end point and adds an is_suspicious key to each response .
57
17
233,162
def whois_emails(self, emails):
    """Calls the WHOIS email end point for each of ``emails``."""
    api_name = 'opendns-whois-emails'
    fmt_url_path = u'whois/emails/{0}'
    return self._multi_get(api_name, fmt_url_path, emails)
Calls WHOIS Email end point
65
7
233,163
def whois_nameservers(self, nameservers):
    """Calls the WHOIS nameserver end point for each of ``nameservers``."""
    api_name = 'opendns-whois-nameservers'
    fmt_url_path = u'whois/nameservers/{0}'
    return self._multi_get(api_name, fmt_url_path, nameservers)
Calls WHOIS Nameserver end point
72
8
233,164
def whois_domains(self, domains):
    """Calls the WHOIS domain end point for each of ``domains``."""
    api_name = 'opendns-whois-domain'
    fmt_url_path = u'whois/{0}'
    return self._multi_get(api_name, fmt_url_path, domains)
Calls WHOIS domain end point
61
7
233,165
def whois_domains_history(self, domains):
    """Calls the WHOIS domain history end point for each of ``domains``."""
    api_name = 'opendns-whois-domain-history'
    fmt_url_path = u'whois/{0}/history'
    return self._multi_get(api_name, fmt_url_path, domains)
Calls WHOIS domain history end point
67
8
233,166
def domain_tag(self, domains):
    """Get the date range when a domain is part of the OpenDNS block list."""
    api_name = 'opendns-domain_tag'
    fmt_url_path = u'domains/{0}/latest_tags'
    return self._multi_get(api_name, fmt_url_path, domains)
Get the date range when a domain is part of the OpenDNS block list .
62
16
233,167
def rr_history(self, ips):
    """Get the domains related to the input ips (A-record history)."""
    api_name = 'opendns-rr_history'
    fmt_url_path = u'dnsdb/ip/a/{0}.json'
    return self._multi_get(api_name, fmt_url_path, ips)
Get the domains related to input ips .
66
9
233,168
def sample(self, hashes):
    """Get the information about a sample based on its hash."""
    api_name = 'opendns-sample'
    fmt_url_path = u'sample/{0}'
    return self._multi_get(api_name, fmt_url_path, hashes)
Get the information about a sample based on its hash .
53
11
233,169
def search(self, patterns, start=30, limit=1000, include_category=False):
    """Performs pattern searches against the Investigate database.

    ``start`` is a look-back window expressed in days;
    ``include_category`` is sent to the API as a lowercase string flag.
    """
    api_name = 'opendns-patterns'
    fmt_url_path = u'search/{0}'
    start = '-{0}days'.format(start)
    include_category = str(include_category).lower()
    query_params = {
        'start': start,
        'limit': limit,
        'includecategory': include_category,
    }
    return self._multi_get(api_name, fmt_url_path, patterns, query_params)
Performs pattern searches against the Investigate database .
129
10
233,170
def risk_score(self, domains):
    """Performs Umbrella risk score analysis on the input domains."""
    api_name = 'opendns-risk_score'
    fmt_url_path = u'domains/risk-score/{0}'
    return self._multi_get(api_name, fmt_url_path, domains)
Performs Umbrella risk score analysis on the input domains
62
11
233,171
def _extract_all_responses(self, resources, api_endpoint, api_name):
    """Aux function to extract all the API endpoint responses.

    Cache hits are collected first; only the uncached resources are
    requested (in chunks), and all results are merged into a single dict
    keyed by resource.
    """
    all_responses, resources = self._bulk_cache_lookup(api_name, resources)
    resource_chunks = self._prepare_resource_chunks(resources)
    response_chunks = self._request_reports("resource", resource_chunks, api_endpoint)
    self._extract_response_chunks(all_responses, response_chunks, api_name)
    return all_responses
Aux function to extract all the API endpoint responses .
122
11
233,172
def get_url_distribution(self, params=None):
    """Retrieves a live feed with the latest URLs submitted to VT.

    ``params`` is an optional dict of query parameters forwarded to the
    'url/distribution' endpoint.
    """
    params = params or {}
    all_responses = {}
    api_name = 'virustotal-url-distribution'
    response_chunks = self._request_reports(list(params.keys()), list(params.values()), 'url/distribution')
    self._extract_response_chunks(all_responses, response_chunks, api_name)
    return all_responses
Retrieves a live feed with the latest URLs submitted to VT .
109
14
233,173
def get_url_reports(self, resources):
    """Retrieves a scan report for each of the given URLs.

    Uncached URLs are batched using a newline delimiter, as required by
    the 'url/report' endpoint.
    """
    api_name = 'virustotal-url-reports'
    (all_responses, resources) = self._bulk_cache_lookup(api_name, resources)
    resource_chunks = self._prepare_resource_chunks(resources, '\n')
    response_chunks = self._request_reports("resource", resource_chunks, 'url/report')
    self._extract_response_chunks(all_responses, response_chunks, api_name)
    return all_responses
Retrieves a scan report on a given URL .
131
11
233,174
def get_ip_reports(self, ips):
    """Retrieves the most recent VT info for a set of ips.

    Cache hits are reused; fresh responses are cached per ip.
    """
    api_name = 'virustotal-ip-address-reports'
    (all_responses, ips) = self._bulk_cache_lookup(api_name, ips)
    responses = self._request_reports("ip", ips, 'ip-address/report')
    for ip, response in zip(ips, responses):
        if self._cache:
            self._cache.cache_value(api_name, ip, response)
        all_responses[ip] = response
    return all_responses
Retrieves the most recent VT info for a set of ips .
130
15
233,175
def get_file_clusters(self, date):
    """Retrieves file similarity clusters for a given time frame.

    NOTE(review): ``_bulk_cache_lookup`` returns the cache hits plus the
    still-uncached ``resources``, yet the request below is issued with the
    original ``date`` and ``resources`` is never used -- so the cache does
    not actually narrow the request here. Confirm whether ``resources``
    was meant to be passed instead.
    """
    api_name = 'virustotal-file-clusters'
    (all_responses, resources) = self._bulk_cache_lookup(api_name, date)
    response = self._request_reports("date", date, 'file/clusters')
    self._extract_response_chunks(all_responses, response, api_name)
    return all_responses
Retrieves file similarity clusters for a given time frame .
102
12
233,176
def _prepare_resource_chunks(self, resources, resource_delim=','):
    """Prepare delimiter-joined batches of resources.

    Some VirusTotal API methods accept multiple resources per call, so
    this concatenates ``resources`` into chunks of at most
    ``self._resources_per_req`` entries each.
    """
    return [self._prepare_resource_chunk(resources, resource_delim, pos)
            for pos in range(0, len(resources), self._resources_per_req)]
As in some VirusTotal API methods the call can be made for multiple resources at once this method prepares a list of concatenated resources according to the maximum number of resources per requests .
67
37
233,177
def _extract_response_chunks(self, all_responses, response_chunks, api_name):
    """Extracts and caches the responses from the response chunks.

    Used for requests that concatenated multiple resources: each chunk may
    itself be a list of responses. Extracted responses are cached (when a
    cache is configured) and added to ``all_responses``, keyed by each
    response's 'resource' field.
    """
    for response_chunk in response_chunks:
        # Single-resource requests yield a bare dict instead of a list.
        if not isinstance(response_chunk, list):
            response_chunk = [response_chunk]
        for response in response_chunk:
            if not response:
                continue
            if self._cache:
                self._cache.cache_value(api_name, response['resource'], response)
            all_responses[response['resource']] = response
Extracts and caches the responses from the response chunks in case of the responses for the requests containing multiple concatenated resources . Extracted responses are added to the already cached responses passed in the all_responses parameter .
117
45
233,178
def get_editor_widget(self, request, plugins, plugin):
    """Returns the Django form Widget to be used for the text area."""
    cancel_url_name = self.get_admin_url_name('delete_on_cancel')
    cancel_url = reverse('admin:%s' % cancel_url_name)
    render_plugin_url_name = self.get_admin_url_name('render_plugin')
    render_plugin_url = reverse('admin:%s' % render_plugin_url_name)
    action_token = self.get_action_token(request, plugin)
    # should we delete the text plugin when
    # the user cancels?
    delete_text_on_cancel = (
        'delete-on-cancel' in request.GET and  # noqa
        not plugin.get_plugin_instance()[0]
    )
    widget = TextEditorWidget(
        installed_plugins=plugins, pk=plugin.pk,
        placeholder=plugin.placeholder,
        plugin_language=plugin.language,
        configuration=self.ckeditor_configuration,
        render_plugin_url=render_plugin_url,
        cancel_url=cancel_url,
        action_token=action_token,
        delete_on_cancel=delete_text_on_cancel,
    )
    return widget
Returns the Django form Widget to be used for the text area
269
13
233,179
def get_form_class(self, request, plugins, plugin):
    """Returns a subclass of Form to be used by this plugin.

    When the plugin already has an instance, its body is pre-rendered for
    the admin and used as the form's initial value.
    """
    widget = self.get_editor_widget(
        request=request,
        plugins=plugins,
        plugin=plugin,
    )
    instance = plugin.get_plugin_instance()[0]
    if instance:
        context = RequestContext(request)
        context['request'] = request
        rendered_text = plugin_tags_to_admin_html(
            text=instance.body,
            context=context,
        )
    else:
        rendered_text = None

    # We avoid mutating the Form declared above by subclassing
    class TextPluginForm(self.form):
        body = CharField(widget=widget, required=False)

        def __init__(self, *args, **kwargs):
            initial = kwargs.pop('initial', {})
            if rendered_text:
                initial['body'] = rendered_text
            super(TextPluginForm, self).__init__(*args, initial=initial, **kwargs)
    return TextPluginForm
Returns a subclass of Form to be used by this plugin
215
11
233,180
def _plugin_tags_to_html(text, output_func):
    """Convert plugin object tags in ``text`` into the form for the public site.

    ``output_func`` is called with each plugin object and its regex match
    to produce the replacement markup.
    """
    plugins_by_id = get_plugins_from_text(text)

    def _render_tag(m):
        try:
            plugin_id = int(m.groupdict()['pk'])
            obj = plugins_by_id[plugin_id]
        except KeyError:
            # Object must have been deleted. It cannot be rendered to
            # end user so just remove it from the HTML altogether
            return u''
        else:
            obj._render_meta.text_enabled = True
            return output_func(obj, m)
    return OBJ_ADMIN_RE.sub(_render_tag, text)
Convert plugin object tags into the form for public site .
146
12
233,181
def extract_images(data, plugin):
    """Extracts base64 encoded images from drag and drop actions in browser
    and saves those images as plugins.

    Returns the HTML with every inline ``data:`` image replaced by the tag
    of a newly created CMS image plugin; returns ``data`` unchanged when no
    inline image was found or no save function is configured.
    """
    if not settings.TEXT_SAVE_IMAGE_FUNCTION:
        return data
    tree_builder = html5lib.treebuilders.getTreeBuilder('dom')
    parser = html5lib.html5parser.HTMLParser(tree=tree_builder)
    dom = parser.parse(data)
    found = False
    for img in dom.getElementsByTagName('img'):
        src = img.getAttribute('src')
        if not src.startswith('data:'):
            # nothing to do
            continue
        width = img.getAttribute('width')
        height = img.getAttribute('height')
        # extract the image data
        data_re = re.compile(r'data:(?P<mime_type>[^"]*);(?P<encoding>[^"]*),(?P<data>[^"]*)')
        m = data_re.search(src)
        dr = m.groupdict()
        mime_type = dr['mime_type']
        image_data = dr['data']
        # BUG FIX: the previous ``if mime_type.find(';'):`` was wrong --
        # str.find() returns -1 (truthy) when ';' is absent and 0 (falsy)
        # when it is at index 0. Membership test expresses the intent.
        if ';' in mime_type:
            mime_type = mime_type.split(';')[0]
        try:
            image_data = base64.b64decode(image_data)
        except Exception:
            image_data = base64.urlsafe_b64decode(image_data)
        try:
            image_type = mime_type.split('/')[1]
        except IndexError:
            # No image type specified -- will convert to jpg below if it's valid image data
            image_type = ''
        image = BytesIO(image_data)
        # generate filename and normalize image format
        if image_type == 'jpg' or image_type == 'jpeg':
            file_ending = 'jpg'
        elif image_type == 'png':
            file_ending = 'png'
        elif image_type == 'gif':
            file_ending = 'gif'
        else:
            # any not "web-safe" image format we try to convert to jpg
            im = Image.open(image)
            new_image = BytesIO()
            file_ending = 'jpg'
            im.save(new_image, 'JPEG')
            new_image.seek(0)
            image = new_image
        filename = u'%s.%s' % (uuid.uuid4(), file_ending)
        # transform image into a cms plugin
        image_plugin = img_data_to_plugin(
            filename, image, parent_plugin=plugin, width=width, height=height
        )
        # render the new html for the plugin
        new_img_html = plugin_to_tag(image_plugin)
        # replace the original image node with the newly created cms plugin html
        img.parentNode.replaceChild(parser.parseFragment(new_img_html).childNodes[0], img)
        found = True
    if found:
        return u''.join([y.toxml() for y in dom.getElementsByTagName('body')[0].childNodes])
    else:
        return data
extracts base64 encoded images from drag and drop actions in browser and saves those images as plugins
683
20
233,182
def default_config_filename(root_dir=None):
    """Returns the default path of the configuration file.

    Looks for ``locale/`` under ``root_dir`` (the current directory by
    default), falling back to ``conf/locale/``.
    """
    root_dir = Path(root_dir) if root_dir else Path('.').abspath()
    locale_dir = root_dir / 'locale'
    if not os.path.exists(locale_dir):
        locale_dir = root_dir / 'conf' / 'locale'
    return locale_dir / BASE_CONFIG_FILENAME
Returns the default name of the configuration file .
93
9
233,183
def rtl_langs(self):
    """Return the sorted set of translated RTL language codes present in
    self.locales. Ignores the source locale.
    """
    # Base RTL langs are Arabic, Farsi, Hebrew, and Urdu; startswith with a
    # tuple captures both 'fa' and region variants such as 'fa_IR'.
    base_rtl = ('ar', 'fa', 'he', 'ur')

    def is_rtl(lang):
        """Return True if ``lang`` is an RTL language code."""
        return lang.startswith(base_rtl)

    return sorted({lang for lang in self.translated_locales if is_rtl(lang)})
Returns the set of translated RTL language codes present in self . locales . Ignores source locale .
159
21
233,184
def clean_conf_folder(self, locale):
    """Remove the configuration directory for ``locale``."""
    dirname = self.configuration.get_messages_dir(locale)
    dirname.removedirs_p()
Remove the configuration directory for locale
37
6
233,185
def segment_pofiles(configuration, locale):
    """Segment all the pofiles for ``locale``.

    Returns the set of filenames written.
    """
    files_written = set()
    for filename, segments in configuration.segment.items():
        filename = configuration.get_messages_dir(locale) / filename
        files_written.update(segment_pofile(filename, segments))
    return files_written
Segment all the pofiles for locale .
68
9
233,186
def segment_pofile(filename, segments):
    """Segment a .po file using patterns in ``segments``.

    ``segments`` maps segment file names to lists of fnmatch patterns;
    messages whose occurrences all match patterns for one segment go into
    that segment's file, everything else stays in ``filename``. Returns
    the set of files written.
    """
    reading_msg = "Reading {num} entries from {file}"
    writing_msg = "Writing {num} entries to {file}"
    source_po = polib.pofile(filename)
    LOG.info(reading_msg.format(file=filename, num=len(source_po)))  # pylint: disable=logging-format-interpolation
    # A new pofile just like the source, but with no messages. We'll put
    # anything not segmented into this file.
    remaining_po = copy.deepcopy(source_po)
    remaining_po[:] = []
    # Turn the segments dictionary into two structures: segment_patterns is a
    # list of (pattern, segmentfile) pairs. segment_po_files is a dict mapping
    # segment file names to pofile objects of their contents.
    segment_po_files = {filename: remaining_po}
    segment_patterns = []
    for segmentfile, patterns in segments.items():
        segment_po_files[segmentfile] = copy.deepcopy(remaining_po)
        segment_patterns.extend((pat, segmentfile) for pat in patterns)
    # Examine each message in the source file. If all of its occurrences match
    # a pattern for the same segment, it goes in that segment. Otherwise, it
    # goes in remaining.
    for msg in source_po:
        msg_segments = set()
        for occ_file, _ in msg.occurrences:
            for pat, segment_file in segment_patterns:
                if fnmatch.fnmatch(occ_file, pat):
                    msg_segments.add(segment_file)
                    break
            else:
                # No pattern matched this occurrence: it stays in the source file.
                msg_segments.add(filename)
        assert msg_segments
        if len(msg_segments) == 1:
            # This message belongs in this segment.
            segment_file = msg_segments.pop()
            segment_po_files[segment_file].append(msg)
        else:
            # It's in more than one segment, so put it back in the main file.
            remaining_po.append(msg)
    # Write out the results.
    files_written = set()
    for segment_file, pofile in segment_po_files.items():
        out_file = filename.dirname() / segment_file
        if not pofile:
            LOG.error("No messages to write to %s, did you run segment twice?", out_file)
        else:
            LOG.info(writing_msg.format(file=out_file, num=len(pofile)))  # pylint: disable=logging-format-interpolation
            pofile.save(out_file)
            files_written.add(out_file)
    return files_written
Segment a . po file using patterns in segments .
587
11
233,187
def fix_header(pofile):
    """Replace default gettext headers with edX headers (mutates ``pofile``)."""
    # By default, django-admin.py makemessages creates this header:
    #
    #   SOME DESCRIPTIVE TITLE.
    #   Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
    #   This file is distributed under the same license as the PACKAGE package.
    #   FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
    pofile.metadata_is_fuzzy = []  # remove [u'fuzzy']
    header = pofile.header
    fixes = (
        ('SOME DESCRIPTIVE TITLE', EDX_MARKER),
        ('Translations template for PROJECT.', EDX_MARKER),
        ('YEAR', str(datetime.utcnow().year)),
        ('ORGANIZATION', 'edX'),
        ("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"),
        (
            'This file is distributed under the same license as the PROJECT project.',
            'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
        ),
        (
            'This file is distributed under the same license as the PACKAGE package.',
            'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
        ),
        ('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <info@edx.org>'),
    )
    for src, dest in fixes:
        header = header.replace(src, dest)
    pofile.header = header
Replace default headers with edX headers
328
8
233,188
def strip_key_strings(pofile):
    """Removes all entries in PO which are key strings.

    These entries should appear only in messages.po, not in any other po
    files.
    """
    # Rebuild the entry list in place, dropping the key-string entries.
    pofile[:] = [entry for entry in pofile if not is_key_string(entry.msgid)]
Removes all entries in PO which are key strings . These entries should appear only in messages . po not in any other po files .
46
27
233,189
def rename_source_file(self, src, dst):
    """Rename a file in the source directory.

    An OSError (e.g. missing source file) is deliberately swallowed.
    """
    try:
        os.rename(self.source_msgs_dir.joinpath(src), self.source_msgs_dir.joinpath(dst))
    except OSError:
        pass
Rename a file in the source directory .
57
9
233,190
def get_valid_commands():
    """Returns the valid command names: i18n submodules that define a main()."""
    modules = [m.basename().split('.')[0] for m in Path(__file__).dirname().files('*.py')]
    commands = []
    for modname in modules:
        if modname == 'main':
            continue
        mod = importlib.import_module('i18n.%s' % modname)
        if hasattr(mod, 'main'):
            commands.append(modname)
    return commands
Returns valid commands .
111
4
233,191
def error_message():
    """Writes an error message listing the valid commands to stderr; returns -1."""
    sys.stderr.write('valid commands:\n')
    for cmd in get_valid_commands():
        sys.stderr.write('\t%s\n' % cmd)
    return -1
Writes out error message specifying the valid commands .
56
10
233,192
def main():
    """Executes the command named in sys.argv[1].

    Returns error_message() when no command is given or the command is not
    valid.
    """
    try:
        command = sys.argv[1]
    except IndexError:
        return error_message()
    try:
        module = importlib.import_module('i18n.%s' % command)
        # Stash the remaining CLI args where the subcommand's main() can find them.
        module.main.args = sys.argv[2:]
    except (ImportError, AttributeError):
        return error_message()
    return module.main()
Executes the given command . Returns error_message if command is not valid .
84
16
233,193
def validate_po_files ( configuration , locale_dir , root_dir = None , report_empty = False , check_all = False ) : found_problems = False # List of .po files that are the product of a merge (see generate.py). merged_files = configuration . generate_merge . keys ( ) for dirpath , __ , filenames in os . walk ( root_dir if root_dir else locale_dir ) : for name in filenames : __ , ext = os . path . splitext ( name ) filename = os . path . join ( dirpath , name ) # Validate only .po files that are not product of a merge (see generate.py) unless check_all is true. # If django-partial.po has a problem, then django.po will also, so don't report it. if ext . lower ( ) == '.po' and ( check_all or os . path . basename ( filename ) not in merged_files ) : # First validate the format of this file if msgfmt_check_po_file ( locale_dir , filename ) : found_problems = True # Check that the translated strings are valid, and optionally # check for empty translations. But don't check English. if "/locale/en/" not in filename : problems = check_messages ( filename , report_empty ) if problems : report_problems ( filename , problems ) found_problems = True dup_filename = filename . replace ( '.po' , '.dup' ) has_duplicates = os . path . exists ( dup_filename ) if has_duplicates : log . warning ( " Duplicates found in %s, details in .dup file" , dup_filename ) found_problems = True if not ( problems or has_duplicates ) : log . info ( " No problems found in %s" , filename ) return found_problems
Validate all of the po files found in the root directory that are not product of a merge .
416
20
233,194
def msgfmt_check_po_file ( locale_dir , filename ) : found_problems = False # Use relative paths to make output less noisy. rfile = os . path . relpath ( filename , locale_dir ) out , err = call ( 'msgfmt -c -o /dev/null {}' . format ( rfile ) , working_directory = locale_dir ) if err : log . info ( u'\n' + out . decode ( 'utf8' ) ) log . warning ( u'\n' + err . decode ( 'utf8' ) ) found_problems = True return found_problems
Call GNU msgfmt - c on each . po file to validate its format . Any errors caught by msgfmt are logged to log .
139
29
233,195
def tags_in_string(msg):
    """Return the set of tags in a message string.

    Tags that naturally change with the language (HTML entities and
    ``<abbr>``) are excluded.
    """
    def is_linguistic_tag(tag):
        """Is this tag one that can change with the language?"""
        if tag.startswith("&"):
            return True
        if any(x in tag for x in ["<abbr>", "<abbr ", "</abbr>"]):
            return True
        return False
    __, tags = Converter().detag_string(msg)
    return set(t for t in tags if not is_linguistic_tag(t))
Return the set of tags in a message string .
118
10
233,196
def astral(msg):
    """Does ``msg`` have characters outside the Basic Multilingual Plane?

    Python2 narrow builds present astral characters as surrogate pairs.
    By encoding as utf32, and decoding DWORDS, we can get at the real code
    points.
    """
    utf32 = msg.encode("utf32")[4:]  # [4:] to drop the bom
    # Use floor division: true division (len/4) yields a float on Python 3,
    # which "%d" only tolerates by accident for whole values.
    code_points = struct.unpack("%dI" % (len(utf32) // 4), utf32)
    return any(cp > 0xFFFF for cp in code_points)
Does msg have characters outside the Basic Multilingual Plane?
113
11
233,197
def report_problems(filename, problems):
    """Report on the problems found in ``filename``.

    Each problem is a tuple of (description, msgid, *translations).
    Details are written to a sibling .prob file and logged.
    """
    problem_file = filename.replace(".po", ".prob")
    id_filler = textwrap.TextWrapper(width=79, initial_indent=" msgid: ", subsequent_indent=" " * 9)
    tx_filler = textwrap.TextWrapper(width=79, initial_indent=" -----> ", subsequent_indent=" " * 9)
    with codecs.open(problem_file, "w", encoding="utf8") as prob_file:
        for problem in problems:
            desc, msgid = problem[:2]
            prob_file.write(u"{}\n{}\n".format(desc, id_filler.fill(msgid)))
            info = u"{}\n{}\n".format(desc, id_filler.fill(msgid))
            for translation in problem[2:]:
                prob_file.write(u"{}\n".format(tx_filler.fill(translation)))
                info += u"{}\n".format(tx_filler.fill(translation))
            log.info(info)
            prob_file.write(u"\n")
    log.error(" %s problems in %s, details in .prob file", len(problems), filename)
Report on the problems found in filename .
302
8
233,198
def merge(configuration, locale, target='django.po', sources=('django-partial.po',), fail_if_missing=True):
    """For the given locale, merge the source files to become the target
    file. Note that the target file might also be one of the sources.

    When ``fail_if_missing`` is false, missing source files cause a silent
    return instead of re-raising the validation error.
    """
    LOG.info('Merging %s locale %s', target, locale)
    locale_directory = configuration.get_messages_dir(locale)
    try:
        validate_files(locale_directory, sources)
    except Exception:  # pylint: disable=broad-except
        if not fail_if_missing:
            return
        raise
    # merged file is merged.po
    merge_cmd = 'msgcat -o merged.po ' + ' '.join(sources)
    execute(merge_cmd, working_directory=locale_directory)
    # clean up redundancies in the metadata
    merged_filename = locale_directory.joinpath('merged.po')
    duplicate_entries = clean_pofile(merged_filename)
    # rename merged.po -> django.po (default)
    target_filename = locale_directory.joinpath(target)
    os.rename(merged_filename, target_filename)
    # Write duplicate messages to a file
    if duplicate_entries:
        dup_file = target_filename.replace(".po", ".dup")
        with codecs.open(dup_file, "w", encoding="utf8") as dfile:
            for (entry, translations) in duplicate_entries:
                dfile.write(u"{}\n".format(entry))
                dfile.write(u"Translations found were:\n\t{}\n\n".format(translations))
        LOG.warning(" %s duplicates in %s, details in .dup file", len(duplicate_entries), target_filename)
For the given locale merge the sources files to become the target file . Note that the target file might also be one of the sources .
375
27
233,199
def merge_files(configuration, locale, fail_if_missing=True):
    """Merge all of the files in ``locale`` as specified in config.yaml."""
    for merged_target, partial_sources in configuration.generate_merge.items():
        merge(configuration, locale, merged_target, partial_sources, fail_if_missing)
Merge all the files in locale as specified in config . yaml .
50
15