idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
239,800
def get_attname_column ( self ) : # See "A guide to Field parameters": django/db/models/fields/__init__.py # * attname: The attribute to use on the model object. This is the same as # "name", except in the case of ForeignKeys, where "_id" is # appended. # * column: The database column for this field. This is the same as # "attname", except if db_column is specified. attname = self . get_attname ( ) if self . db_column is not None : # explicit name column = self . db_column else : if not self . name . islower ( ) : # a Salesforce style name e.g. 'LastName' or 'MyCustomField' column = self . name else : # a Django style name like 'last_name' or 'my_custom_field' column = self . name . title ( ) . replace ( '_' , '' ) # Fix custom fields if self . sf_custom : column = self . sf_namespace + column + '__c' return attname , column
Get the database column name automatically in most cases .
244
10
239,801
def extract_values ( query ) : # pylint if isinstance ( query , subqueries . UpdateQuery ) : row = query . values return extract_values_inner ( row , query ) if isinstance ( query , subqueries . InsertQuery ) : ret = [ ] for row in query . objs : ret . append ( extract_values_inner ( row , query ) ) return ret raise NotSupportedError
Extract values from insert or update query . Supports bulk_create
89
13
239,802
def execute ( self , q , args = ( ) ) : # pylint:disable=too-many-branches self . rowcount = None response = None if self . query is None : self . execute_select ( q , args ) else : response = self . execute_django ( q , args ) if isinstance ( response , list ) : return # the encoding is detected automatically, e.g. from headers if response and response . text : # parse_float set to decimal.Decimal to avoid precision errors when # converting from the json number to a float and then to a Decimal object # on a model's DecimalField. This converts from json number directly # to a Decimal object data = response . json ( parse_float = decimal . Decimal ) # a SELECT query if 'totalSize' in data : # SELECT self . rowcount = data [ 'totalSize' ] # a successful INSERT query, return after getting PK elif ( 'success' in data and 'id' in data ) : self . lastrowid = data [ 'id' ] return elif 'compositeResponse' in data : # TODO treat error reporting for composite requests self . lastrowid = [ x [ 'body' ] [ 'id' ] if x [ 'body' ] is not None else x [ 'referenceId' ] for x in data [ 'compositeResponse' ] ] return elif data [ 'hasErrors' ] is False : # it is from Composite Batch request # save id from bulk_create even if Django don't use it if data [ 'results' ] and data [ 'results' ] [ 0 ] [ 'result' ] : self . lastrowid = [ item [ 'result' ] [ 'id' ] for item in data [ 'results' ] ] return # something we don't recognize else : raise DatabaseError ( data ) if not q . upper ( ) . startswith ( 'SELECT COUNT() FROM' ) : self . first_row = data [ 'records' ] [ 0 ] if data [ 'records' ] else None
Send a query to the Salesforce API .
448
9
239,803
def execute_django ( self , soql , args = ( ) ) : response = None sqltype = soql . split ( None , 1 ) [ 0 ] . upper ( ) if isinstance ( self . query , subqueries . InsertQuery ) : response = self . execute_insert ( self . query ) elif isinstance ( self . query , subqueries . UpdateQuery ) : response = self . execute_update ( self . query ) elif isinstance ( self . query , subqueries . DeleteQuery ) : response = self . execute_delete ( self . query ) elif isinstance ( self . query , RawQuery ) : self . execute_select ( soql , args ) elif sqltype in ( 'SAVEPOINT' , 'ROLLBACK' , 'RELEASE' ) : log . info ( "Ignored SQL command '%s'" , sqltype ) return elif isinstance ( self . query , Query ) : self . execute_select ( soql , args ) else : raise DatabaseError ( "Unsupported query: type %s: %s" % ( type ( self . query ) , self . query ) ) return response
Fixed execute for queries coming from Django query compilers
249
10
239,804
def get_pks_from_query ( self , query ) : where = query . where sql = None if where . connector == 'AND' and not where . negated and len ( where . children ) == 1 : # simple cases are optimized, especially because a suboptimal # nested query based on the same table is not allowed by SF child = where . children [ 0 ] if ( child . lookup_name in ( 'exact' , 'in' ) and child . lhs . target . column == 'Id' and not child . bilateral_transforms and child . lhs . target . model is self . query . model ) : pks = child . rhs if child . lookup_name == 'exact' : assert isinstance ( pks , text_type ) return [ pks ] # lookup_name 'in' assert not child . bilateral_transforms if isinstance ( pks , ( tuple , list ) ) : return pks if DJANGO_111_PLUS : assert isinstance ( pks , Query ) and type ( pks ) . __name__ == 'SalesforceQuery' # # alternative solution: # return list(salesforce.backend.query.SalesforceQuerySet(pk.model, query=pk, using=pk._db)) sql , params = pks . get_compiler ( 'salesforce' ) . as_sql ( ) else : assert isinstance ( pks , salesforce . backend . query . SalesforceQuerySet ) return [ x . pk for x in pks ] if not sql : # a subquery is necessary in this case where_sql , params = where . as_sql ( query . get_compiler ( 'salesforce' ) , self . db . connection ) sql = "SELECT Id FROM {} WHERE {}" . format ( query . model . _meta . db_table , where_sql ) with self . db . cursor ( ) as cur : cur . execute ( sql , params ) assert len ( cur . description ) == 1 and cur . description [ 0 ] [ 0 ] == 'Id' return [ x [ 0 ] for x in cur ]
Prepare primary keys for update and delete queries
461
9
239,805
def versions_request ( self ) : ret = self . handle_api_exceptions ( 'GET' , '' , api_ver = '' ) return [ str_dict ( x ) for x in ret . json ( ) ]
List Available REST API Versions
48
6
239,806
def fix_international ( text ) : class SmartInternational ( str ) : def __new__ ( cls , text ) : return str . __new__ ( cls , text ) def endswith ( self , string ) : return super ( SmartInternational , self ) . endswith ( str ( string ) ) if PY3 : return text out = [ ] last = 0 for match in re . finditer ( r'(?<=[^\\])(?:\\x[0-9a-f]{2}|\\u[0-9a-f]{4})' , text ) : start , end , group = match . start ( ) , match . end ( ) , match . group ( ) out . append ( text [ last : start ] ) c = group . decode ( 'unicode_escape' ) out . append ( c if ord ( c ) > 160 and ord ( c ) != 173 else group ) last = end out . append ( text [ last : ] ) return SmartInternational ( '' . join ( out ) . encode ( 'utf-8' ) )
Fix excaped international characters back to utf - 8
233
11
239,807
def get_meta ( self , table_name , constraints = None , column_to_field_name = None , is_view = False , is_partition = None ) : # pylint:disable=arguments-differ,too-many-arguments,unused-argument meta = [ " class Meta(models.Model.Meta):" , " db_table = '%s'" % table_name ] if self . connection . vendor == 'salesforce' : for line in self . connection . introspection . get_additional_meta ( table_name ) : meta . append ( " " + line ) meta . append ( "" ) return meta
Return a sequence comprising the lines of code necessary to construct the inner Meta class for the model corresponding to the given database table name .
144
26
239,808
def relative_path ( path ) : return os . path . join ( os . path . dirname ( __file__ ) , path )
Return the given path relative to this file .
29
9
239,809
def get_tagged_version ( ) : with open ( relative_path ( 'salesforce/__init__.py' ) , 'r' ) as fd : version = re . search ( r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]' , fd . read ( ) , re . MULTILINE ) . group ( 1 ) return version
Determine the current version of this package .
91
10
239,810
def dynamic_start ( self , access_token , instance_url = None , * * kw ) : self . dynamic = { 'access_token' : str ( access_token ) , 'instance_url' : str ( instance_url ) } self . dynamic . update ( kw )
Set the access token dynamically according to the current user .
63
11
239,811
def mark_quoted_strings ( sql ) : # pattern of a string parameter (pm), a char escaped by backslash (bs) # out_pattern: characters valid in SOQL pm_pattern = re . compile ( r"'[^\\']*(?:\\[\\'][^\\']*)*'" ) bs_pattern = re . compile ( r"\\([\\'])" ) out_pattern = re . compile ( r"^(?:[-!()*+,.:<=>\w\s|%s])*$" ) missing_apostrophe = "invalid character in SOQL or a missing apostrophe" start = 0 out = [ ] params = [ ] for match in pm_pattern . finditer ( sql ) : out . append ( sql [ start : match . start ( ) ] ) assert out_pattern . match ( sql [ start : match . start ( ) ] ) , missing_apostrophe params . append ( bs_pattern . sub ( '\\1' , sql [ match . start ( ) + 1 : match . end ( ) - 1 ] ) ) start = match . end ( ) out . append ( sql [ start : ] ) assert out_pattern . match ( sql [ start : ] ) , missing_apostrophe return '@' . join ( out ) , params
Mark all quoted strings in the SOQL by
286
9
239,812
def subst_quoted_strings ( sql , params ) : parts = sql . split ( '@' ) params_dont_match = "number of parameters doesn' match the transformed query" assert len ( parts ) == len ( params ) + 1 , params_dont_match # would be internal error out = [ ] for i , param in enumerate ( params ) : out . append ( parts [ i ] ) out . append ( "'%s'" % param . replace ( '\\' , '\\\\' ) . replace ( "\'" , "\\\'" ) ) out . append ( parts [ - 1 ] ) return '' . join ( out )
Reverse operation to mark_quoted_strings - substitutes
139
13
239,813
def find_closing_parenthesis ( sql , startpos ) : pattern = re . compile ( r'[()]' ) level = 0 opening = [ ] for match in pattern . finditer ( sql , startpos ) : par = match . group ( ) if par == '(' : if level == 0 : opening = match . start ( ) level += 1 if par == ')' : assert level > 0 , "missing '(' before ')'" level -= 1 if level == 0 : closing = match . end ( ) return opening , closing
Find the pair of opening and closing parentheses .
114
9
239,814
def split_subquery ( sql ) : sql , params = mark_quoted_strings ( sql ) sql = simplify_expression ( sql ) _ = params # NOQA start = 0 out = [ ] subqueries = [ ] pattern = re . compile ( r'\(SELECT\b' , re . I ) match = pattern . search ( sql , start ) while match : out . append ( sql [ start : match . start ( ) + 1 ] + '&' ) start , pos = find_closing_parenthesis ( sql , match . start ( ) ) start , pos = start + 1 , pos - 1 subqueries . append ( split_subquery ( sql [ start : pos ] ) ) start = pos match = pattern . search ( sql , start ) out . append ( sql [ start : len ( sql ) ] ) return '' . join ( out ) , subqueries
Split on subqueries and replace them by & .
191
11
239,815
def simplify_expression ( txt ) : minimal = re . sub ( r'\s' , ' ' , re . sub ( r'\s(?=\W)' , '' , re . sub ( r'(?<=\W)\s' , '' , txt . strip ( ) ) ) ) # add space before some "(" and after some ")" return re . sub ( r'\)(?=\w)' , ') ' , re . sub ( r'(,|\b(?:{}))\(' . format ( '|' . join ( RESERVED_WORDS ) ) , '\\1 (' , minimal ) )
Remove all unecessary whitespace and some very usual space
144
13
239,816
def _make_flat ( self , row_dict , path , subroots ) : # can get a cursor parameter, if introspection should be possible on the fly out = { } for k , v in row_dict . items ( ) : klc = k . lower ( ) # "key lower case" if ( not ( isinstance ( v , dict ) and 'attributes' in v ) or ( 'done' in v and 'records' in v and 'totalSize' in v ) ) : # : if klc not in subroots : out [ klc ] = v else : strpath = '.' . join ( path + ( klc , ) ) + '.' strip_pos = len ( strpath ) - len ( klc + '.' ) for alias in self . aliases : if alias . lower ( ) . startswith ( strpath ) : out [ alias . lower ( ) [ strip_pos : ] ] = None # empty outer join field names else : new_subroots = subroots [ klc ] if k != 'attributes' else { } for sub_k , sub_v in self . _make_flat ( v , path + ( klc , ) , new_subroots ) . items ( ) : out [ k . lower ( ) + '.' + sub_k ] = sub_v return out
Replace the nested dict objects by a flat dict with keys object . object . name .
288
18
239,817
def parse_rest_response ( self , records , rowcount , row_type = list ) : if self . is_plain_count : # result of "SELECT COUNT() FROM ... WHERE ..." assert list ( records ) == [ ] yield rowcount # originally [resp.json()['totalSize']] else : while True : for row_deep in records : assert self . is_aggregation == ( row_deep [ 'attributes' ] [ 'type' ] == 'AggregateResult' ) row_flat = self . _make_flat ( row_deep , path = ( ) , subroots = self . subroots ) # TODO Will be the expression "or x['done']" really correct also for long subrequests? assert all ( not isinstance ( x , dict ) or x [ 'done' ] for x in row_flat ) if issubclass ( row_type , dict ) : yield { k : fix_data_type ( row_flat [ k . lower ( ) ] ) for k in self . aliases } else : yield [ fix_data_type ( row_flat [ k . lower ( ) ] ) for k in self . aliases ] # if not resp['done']: # if not cursor: # raise ProgrammingError("Must get a cursor") # resp = cursor.query_more(resp['nextRecordsUrl']).json() # else: # break break
Parse the REST API response to DB API cursor flat response
299
12
239,818
def make_dynamic_fields ( pattern_module , dynamic_field_patterns , attrs ) : # pylint:disable=invalid-name,too-many-branches,too-many-locals import re attr_meta = attrs [ 'Meta' ] db_table = getattr ( attr_meta , 'db_table' , None ) if not db_table : raise RuntimeError ( 'The "db_table" must be set in Meta if "dynamic_field_patterns" is used.' ) is_custom_model = getattr ( attr_meta , 'custom' , False ) patterns = [ ] for pat in dynamic_field_patterns : enabled = True if pat . startswith ( '-' ) : enabled = False pat = pat [ 1 : ] patterns . append ( ( enabled , re . compile ( r'^(?:{})$' . format ( pat ) , re . I ) ) ) used_columns = [ ] for name , attr in attrs . items ( ) : if isinstance ( attr , SfField ) : field = attr if field . sf_custom is None and is_custom_model : field . sf_custom = True if not field . name : field . name = name attname , column = field . get_attname_column ( ) # pylint:disable=unused-variable used_columns . append ( column ) if not pattern_module : raise RuntimeError ( "a pattern_module is required for dynamic fields." ) for name , obj in vars ( pattern_module ) . items ( ) : if not name . startswith ( '_' ) and isclass ( obj ) and issubclass ( obj , ModelTemplate ) : default_table = obj . __name__ if getattr ( getattr ( obj , 'Meta' , None ) , 'db_table' , default_table ) == db_table : cls = obj break else : # not found db_table model, but decide between warning or exception if any ( not x . startswith ( '__' ) for x in dir ( pattern_module ) ) : raise RuntimeError ( "No Model for table '%s' found in the module '%s'" % ( db_table , pattern_module . __name__ ) ) warnings . warn ( "The module '%s' is empty. (It is OK if you are " "rewriting new Models by pipe from inspectdb command.)" % pattern_module . __name__ ) return lazy_fields = [ ( name , obj ) for name , obj in vars ( cls ) . items ( ) if isinstance ( obj , LazyField ) and issubclass ( obj . klass , SfField ) ] for name , obj in sorted ( lazy_fields , key = lambda name_obj : name_obj [ 1 ] . counter ) : for enabled , pat in patterns : if pat . match ( name ) : break else : enabled = False if enabled : if issubclass ( obj . klass , ForeignKey ) : to = obj . kw [ 'to' ] if isclass ( to ) and issubclass ( to , ModelTemplate ) : obj . kw [ 'to' ] = to . __name__ field = obj . create ( ) attrs [ name ] = field assert pattern_module
Add some Salesforce fields from a pattern_module models . py
727
13
239,819
def prepare_exception ( obj , messages = None , response = None , verbs = None ) : # pylint:disable=too-many-branches verbs = set ( verbs or [ ] ) known_options = [ 'method+url' ] if messages is None : messages = [ ] if isinstance ( messages , ( text_type , str ) ) : messages = [ messages ] assert isinstance ( messages , list ) assert not verbs . difference ( known_options ) data = None # a boolean from a failed response is False, though error messages in json should be decoded if response is not None and 'json' in response . headers . get ( 'Content-Type' , '' ) and response . text : data = json . loads ( response . text ) if data : data_0 = data [ 0 ] if 'errorCode' in data_0 : subreq = '' if 'referenceId' in data_0 : subreq = " (in subrequest {!r})" . format ( data_0 [ 'referenceId' ] ) messages = [ data_0 [ 'errorCode' ] + subreq ] + messages if data_0 . get ( 'fields' ) : messages . append ( 'FIELDS: {}' . format ( data_0 [ 'fields' ] ) ) if len ( data ) > 1 : messages . append ( 'MORE_ERRORS ({})' . format ( len ( data ) ) ) if 'method+url' in verbs : method = response . request . method url = response . request . url if len ( url ) > 100 : url = url [ : 100 ] + '...' data_info = '' if ( method in ( 'POST' , 'PATCH' ) and ( not response . request . body or 'json' not in response . request . headers [ 'content-type' ] ) ) : data_info = ' (without json request data)' messages . append ( 'in {} "{}"{}' . format ( method , url , data_info ) ) separ = '\n ' if not PY3 : messages = [ x if isinstance ( x , str ) else x . encode ( 'utf-8' ) for x in messages ] messages = [ x . replace ( '\n' , separ ) for x in messages ] message = separ . join ( messages ) if obj : obj . data = data obj . response = response obj . verbs = verbs return message
Prepare excetion params or only an exception message
518
11
239,820
def warn_sf ( messages , response , verbs = None , klass = SalesforceWarning ) : warnings . warn ( klass ( messages , response , verbs ) , stacklevel = 2 )
Issue a warning SalesforceWarning with message combined from message and data from SFDC response
40
17
239,821
def get_from_clause ( self ) : self . query_topology ( ) root_table = self . soql_trans [ self . root_alias ] return [ root_table ] , [ ]
Return the FROM clause converted the SOQL dialect .
45
10
239,822
def quote_name_unless_alias ( self , name ) : r = self . connection . ops . quote_name ( name ) self . quote_cache [ name ] = r return r
A wrapper around connection . ops . quote_name that doesn t quote aliases for table names . Mostly used during the ORDER BY clause .
40
27
239,823
def get_soap_client ( db_alias , client_class = None ) : if not beatbox : raise InterfaceError ( "To use SOAP API, you'll need to install the Beatbox package." ) if client_class is None : client_class = beatbox . PythonClient soap_client = client_class ( ) # authenticate connection = connections [ db_alias ] # verify the authenticated connection, because Beatbox can not refresh the token cursor = connection . cursor ( ) cursor . urls_request ( ) auth_info = connections [ db_alias ] . sf_session . auth access_token = auth_info . get_auth ( ) [ 'access_token' ] assert access_token [ 15 ] == '!' org_id = access_token [ : 15 ] url = '/services/Soap/u/{version}/{org_id}' . format ( version = salesforce . API_VERSION , org_id = org_id ) soap_client . useSession ( access_token , auth_info . instance_url + url ) return soap_client
Create the SOAP client for the current user logged in the db_alias
234
15
239,824
def signalize_extensions ( ) : warnings . warn ( "DB-API extension cursor.rownumber used" , SalesforceWarning ) warnings . warn ( "DB-API extension connection.<exception> used" , SalesforceWarning ) # TODO warnings . warn ( "DB-API extension cursor.connection used" , SalesforceWarning ) # not implemented DB-API extension cursor.scroll(, SalesforceWarning) warnings . warn ( "DB-API extension cursor.messages used" , SalesforceWarning ) warnings . warn ( "DB-API extension connection.messages used" , SalesforceWarning ) warnings . warn ( "DB-API extension cursor.next(, SalesforceWarning) used" ) warnings . warn ( "DB-API extension cursor.__iter__(, SalesforceWarning) used" ) warnings . warn ( "DB-API extension cursor.lastrowid used" , SalesforceWarning ) warnings . warn ( "DB-API extension .errorhandler used" , SalesforceWarning )
DB API 2 . 0 extension are reported by warnings at run - time .
212
15
239,825
def arg_to_soql ( arg ) : conversion = sql_conversions . get ( type ( arg ) ) if conversion : return conversion ( arg ) for type_ in subclass_conversions : if isinstance ( arg , type_ ) : return sql_conversions [ type_ ] ( arg ) return sql_conversions [ str ] ( arg )
Perform necessary SOQL quoting on the arg .
75
10
239,826
def arg_to_json ( arg ) : conversion = json_conversions . get ( type ( arg ) ) if conversion : return conversion ( arg ) for type_ in subclass_conversions : if isinstance ( arg , type_ ) : return json_conversions [ type_ ] ( arg ) return json_conversions [ str ] ( arg )
Perform necessary JSON conversion on the arg .
74
9
239,827
def merge_dict ( dict_1 , * other , * * kw ) : tmp = dict_1 . copy ( ) for x in other : tmp . update ( x ) tmp . update ( kw ) return tmp
Merge two or more dict including kw into result dict .
47
13
239,828
def make_session ( self ) : with connect_lock : if self . _sf_session is None : sf_session = requests . Session ( ) # TODO configurable class Salesforce***Auth sf_session . auth = SalesforcePasswordAuth ( db_alias = self . alias , settings_dict = self . settings_dict ) sf_instance_url = sf_session . auth . instance_url sf_requests_adapter = HTTPAdapter ( max_retries = get_max_retries ( ) ) sf_session . mount ( sf_instance_url , sf_requests_adapter ) # Additional headers work, but the same are added automatically by "requests' package. # sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO self . _sf_session = sf_session
Authenticate and get the name of assigned SFDC data server
203
12
239,829
def rest_api_url ( self , * url_parts , * * kwargs ) : url_parts = list ( url_parts ) if url_parts and re . match ( r'^(?:https|mock)://' , url_parts [ 0 ] ) : return '/' . join ( url_parts ) relative = kwargs . pop ( 'relative' , False ) api_ver = kwargs . pop ( 'api_ver' , None ) api_ver = api_ver if api_ver is not None else self . api_ver assert not kwargs if not relative : base = [ self . sf_session . auth . instance_url ] else : base = [ '' ] if url_parts and url_parts [ 0 ] . startswith ( '/' ) : prefix = [ ] url_parts [ 0 ] = url_parts [ 0 ] [ 1 : ] else : prefix = [ 'services/data' ] if api_ver : prefix += [ 'v{api_ver}' . format ( api_ver = api_ver ) ] return '/' . join ( base + prefix + url_parts )
Join the URL of REST_API
249
7
239,830
def raise_errors ( self , response ) : # Errors: 400, 403 permissions or REQUEST_LIMIT_EXCEEDED, 404, 405, 415, 500) # TODO extract a case ID for Salesforce support from code 500 messages # TODO disabled 'debug_verbs' temporarily, after writing better default messages verb = self . debug_verbs # NOQA pylint:disable=unused-variable method = response . request . method data = None is_json = 'json' in response . headers . get ( 'Content-Type' , '' ) and response . text if is_json : data = json . loads ( response . text ) if not ( isinstance ( data , list ) and data and 'errorCode' in data [ 0 ] ) : messages = [ response . text ] if is_json else [ ] raise OperationalError ( [ 'HTTP error "%d %s":' % ( response . status_code , response . reason ) ] + messages , response , [ 'method+url' ] ) # Other Errors are reported in the json body err_msg = data [ 0 ] [ 'message' ] err_code = data [ 0 ] [ 'errorCode' ] if response . status_code == 404 : # ResourceNotFound if method == 'DELETE' and err_code in ( 'ENTITY_IS_DELETED' , 'INVALID_CROSS_REFERENCE_KEY' ) : # It was a delete command and the object is in trash bin or it is # completely deleted or it could be a valid Id for this sobject type. # Then we accept it with a warning, similarly to delete by a classic database query: # DELETE FROM xy WHERE id = 'something_deleted_yet' warn_sf ( [ err_msg , "Object is deleted before delete or update" ] , response , [ 'method+url' ] ) # TODO add a warning and add it to messages return None if err_code in ( 'NOT_FOUND' , # 404 e.g. invalid object type in url path or url query?q=select ... 'METHOD_NOT_ALLOWED' , # 405 e.g. patch instead of post ) : # both need to report the url raise SalesforceError ( [ err_msg ] , response , [ 'method+url' ] ) # it is good e.g for these errorCode: ('INVALID_FIELD', 'MALFORMED_QUERY', 'INVALID_FIELD_FOR_INSERT_UPDATE') raise SalesforceError ( [ err_msg ] , response )
The innermost part - report errors by exceptions
558
9
239,831
def composite_request ( self , data ) : post_data = { 'compositeRequest' : data , 'allOrNone' : True } resp = self . handle_api_exceptions ( 'POST' , 'composite' , json = post_data ) comp_resp = resp . json ( ) [ 'compositeResponse' ] is_ok = all ( x [ 'httpStatusCode' ] < 400 for x in comp_resp ) if is_ok : return resp # construct an equivalent of individual bad request/response bad_responses = { i : x for i , x in enumerate ( comp_resp ) if not ( x [ 'httpStatusCode' ] == 400 and x [ 'body' ] [ 0 ] [ 'errorCode' ] in ( 'PROCESSING_HALTED' , 'ALL_OR_NONE_OPERATION_ROLLED_BACK' ) ) } if len ( bad_responses ) != 1 : raise InternalError ( "Too much or too many subrequests with an individual error" ) bad_i , bad_response = bad_responses . popitem ( ) bad_request = data [ bad_i ] bad_req = FakeReq ( bad_request [ 'method' ] , bad_request [ 'url' ] , bad_request . get ( 'body' ) , bad_request . get ( 'httpHeaders' , { } ) , context = { bad_i : bad_request [ 'referenceId' ] } ) body = [ merge_dict ( x , referenceId = bad_response [ 'referenceId' ] ) for x in bad_response [ 'body' ] ] bad_resp_headers = bad_response [ 'httpHeaders' ] . copy ( ) bad_resp_headers . update ( { 'Content-Type' : resp . headers [ 'Content-Type' ] } ) bad_resp = FakeResp ( bad_response [ 'httpStatusCode' ] , json . dumps ( body ) , bad_req , bad_resp_headers ) self . raise_errors ( bad_resp )
Call a composite request with subrequests error handling
453
10
239,832
def align_after ( self , offset ) : f = self . reader if offset <= 0 : # FIXME what is a negative offset?? f . seek ( 0 ) self . _block_count = 0 self . _read_header ( ) # FIXME we can't extimate how big it is... return sm = self . sync_marker sml = len ( sm ) pos = offset while pos < self . file_length - sml : f . seek ( pos ) data = f . read ( self . FORWARD_WINDOW_SIZE ) sync_offset = data . find ( sm ) if sync_offset > - 1 : f . seek ( pos + sync_offset ) self . _block_count = 0 return pos += len ( data )
Search for a sync point after offset and align just after that .
160
13
239,833
def get_progress ( self ) : pos = self . reader . reader . tell ( ) return min ( ( pos - self . region_start ) / float ( self . region_end - self . region_start ) , 1.0 )
Give a rough estimate of the progress done .
51
9
239,834
def is_exe ( fpath ) : return os . path . isfile ( fpath ) and os . access ( fpath , os . X_OK )
Path references an executable file .
34
6
239,835
def is_readable ( fpath ) : return os . path . isfile ( fpath ) and os . access ( fpath , os . R_OK )
Path references a readable file .
34
6
239,836
def is_local ( self , hadoop_conf = None , hadoop_home = None ) : conf = self . hadoop_params ( hadoop_conf , hadoop_home ) keys = ( 'mapreduce.framework.name' , 'mapreduce.jobtracker.address' , 'mapred.job.tracker' ) for k in keys : if conf . get ( k , 'local' ) . lower ( ) != 'local' : return False return True
\ Is Hadoop configured to run in local mode?
105
12
239,837
def abspath ( hdfs_path , user = None , local = False ) : if local : return 'file:%s' % os . path . abspath ( hdfs_path ) if isfull ( hdfs_path ) : return hdfs_path hostname , port , path = split ( hdfs_path , user = user ) if hostname : fs = hdfs_fs . hdfs ( hostname , port ) apath = join ( "hdfs://%s:%s" % ( fs . host , fs . port ) , path ) fs . close ( ) else : apath = "file:%s" % os . path . abspath ( path ) return apath
Return an absolute path for hdfs_path .
158
11
239,838
def dirname ( hdfs_path ) : scheme , netloc , path = parse ( hdfs_path ) return unparse ( scheme , netloc , os . path . dirname ( path ) )
Return the directory component of hdfs_path .
45
11
239,839
def expanduser ( path ) : if hdfs_fs . default_is_local ( ) : return os . path . expanduser ( path ) m = re . match ( r'^~([^/]*)' , path ) if m is None : return path user = m . groups ( ) [ 0 ] or common . DEFAULT_USER return '/user/%s%s' % ( user , path [ m . end ( 1 ) : ] )
Replace initial ~ or ~user with the user s home directory .
99
14
239,840
def normpath ( path ) : scheme , netloc , path_ = parse ( path ) return unparse ( scheme , netloc , os . path . normpath ( path_ ) )
Normalize path collapsing redundant separators and up - level refs .
39
14
239,841
def realpath ( path ) : scheme , netloc , path_ = parse ( path ) if scheme == 'file' or hdfs_fs . default_is_local ( ) : return unparse ( scheme , netloc , os . path . realpath ( path_ ) ) return path
Return path with symlinks resolved .
62
7
239,842
def default_is_local ( hadoop_conf = None , hadoop_home = None ) : params = pydoop . hadoop_params ( hadoop_conf , hadoop_home ) for k in 'fs.defaultFS' , 'fs.default.name' : if not params . get ( k , 'file:' ) . startswith ( 'file:' ) : return False return True
\ Is Hadoop configured to use the local file system?
87
13
239,843
def open_file ( self , path , mode = "r" , buff_size = 0 , replication = 0 , blocksize = 0 , encoding = None , errors = None ) : _complain_ifclosed ( self . closed ) if not path : raise ValueError ( "Empty path" ) m , is_text = common . parse_mode ( mode ) if not self . host : fret = local_file ( self , path , m ) if is_text : cls = io . BufferedReader if m == "r" else io . BufferedWriter fret = TextIOWrapper ( cls ( fret ) , encoding , errors ) return fret f = self . fs . open_file ( path , m , buff_size , replication , blocksize ) cls = FileIO if is_text else hdfs_file fret = cls ( f , self , mode ) return fret
Open an HDFS file .
191
6
239,844
def capacity ( self ) : _complain_ifclosed ( self . closed ) if not self . __status . host : raise RuntimeError ( 'Capacity is not defined for a local fs' ) return self . fs . get_capacity ( )
Return the raw capacity of the filesystem .
52
8
239,845
def copy ( self , from_path , to_hdfs , to_path ) : _complain_ifclosed ( self . closed ) if isinstance ( to_hdfs , self . __class__ ) : to_hdfs = to_hdfs . fs return self . fs . copy ( from_path , to_hdfs , to_path )
Copy file from one filesystem to another .
82
8
239,846
def delete ( self , path , recursive = True ) : _complain_ifclosed ( self . closed ) return self . fs . delete ( path , recursive )
Delete path .
34
3
239,847
def exists ( self , path ) : _complain_ifclosed ( self . closed ) return self . fs . exists ( path )
Check if a given path exists on the filesystem .
28
10
239,848
def get_path_info ( self , path ) : _complain_ifclosed ( self . closed ) return self . fs . get_path_info ( path )
Get information about path as a dict of properties .
36
10
239,849
def list_directory ( self , path ) : _complain_ifclosed ( self . closed ) return self . fs . list_directory ( path )
r Get list of files and directories for path \ .
32
11
239,850
def rename ( self , from_path , to_path ) : _complain_ifclosed ( self . closed ) return self . fs . rename ( from_path , to_path )
Rename file .
40
4
239,851
def set_replication ( self , path , replication ) : _complain_ifclosed ( self . closed ) return self . fs . set_replication ( path , replication )
r Set the replication of path to replication \ .
38
10
239,852
def set_working_directory ( self , path ) : _complain_ifclosed ( self . closed ) return self . fs . set_working_directory ( path )
r Set the working directory to path \ . All relative paths will be resolved relative to it .
36
19
239,853
def working_directory ( self ) : _complain_ifclosed ( self . closed ) wd = self . fs . get_working_directory ( ) return wd
Get the current working directory .
36
6
239,854
def __compute_mode_from_string ( self , path , mode_string ) : Char_to_perm_byte = { 'r' : 4 , 'w' : 2 , 'x' : 1 } Fields = ( ( 'u' , 6 ) , ( 'g' , 3 ) , ( 'o' , 0 ) ) # -- m = re . match ( r"\s*([ugoa]*)([-+=])([rwx]*)\s*" , mode_string ) if not m : raise ValueError ( "Invalid mode string %s" % mode_string ) who = m . group ( 1 ) what_op = m . group ( 2 ) which_perm = m . group ( 3 ) # -- old_mode = self . fs . get_path_info ( path ) [ 'permissions' ] # The mode to be applied by the operation, repeated three # times in a list, for user, group, and other respectively. # Initially these are identical, but some may change if we # have to respect the umask setting. op_perm = [ reduce ( ops . ior , [ Char_to_perm_byte [ c ] for c in which_perm ] ) ] * 3 if 'a' in who : who = 'ugo' elif who == '' : who = 'ugo' # erase the umask bits inverted_umask = ~ self . __get_umask ( ) for i , field in enumerate ( Fields ) : op_perm [ i ] &= ( inverted_umask >> field [ 1 ] ) & 0x7 # for each user, compute the permission bit and set it in the mode new_mode = 0 for i , tpl in enumerate ( Fields ) : field , shift = tpl # shift by the bits specified for the field; keep only the # 3 lowest bits old = ( old_mode >> shift ) & 0x7 if field in who : if what_op == '-' : new = old & ~ op_perm [ i ] elif what_op == '=' : new = op_perm [ i ] elif what_op == '+' : new = old | op_perm [ i ] else : raise RuntimeError ( "unexpected permission operation %s" % what_op ) else : # copy the previous permissions new = old new_mode |= new << shift return new_mode
Scan a unix - style mode string and apply it to path .
515
14
239,855
def utime ( self , path , mtime , atime ) : _complain_ifclosed ( self . closed ) return self . fs . utime ( path , int ( mtime ) , int ( atime ) )
Change file last access and modification times .
48
8
239,856
def rm_rf ( path , dry_run = False ) : log . info ( "removing %s" % path ) if dry_run : return try : if os . path . isdir ( path ) and not os . path . islink ( path ) : shutil . rmtree ( path ) else : os . remove ( path ) except OSError : pass
Remove a file or directory tree .
81
7
239,857
def __finalize_hdfs ( self , ext ) : java_home = jvm . get_java_home ( ) jvm_lib_path , _ = jvm . get_jvm_lib_path_and_name ( java_home ) ext . include_dirs = jvm . get_include_dirs ( ) + ext . include_dirs ext . libraries = jvm . get_libraries ( ) ext . library_dirs = [ os . path . join ( java_home , "Libraries" ) , jvm_lib_path ] ext . define_macros = jvm . get_macros ( ) ext . extra_link_args = [ '-Wl,-rpath,%s' % jvm_lib_path ] if self . __have_better_tls ( ) : ext . define_macros . append ( ( "HAVE_BETTER_TLS" , None ) ) try : # too many warnings in libhdfs self . compiler . compiler_so . remove ( "-Wsign-compare" ) except ( AttributeError , ValueError ) : pass
\ Adds a few bits that depend on the specific environment .
248
12
239,858
def run_tool_cmd ( tool , cmd , args = None , properties = None , hadoop_conf_dir = None , logger = None , keep_streams = True ) : if logger is None : logger = utils . NullLogger ( ) _args = [ tool ] if hadoop_conf_dir : _args . extend ( [ "--config" , hadoop_conf_dir ] ) _args . append ( cmd ) if properties : _args . extend ( _construct_property_args ( properties ) ) if args : if isinstance ( args , basestring ) : args = shlex . split ( args ) _merge_csv_args ( args ) gargs = _pop_generic_args ( args ) for seq in gargs , args : _args . extend ( map ( str , seq ) ) logger . debug ( 'final args: %r' , ( _args , ) ) if keep_streams : p = subprocess . Popen ( _args , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) error = "" stderr_iterator = iter ( p . stderr . readline , b"" ) for line in stderr_iterator : error += line logger . info ( "cmd stderr line: %s" , line . strip ( ) ) output , _ = p . communicate ( ) else : p = subprocess . Popen ( _args , stdout = None , stderr = None , bufsize = 1 ) ret = p . wait ( ) error = 'command exited with %d status' % ret if ret else '' output = '' if p . returncode : raise RunCmdError ( p . returncode , ' ' . join ( _args ) , error ) return output
Run a Hadoop command .
383
7
239,859
def get_task_trackers ( properties = None , hadoop_conf_dir = None , offline = False ) : if offline : if not hadoop_conf_dir : hadoop_conf_dir = pydoop . hadoop_conf ( ) slaves = os . path . join ( hadoop_conf_dir , "slaves" ) try : with open ( slaves ) as f : task_trackers = [ ( l . strip ( ) , 0 ) for l in f ] except IOError : task_trackers = [ ] else : # run JobClient directly (avoids "hadoop job" deprecation) stdout = run_class ( "org.apache.hadoop.mapred.JobClient" , [ "-list-active-trackers" ] , properties = properties , hadoop_conf_dir = hadoop_conf_dir , keep_streams = True ) task_trackers = [ ] for line in stdout . splitlines ( ) : if not line : continue line = line . split ( ":" ) task_trackers . append ( ( line [ 0 ] . split ( "_" ) [ 1 ] , int ( line [ - 1 ] ) ) ) return task_trackers
Get the list of task trackers in the Hadoop cluster .
264
14
239,860
def get_num_nodes ( properties = None , hadoop_conf_dir = None , offline = False ) : return len ( get_task_trackers ( properties , hadoop_conf_dir , offline ) )
Get the number of task trackers in the Hadoop cluster .
48
14
239,861
def dfs ( args = None , properties = None , hadoop_conf_dir = None ) : # run FsShell directly (avoids "hadoop dfs" deprecation) return run_class ( "org.apache.hadoop.fs.FsShell" , args , properties , hadoop_conf_dir = hadoop_conf_dir , keep_streams = True )
Run the Hadoop file system shell .
87
9
239,862
def run_pipes ( executable , input_path , output_path , more_args = None , properties = None , force_pydoop_submitter = False , hadoop_conf_dir = None , logger = None , keep_streams = False ) : if logger is None : logger = utils . NullLogger ( ) if not hdfs . path . exists ( executable ) : raise IOError ( "executable %s not found" % executable ) if not hdfs . path . exists ( input_path ) and not ( set ( input_path ) & GLOB_CHARS ) : raise IOError ( "input path %s not found" % input_path ) if properties is None : properties = { } properties . setdefault ( 'mapreduce.pipes.isjavarecordreader' , 'true' ) properties . setdefault ( 'mapreduce.pipes.isjavarecordwriter' , 'true' ) if force_pydoop_submitter : use_pydoop_submit = True else : use_pydoop_submit = False ver = pydoop . hadoop_version_info ( ) if ver . has_security ( ) : if ver . is_cdh_mrv2 ( ) and hdfs . default_is_local ( ) : raise RuntimeError ( "mrv2 on local fs not supported yet" ) use_pydoop_submit = hdfs . default_is_local ( ) args = [ "-program" , executable , "-input" , input_path , "-output" , output_path , ] if more_args is not None : args . extend ( more_args ) if use_pydoop_submit : submitter = "it.crs4.pydoop.pipes.Submitter" pydoop_jar = pydoop . jar_path ( ) args . extend ( ( "-libjars" , pydoop_jar ) ) return run_class ( submitter , args , properties , classpath = pydoop_jar , logger = logger , keep_streams = keep_streams ) else : return run_mapred_cmd ( "pipes" , args = args , properties = properties , hadoop_conf_dir = hadoop_conf_dir , logger = logger , keep_streams = keep_streams )
Run a pipes command .
514
5
239,863
def collect_output ( mr_out_dir , out_file = None ) : if out_file is None : output = [ ] for fn in iter_mr_out_files ( mr_out_dir ) : with hdfs . open ( fn , "rt" ) as f : output . append ( f . read ( ) ) return "" . join ( output ) else : block_size = 16777216 with open ( out_file , 'a' ) as o : for fn in iter_mr_out_files ( mr_out_dir ) : with hdfs . open ( fn ) as f : data = f . read ( block_size ) while len ( data ) > 0 : o . write ( data ) data = f . read ( block_size )
Return all mapreduce output in mr_out_dir .
171
14
239,864
def set_output ( self , output ) : self . output = output self . logger . info ( "assigning output to %s" , self . output )
Set the output path for the job . Optional if the runner has been instantiated with a prefix .
34
20
239,865
def set_exe ( self , pipes_code ) : if not self . output : raise RuntimeError ( "no output directory, can't create launcher" ) parent = hdfs . path . dirname ( hdfs . path . abspath ( self . output . rstrip ( "/" ) ) ) self . exe = hdfs . path . join ( parent , utils . make_random_str ( ) ) hdfs . dump ( pipes_code , self . exe )
Dump launcher code to the distributed file system .
106
10
239,866
def dump ( data , hdfs_path , * * kwargs ) : kwargs [ "mode" ] = "w" if isinstance ( data , bintype ) else "wt" with open ( hdfs_path , * * kwargs ) as fo : i = 0 bufsize = common . BUFSIZE while i < len ( data ) : fo . write ( data [ i : i + bufsize ] ) i += bufsize fo . fs . close ( )
\ Write data to hdfs_path .
106
10
239,867
def load ( hdfs_path , * * kwargs ) : m , _ = common . parse_mode ( kwargs . get ( "mode" , "r" ) ) if m != "r" : raise ValueError ( "opening mode must be readonly" ) with open ( hdfs_path , * * kwargs ) as fi : data = fi . read ( ) fi . fs . close ( ) return data
\ Read the content of hdfs_path and return it .
95
14
239,868
def cp ( src_hdfs_path , dest_hdfs_path , * * kwargs ) : src , dest = { } , { } try : for d , p in ( ( src , src_hdfs_path ) , ( dest , dest_hdfs_path ) ) : d [ "host" ] , d [ "port" ] , d [ "path" ] = path . split ( p ) d [ "fs" ] = hdfs ( d [ "host" ] , d [ "port" ] ) # --- does src exist? --- try : src [ "info" ] = src [ "fs" ] . get_path_info ( src [ "path" ] ) except IOError : raise IOError ( "no such file or directory: %r" % ( src [ "path" ] ) ) # --- src exists. Does dest exist? --- try : dest [ "info" ] = dest [ "fs" ] . get_path_info ( dest [ "path" ] ) except IOError : if src [ "info" ] [ "kind" ] == "file" : _cp_file ( src [ "fs" ] , src [ "path" ] , dest [ "fs" ] , dest [ "path" ] , * * kwargs ) return else : dest [ "fs" ] . create_directory ( dest [ "path" ] ) dest_hdfs_path = dest [ "fs" ] . get_path_info ( dest [ "path" ] ) [ "name" ] for item in src [ "fs" ] . list_directory ( src [ "path" ] ) : cp ( item [ "name" ] , dest_hdfs_path , * * kwargs ) return # --- dest exists. Is it a file? --- if dest [ "info" ] [ "kind" ] == "file" : raise IOError ( "%r already exists" % ( dest [ "path" ] ) ) # --- dest is a directory --- dest [ "path" ] = path . join ( dest [ "path" ] , path . basename ( src [ "path" ] ) ) if dest [ "fs" ] . exists ( dest [ "path" ] ) : raise IOError ( "%r already exists" % ( dest [ "path" ] ) ) if src [ "info" ] [ "kind" ] == "file" : _cp_file ( src [ "fs" ] , src [ "path" ] , dest [ "fs" ] , dest [ "path" ] , * * kwargs ) else : dest [ "fs" ] . create_directory ( dest [ "path" ] ) dest_hdfs_path = dest [ "fs" ] . get_path_info ( dest [ "path" ] ) [ "name" ] for item in src [ "fs" ] . list_directory ( src [ "path" ] ) : cp ( item [ "name" ] , dest_hdfs_path , * * kwargs ) finally : for d in src , dest : try : d [ "fs" ] . close ( ) except KeyError : pass
\ Copy the contents of src_hdfs_path to dest_hdfs_path .
685
21
239,869
def put ( src_path , dest_hdfs_path , * * kwargs ) : cp ( path . abspath ( src_path , local = True ) , dest_hdfs_path , * * kwargs )
\ Copy the contents of src_path to dest_hdfs_path .
52
17
239,870
def get ( src_hdfs_path , dest_path , * * kwargs ) : cp ( src_hdfs_path , path . abspath ( dest_path , local = True ) , * * kwargs )
\ Copy the contents of src_hdfs_path to dest_path .
52
17
239,871
def mkdir ( hdfs_path , user = None ) : host , port , path_ = path . split ( hdfs_path , user ) fs = hdfs ( host , port , user ) retval = fs . create_directory ( path_ ) fs . close ( ) return retval
Create a directory and its parents as needed .
66
9
239,872
def lsl ( hdfs_path , user = None , recursive = False ) : host , port , path_ = path . split ( hdfs_path , user ) fs = hdfs ( host , port , user ) if not recursive : dir_list = fs . list_directory ( path_ ) else : treewalk = fs . walk ( path_ ) top = next ( treewalk ) if top [ 'kind' ] == 'directory' : dir_list = list ( treewalk ) else : dir_list = [ top ] fs . close ( ) return dir_list
Return a list of dictionaries of file properties .
128
10
239,873
def ls ( hdfs_path , user = None , recursive = False ) : dir_list = lsl ( hdfs_path , user , recursive ) return [ d [ "name" ] for d in dir_list ]
Return a list of hdfs paths .
50
9
239,874
def move ( src , dest , user = None ) : src_host , src_port , src_path = path . split ( src , user ) dest_host , dest_port , dest_path = path . split ( dest , user ) src_fs = hdfs ( src_host , src_port , user ) dest_fs = hdfs ( dest_host , dest_port , user ) try : retval = src_fs . move ( src_path , dest_fs , dest_path ) return retval finally : src_fs . close ( ) dest_fs . close ( )
Move or rename src to dest .
130
7
239,875
def renames ( from_path , to_path , user = None ) : to_dir = path . dirname ( to_path ) if to_dir : mkdir ( to_dir , user = user ) rename ( from_path , to_path , user = user )
Rename from_path to to_path creating parents as needed .
60
14
239,876
def readline ( self ) : _complain_ifclosed ( self . closed ) line = self . f . readline ( ) if self . __encoding : return line . decode ( self . __encoding , self . __errors ) else : return line
Read and return a line of text .
55
8
239,877
def pread ( self , position , length ) : _complain_ifclosed ( self . closed ) if position > self . size : raise IOError ( "position cannot be past EOF" ) if length < 0 : length = self . size - position data = self . f . raw . pread ( position , length ) if self . __encoding : return data . decode ( self . __encoding , self . __errors ) else : return data
r Read length bytes of data from the file starting from position \ .
96
14
239,878
def read ( self , length = - 1 ) : _complain_ifclosed ( self . closed ) # NOTE: libhdfs read stops at block boundaries: it is *essential* # to ensure that we actually read the required number of bytes. if length < 0 : length = self . size chunks = [ ] while 1 : if length <= 0 : break c = self . f . read ( min ( self . buff_size , length ) ) if c == b"" : break chunks . append ( c ) length -= len ( c ) data = b"" . join ( chunks ) if self . __encoding : return data . decode ( self . __encoding , self . __errors ) else : return data
Read length bytes from the file . If length is negative or omitted read all data until EOF .
150
20
239,879
def seek ( self , position , whence = os . SEEK_SET ) : _complain_ifclosed ( self . closed ) return self . f . seek ( position , whence )
Seek to position in file .
39
7
239,880
def write ( self , data ) : _complain_ifclosed ( self . closed ) if self . __encoding : self . f . write ( data . encode ( self . __encoding , self . __errors ) ) return len ( data ) else : return self . f . write ( data )
Write data to the file .
64
6
239,881
def set_args ( self , args , unknown_args = None ) : if unknown_args is None : unknown_args = [ ] self . logger . setLevel ( getattr ( logging , args . log_level ) ) parent = hdfs . path . dirname ( hdfs . path . abspath ( args . output . rstrip ( "/" ) ) ) self . remote_wd = hdfs . path . join ( parent , utils . make_random_str ( prefix = "pydoop_submit_" ) ) self . remote_exe = hdfs . path . join ( self . remote_wd , str ( uuid . uuid4 ( ) ) ) self . properties [ JOB_NAME ] = args . job_name or 'pydoop' self . properties [ IS_JAVA_RR ] = ( 'false' if args . do_not_use_java_record_reader else 'true' ) self . properties [ IS_JAVA_RW ] = ( 'false' if args . do_not_use_java_record_writer else 'true' ) self . properties [ JOB_REDUCES ] = args . num_reducers if args . job_name : self . properties [ JOB_NAME ] = args . job_name self . properties . update ( args . job_conf or { } ) self . __set_files_to_cache ( args ) self . __set_archives_to_cache ( args ) self . requested_env = self . _env_arg_to_dict ( args . set_env or [ ] ) self . args = args self . unknown_args = unknown_args
Configure job based on the arguments provided .
364
9
239,882
def __warn_user_if_wd_maybe_unreadable ( self , abs_remote_path ) : host , port , path = hdfs . path . split ( abs_remote_path ) if host == '' and port == 0 : # local file system host_port = "file:///" else : # FIXME: this won't work with any scheme other than # hdfs:// (e.g., s3) host_port = "hdfs://%s:%s/" % ( host , port ) path_pieces = path . strip ( '/' ) . split ( os . path . sep ) fs = hdfs . hdfs ( host , port ) for i in range ( 0 , len ( path_pieces ) ) : part = os . path . join ( host_port , os . path . sep . join ( path_pieces [ 0 : i + 1 ] ) ) permissions = fs . get_path_info ( part ) [ 'permissions' ] if permissions & 0o111 != 0o111 : self . logger . warning ( ( "remote module %s may not be readable by the task " "tracker when initializing the distributed cache. " "Permissions on %s: %s" ) , abs_remote_path , part , oct ( permissions ) ) break
Check directories above the remote module and issue a warning if they are not traversable by all users .
281
20
239,883
def __setup_remote_paths ( self ) : self . logger . debug ( "remote_wd: %s" , self . remote_wd ) self . logger . debug ( "remote_exe: %s" , self . remote_exe ) self . logger . debug ( "remotes: %s" , self . files_to_upload ) if self . args . module : self . logger . debug ( 'Generated pipes_code:\n\n %s' , self . _generate_pipes_code ( ) ) if not self . args . pretend : hdfs . mkdir ( self . remote_wd ) hdfs . chmod ( self . remote_wd , "a+rx" ) self . logger . debug ( "created and chmod-ed: %s" , self . remote_wd ) pipes_code = self . _generate_pipes_code ( ) hdfs . dump ( pipes_code , self . remote_exe ) self . logger . debug ( "dumped pipes_code to: %s" , self . remote_exe ) hdfs . chmod ( self . remote_exe , "a+rx" ) self . __warn_user_if_wd_maybe_unreadable ( self . remote_wd ) for ( l , h , _ ) in self . files_to_upload : self . logger . debug ( "uploading: %s to %s" , l , h ) hdfs . cp ( l , h ) self . logger . debug ( "Created%sremote paths:" % ( ' [simulation] ' if self . args . pretend else ' ' ) )
Actually create the working directory and copy the module into it .
357
12
239,884
def docker_client ( ) : cert_path = os . environ . get ( 'DOCKER_CERT_PATH' , '' ) if cert_path == '' : cert_path = os . path . join ( os . environ . get ( 'HOME' , '' ) , '.docker' ) base_url = os . environ . get ( 'DOCKER_HOST' ) tls_config = None if os . environ . get ( 'DOCKER_TLS_VERIFY' , '' ) != '' : parts = base_url . split ( '://' , 1 ) base_url = '%s://%s' % ( 'https' , parts [ 1 ] ) client_cert = ( os . path . join ( cert_path , 'cert.pem' ) , os . path . join ( cert_path , 'key.pem' ) ) ca_cert = os . path . join ( cert_path , 'ca.pem' ) tls_config = tls . TLSConfig ( ssl_version = ssl . PROTOCOL_TLSv1 , verify = True , assert_hostname = False , client_cert = client_cert , ca_cert = ca_cert , ) timeout = int ( os . environ . get ( 'DOCKER_CLIENT_TIMEOUT' , 60 ) ) return Client ( base_url = base_url , tls = tls_config , version = '1.15' , timeout = timeout )
Returns a docker - py client configured using environment variables according to the same logic as the official Docker client .
329
21
239,885
def get_java_home ( ) : error = RuntimeError ( "java home not found, try setting JAVA_HOME" ) try : return os . environ [ "JAVA_HOME" ] except KeyError : wd = tempfile . mkdtemp ( prefix = 'pydoop_' ) jclass = "Temp" jsrc = os . path . join ( wd , "%s.java" % jclass ) with open ( jsrc , "w" ) as f : f . write ( JPROG . substitute ( classname = jclass ) ) try : subprocess . check_call ( [ "javac" , jsrc ] ) path = subprocess . check_output ( [ "java" , "-cp" , wd , jclass ] , universal_newlines = True ) except ( OSError , UnicodeDecodeError , subprocess . CalledProcessError ) : raise error finally : shutil . rmtree ( wd ) path = os . path . normpath ( path . strip ( ) ) if os . path . exists ( os . path . join ( path , "include" , "jni.h" ) ) : return path path = os . path . dirname ( path ) if os . path . exists ( os . path . join ( path , "include" , "jni.h" ) ) : return path raise error
\ Try getting JAVA_HOME from system properties .
299
12
239,886
def run_task ( factory , * * kwargs ) : context = TaskContext ( factory , * * kwargs ) pstats_dir = kwargs . get ( "pstats_dir" , os . getenv ( PSTATS_DIR ) ) if pstats_dir : import cProfile import tempfile import pydoop . hdfs as hdfs hdfs . mkdir ( pstats_dir ) fd , pstats_fn = tempfile . mkstemp ( suffix = ".pstats" ) os . close ( fd ) cProfile . runctx ( "_run(context, **kwargs)" , globals ( ) , locals ( ) , filename = pstats_fn ) pstats_fmt = kwargs . get ( "pstats_fmt" , os . getenv ( PSTATS_FMT , DEFAULT_PSTATS_FMT ) ) name = pstats_fmt % ( context . task_type , context . get_task_partition ( ) , os . path . basename ( pstats_fn ) ) hdfs . put ( pstats_fn , hdfs . path . join ( pstats_dir , name ) ) else : _run ( context , * * kwargs )
\ Run a MapReduce task .
274
8
239,887
def progress ( self ) : now = time ( ) if now - self . last_progress_t > 1 : self . last_progress_t = now if self . status : self . uplink . status ( self . status ) self . status = None self . __spill_counters ( ) self . uplink . progress ( self . progress_value ) self . uplink . flush ( )
\ Report progress to the Java side .
85
8
239,888
def merge_pages ( self , replacements ) : warnings . warn ( "merge_pages has been deprecated in favour of merge_templates" , category = DeprecationWarning , stacklevel = 2 ) self . merge_templates ( replacements , "page_break" )
Deprecated method .
58
4
239,889
def importpath ( path , error_text = None ) : result = None attrs = [ ] parts = path . split ( '.' ) exception = None while parts : try : result = __import__ ( '.' . join ( parts ) , { } , { } , [ '' ] ) except ImportError as e : if exception is None : exception = e attrs = parts [ - 1 : ] + attrs parts = parts [ : - 1 ] else : break for attr in attrs : try : result = getattr ( result , attr ) except ( AttributeError , ValueError ) as e : if error_text is not None : raise ImproperlyConfigured ( 'Error: %s can import "%s"' % ( error_text , path ) ) else : raise exception return result
Import value by specified path . Value can represent module class object attribute or method . If error_text is not None and import will raise ImproperlyConfigured with user friendly text .
170
37
239,890
def get_booking ( request ) : booking = None if request . user . is_authenticated ( ) : try : booking = Booking . objects . get ( user = request . user , booking_status__slug = 'inprogress' ) except Booking . DoesNotExist : # The user does not have any open bookings pass else : session = Session . objects . get ( session_key = request . session . session_key ) try : booking = Booking . objects . get ( session = session ) except Booking . DoesNotExist : # The user does not have any bookings in his session pass return booking
Returns the booking that is in progress for the current user or None
134
13
239,891
def persist_booking ( booking , user ) : if booking is not None : existing_bookings = Booking . objects . filter ( user = user , booking_status__slug = 'inprogress' ) . exclude ( pk = booking . pk ) existing_bookings . delete ( ) booking . session = None booking . user = user booking . save ( )
Ties an in - progress booking from a session to a user when the user logs in .
79
19
239,892
def compare_config ( self , target , init = True , indent_level = 0 ) : if init : fwd = self . full_path_fwd bwd = self . full_path_bwd else : fwd = self . rel_path_fwd bwd = self . rel_path_bwd indent = 4 * indent_level * ' ' if indent_level == 0 and self . vdom is not None : if self . vdom == 'global' : pre = 'conf global\n' else : pre = 'conf vdom\n edit %s\n' % self . vdom post = 'end' else : pre = '' post = '' pre_block = '%s%s' % ( indent , fwd ) post_block = '%s%s' % ( indent , bwd ) my_params = self . parameters . keys ( ) ot_params = target . parameters . keys ( ) text = '' for param in my_params : if param not in ot_params : text += ' %sunset %s\n' % ( indent , param ) else : # We ignore quotes when comparing values if str ( self . get_param ( param ) ) . replace ( '"' , '' ) != str ( target . get_param ( param ) ) . replace ( '"' , '' ) : text += ' %sset %s %s\n' % ( indent , param , target . get_param ( param ) ) for param in ot_params : if param not in my_params : text += ' %sset %s %s\n' % ( indent , param , target . get_param ( param ) ) my_blocks = self . sub_blocks . keys ( ) ot_blocks = target . sub_blocks . keys ( ) for block_name in my_blocks : if block_name not in ot_blocks : text += " %sdelete %s\n" % ( indent , block_name ) else : text += self [ block_name ] . compare_config ( target [ block_name ] , False , indent_level + 1 ) for block_name in ot_blocks : if block_name not in my_blocks : text += target [ block_name ] . to_text ( True , indent_level + 1 , True ) if text == '' : return '' else : return '%s%s%s%s%s' % ( pre , pre_block , text , post_block , post )
This method will return all the necessary commands to get from the config we are in to the target config .
532
21
239,893
def to_text ( self , relative = False , indent_level = 0 , clean_empty_block = False ) : if relative : fwd = self . rel_path_fwd bwd = self . rel_path_bwd else : fwd = self . full_path_fwd bwd = self . full_path_bwd indent = 4 * indent_level * ' ' pre = '%s%s' % ( indent , fwd ) post = '%s%s' % ( indent , bwd ) text = '' for param , value in self . iterparams ( ) : text += ' %sset %s %s\n' % ( indent , param , value ) for key , block in self . iterblocks ( ) : text += block . to_text ( True , indent_level + 1 ) if len ( text ) > 0 or not clean_empty_block : text = '%s%s%s' % ( pre , text , post ) return text
This method returns the object model in text format . You should be able to copy&paste this text into any device running a supported version of FortiOS .
214
31
239,894
def open ( self ) : logger . debug ( 'Connecting to device %s, vdom %s' % ( self . hostname , self . vdom ) ) self . ssh = paramiko . SSHClient ( ) self . ssh . set_missing_host_key_policy ( paramiko . AutoAddPolicy ( ) ) cfg = { 'hostname' : self . hostname , 'timeout' : self . timeout , 'username' : self . username , 'password' : self . password , 'key_filename' : self . keyfile } if os . path . exists ( os . path . expanduser ( "~/.ssh/config" ) ) : ssh_config = paramiko . SSHConfig ( ) user_config_file = os . path . expanduser ( "~/.ssh/config" ) with io . open ( user_config_file , 'rt' , encoding = 'utf-8' ) as f : ssh_config . parse ( f ) host_conf = ssh_config . lookup ( self . hostname ) if host_conf : if 'proxycommand' in host_conf : cfg [ 'sock' ] = paramiko . ProxyCommand ( host_conf [ 'proxycommand' ] ) if 'user' in host_conf : cfg [ 'username' ] = host_conf [ 'user' ] if 'identityfile' in host_conf : cfg [ 'key_filename' ] = host_conf [ 'identityfile' ] if 'hostname' in host_conf : cfg [ 'hostname' ] = host_conf [ 'hostname' ] self . ssh . connect ( * * cfg )
Opens the ssh session with the device .
359
9
239,895
def _read_wrapper ( data ) : # Paramiko (strangely) in PY3 returns an int here. if isinstance ( data , int ) : data = chr ( data ) # Ensure unicode return py23_compat . text_type ( data )
Ensure unicode always returned on read .
59
9
239,896
def _parse_batch_lastlog ( last_log ) : regexp = re . compile ( '(-?[0-9]\d*):\W+(.*)' ) wrong_commands = list ( ) for line in last_log : result = regexp . match ( line ) if result is not None : status_code = result . group ( 1 ) command = result . group ( 2 ) if int ( status_code ) < 0 : wrong_commands . append ( ( status_code , command ) ) return wrong_commands
This static method will help reading the result of the commit command by command .
117
15
239,897
def _reload_config ( self , reload_original_config ) : # We don't want to reload the config under some circumstances if reload_original_config : self . original_config = self . running_config self . original_config . set_name ( 'original' ) paths = self . running_config . get_paths ( ) self . running_config = FortiConfig ( 'running' , vdom = self . vdom ) for path in paths : self . load_config ( path , empty_candidate = True )
This command will update the running config from the live device .
116
12
239,898
def generate_states ( self , initial_condition = None , with_noise = True , stateseq = None ) : from pybasicbayes . util . stats import sample_discrete # Generate from the prior and raise exception if unstable T , K , n = self . T , self . num_states , self . D_latent A = self . trans_matrix # Initialize discrete state sequence dss = - 1 * np . ones ( T , dtype = np . int32 ) if stateseq is None else stateseq . astype ( np . int32 ) assert dss . shape == ( T , ) gss = np . empty ( ( T , n ) , dtype = 'double' ) if initial_condition is None : if dss [ 0 ] == - 1 : dss [ 0 ] = sample_discrete ( self . pi_0 ) gss [ 0 ] = self . init_dynamics_distns [ dss [ 0 ] ] . rvs ( ) else : dss [ 0 ] = initial_condition [ 0 ] gss [ 0 ] = initial_condition [ 1 ] for t in range ( 1 , T ) : # Sample discrete state given previous continuous state if with_noise : # Sample discre=te state from recurrent transition matrix if dss [ t ] == - 1 : dss [ t ] = sample_discrete ( A [ dss [ t - 1 ] , : ] ) # Sample continuous state given current discrete state gss [ t ] = self . dynamics_distns [ dss [ t - 1 ] ] . rvs ( x = np . hstack ( ( gss [ t - 1 ] [ None , : ] , self . inputs [ t - 1 ] [ None , : ] ) ) , return_xy = False ) else : # Pick the most likely next discrete state and continuous state if dss [ t ] == - 1 : dss [ t ] = np . argmax ( A [ dss [ t - 1 ] , : ] ) gss [ t ] = self . dynamics_distns [ dss [ t - 1 ] ] . predict ( np . hstack ( ( gss [ t - 1 ] [ None , : ] , self . inputs [ t - 1 ] [ None , : ] ) ) ) assert np . all ( np . isfinite ( gss [ t ] ) ) , "SLDS appears to be unstable!" self . stateseq = dss self . gaussian_states = gss
Jointly sample the discrete and continuous states
536
9
239,899
def heldout_log_likelihood ( self , test_mask = None ) : if test_mask is None : # If a test mask is not supplied, use the negation of this object's mask if self . mask is None : return 0 else : test_mask = ~ self . mask xs = np . hstack ( ( self . gaussian_states , self . inputs ) ) if self . single_emission : return self . emission_distns [ 0 ] . log_likelihood ( ( xs , self . data ) , mask = test_mask ) . sum ( ) else : hll = 0 z = self . stateseq for idx , ed in enumerate ( self . emission_distns ) : hll += ed . log_likelihood ( ( xs [ z == idx ] , self . data [ z == idx ] ) , mask = test_mask [ z == idx ] ) . sum ( )
Compute the log likelihood of the masked data given the latent discrete and continuous states .
202
17