idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
224,000
def build_scope ( resource , method ) : if ResourceList in inspect . getmro ( resource ) and method == 'GET' : prefix = 'list' else : method_to_prefix = { 'GET' : 'get' , 'POST' : 'create' , 'PATCH' : 'update' , 'DELETE' : 'delete' } prefix = method_to_prefix [ method ] if ResourceRelationship in inspect . getmro ( resource ) : prefix = '_' . join ( [ prefix , 'relationship' ] ) return '_' . join ( [ prefix , resource . schema . opts . type_ ] )
Compute the name of the scope for oauth
140
10
224,001
def permission_manager ( self , permission_manager ) : self . check_permissions = permission_manager for resource in self . resource_registry : if getattr ( resource , 'disable_permission' , None ) is not True : for method in getattr ( resource , 'methods' , ( 'GET' , 'POST' , 'PATCH' , 'DELETE' ) ) : setattr ( resource , method . lower ( ) , self . has_permission ( ) ( getattr ( resource , method . lower ( ) ) ) )
Use permission manager to enable permission for API
119
8
224,002
def has_permission ( self , * args , * * kwargs ) : def wrapper ( view ) : if getattr ( view , '_has_permissions_decorator' , False ) is True : return view @ wraps ( view ) @ jsonapi_exception_formatter def decorated ( * view_args , * * view_kwargs ) : self . check_permissions ( view , view_args , view_kwargs , * args , * * kwargs ) return view ( * view_args , * * view_kwargs ) decorated . _has_permissions_decorator = True return decorated return wrapper
Decorator used to check permissions before to call resource manager method
138
13
224,003
def check_headers ( func ) : @ wraps ( func ) def wrapper ( * args , * * kwargs ) : if request . method in ( 'POST' , 'PATCH' ) : if 'Content-Type' in request . headers and 'application/vnd.api+json' in request . headers [ 'Content-Type' ] and request . headers [ 'Content-Type' ] != 'application/vnd.api+json' : error = json . dumps ( jsonapi_errors ( [ { 'source' : '' , 'detail' : "Content-Type header must be application/vnd.api+json" , 'title' : 'Invalid request header' , 'status' : '415' } ] ) , cls = JSONEncoder ) return make_response ( error , 415 , { 'Content-Type' : 'application/vnd.api+json' } ) if 'Accept' in request . headers : flag = False for accept in request . headers [ 'Accept' ] . split ( ',' ) : if accept . strip ( ) == 'application/vnd.api+json' : flag = False break if 'application/vnd.api+json' in accept and accept . strip ( ) != 'application/vnd.api+json' : flag = True if flag is True : error = json . dumps ( jsonapi_errors ( [ { 'source' : '' , 'detail' : ( 'Accept header must be application/vnd.api+json without' 'media type parameters' ) , 'title' : 'Invalid request header' , 'status' : '406' } ] ) , cls = JSONEncoder ) return make_response ( error , 406 , { 'Content-Type' : 'application/vnd.api+json' } ) return func ( * args , * * kwargs ) return wrapper
Check headers according to jsonapi reference
400
7
224,004
def check_method_requirements ( func ) : @ wraps ( func ) def wrapper ( * args , * * kwargs ) : error_message = "You must provide {error_field} in {cls} to get access to the default {method} method" error_data = { 'cls' : args [ 0 ] . __class__ . __name__ , 'method' : request . method . lower ( ) } if request . method != 'DELETE' : if not hasattr ( args [ 0 ] , 'schema' ) : error_data . update ( { 'error_field' : 'a schema class' } ) raise Exception ( error_message . format ( * * error_data ) ) return func ( * args , * * kwargs ) return wrapper
Check methods requirements
170
3
224,005
def create_object ( self , data , view_kwargs ) : self . before_create_object ( data , view_kwargs ) relationship_fields = get_relationships ( self . resource . schema , model_field = True ) nested_fields = get_nested_fields ( self . resource . schema , model_field = True ) join_fields = relationship_fields + nested_fields obj = self . model ( * * { key : value for ( key , value ) in data . items ( ) if key not in join_fields } ) self . apply_relationships ( data , obj ) self . apply_nested_fields ( data , obj ) self . session . add ( obj ) try : self . session . commit ( ) except JsonApiException as e : self . session . rollback ( ) raise e except Exception as e : self . session . rollback ( ) raise JsonApiException ( "Object creation error: " + str ( e ) , source = { 'pointer' : '/data' } ) self . after_create_object ( obj , data , view_kwargs ) return obj
Create an object through sqlalchemy
241
7
224,006
def get_object ( self , view_kwargs , qs = None ) : self . before_get_object ( view_kwargs ) id_field = getattr ( self , 'id_field' , inspect ( self . model ) . primary_key [ 0 ] . key ) try : filter_field = getattr ( self . model , id_field ) except Exception : raise Exception ( "{} has no attribute {}" . format ( self . model . __name__ , id_field ) ) url_field = getattr ( self , 'url_field' , 'id' ) filter_value = view_kwargs [ url_field ] query = self . retrieve_object_query ( view_kwargs , filter_field , filter_value ) if qs is not None : query = self . eagerload_includes ( query , qs ) try : obj = query . one ( ) except NoResultFound : obj = None self . after_get_object ( obj , view_kwargs ) return obj
Retrieve an object through sqlalchemy
218
8
224,007
def get_collection ( self , qs , view_kwargs ) : self . before_get_collection ( qs , view_kwargs ) query = self . query ( view_kwargs ) if qs . filters : query = self . filter_query ( query , qs . filters , self . model ) if qs . sorting : query = self . sort_query ( query , qs . sorting ) object_count = query . count ( ) if getattr ( self , 'eagerload_includes' , True ) : query = self . eagerload_includes ( query , qs ) query = self . paginate_query ( query , qs . pagination ) collection = query . all ( ) collection = self . after_get_collection ( collection , qs , view_kwargs ) return object_count , collection
Retrieve a collection of objects through sqlalchemy
179
10
224,008
def update_object ( self , obj , data , view_kwargs ) : if obj is None : url_field = getattr ( self , 'url_field' , 'id' ) filter_value = view_kwargs [ url_field ] raise ObjectNotFound ( '{}: {} not found' . format ( self . model . __name__ , filter_value ) , source = { 'parameter' : url_field } ) self . before_update_object ( obj , data , view_kwargs ) relationship_fields = get_relationships ( self . resource . schema , model_field = True ) nested_fields = get_nested_fields ( self . resource . schema , model_field = True ) join_fields = relationship_fields + nested_fields for key , value in data . items ( ) : if hasattr ( obj , key ) and key not in join_fields : setattr ( obj , key , value ) self . apply_relationships ( data , obj ) self . apply_nested_fields ( data , obj ) try : self . session . commit ( ) except JsonApiException as e : self . session . rollback ( ) raise e except Exception as e : self . session . rollback ( ) raise JsonApiException ( "Update object error: " + str ( e ) , source = { 'pointer' : '/data' } ) self . after_update_object ( obj , data , view_kwargs )
Update an object through sqlalchemy
316
7
224,009
def delete_object ( self , obj , view_kwargs ) : if obj is None : url_field = getattr ( self , 'url_field' , 'id' ) filter_value = view_kwargs [ url_field ] raise ObjectNotFound ( '{}: {} not found' . format ( self . model . __name__ , filter_value ) , source = { 'parameter' : url_field } ) self . before_delete_object ( obj , view_kwargs ) self . session . delete ( obj ) try : self . session . commit ( ) except JsonApiException as e : self . session . rollback ( ) raise e except Exception as e : self . session . rollback ( ) raise JsonApiException ( "Delete object error: " + str ( e ) ) self . after_delete_object ( obj , view_kwargs )
Delete an object through sqlalchemy
191
7
224,010
def create_relationship ( self , json_data , relationship_field , related_id_field , view_kwargs ) : self . before_create_relationship ( json_data , relationship_field , related_id_field , view_kwargs ) obj = self . get_object ( view_kwargs ) if obj is None : url_field = getattr ( self , 'url_field' , 'id' ) filter_value = view_kwargs [ url_field ] raise ObjectNotFound ( '{}: {} not found' . format ( self . model . __name__ , filter_value ) , source = { 'parameter' : url_field } ) if not hasattr ( obj , relationship_field ) : raise RelationNotFound ( "{} has no attribute {}" . format ( obj . __class__ . __name__ , relationship_field ) ) related_model = getattr ( obj . __class__ , relationship_field ) . property . mapper . class_ updated = False if isinstance ( json_data [ 'data' ] , list ) : obj_ids = { str ( getattr ( obj__ , related_id_field ) ) for obj__ in getattr ( obj , relationship_field ) } for obj_ in json_data [ 'data' ] : if obj_ [ 'id' ] not in obj_ids : getattr ( obj , relationship_field ) . append ( self . get_related_object ( related_model , related_id_field , obj_ ) ) updated = True else : related_object = None if json_data [ 'data' ] is not None : related_object = self . get_related_object ( related_model , related_id_field , json_data [ 'data' ] ) obj_id = getattr ( getattr ( obj , relationship_field ) , related_id_field , None ) new_obj_id = getattr ( related_object , related_id_field , None ) if obj_id != new_obj_id : setattr ( obj , relationship_field , related_object ) updated = True try : self . session . commit ( ) except JsonApiException as e : self . session . rollback ( ) raise e except Exception as e : self . session . rollback ( ) raise JsonApiException ( "Create relationship error: " + str ( e ) ) self . after_create_relationship ( obj , updated , json_data , relationship_field , related_id_field , view_kwargs ) return obj , updated
Create a relationship
552
3
224,011
def get_relationship ( self , relationship_field , related_type_ , related_id_field , view_kwargs ) : self . before_get_relationship ( relationship_field , related_type_ , related_id_field , view_kwargs ) obj = self . get_object ( view_kwargs ) if obj is None : url_field = getattr ( self , 'url_field' , 'id' ) filter_value = view_kwargs [ url_field ] raise ObjectNotFound ( '{}: {} not found' . format ( self . model . __name__ , filter_value ) , source = { 'parameter' : url_field } ) if not hasattr ( obj , relationship_field ) : raise RelationNotFound ( "{} has no attribute {}" . format ( obj . __class__ . __name__ , relationship_field ) ) related_objects = getattr ( obj , relationship_field ) if related_objects is None : return obj , related_objects self . after_get_relationship ( obj , related_objects , relationship_field , related_type_ , related_id_field , view_kwargs ) if isinstance ( related_objects , InstrumentedList ) : return obj , [ { 'type' : related_type_ , 'id' : getattr ( obj_ , related_id_field ) } for obj_ in related_objects ] else : return obj , { 'type' : related_type_ , 'id' : getattr ( related_objects , related_id_field ) }
Get a relationship
338
3
224,012
def delete_relationship ( self , json_data , relationship_field , related_id_field , view_kwargs ) : self . before_delete_relationship ( json_data , relationship_field , related_id_field , view_kwargs ) obj = self . get_object ( view_kwargs ) if obj is None : url_field = getattr ( self , 'url_field' , 'id' ) filter_value = view_kwargs [ url_field ] raise ObjectNotFound ( '{}: {} not found' . format ( self . model . __name__ , filter_value ) , source = { 'parameter' : url_field } ) if not hasattr ( obj , relationship_field ) : raise RelationNotFound ( "{} has no attribute {}" . format ( obj . __class__ . __name__ , relationship_field ) ) related_model = getattr ( obj . __class__ , relationship_field ) . property . mapper . class_ updated = False if isinstance ( json_data [ 'data' ] , list ) : obj_ids = { str ( getattr ( obj__ , related_id_field ) ) for obj__ in getattr ( obj , relationship_field ) } for obj_ in json_data [ 'data' ] : if obj_ [ 'id' ] in obj_ids : getattr ( obj , relationship_field ) . remove ( self . get_related_object ( related_model , related_id_field , obj_ ) ) updated = True else : setattr ( obj , relationship_field , None ) updated = True try : self . session . commit ( ) except JsonApiException as e : self . session . rollback ( ) raise e except Exception as e : self . session . rollback ( ) raise JsonApiException ( "Delete relationship error: " + str ( e ) ) self . after_delete_relationship ( obj , updated , json_data , relationship_field , related_id_field , view_kwargs ) return obj , updated
Delete a relationship
443
3
224,013
def get_related_object ( self , related_model , related_id_field , obj ) : try : related_object = self . session . query ( related_model ) . filter ( getattr ( related_model , related_id_field ) == obj [ 'id' ] ) . one ( ) except NoResultFound : raise RelatedObjectNotFound ( "{}.{}: {} not found" . format ( related_model . __name__ , related_id_field , obj [ 'id' ] ) ) return related_object
Get a related object
114
4
224,014
def apply_relationships ( self , data , obj ) : relationships_to_apply = [ ] relationship_fields = get_relationships ( self . resource . schema , model_field = True ) for key , value in data . items ( ) : if key in relationship_fields : related_model = getattr ( obj . __class__ , key ) . property . mapper . class_ schema_field = get_schema_field ( self . resource . schema , key ) related_id_field = self . resource . schema . _declared_fields [ schema_field ] . id_field if isinstance ( value , list ) : related_objects = [ ] for identifier in value : related_object = self . get_related_object ( related_model , related_id_field , { 'id' : identifier } ) related_objects . append ( related_object ) relationships_to_apply . append ( { 'field' : key , 'value' : related_objects } ) else : related_object = None if value is not None : related_object = self . get_related_object ( related_model , related_id_field , { 'id' : value } ) relationships_to_apply . append ( { 'field' : key , 'value' : related_object } ) for relationship in relationships_to_apply : setattr ( obj , relationship [ 'field' ] , relationship [ 'value' ] )
Apply relationship provided by data to obj
307
7
224,015
def filter_query ( self , query , filter_info , model ) : if filter_info : filters = create_filters ( model , filter_info , self . resource ) query = query . filter ( * filters ) return query
Filter query according to jsonapi 1 . 0
49
9
224,016
def sort_query ( self , query , sort_info ) : for sort_opt in sort_info : field = sort_opt [ 'field' ] if not hasattr ( self . model , field ) : raise InvalidSort ( "{} has no attribute {}" . format ( self . model . __name__ , field ) ) query = query . order_by ( getattr ( getattr ( self . model , field ) , sort_opt [ 'order' ] ) ( ) ) return query
Sort query according to jsonapi 1 . 0
105
9
224,017
def paginate_query ( self , query , paginate_info ) : if int ( paginate_info . get ( 'size' , 1 ) ) == 0 : return query page_size = int ( paginate_info . get ( 'size' , 0 ) ) or current_app . config [ 'PAGE_SIZE' ] query = query . limit ( page_size ) if paginate_info . get ( 'number' ) : query = query . offset ( ( int ( paginate_info [ 'number' ] ) - 1 ) * page_size ) return query
Paginate query according to jsonapi 1 . 0
124
11
224,018
def eagerload_includes ( self , query , qs ) : for include in qs . include : joinload_object = None if '.' in include : current_schema = self . resource . schema for obj in include . split ( '.' ) : try : field = get_model_field ( current_schema , obj ) except Exception as e : raise InvalidInclude ( str ( e ) ) if joinload_object is None : joinload_object = joinedload ( field ) else : joinload_object = joinload_object . joinedload ( field ) related_schema_cls = get_related_schema ( current_schema , obj ) if isinstance ( related_schema_cls , SchemaABC ) : related_schema_cls = related_schema_cls . __class__ else : related_schema_cls = class_registry . get_class ( related_schema_cls ) current_schema = related_schema_cls else : try : field = get_model_field ( self . resource . schema , include ) except Exception as e : raise InvalidInclude ( str ( e ) ) joinload_object = joinedload ( field ) query = query . options ( joinload_object ) return query
Use eagerload feature of sqlalchemy to optimize data retrieval for include querystring parameter
277
17
224,019
def retrieve_object_query ( self , view_kwargs , filter_field , filter_value ) : return self . session . query ( self . model ) . filter ( filter_field == filter_value )
Build query to retrieve object
45
5
224,020
def add_pagination_links ( data , object_count , querystring , base_url ) : links = { } all_qs_args = copy ( querystring . querystring ) links [ 'self' ] = base_url # compute self link if all_qs_args : links [ 'self' ] += '?' + urlencode ( all_qs_args ) if querystring . pagination . get ( 'size' ) != '0' and object_count > 1 : # compute last link page_size = int ( querystring . pagination . get ( 'size' , 0 ) ) or current_app . config [ 'PAGE_SIZE' ] last_page = int ( ceil ( object_count / page_size ) ) if last_page > 1 : links [ 'first' ] = links [ 'last' ] = base_url all_qs_args . pop ( 'page[number]' , None ) # compute first link if all_qs_args : links [ 'first' ] += '?' + urlencode ( all_qs_args ) all_qs_args . update ( { 'page[number]' : last_page } ) links [ 'last' ] += '?' + urlencode ( all_qs_args ) # compute previous and next link current_page = int ( querystring . pagination . get ( 'number' , 0 ) ) or 1 if current_page > 1 : all_qs_args . update ( { 'page[number]' : current_page - 1 } ) links [ 'prev' ] = '?' . join ( ( base_url , urlencode ( all_qs_args ) ) ) if current_page < last_page : all_qs_args . update ( { 'page[number]' : current_page + 1 } ) links [ 'next' ] = '?' . join ( ( base_url , urlencode ( all_qs_args ) ) ) data [ 'links' ] = links
Add pagination links to result
429
6
224,021
def idfn ( fixture_params : Iterable [ Any ] ) -> str : return ":" . join ( ( str ( item ) for item in fixture_params ) )
Function for pytest to produce uniform names for fixtures .
36
11
224,022
def get_fixtures_file_hash ( all_fixture_paths : Iterable [ str ] ) -> str : hasher = hashlib . md5 ( ) for fixture_path in sorted ( all_fixture_paths ) : with open ( fixture_path , 'rb' ) as fixture_file : hasher . update ( fixture_file . read ( ) ) return hasher . hexdigest ( )
Returns the MD5 hash of the fixture files . Used for cache busting .
91
16
224,023
def create_unsigned_transaction ( cls , * , nonce : int , gas_price : int , gas : int , to : Address , value : int , data : bytes ) -> 'BaseUnsignedTransaction' : raise NotImplementedError ( "Must be implemented by subclasses" )
Create an unsigned transaction .
64
5
224,024
def import_header ( self , header : BlockHeader ) -> Tuple [ Tuple [ BlockHeader , ... ] , Tuple [ BlockHeader , ... ] ] : new_canonical_headers = self . headerdb . persist_header ( header ) self . header = self . get_canonical_head ( ) return new_canonical_headers
Direct passthrough to headerdb
74
7
224,025
def from_parent ( cls , parent : 'BlockHeader' , gas_limit : int , difficulty : int , timestamp : int , coinbase : Address = ZERO_ADDRESS , nonce : bytes = None , extra_data : bytes = None , transaction_root : bytes = None , receipt_root : bytes = None ) -> 'BlockHeader' : header_kwargs = { 'parent_hash' : parent . hash , 'coinbase' : coinbase , 'state_root' : parent . state_root , 'gas_limit' : gas_limit , 'difficulty' : difficulty , 'block_number' : parent . block_number + 1 , 'timestamp' : timestamp , } if nonce is not None : header_kwargs [ 'nonce' ] = nonce if extra_data is not None : header_kwargs [ 'extra_data' ] = extra_data if transaction_root is not None : header_kwargs [ 'transaction_root' ] = transaction_root if receipt_root is not None : header_kwargs [ 'receipt_root' ] = receipt_root header = cls ( * * header_kwargs ) return header
Initialize a new block header with the parent header as the block s parent hash .
259
17
224,026
def get_block_uncles ( self , uncles_hash : Hash32 ) -> List [ BlockHeader ] : validate_word ( uncles_hash , title = "Uncles Hash" ) if uncles_hash == EMPTY_UNCLE_HASH : return [ ] try : encoded_uncles = self . db [ uncles_hash ] except KeyError : raise HeaderNotFound ( "No uncles found for hash {0}" . format ( uncles_hash ) ) else : return rlp . decode ( encoded_uncles , sedes = rlp . sedes . CountableList ( BlockHeader ) )
Returns an iterable of uncle headers specified by the given uncles_hash
134
15
224,027
def persist_block ( self , block : 'BaseBlock' ) -> Tuple [ Tuple [ Hash32 , ... ] , Tuple [ Hash32 , ... ] ] : with self . db . atomic_batch ( ) as db : return self . _persist_block ( db , block )
Persist the given block s header and uncles .
63
11
224,028
def persist_uncles ( self , uncles : Tuple [ BlockHeader ] ) -> Hash32 : return self . _persist_uncles ( self . db , uncles )
Persists the list of uncles to the database .
39
11
224,029
def add_receipt ( self , block_header : BlockHeader , index_key : int , receipt : Receipt ) -> Hash32 : receipt_db = HexaryTrie ( db = self . db , root_hash = block_header . receipt_root ) receipt_db [ index_key ] = rlp . encode ( receipt ) return receipt_db . root_hash
Adds the given receipt to the provided block header .
82
10
224,030
def add_transaction ( self , block_header : BlockHeader , index_key : int , transaction : 'BaseTransaction' ) -> Hash32 : transaction_db = HexaryTrie ( self . db , root_hash = block_header . transaction_root ) transaction_db [ index_key ] = rlp . encode ( transaction ) return transaction_db . root_hash
Adds the given transaction to the provided block header .
81
10
224,031
def get_block_transactions ( self , header : BlockHeader , transaction_class : Type [ 'BaseTransaction' ] ) -> Iterable [ 'BaseTransaction' ] : return self . _get_block_transactions ( header . transaction_root , transaction_class )
Returns an iterable of transactions for the block speficied by the given block header .
58
19
224,032
def get_block_transaction_hashes ( self , block_header : BlockHeader ) -> Iterable [ Hash32 ] : return self . _get_block_transaction_hashes ( self . db , block_header )
Returns an iterable of the transaction hashes from the block specified by the given block header .
50
18
224,033
def get_receipts ( self , header : BlockHeader , receipt_class : Type [ Receipt ] ) -> Iterable [ Receipt ] : receipt_db = HexaryTrie ( db = self . db , root_hash = header . receipt_root ) for receipt_idx in itertools . count ( ) : receipt_key = rlp . encode ( receipt_idx ) if receipt_key in receipt_db : receipt_data = receipt_db [ receipt_key ] yield rlp . decode ( receipt_data , sedes = receipt_class ) else : break
Returns an iterable of receipts for the block specified by the given block header .
127
16
224,034
def get_transaction_by_index ( self , block_number : BlockNumber , transaction_index : int , transaction_class : Type [ 'BaseTransaction' ] ) -> 'BaseTransaction' : try : block_header = self . get_canonical_block_header_by_number ( block_number ) except HeaderNotFound : raise TransactionNotFound ( "Block {} is not in the canonical chain" . format ( block_number ) ) transaction_db = HexaryTrie ( self . db , root_hash = block_header . transaction_root ) encoded_index = rlp . encode ( transaction_index ) if encoded_index in transaction_db : encoded_transaction = transaction_db [ encoded_index ] return rlp . decode ( encoded_transaction , sedes = transaction_class ) else : raise TransactionNotFound ( "No transaction is at index {} of block {}" . format ( transaction_index , block_number ) )
Returns the transaction at the specified transaction_index from the block specified by block_number from the canonical chain .
204
22
224,035
def get_receipt_by_index ( self , block_number : BlockNumber , receipt_index : int ) -> Receipt : try : block_header = self . get_canonical_block_header_by_number ( block_number ) except HeaderNotFound : raise ReceiptNotFound ( "Block {} is not in the canonical chain" . format ( block_number ) ) receipt_db = HexaryTrie ( db = self . db , root_hash = block_header . receipt_root ) receipt_key = rlp . encode ( receipt_index ) if receipt_key in receipt_db : receipt_data = receipt_db [ receipt_key ] return rlp . decode ( receipt_data , sedes = Receipt ) else : raise ReceiptNotFound ( "Receipt with index {} not found in block" . format ( receipt_index ) )
Returns the Receipt of the transaction at specified index for the block header obtained by the specified block number
189
20
224,036
def _get_block_transaction_data ( db : BaseDB , transaction_root : Hash32 ) -> Iterable [ Hash32 ] : transaction_db = HexaryTrie ( db , root_hash = transaction_root ) for transaction_idx in itertools . count ( ) : transaction_key = rlp . encode ( transaction_idx ) if transaction_key in transaction_db : yield transaction_db [ transaction_key ] else : break
Returns iterable of the encoded transactions for the given block header
99
12
224,037
def _get_block_transactions ( self , transaction_root : Hash32 , transaction_class : Type [ 'BaseTransaction' ] ) -> Iterable [ 'BaseTransaction' ] : for encoded_transaction in self . _get_block_transaction_data ( self . db , transaction_root ) : yield rlp . decode ( encoded_transaction , sedes = transaction_class )
Memoizable version of get_block_transactions
85
11
224,038
def _remove_transaction_from_canonical_chain ( db : BaseDB , transaction_hash : Hash32 ) -> None : db . delete ( SchemaV1 . make_transaction_hash_to_block_lookup_key ( transaction_hash ) )
Removes the transaction specified by the given hash from the canonical chain .
59
14
224,039
def persist_trie_data_dict ( self , trie_data_dict : Dict [ Hash32 , bytes ] ) -> None : with self . db . atomic_batch ( ) as db : for key , value in trie_data_dict . items ( ) : db [ key ] = value
Store raw trie data to db from a dict
66
10
224,040
def from_header ( cls , header : BlockHeader , chaindb : BaseChainDB ) -> BaseBlock : if header . uncles_hash == EMPTY_UNCLE_HASH : uncles = [ ] # type: List[BlockHeader] else : uncles = chaindb . get_block_uncles ( header . uncles_hash ) transactions = chaindb . get_block_transactions ( header , cls . get_transaction_class ( ) ) return cls ( header = header , transactions = transactions , uncles = uncles , )
Returns the block denoted by the given block header .
121
11
224,041
def shl ( computation : BaseComputation ) -> None : shift_length , value = computation . stack_pop ( num_items = 2 , type_hint = constants . UINT256 ) if shift_length >= 256 : result = 0 else : result = ( value << shift_length ) & constants . UINT_256_MAX computation . stack_push ( result )
Bitwise left shift
81
4
224,042
def sar ( computation : BaseComputation ) -> None : shift_length , value = computation . stack_pop ( num_items = 2 , type_hint = constants . UINT256 ) value = unsigned_to_signed ( value ) if shift_length >= 256 : result = 0 if value >= 0 else constants . UINT_255_NEGATIVE_ONE else : result = ( value >> shift_length ) & constants . UINT_256_MAX computation . stack_push ( result )
Arithmetic bitwise right shift
107
6
224,043
def compute_frontier_difficulty ( parent_header : BlockHeader , timestamp : int ) -> int : validate_gt ( timestamp , parent_header . timestamp , title = "Header timestamp" ) offset = parent_header . difficulty // DIFFICULTY_ADJUSTMENT_DENOMINATOR # We set the minimum to the lowest of the protocol minimum and the parent # minimum to allow for the initial frontier *warming* period during which # the difficulty begins lower than the protocol minimum. difficulty_minimum = min ( parent_header . difficulty , DIFFICULTY_MINIMUM ) if timestamp - parent_header . timestamp < FRONTIER_DIFFICULTY_ADJUSTMENT_CUTOFF : base_difficulty = max ( parent_header . difficulty + offset , difficulty_minimum , ) else : base_difficulty = max ( parent_header . difficulty - offset , difficulty_minimum , ) # Adjust for difficulty bomb. num_bomb_periods = ( ( parent_header . block_number + 1 ) // BOMB_EXPONENTIAL_PERIOD ) - BOMB_EXPONENTIAL_FREE_PERIODS if num_bomb_periods >= 0 : difficulty = max ( base_difficulty + 2 ** num_bomb_periods , DIFFICULTY_MINIMUM , ) else : difficulty = base_difficulty return difficulty
Computes the difficulty for a frontier block based on the parent block .
299
14
224,044
def build_computation ( self , message : Message , transaction : BaseOrSpoofTransaction ) -> BaseComputation : transaction_context = self . vm_state . get_transaction_context ( transaction ) if message . is_create : is_collision = self . vm_state . has_code_or_nonce ( message . storage_address ) if is_collision : # The address of the newly created contract has *somehow* collided # with an existing contract address. computation = self . vm_state . get_computation ( message , transaction_context ) computation . _error = ContractCreationCollision ( "Address collision while creating contract: {0}" . format ( encode_hex ( message . storage_address ) , ) ) self . vm_state . logger . debug2 ( "Address collision while creating contract: %s" , encode_hex ( message . storage_address ) , ) else : computation = self . vm_state . get_computation ( message , transaction_context , ) . apply_create_message ( ) else : computation = self . vm_state . get_computation ( message , transaction_context ) . apply_message ( ) return computation
Apply the message to the VM .
258
7
224,045
def push ( self , value : Union [ int , bytes ] ) -> None : if len ( self . values ) > 1023 : raise FullStack ( 'Stack limit reached' ) validate_stack_item ( value ) self . values . append ( value )
Push an item onto the stack .
54
7
224,046
def pop ( self , num_items : int , type_hint : str ) -> Union [ int , bytes , Tuple [ Union [ int , bytes ] , ... ] ] : try : if num_items == 1 : return next ( self . _pop ( num_items , type_hint ) ) else : return tuple ( self . _pop ( num_items , type_hint ) ) except IndexError : raise InsufficientStack ( "No stack items" )
Pop an item off the stack .
101
7
224,047
def swap ( self , position : int ) -> None : idx = - 1 * position - 1 try : self . values [ - 1 ] , self . values [ idx ] = self . values [ idx ] , self . values [ - 1 ] except IndexError : raise InsufficientStack ( "Insufficient stack items for SWAP{0}" . format ( position ) )
Perform a SWAP operation on the stack .
80
10
224,048
def dup ( self , position : int ) -> None : idx = - 1 * position try : self . push ( self . values [ idx ] ) except IndexError : raise InsufficientStack ( "Insufficient stack items for DUP{0}" . format ( position ) )
Perform a DUP operation on the stack .
58
9
224,049
def get_canonical_block_hash ( self , block_number : BlockNumber ) -> Hash32 : return self . _get_canonical_block_hash ( self . db , block_number )
Returns the block hash for the canonical block at the given number .
44
13
224,050
def get_canonical_block_header_by_number ( self , block_number : BlockNumber ) -> BlockHeader : return self . _get_canonical_block_header_by_number ( self . db , block_number )
Returns the block header with the given number in the canonical chain .
52
13
224,051
def persist_header_chain ( self , headers : Iterable [ BlockHeader ] ) -> Tuple [ Tuple [ BlockHeader , ... ] , Tuple [ BlockHeader , ... ] ] : with self . db . atomic_batch ( ) as db : return self . _persist_header_chain ( db , headers )
Return two iterable of headers the first containing the new canonical headers the second containing the old canonical headers
69
20
224,052
def _set_as_canonical_chain_head ( cls , db : BaseDB , block_hash : Hash32 ) -> Tuple [ Tuple [ BlockHeader , ... ] , Tuple [ BlockHeader , ... ] ] : try : header = cls . _get_block_header_by_hash ( db , block_hash ) except HeaderNotFound : raise ValueError ( "Cannot use unknown block hash as canonical head: {}" . format ( block_hash ) ) new_canonical_headers = tuple ( reversed ( cls . _find_new_ancestors ( db , header ) ) ) old_canonical_headers = [ ] for h in new_canonical_headers : try : old_canonical_hash = cls . _get_canonical_block_hash ( db , h . block_number ) except HeaderNotFound : # no old_canonical block, and no more possible break else : old_canonical_header = cls . _get_block_header_by_hash ( db , old_canonical_hash ) old_canonical_headers . append ( old_canonical_header ) for h in new_canonical_headers : cls . _add_block_number_to_hash_lookup ( db , h ) db . set ( SchemaV1 . make_canonical_head_hash_lookup_key ( ) , header . hash ) return new_canonical_headers , tuple ( old_canonical_headers )
Sets the canonical chain HEAD to the block header as specified by the given block hash .
326
18
224,053
def _add_block_number_to_hash_lookup ( db : BaseDB , header : BlockHeader ) -> None : block_number_to_hash_key = SchemaV1 . make_block_number_to_hash_lookup_key ( header . block_number ) db . set ( block_number_to_hash_key , rlp . encode ( header . hash , sedes = rlp . sedes . binary ) , )
Sets a record in the database to allow looking up this header by its block number .
100
18
224,054
def compute_gas_limit_bounds ( parent : BlockHeader ) -> Tuple [ int , int ] : boundary_range = parent . gas_limit // GAS_LIMIT_ADJUSTMENT_FACTOR upper_bound = parent . gas_limit + boundary_range lower_bound = max ( GAS_LIMIT_MINIMUM , parent . gas_limit - boundary_range ) return lower_bound , upper_bound
Compute the boundaries for the block gas limit based on the parent block .
96
15
224,055
def compute_gas_limit ( parent_header : BlockHeader , gas_limit_floor : int ) -> int : if gas_limit_floor < GAS_LIMIT_MINIMUM : raise ValueError ( "The `gas_limit_floor` value must be greater than the " "GAS_LIMIT_MINIMUM. Got {0}. Must be greater than " "{1}" . format ( gas_limit_floor , GAS_LIMIT_MINIMUM ) ) decay = parent_header . gas_limit // GAS_LIMIT_EMA_DENOMINATOR if parent_header . gas_used : usage_increase = ( parent_header . gas_used * GAS_LIMIT_USAGE_ADJUSTMENT_NUMERATOR ) // ( GAS_LIMIT_USAGE_ADJUSTMENT_DENOMINATOR ) // ( GAS_LIMIT_EMA_DENOMINATOR ) else : usage_increase = 0 gas_limit = max ( GAS_LIMIT_MINIMUM , parent_header . gas_limit - decay + usage_increase ) if gas_limit < GAS_LIMIT_MINIMUM : return GAS_LIMIT_MINIMUM elif gas_limit < gas_limit_floor : return parent_header . gas_limit + decay else : return gas_limit
A simple strategy for adjusting the gas limit .
304
9
224,056
def generate_header_from_parent_header ( compute_difficulty_fn : Callable [ [ BlockHeader , int ] , int ] , parent_header : BlockHeader , coinbase : Address , timestamp : Optional [ int ] = None , extra_data : bytes = b'' ) -> BlockHeader : if timestamp is None : timestamp = max ( int ( time . time ( ) ) , parent_header . timestamp + 1 ) elif timestamp <= parent_header . timestamp : raise ValueError ( "header.timestamp ({}) should be higher than" "parent_header.timestamp ({})" . format ( timestamp , parent_header . timestamp , ) ) header = BlockHeader ( difficulty = compute_difficulty_fn ( parent_header , timestamp ) , block_number = ( parent_header . block_number + 1 ) , gas_limit = compute_gas_limit ( parent_header , gas_limit_floor = GENESIS_GAS_LIMIT , ) , timestamp = timestamp , parent_hash = parent_header . hash , state_root = parent_header . state_root , coinbase = coinbase , extra_data = extra_data , ) return header
Generate BlockHeader from state_root and parent_header
253
12
224,057
def state_definition_to_dict ( state_definition : GeneralState ) -> AccountState : if isinstance ( state_definition , Mapping ) : state_dict = state_definition elif isinstance ( state_definition , Iterable ) : state_dicts = [ assoc_in ( { } , state_item [ : - 1 ] , state_item [ - 1 ] ) if not isinstance ( state_item , Mapping ) else state_item for state_item in state_definition ] if not is_cleanly_mergable ( * state_dicts ) : raise ValidationError ( "Some state item is defined multiple times" ) state_dict = deep_merge ( * state_dicts ) else : assert TypeError ( "State definition must either be a mapping or a sequence" ) seen_keys = set ( concat ( d . keys ( ) for d in state_dict . values ( ) ) ) bad_keys = seen_keys - set ( [ "balance" , "nonce" , "storage" , "code" ] ) if bad_keys : raise ValidationError ( "State definition contains the following invalid account fields: {}" . format ( ", " . join ( bad_keys ) ) ) return state_dict
Convert a state definition to the canonical dict form .
270
11
224,058
def record_changeset ( self , custom_changeset_id : uuid . UUID = None ) -> uuid . UUID : if custom_changeset_id is not None : if custom_changeset_id in self . journal_data : raise ValidationError ( "Tried to record with an existing changeset id: %r" % custom_changeset_id ) else : changeset_id = custom_changeset_id else : changeset_id = uuid . uuid4 ( ) self . journal_data [ changeset_id ] = { } return changeset_id
Creates a new changeset . Changesets are referenced by a random uuid4 to prevent collisions between multiple changesets .
130
25
224,059
def pop_changeset ( self , changeset_id : uuid . UUID ) -> Dict [ bytes , Union [ bytes , DeletedEntry ] ] : if changeset_id not in self . journal_data : raise KeyError ( changeset_id , "Unknown changeset in JournalDB" ) all_ids = tuple ( self . journal_data . keys ( ) ) changeset_idx = all_ids . index ( changeset_id ) changesets_to_pop = all_ids [ changeset_idx : ] popped_clears = tuple ( idx for idx in changesets_to_pop if idx in self . _clears_at ) if popped_clears : last_clear_idx = changesets_to_pop . index ( popped_clears [ - 1 ] ) changesets_to_drop = changesets_to_pop [ : last_clear_idx ] changesets_to_merge = changesets_to_pop [ last_clear_idx : ] else : changesets_to_drop = ( ) changesets_to_merge = changesets_to_pop # we pull all of the changesets *after* the changeset we are # reverting to and collapse them to a single set of keys (giving # precedence to later changesets) changeset_data = merge ( * ( self . journal_data . pop ( c_id ) for c_id in changesets_to_merge ) ) # drop the changes on the floor if they came before a clear that is being committed for changeset_id in changesets_to_drop : self . journal_data . pop ( changeset_id ) self . _clears_at . difference_update ( popped_clears ) return changeset_data
Returns all changes from the given changeset . This includes all of the changes from any subsequent changeset giving precidence to later changesets .
387
28
224,060
def commit_changeset ( self , changeset_id : uuid . UUID ) -> Dict [ bytes , Union [ bytes , DeletedEntry ] ] : does_clear = self . has_clear ( changeset_id ) changeset_data = self . pop_changeset ( changeset_id ) if not self . is_empty ( ) : # we only have to assign changeset data into the latest changeset if # there is one. if does_clear : # if there was a clear and more changesets underneath then clear the latest # changeset, and replace with a new clear changeset self . latest = { } self . _clears_at . add ( self . latest_id ) self . record_changeset ( ) self . latest = changeset_data else : # otherwise, merge in all the current data self . latest = merge ( self . latest , changeset_data , ) return changeset_data
Collapses all changes for the given changeset into the previous changesets if it exists .
199
18
224,061
def _validate_changeset ( self , changeset_id : uuid . UUID ) -> None : if not self . journal . has_changeset ( changeset_id ) : raise ValidationError ( "Changeset not found in journal: {0}" . format ( str ( changeset_id ) ) )
Checks to be sure the changeset is known by the journal
69
13
224,062
def record ( self , custom_changeset_id : uuid . UUID = None ) -> uuid . UUID : return self . journal . record_changeset ( custom_changeset_id )
Starts a new recording and returns an id for the associated changeset
44
14
224,063
def discard ( self , changeset_id : uuid . UUID ) -> None : self . _validate_changeset ( changeset_id ) self . journal . pop_changeset ( changeset_id )
Throws away all journaled data starting at the given changeset
47
13
224,064
def commit ( self , changeset_id : uuid . UUID ) -> None : self . _validate_changeset ( changeset_id ) journal_data = self . journal . commit_changeset ( changeset_id ) if self . journal . is_empty ( ) : # Ensure the journal automatically restarts recording after # it has been persisted to the underlying db self . reset ( ) for key , value in journal_data . items ( ) : try : if value is DELETED_ENTRY : del self . wrapped_db [ key ] elif value is ERASE_CREATED_ENTRY : pass else : self . wrapped_db [ key ] = cast ( bytes , value ) except Exception : self . _reapply_changeset_to_journal ( changeset_id , journal_data ) raise
Commits a given changeset . This merges the given changeset and all subsequent changesets into the previous changeset giving precidence to later changesets in case of any conflicting keys .
176
38
224,065
def FQP_point_to_FQ2_point ( pt : Tuple [ FQP , FQP , FQP ] ) -> Tuple [ FQ2 , FQ2 , FQ2 ] : return ( FQ2 ( pt [ 0 ] . coeffs ) , FQ2 ( pt [ 1 ] . coeffs ) , FQ2 ( pt [ 2 ] . coeffs ) , )
Transform FQP to FQ2 for type hinting .
95
13
224,066
def as_opcode ( cls : Type [ T ] , logic_fn : Callable [ ... , Any ] , mnemonic : str , gas_cost : int ) -> Type [ T ] : if gas_cost : @ functools . wraps ( logic_fn ) def wrapped_logic_fn ( computation : 'BaseComputation' ) -> Any : """ Wrapper functionf or the logic function which consumes the base opcode gas cost prior to execution. """ computation . consume_gas ( gas_cost , mnemonic , ) return logic_fn ( computation ) else : wrapped_logic_fn = logic_fn props = { '__call__' : staticmethod ( wrapped_logic_fn ) , 'mnemonic' : mnemonic , 'gas_cost' : gas_cost , } opcode_cls = type ( "opcode:{0}" . format ( mnemonic ) , ( cls , ) , props ) return opcode_cls ( )
Class factory method for turning vanilla functions into Opcode classes .
214
12
224,067
def _wipe_storage ( self , address : Address ) -> None : account_store = self . _get_address_store ( address ) self . _dirty_accounts . add ( address ) account_store . delete ( )
Wipe out the storage without explicitly handling the storage root update
50
12
224,068
def setup_main_filler ( name : str , environment : Dict [ Any , Any ] = None ) -> Dict [ str , Dict [ str , Any ] ] : return setup_filler ( name , merge ( DEFAULT_MAIN_ENVIRONMENT , environment or { } ) )
Kick off the filler generation process by creating the general filler scaffold with a test name and general information about the testing environment .
67
25
224,069
def pre_state ( * raw_state : GeneralState , filler : Dict [ str , Any ] ) -> None : @ wraps ( pre_state ) def _pre_state ( filler : Dict [ str , Any ] ) -> Dict [ str , Any ] : test_name = get_test_name ( filler ) old_pre_state = filler [ test_name ] . get ( "pre_state" , { } ) pre_state = normalize_state ( raw_state ) defaults = { address : { "balance" : 0 , "nonce" : 0 , "code" : b"" , "storage" : { } , } for address in pre_state } new_pre_state = deep_merge ( defaults , old_pre_state , pre_state ) return assoc_in ( filler , [ test_name , "pre" ] , new_pre_state )
Specify the state prior to the test execution . Multiple invocations don t override the state but extend it instead .
195
23
224,070
def expect ( post_state : Dict [ str , Any ] = None , networks : Any = None , transaction : TransactionDict = None ) -> Callable [ ... , Dict [ str , Any ] ] : return partial ( _expect , post_state , networks , transaction )
Specify the expected result for the test .
61
9
224,071
def calldataload ( computation : BaseComputation ) -> None : start_position = computation . stack_pop ( type_hint = constants . UINT256 ) value = computation . msg . data_as_bytes [ start_position : start_position + 32 ] padded_value = value . ljust ( 32 , b'\x00' ) normalized_value = padded_value . lstrip ( b'\x00' ) computation . stack_push ( normalized_value )
Load call data into memory .
105
6
224,072
def clamp ( inclusive_lower_bound : int , inclusive_upper_bound : int , value : int ) -> int : if value <= inclusive_lower_bound : return inclusive_lower_bound elif value >= inclusive_upper_bound : return inclusive_upper_bound else : return value
Bound the given value between inclusive_lower_bound and inclusive_upper_bound .
61
17
224,073
def integer_squareroot ( value : int ) -> int : if not isinstance ( value , int ) or isinstance ( value , bool ) : raise ValueError ( "Value must be an integer: Got: {0}" . format ( type ( value ) , ) ) if value < 0 : raise ValueError ( "Value cannot be negative: Got: {0}" . format ( value , ) ) with decimal . localcontext ( ) as ctx : ctx . prec = 128 return int ( decimal . Decimal ( value ) . sqrt ( ) )
Return the integer square root of value .
117
8
224,074
def _commit_unless_raises ( cls , write_target_db : BaseDB ) -> Iterator [ 'AtomicDBWriteBatch' ] : readable_write_batch = cls ( write_target_db ) # type: AtomicDBWriteBatch try : yield readable_write_batch except Exception : cls . logger . exception ( "Unexpected error in atomic db write, dropped partial writes: %r" , readable_write_batch . _diff ( ) , ) raise else : readable_write_batch . _commit ( ) finally : # force a shutdown of this batch, to prevent out-of-context usage readable_write_batch . _track_diff = None readable_write_batch . _write_target_db = None
Commit all writes inside the context unless an exception was raised .
162
13
224,075
def slt ( computation : BaseComputation ) -> None : left , right = map ( unsigned_to_signed , computation . stack_pop ( num_items = 2 , type_hint = constants . UINT256 ) , ) if left < right : result = 1 else : result = 0 computation . stack_push ( signed_to_unsigned ( result ) )
Signed Lesser Comparison
79
5
224,076
def build ( obj : Any , * applicators : Callable [ ... , Any ] ) -> Any : if isinstance ( obj , BaseChain ) : return pipe ( obj , copy ( ) , * applicators ) else : return pipe ( obj , * applicators )
Run the provided object through the series of applicator functions .
56
12
224,077
def name ( class_name : str , chain_class : Type [ BaseChain ] ) -> Type [ BaseChain ] : return chain_class . configure ( __name__ = class_name )
Assign the given name to the chain class .
41
10
224,078
def chain_id ( chain_id : int , chain_class : Type [ BaseChain ] ) -> Type [ BaseChain ] : return chain_class . configure ( chain_id = chain_id )
Set the chain_id for the chain class .
43
10
224,079
def fork_at ( vm_class : Type [ BaseVM ] , at_block : int , chain_class : Type [ BaseChain ] ) -> Type [ BaseChain ] : if chain_class . vm_configuration is not None : base_configuration = chain_class . vm_configuration else : base_configuration = tuple ( ) vm_configuration = base_configuration + ( ( at_block , vm_class ) , ) validate_vm_configuration ( vm_configuration ) return chain_class . configure ( vm_configuration = vm_configuration )
Adds the vm_class to the chain s vm_configuration .
125
14
224,080
def enable_pow_mining ( chain_class : Type [ BaseChain ] ) -> Type [ BaseChain ] : if not chain_class . vm_configuration : raise ValidationError ( "Chain class has no vm_configuration" ) vm_configuration = _mix_in_pow_mining ( chain_class . vm_configuration ) return chain_class . configure ( vm_configuration = vm_configuration )
Inject on demand generation of the proof of work mining seal on newly mined blocks into each of the chain s vms .
93
25
224,081
def disable_pow_check ( chain_class : Type [ BaseChain ] ) -> Type [ BaseChain ] : if not chain_class . vm_configuration : raise ValidationError ( "Chain class has no vm_configuration" ) if issubclass ( chain_class , NoChainSealValidationMixin ) : # Seal validation already disabled, hence nothing to change chain_class_without_seal_validation = chain_class else : chain_class_without_seal_validation = type ( chain_class . __name__ , ( chain_class , NoChainSealValidationMixin ) , { } , ) return chain_class_without_seal_validation . configure ( # type: ignore vm_configuration = _mix_in_disable_seal_validation ( chain_class_without_seal_validation . vm_configuration # type: ignore ) , )
Disable the proof of work validation check for each of the chain s vms . This allows for block mining without generation of the proof of work seal .
199
30
224,082
def genesis ( chain_class : BaseChain , db : BaseAtomicDB = None , params : Dict [ str , HeaderParams ] = None , state : GeneralState = None ) -> BaseChain : if state is None : genesis_state = { } # type: AccountState else : genesis_state = _fill_and_normalize_state ( state ) genesis_params_defaults = _get_default_genesis_params ( genesis_state ) if params is None : genesis_params = genesis_params_defaults else : genesis_params = merge ( genesis_params_defaults , params ) if db is None : base_db = AtomicDB ( ) # type: BaseAtomicDB else : base_db = db return chain_class . from_genesis ( base_db , genesis_params , genesis_state )
Initialize the given chain class with the given genesis header parameters and chain state .
180
16
224,083
def mine_block ( chain : MiningChain , * * kwargs : Any ) -> MiningChain : if not isinstance ( chain , MiningChain ) : raise ValidationError ( '`mine_block` may only be used on MiningChain instances' ) chain . mine_block ( * * kwargs ) return chain
Mine a new block on the chain . Header parameters for the new block can be overridden using keyword arguments .
68
22
224,084
def import_block ( block : BaseBlock , chain : BaseChain ) -> BaseChain : chain . import_block ( block ) return chain
Import the provided block into the chain .
29
8
224,085
def copy ( chain : MiningChain ) -> MiningChain : if not isinstance ( chain , MiningChain ) : raise ValidationError ( "`at_block_number` may only be used with 'MiningChain" ) base_db = chain . chaindb . db if not isinstance ( base_db , AtomicDB ) : raise ValidationError ( "Unsupported database type: {0}" . format ( type ( base_db ) ) ) if isinstance ( base_db . wrapped_db , MemoryDB ) : db = AtomicDB ( MemoryDB ( base_db . wrapped_db . kv_store . copy ( ) ) ) else : raise ValidationError ( "Unsupported wrapped database: {0}" . format ( type ( base_db . wrapped_db ) ) ) chain_copy = type ( chain ) ( db , chain . header ) return chain_copy
Make a copy of the chain at the given state . Actions performed on the resulting chain will not affect the original chain .
187
24
224,086
def chain_split ( * splits : Iterable [ Callable [ ... , Any ] ] ) -> Callable [ [ BaseChain ] , Iterable [ BaseChain ] ] : # noqa: E501 if not splits : raise ValidationError ( "Cannot use `chain_split` without providing at least one split" ) @ functools . wraps ( chain_split ) @ to_tuple def _chain_split ( chain : BaseChain ) -> Iterable [ BaseChain ] : for split_fns in splits : result = build ( chain , * split_fns , ) yield result return _chain_split
Construct and execute multiple concurrent forks of the chain .
132
10
224,087
def at_block_number ( block_number : BlockNumber , chain : MiningChain ) -> MiningChain : if not isinstance ( chain , MiningChain ) : raise ValidationError ( "`at_block_number` may only be used with 'MiningChain" ) at_block = chain . get_canonical_block_by_number ( block_number ) db = chain . chaindb . db chain_at_block = type ( chain ) ( db , chain . create_header_from_parent ( at_block . header ) ) return chain_at_block
Rewind the chain back to the given block number . Calls to things like get_canonical_head will still return the canonical head of the chain however you can use mine_block to mine fork chains .
123
42
224,088
def load_json_fixture ( fixture_path : str ) -> Dict [ str , Any ] : with open ( fixture_path ) as fixture_file : file_fixtures = json . load ( fixture_file ) return file_fixtures
Loads a fixture file caching the most recent files it loaded .
53
13
224,089
def load_fixture ( fixture_path : str , fixture_key : str , normalize_fn : Callable [ ... , Any ] = identity ) -> Dict [ str , Any ] : file_fixtures = load_json_fixture ( fixture_path ) fixture = normalize_fn ( file_fixtures [ fixture_key ] ) return fixture
Loads a specific fixture from a fixture file optionally passing it through a normalization function .
77
18
224,090
def construct_evm_runtime_identifier ( ) -> str : return "Py-EVM/{0}/{platform}/{imp.name}{v.major}.{v.minor}.{v.micro}" . format ( __version__ , platform = sys . platform , v = sys . version_info , # mypy Doesn't recognize the `sys` module as having an `implementation` attribute. imp = sys . implementation , )
Constructs the EVM runtime identifier string
98
8
224,091
def binary_gas_search ( state : BaseState , transaction : BaseTransaction , tolerance : int = 1 ) -> int : if not hasattr ( transaction , 'sender' ) : raise TypeError ( "Transaction is missing attribute sender." , "If sending an unsigned transaction, use SpoofTransaction and provide the" , "sender using the 'from' parameter" ) minimum_transaction = SpoofTransaction ( transaction , gas = transaction . intrinsic_gas , gas_price = 0 , ) if _get_computation_error ( state , minimum_transaction ) is None : return transaction . intrinsic_gas maximum_transaction = SpoofTransaction ( transaction , gas = state . gas_limit , gas_price = 0 , ) error = _get_computation_error ( state , maximum_transaction ) if error is not None : raise error minimum_viable = state . gas_limit maximum_out_of_gas = transaction . intrinsic_gas while minimum_viable - maximum_out_of_gas > tolerance : midpoint = ( minimum_viable + maximum_out_of_gas ) // 2 test_transaction = SpoofTransaction ( transaction , gas = midpoint ) if _get_computation_error ( state , test_transaction ) is None : minimum_viable = midpoint else : maximum_out_of_gas = midpoint return minimum_viable
Run the transaction with various gas limits progressively approaching the minimum needed to succeed without an OutOfGas exception .
300
21
224,092
def commit_to ( self , db : BaseDB ) -> None : self . logger . debug2 ( 'persist storage root to data store' ) if self . _trie_nodes_batch is None : raise ValidationError ( "It is invalid to commit an account's storage if it has no pending changes. " "Always check storage_lookup.has_changed_root before attempting to commit." ) self . _trie_nodes_batch . commit_to ( db , apply_deletes = False ) self . _clear_changed_root ( )
Trying to commit changes when nothing has been written will raise a ValidationError
122
16
224,093
def _validate_flushed ( self ) -> None : journal_diff = self . _journal_storage . diff ( ) if len ( journal_diff ) > 0 : raise ValidationError ( "StorageDB had a dirty journal when it needed to be clean: %r" % journal_diff )
Will raise an exception if there are some changes made since the last persist .
64
15
224,094
def write ( self , start_position : int , size : int , value : bytes ) -> None : if size : validate_uint256 ( start_position ) validate_uint256 ( size ) validate_is_bytes ( value ) validate_length ( value , length = size ) validate_lte ( start_position + size , maximum = len ( self ) ) for idx , v in enumerate ( value ) : self . _bytes [ start_position + idx ] = v
Write value into memory .
103
5
224,095
def read ( self , start_position : int , size : int ) -> memoryview : return memoryview ( self . _bytes ) [ start_position : start_position + size ]
Return a view into the memory
39
6
224,096
def read_bytes ( self , start_position : int , size : int ) -> bytes : return bytes ( self . _bytes [ start_position : start_position + size ] )
Read a value from memory and return a fresh bytes instance
39
11
224,097
def extend_memory ( self , start_position : int , size : int ) -> None : validate_uint256 ( start_position , title = "Memory start position" ) validate_uint256 ( size , title = "Memory size" ) before_size = ceil32 ( len ( self . _memory ) ) after_size = ceil32 ( start_position + size ) before_cost = memory_gas_cost ( before_size ) after_cost = memory_gas_cost ( after_size ) if self . logger . show_debug2 : self . logger . debug2 ( "MEMORY: size (%s -> %s) | cost (%s -> %s)" , before_size , after_size , before_cost , after_cost , ) if size : if before_cost < after_cost : gas_fee = after_cost - before_cost self . _gas_meter . consume_gas ( gas_fee , reason = " " . join ( ( "Expanding memory" , str ( before_size ) , "->" , str ( after_size ) , ) ) ) self . _memory . extend ( start_position , size )
Extend the size of the memory to be at minimum start_position + size bytes in length . Raise eth . exceptions . OutOfGas if there is not enough gas to pay for extending the memory .
250
41
224,098
def memory_read ( self , start_position : int , size : int ) -> memoryview : return self . _memory . read ( start_position , size )
Read and return a view of size bytes from memory starting at start_position .
35
16
224,099
def memory_read_bytes ( self , start_position : int , size : int ) -> bytes : return self . _memory . read_bytes ( start_position , size )
Read and return size bytes from memory starting at start_position .
38
13