idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
250,700
def create_search_schema(self, schema, content):
    """Create a new Solr schema for Yokozuna via an HTTP PUT.

    :raises NotImplementedError: if the server lacks Search 2.0 admin
    :raises RiakError: if the server does not answer 204
    """
    if not self.yz_wm_schema:
        raise NotImplementedError("Search 2.0 administration is not "
                                  "supported for this version")
    url = self.search_schema_path(schema)
    headers = {'Content-Type': 'application/xml'}
    status, header, body = self._request('PUT', url, headers, content)
    if status != 204:
        raise RiakError('Error creating Search 2.0 schema.')
    return True
Create a new Solr schema for Yokozuna .
126
11
250,701
def get_search_schema(self, schema):
    """Fetch a Solr schema from Yokozuna.

    Returns a dict with ``name`` and ``content`` keys.
    """
    if not self.yz_wm_schema:
        raise NotImplementedError("Search 2.0 administration is not "
                                  "supported for this version")
    url = self.search_schema_path(schema)
    status, _, body = self._request('GET', url)
    if status != 200:
        raise RiakError('Error getting Search 2.0 schema.')
    return {'name': schema, 'content': bytes_to_str(body)}
Fetch a Solr schema from Yokozuna .
134
11
250,702
def search(self, index, query, **params):
    """Performs a search query against the given index.

    Falls back to the legacy ``search`` index when *index* is None.
    The Solr ``q.op`` option can be passed as the ``op`` keyword.
    """
    if index is None:
        index = 'search'
    options = {}
    if 'op' in params:
        options['q.op'] = params.pop('op')
    options.update(params)
    url = self.solr_select_path(index, query, **options)
    status, headers, data = self._request('GET', url)
    self.check_http_code(status, [200])
    content_type = headers['content-type']
    if 'json' in content_type:
        results = json.loads(bytes_to_str(data))
        return self._normalize_json_search_response(results)
    if 'xml' in content_type:
        return self._normalize_xml_search_response(data)
    raise ValueError("Could not decode search response")
Performs a search query .
199
6
250,703
def fulltext_add(self, index, docs):
    """Adds documents to the search index.

    Each doc is a dict; its items become ``<field name=...>`` nodes
    in a Solr ``<add>`` message POSTed to the update endpoint.
    """
    xml = Document()
    root = xml.createElement('add')
    for doc in docs:
        doc_element = xml.createElement('doc')
        for key in doc:
            field = xml.createElement('field')
            field.setAttribute("name", key)
            field.appendChild(xml.createTextNode(doc[key]))
            doc_element.appendChild(field)
        root.appendChild(doc_element)
    xml.appendChild(root)
    self._request('POST', self.solr_update_path(index),
                  {'Content-Type': 'text/xml'},
                  xml.toxml().encode('utf-8'))
Adds documents to the search index .
173
7
250,704
def fulltext_delete(self, index, docs=None, queries=None):
    """Removes documents from the full-text index.

    *docs* is a list of document ids, *queries* a list of Solr
    query strings; both become children of a ``<delete>`` message.
    """
    xml = Document()
    root = xml.createElement('delete')
    if docs:
        for doc in docs:
            doc_element = xml.createElement('id')
            doc_element.appendChild(xml.createTextNode(doc))
            root.appendChild(doc_element)
    if queries:
        for query in queries:
            query_element = xml.createElement('query')
            query_element.appendChild(xml.createTextNode(query))
            root.appendChild(query_element)
    xml.appendChild(root)
    self._request('POST', self.solr_update_path(index),
                  {'Content-Type': 'text/xml'},
                  xml.toxml().encode('utf-8'))
Removes documents from the full - text index .
191
10
250,705
def release(self):
    """Releases this resource back to the pool it came from.

    An errored resource is destroyed instead of being reused.
    """
    pool = self.pool
    (pool.delete_resource if self.errored else pool.release)(self)
Releases this resource back to the pool it came from .
32
12
250,706
def delete_resource(self, resource):
    """Deletes the resource from the pool and destroys the associated
    object. Not usually needed by users of the pool, but called
    internally when BadResource is raised.
    """
    with self.lock:
        self.resources.remove(resource)
    self.destroy_resource(resource.object)
    del resource
Deletes the resource from the pool and destroys the associated resource . Not usually needed by users of the pool but called internally when BadResource is raised .
35
30
250,707
def encode_timeseries_put(self, tsobj):
    """Returns an Erlang-TTB encoded tuple with the data and metadata
    from a TsObject.

    :raises NotImplementedError: if tsobj.columns is set
    :raises RiakError: if tsobj.rows is not a non-empty list
    """
    if tsobj.columns:
        raise NotImplementedError('columns are not used')
    if not (tsobj.rows and isinstance(tsobj.rows, list)):
        raise RiakError("TsObject requires a list of rows")
    req_rows = [tuple(self.encode_to_ts_cell(cell) for cell in row)
                for row in tsobj.rows]
    req = tsputreq_a, tsobj.table.name, [], req_rows
    mc = MSG_CODE_TS_TTB_MSG
    rc = MSG_CODE_TS_TTB_MSG
    return Msg(mc, encode(req), rc)
Returns an Erlang - TTB encoded tuple with the appropriate data and metadata from a TsObject .
194
20
250,708
def decode_timeseries_row(self, tsrow, tsct, convert_timestamp=False):
    """Decodes a TTB-encoded TsRow into a list of values.

    Empty lists and ``None`` cells decode to ``None``; timestamp
    columns are optionally converted to datetime objects.
    """
    row = []
    for i, cell in enumerate(tsrow):
        if cell is None or (isinstance(cell, list) and len(cell) == 0):
            row.append(None)
        elif convert_timestamp and tsct[i] == timestamp_a:
            row.append(datetime_from_unix_time_millis(cell))
        else:
            row.append(cell)
    return row
Decodes a TTB - encoded TsRow into a list
122
12
250,709
def to_op(self):
    """Extracts the modification operation from the set.

    Returns ``None`` when nothing has been staged.
    """
    if not self._adds and not self._removes:
        return None
    op = {}
    if self._adds:
        op['adds'] = list(self._adds)
    if self._removes:
        op['removes'] = list(self._removes)
    return op
Extracts the modification operation from the set .
77
10
250,710
def discard(self, element):
    """Removes an element from the set (staged as a remove op).

    Requires a context fetched from Riak.
    """
    _check_element(element)
    self._require_context()
    self._removes.add(element)
Removes an element from the set .
33
8
250,711
def getone(self, key):
    """Get the single value matching *key*.

    :raises KeyError: when the key is absent or matches multiple values
    """
    values = self.getall(key)
    if not values:
        raise KeyError('Key not found: %r' % key)
    if len(values) > 1:
        raise KeyError('Multiple values match %r: %r' % (key, values))
    return values[0]
Get one value matching the key raising a KeyError if multiple values were found .
71
16
250,712
def dict_of_lists(self):
    """Returns a dict mapping each key to the list of all its values."""
    result = {}
    for key, value in self._items:
        result.setdefault(key, []).append(value)
    return result
Returns a dictionary where each key is associated with a list of values .
50
14
250,713
def enq(self, task):
    """Enqueues a fetch task to the pool of workers.

    :raises RuntimeError: if the pool is stopped or stopping
    """
    if self._stop.is_set():
        raise RuntimeError("Attempted to enqueue an operation while "
                           "multi pool was shutdown!")
    self._inq.put(task)
Enqueues a fetch task to the pool of workers . This will raise a RuntimeError if the pool is stopped or in the process of stopping .
55
30
250,714
def start(self):
    """Starts the worker threads if they are not already started.

    Thread-safe: one caller wins the lock and spawns the workers;
    everyone else blocks until the started flag is set.
    """
    if self._started.is_set():
        return
    if self._lock.acquire(False):
        # We hold the lock: spawn the workers, flag started, release.
        for i in range(self._size):
            name = "riak.client.multi-worker-{0}-{1}".format(self._name, i)
            worker = Thread(target=self._worker_method, name=name)
            # NOTE(review): non-daemon workers keep the process alive
            # until stop() is called — confirm this is intended.
            worker.daemon = False
            worker.start()
            self._workers.append(worker)
        self._started.set()
        self._lock.release()
    else:
        # Another thread is starting the workers; wait for its signal.
        self._started.wait()
Starts the worker threads if they are not already started . This method is thread - safe and will be called automatically when executing an operation .
215
28
250,715
def stop(self):
    """Signals the worker threads to exit and waits on them."""
    if self.stopped():
        return
    self._stop.set()
    for worker in self._workers:
        worker.join()
Signals the worker threads to exit and waits on them .
35
12
250,716
def _check_key(self, key):
    """Ensures well-formedness of a key: a 2-item (name, datatype)
    pair whose datatype is registered in TYPES.
    """
    if len(key) != 2:
        raise TypeError('invalid key: %r' % key)
    if key[1] not in TYPES:
        raise TypeError('invalid datatype: %s' % key[1])
Ensures well - formedness of a key .
67
11
250,717
def modified(self):
    """Whether the map has staged local modifications.

    True when any key is staged for removal, or any existing or
    newly-added value reports itself modified.
    """
    if self._removes:
        return True
    if any(self._value[v].modified for v in self._value):
        return True
    return any(self._updates[v].modified for v in self._updates)
Whether the map has staged local modifications .
61
8
250,718
def _format_python2_or_3(self):
    """Change the PB files to use full pathnames for Python 3.x and
    modify the metaclasses to be version agnostic.

    Reads the CSV manifest at ``self.source`` (third column is the
    proto base name), then rewrites each generated
    ``riak/pb/<name>_pb2.py`` in place.
    """
    pb_files = set()
    with open(self.source, 'r', buffering=1) as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            _, _, proto = row
            pb_files.add('riak/pb/{0}_pb2.py'.format(proto))
    for im in sorted(pb_files):
        with open(im, 'r', buffering=1) as pbfile:
            contents = 'from six import *\n' + pbfile.read()
        contents = re.sub(r'riak_pb2', r'riak.pb.riak_pb2', contents)
        # Look for this pattern in the protoc-generated file:
        #
        #     class RpbCounterGetResp(_message.Message):
        #         __metaclass__ = _reflection.GeneratedProtocolMessageType
        #
        # and convert it to:
        #
        #     @add_metaclass(_reflection.GeneratedProtocolMessageType)
        #     class RpbCounterGetResp(_message.Message):
        #
        # BUGFIX: the second pattern segment was a non-raw string, so
        # '\s' / '\S' were invalid escape sequences (SyntaxWarning on
        # modern Python, slated to become an error); both segments are
        # raw strings now, which leaves the regex itself unchanged.
        contents = re.sub(r'class\s+(\S+)\((\S+)\):\s*\n'
                          r'\s+__metaclass__\s+=\s+(\S+)\s*\n',
                          r'@add_metaclass(\3)\nclass \1(\2):\n',
                          contents)
        with open(im, 'w', buffering=1) as pbfile:
            pbfile.write(contents)
Change the PB files to use full pathnames for Python 3 . x and modify the metaclasses to be version agnostic
358
25
250,719
def reload(self, **params):
    """Reloads the datatype from Riak.

    :raises ValueError: if bucket or key is unset
    :raises TypeError: if the fetched datatype does not match this one
    """
    if not self.bucket:
        raise ValueError('bucket property not assigned')
    if not self.key:
        raise ValueError('key property not assigned')
    dtype, value, context = self.bucket._client._fetch_datatype(
        self.bucket, self.key, **params)
    if dtype != self.type_name:
        raise TypeError("Expected datatype {} but "
                        "got datatype {}".format(self.__class__,
                                                 TYPES[dtype]))
    self.clear()
    self._context = context
    self._set_value(value)
    return self
Reloads the datatype from Riak .
150
11
250,720
def update(self, **params):
    """Sends locally staged mutations to Riak.

    :raises ValueError: when nothing has been staged
    """
    if not self.modified:
        raise ValueError("No operation to perform")
    params.setdefault('return_body', True)
    self.bucket._client.update_datatype(self, **params)
    self.clear()
    return self
Sends locally staged mutations to Riak .
66
9
250,721
def encode_quorum(self, rw):
    """Converts a symbolic quorum value into its on-the-wire equivalent.

    Non-negative plain ints pass through; anything else yields None.
    ``type(rw) is int`` (not isinstance) means int subclasses such as
    bool are rejected.
    """
    if rw in QUORUM_TO_PB:
        return QUORUM_TO_PB[rw]
    if type(rw) is int and rw >= 0:
        return rw
    return None
Converts a symbolic quorum value into its on - the - wire equivalent .
58
16
250,722
def decode_contents(self, contents, obj):
    """Decodes the list of siblings from the protobuf representation
    into *obj*, invoking its resolver when a conflict exists.
    """
    obj.siblings = [self.decode_content(c, RiakContent(obj))
                    for c in contents]
    # Only run sibling resolution when there actually is a conflict.
    if len(obj.siblings) > 1 and obj.resolver is not None:
        obj.resolver(obj)
    return obj
Decodes the list of siblings from the protobuf representation into the object .
72
16
250,723
def decode_content(self, rpb_content, sibling):
    """Decodes a single sibling from the protobuf representation into
    the given RiakContent, returning it.
    """
    sibling.exists = not (rpb_content.HasField("deleted") and
                          rpb_content.deleted)
    if rpb_content.HasField("content_type"):
        sibling.content_type = bytes_to_str(rpb_content.content_type)
    if rpb_content.HasField("charset"):
        sibling.charset = bytes_to_str(rpb_content.charset)
    if rpb_content.HasField("content_encoding"):
        sibling.content_encoding = \
            bytes_to_str(rpb_content.content_encoding)
    if rpb_content.HasField("vtag"):
        sibling.etag = bytes_to_str(rpb_content.vtag)
    sibling.links = [self.decode_link(link)
                     for link in rpb_content.links]
    if rpb_content.HasField("last_mod"):
        sibling.last_modified = float(rpb_content.last_mod)
        if rpb_content.HasField("last_mod_usecs"):
            sibling.last_modified += \
                rpb_content.last_mod_usecs / 1000000.0
    sibling.usermeta = dict([(bytes_to_str(usermd.key),
                              bytes_to_str(usermd.value))
                             for usermd in rpb_content.usermeta])
    sibling.indexes = set([(bytes_to_str(index.key),
                            decode_index_value(index.key, index.value))
                           for index in rpb_content.indexes])
    sibling.encoded_data = rpb_content.value
    return sibling
Decodes a single sibling from the protobuf representation into a RiakObject .
397
17
250,724
def encode_content(self, robj, rpb_content):
    """Fills an RpbContent message with the data and metadata from a
    RiakObject.

    :raises RiakError: if a link is not a (bucket, key, tag) tuple
    """
    if robj.content_type:
        rpb_content.content_type = str_to_bytes(robj.content_type)
    if robj.charset:
        rpb_content.charset = str_to_bytes(robj.charset)
    if robj.content_encoding:
        rpb_content.content_encoding = \
            str_to_bytes(robj.content_encoding)
    for uk in robj.usermeta:
        pair = rpb_content.usermeta.add()
        pair.key = str_to_bytes(uk)
        pair.value = str_to_bytes(robj.usermeta[uk])
    for link in robj.links:
        pb_link = rpb_content.links.add()
        try:
            bucket, key, tag = link
        except ValueError:
            raise RiakError("Invalid link tuple %s" % link)
        pb_link.bucket = str_to_bytes(bucket)
        pb_link.key = str_to_bytes(key)
        pb_link.tag = str_to_bytes(tag if tag else '')
    for field, value in robj.indexes:
        pair = rpb_content.indexes.add()
        pair.key = str_to_bytes(field)
        pair.value = str_to_bytes(str(value))
    # Python 2.x data is stored in a plain string
    if six.PY2:
        rpb_content.value = str(robj.encoded_data)
    else:
        rpb_content.value = robj.encoded_data
Fills an RpbContent message with the appropriate data and metadata from a RiakObject .
376
19
250,725
def decode_link(self, link):
    """Decodes an RpbLink message into a (bucket, key, tag) tuple,
    with None for any field that is unset.
    """
    bucket = bytes_to_str(link.bucket) if link.HasField("bucket") else None
    key = bytes_to_str(link.key) if link.HasField("key") else None
    tag = bytes_to_str(link.tag) if link.HasField("tag") else None
    return (bucket, key, tag)
Decodes an RpbLink message into a tuple
103
10
250,726
def encode_bucket_props(self, props, msg):
    """Encodes a dict of bucket properties into the protobuf message.

    Properties are grouped by kind: plain values, commit hooks,
    modfun pairs, quorum values, and the replication mode.
    Returns *msg* for convenience.
    """
    # Plain (scalar) properties; strings must be encoded to bytes.
    for prop in NORMAL_PROPS:
        if prop in props and props[prop] is not None:
            if isinstance(props[prop], six.string_types):
                setattr(msg.props, prop, str_to_bytes(props[prop]))
            else:
                setattr(msg.props, prop, props[prop])
    # Commit hooks also require their matching 'has_' flag to be set.
    for prop in COMMIT_HOOK_PROPS:
        if prop in props:
            setattr(msg.props, 'has_' + prop, True)
            self.encode_hooklist(props[prop], getattr(msg.props, prop))
    for prop in MODFUN_PROPS:
        if prop in props and props[prop] is not None:
            self.encode_modfun(props[prop], getattr(msg.props, prop))
    # Quorum values: 'default'/None are omitted entirely.
    for prop in QUORUM_PROPS:
        if prop in props and props[prop] not in (None, 'default'):
            value = self.encode_quorum(props[prop])
            if value is not None:
                if isinstance(value, six.string_types):
                    setattr(msg.props, prop, str_to_bytes(value))
                else:
                    setattr(msg.props, prop, value)
    if 'repl' in props:
        msg.props.repl = REPL_TO_PB[props['repl']]
    return msg
Encodes a dict of bucket properties into the protobuf message .
311
14
250,727
def decode_bucket_props(self, msg):
    """Decodes the protobuf bucket properties message into a dict.

    Inverse of encode_bucket_props: scalar props (bytes decoded to
    str), commit hooks, modfun pairs, quorums and replication mode.
    """
    props = {}
    for prop in NORMAL_PROPS:
        if msg.HasField(prop):
            props[prop] = getattr(msg, prop)
            # Byte strings come back as bytes; normalize to str.
            if isinstance(props[prop], bytes):
                props[prop] = bytes_to_str(props[prop])
    for prop in COMMIT_HOOK_PROPS:
        if getattr(msg, 'has_' + prop):
            props[prop] = self.decode_hooklist(getattr(msg, prop))
    for prop in MODFUN_PROPS:
        if msg.HasField(prop):
            props[prop] = self.decode_modfun(getattr(msg, prop))
    for prop in QUORUM_PROPS:
        if msg.HasField(prop):
            props[prop] = self.decode_quorum(getattr(msg, prop))
    if msg.HasField('repl'):
        props['repl'] = REPL_TO_PY[msg.repl]
    return props
Decodes the protobuf bucket properties message into a dict .
230
13
250,728
def encode_modfun(self, props, msg=None):
    """Encodes a dict with ``mod`` and ``fun`` keys into a protobuf
    modfun pair. Used in bucket properties.

    A fresh RpbModFun is created when *msg* is not supplied.
    """
    if msg is None:
        msg = riak.pb.riak_pb2.RpbModFun()
    msg.module = str_to_bytes(props['mod'])
    msg.function = str_to_bytes(props['fun'])
    return msg
Encodes a dict with mod and fun keys into a protobuf modfun pair . Used in bucket properties .
76
23
250,729
def encode_hooklist(self, hooklist, msg):
    """Encodes a list of commit hooks into their protobuf equivalent.
    Used in bucket properties.
    """
    for hook in hooklist:
        self.encode_hook(hook, msg.add())
Encodes a list of commit hooks into their protobuf equivalent . Used in bucket properties .
41
19
250,730
def decode_hook(self, hook):
    """Decodes a protobuf commit hook message into a dict. Used in
    bucket properties.
    """
    if hook.HasField('modfun'):
        return self.decode_modfun(hook.modfun)
    return {'name': bytes_to_str(hook.name)}
Decodes a protobuf commit hook message into a dict . Used in bucket properties .
54
18
250,731
def encode_hook(self, hook, msg):
    """Encodes a commit hook dict into the protobuf message. Used in
    bucket properties. Returns *msg*.
    """
    if 'name' in hook:
        msg.name = str_to_bytes(hook['name'])
    else:
        self.encode_modfun(hook, msg.modfun)
    return msg
Encodes a commit hook dict into the protobuf message . Used in bucket properties .
54
18
250,732
def encode_index_req(self, bucket, index, startkey, endkey=None,
                     return_terms=None, max_results=None,
                     continuation=None, timeout=None, term_regex=None,
                     streaming=False):
    """Encodes a secondary index request into the protobuf message.

    A range query is built when *endkey* is given, otherwise an
    exact-match query on *startkey*. Pagination (max_results /
    continuation), term_regex and streaming are optional.
    Returns a Msg ready to send.
    """
    req = riak.pb.riak_kv_pb2.RpbIndexReq(
        bucket=str_to_bytes(bucket.name),
        index=str_to_bytes(index))
    self._add_bucket_type(req, bucket.bucket_type)
    if endkey is not None:
        # Range query over [startkey, endkey]
        req.qtype = riak.pb.riak_kv_pb2.RpbIndexReq.range
        req.range_min = str_to_bytes(str(startkey))
        req.range_max = str_to_bytes(str(endkey))
    else:
        # Exact-match query
        req.qtype = riak.pb.riak_kv_pb2.RpbIndexReq.eq
        req.key = str_to_bytes(str(startkey))
    if return_terms is not None:
        req.return_terms = return_terms
    if max_results:
        req.max_results = max_results
    if continuation:
        req.continuation = str_to_bytes(continuation)
    if timeout:
        # 'infinity' is encoded as a zero timeout on the wire.
        if timeout == 'infinity':
            req.timeout = 0
        else:
            req.timeout = timeout
    if term_regex:
        req.term_regex = str_to_bytes(term_regex)
    req.stream = streaming
    mc = riak.pb.messages.MSG_CODE_INDEX_REQ
    rc = riak.pb.messages.MSG_CODE_INDEX_RESP
    return Msg(mc, req.SerializeToString(), rc)
Encodes a secondary index request into the protobuf message .
388
13
250,733
def decode_search_index(self, index):
    """Decodes an RpbYokozunaIndex message into a dict with ``name``
    and, when present, ``schema`` and ``n_val`` keys.
    """
    result = {'name': bytes_to_str(index.name)}
    if index.HasField('schema'):
        result['schema'] = bytes_to_str(index.schema)
    if index.HasField('n_val'):
        result['n_val'] = index.n_val
    return result
Decodes an RpbYokozunaIndex message into a dict .
92
16
250,734
def encode_timeseries_put(self, tsobj):
    """Fills a TsPutReq message with the appropriate data and metadata
    from a TsObject, returning it wrapped in a Msg.

    :raises NotImplementedError: if tsobj.columns is set
    :raises ValueError: if any row is not a list
    :raises RiakError: if tsobj.rows is not a non-empty list
    """
    req = riak.pb.riak_ts_pb2.TsPutReq()
    req.table = str_to_bytes(tsobj.table.name)
    if tsobj.columns:
        raise NotImplementedError("columns are not implemented yet")
    if tsobj.rows and isinstance(tsobj.rows, list):
        for row in tsobj.rows:
            tsr = req.rows.add()  # NB: type TsRow
            if not isinstance(row, list):
                raise ValueError("TsObject row must be a list of values")
            for cell in row:
                tsc = tsr.cells.add()  # NB: type TsCell
                self.encode_to_ts_cell(cell, tsc)
    else:
        raise RiakError("TsObject requires a list of rows")
    mc = riak.pb.messages.MSG_CODE_TS_PUT_REQ
    rc = riak.pb.messages.MSG_CODE_TS_PUT_RESP
    return Msg(mc, req.SerializeToString(), rc)
Fills an TsPutReq message with the appropriate data and metadata from a TsObject .
261
19
250,735
def decode_timeseries_row(self, tsrow, tscols=None,
                          convert_timestamp=False):
    """Decodes a protobuf TsRow into a list of Python values.

    When *tscols* is given, each cell's value field is checked
    against the declared column type (TypeError on mismatch).
    Timestamp cells are optionally converted to datetimes; cells
    with no value field set decode to None.
    """
    row = []
    for i, cell in enumerate(tsrow.cells):
        col = None
        if tscols is not None:
            col = tscols[i]
        if cell.HasField('varchar_value'):
            if col and not (col.type == TsColumnType.Value('VARCHAR') or
                            col.type == TsColumnType.Value('BLOB')):
                raise TypeError('expected VARCHAR or BLOB column')
            else:
                row.append(cell.varchar_value)
        elif cell.HasField('sint64_value'):
            if col and col.type != TsColumnType.Value('SINT64'):
                raise TypeError('expected SINT64 column')
            else:
                row.append(cell.sint64_value)
        elif cell.HasField('double_value'):
            if col and col.type != TsColumnType.Value('DOUBLE'):
                raise TypeError('expected DOUBLE column')
            else:
                row.append(cell.double_value)
        elif cell.HasField('timestamp_value'):
            if col and col.type != TsColumnType.Value('TIMESTAMP'):
                raise TypeError('expected TIMESTAMP column')
            else:
                dt = cell.timestamp_value
                if convert_timestamp:
                    dt = datetime_from_unix_time_millis(
                        cell.timestamp_value)
                row.append(dt)
        elif cell.HasField('boolean_value'):
            if col and col.type != TsColumnType.Value('BOOLEAN'):
                raise TypeError('expected BOOLEAN column')
            else:
                row.append(cell.boolean_value)
        else:
            # No value field set: a null cell.
            row.append(None)
    return row
Decodes a TsRow into a list
431
8
250,736
def decode_preflist(self, item):
    """Decodes a preflist response item into a dict with partition,
    node and primary keys.
    """
    return {'partition': item.partition,
            'node': bytes_to_str(item.node),
            'primary': item.primary}
Decodes a preflist response
49
7
250,737
def ping(self):
    """Ping the remote server; True when it answers with PING_RESP."""
    msg_code = riak.pb.messages.MSG_CODE_PING_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_ping()
    resp_code, _ = self._request(msg, codec)
    return resp_code == riak.pb.messages.MSG_CODE_PING_RESP
Ping the remote server
96
4
250,738
def get_server_info(self):
    """Get information about the server."""
    # NB: can't go through _get_codec here due to recursion, so use
    # the plain protobuf codec directly.
    codec = PbufCodec()
    msg = Msg(riak.pb.messages.MSG_CODE_GET_SERVER_INFO_REQ, None,
              riak.pb.messages.MSG_CODE_GET_SERVER_INFO_RESP)
    resp_code, resp = self._request(msg, codec)
    return codec.decode_get_server_info(resp)
Get information about the server
129
5
250,739
def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
        notfound_ok=None, head_only=False):
    """Serialize a get request and deserialize the response into
    *robj*.
    """
    msg_code = riak.pb.messages.MSG_CODE_GET_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_get(robj, r, pr, timeout, basic_quorum,
                           notfound_ok, head_only)
    resp_code, resp = self._request(msg, codec)
    return codec.decode_get(robj, resp)
Serialize get request and deserialize response
134
9
250,740
def ts_stream_keys(self, table, timeout=None):
    """Streams keys from a timeseries table, returning an iterator
    that yields lists of keys.
    """
    msg_code = riak.pb.messages.MSG_CODE_TS_LIST_KEYS_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_timeseries_listkeysreq(table, timeout)
    self._send_msg(msg.msg_code, msg.data)
    return PbufTsKeyStream(self, codec, self._ts_convert_timestamp)
Streams keys from a timeseries table returning an iterator that yields lists of keys .
112
17
250,741
def get_keys(self, bucket, timeout=None):
    """Lists all keys within a bucket by draining the key stream."""
    msg_code = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ
    codec = self._get_codec(msg_code)
    stream = self.stream_keys(bucket, timeout=timeout)
    return codec.decode_get_keys(stream)
Lists all keys within a bucket .
77
8
250,742
def stream_keys(self, bucket, timeout=None):
    """Streams keys from a bucket, returning an iterator that yields
    lists of keys.
    """
    msg_code = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_stream_keys(bucket, timeout)
    self._send_msg(msg.msg_code, msg.data)
    return PbufKeyStream(self, codec)
Streams keys from a bucket returning an iterator that yields lists of keys .
93
15
250,743
def get_buckets(self, bucket_type=None, timeout=None):
    """Serialize a bucket-listing request and deserialize the
    response, returning the raw bucket list.
    """
    msg_code = riak.pb.messages.MSG_CODE_LIST_BUCKETS_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_get_buckets(bucket_type, timeout,
                                   streaming=False)
    resp_code, resp = self._request(msg, codec)
    return resp.buckets
Serialize bucket listing request and deserialize response
98
10
250,744
def get_bucket_props(self, bucket):
    """Serialize a bucket-properties request and deserialize the
    response into a dict.
    """
    msg_code = riak.pb.messages.MSG_CODE_GET_BUCKET_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_get_bucket_props(bucket)
    resp_code, resp = self._request(msg, codec)
    return codec.decode_bucket_props(resp.props)
Serialize bucket property request and deserialize response
99
10
250,745
def set_bucket_props(self, bucket, props):
    """Serialize a set-bucket-properties request and deserialize the
    response.

    :raises NotImplementedError: when the server only supports the
        n_val and allow_mult properties over PBC
    """
    if not self.pb_all_bucket_props():
        for key in props:
            if key not in ('n_val', 'allow_mult'):
                raise NotImplementedError('Server only supports n_val and '
                                          'allow_mult properties over PBC')
    msg_code = riak.pb.messages.MSG_CODE_SET_BUCKET_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_set_bucket_props(bucket, props)
    resp_code, resp = self._request(msg, codec)
    return True
Serialize set bucket property request and deserialize response
156
11
250,746
def clear_bucket_props(self, bucket):
    """Clear bucket properties, resetting them to their defaults.

    Returns False when the server does not support the operation.
    """
    if not self.pb_clear_bucket_props():
        return False
    msg_code = riak.pb.messages.MSG_CODE_RESET_BUCKET_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_clear_bucket_props(bucket)
    self._request(msg, codec)
    return True
Clear bucket properties resetting them to their defaults
100
9
250,747
def get_bucket_type_props(self, bucket_type):
    """Fetch bucket-type properties as a dict."""
    self._check_bucket_types(bucket_type)
    msg_code = riak.pb.messages.MSG_CODE_GET_BUCKET_TYPE_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_get_bucket_type_props(bucket_type)
    resp_code, resp = self._request(msg, codec)
    return codec.decode_bucket_props(resp.props)
Fetch bucket - type properties
123
6
250,748
def set_bucket_type_props(self, bucket_type, props):
    """Set bucket-type properties; returns True on success."""
    self._check_bucket_types(bucket_type)
    msg_code = riak.pb.messages.MSG_CODE_SET_BUCKET_TYPE_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_set_bucket_type_props(bucket_type, props)
    resp_code, resp = self._request(msg, codec)
    return True
Set bucket - type properties
114
5
250,749
def print_report(label, user, system, real):
    """Prints the report of one step of a benchmark."""
    print("{:<12s} {:12f} {:12f} ( {:12f} )"
          .format(label, user, system, real))
Prints the report of one step of a benchmark .
49
11
250,750
def next(self):
    """Runs the next iteration of the benchmark.

    Prints the rehearsal header while rehearsal runs remain, then
    the real header (after a GC and a separator when rehearsing).
    """
    if self.count == 0:
        raise StopIteration
    if self.count > 1:
        print_rehearsal_header()
    else:
        if self.rehearse:
            gc.collect()
            print("-" * 59)
            print()
        print_header()
    self.count -= 1
    return self
Runs the next iteration of the benchmark .
71
9
250,751
def add_object(self, obj):
    """Adds a RiakObject to the inputs by its bucket name and key."""
    return self.add_bucket_key_data(obj._bucket._name, obj._key, None)
Adds a RiakObject to the inputs .
38
9
250,752
def add_bucket(self, bucket, bucket_type=None):
    """Adds all keys in a bucket to the inputs.

    :raises riak.ListError: unless list operations are explicitly
        enabled via riak.disable_list_exceptions
    """
    if not riak.disable_list_exceptions:
        raise riak.ListError()
    self._input_mode = 'bucket'
    if isinstance(bucket, riak.RiakBucket):
        if bucket.bucket_type.is_default():
            self._inputs = {'bucket': bucket.name}
        else:
            self._inputs = {'bucket': [bucket.bucket_type.name,
                                       bucket.name]}
    elif bucket_type is not None and bucket_type != "default":
        self._inputs = {'bucket': [bucket_type, bucket]}
    else:
        self._inputs = {'bucket': bucket}
    return self
Adds all keys in a bucket to the inputs .
175
10
250,753
def add_key_filters(self, key_filters):
    """Adds several key filters to the inputs.

    :raises ValueError: when the inputs are a search query
    """
    if self._input_mode == 'query':
        raise ValueError('Key filters are not supported in a query.')
    self._key_filters.extend(key_filters)
    return self
Adds key filters to the inputs .
60
7
250,754
def add_key_filter(self, *args):
    """Add a single key filter (the positional args) to the inputs.

    :raises ValueError: when the inputs are a search query
    """
    if self._input_mode == 'query':
        raise ValueError('Key filters are not supported in a query.')
    self._key_filters.append(args)
    return self
Add a single key filter to the inputs .
54
9
250,755
def reduce_sort(self, js_cmp=None, options=None):
    """Adds the Javascript built-in ``Riak.reduceSort`` to the query
    as a reduce phase, with an optional comparator function source.
    """
    options = dict() if options is None else options
    if js_cmp:
        options['arg'] = js_cmp
    return self.reduce("Riak.reduceSort", options=options)
Adds the Javascript built - in Riak . reduceSort to the query as a reduce phase .
61
19
250,756
def reduce_slice(self, start, end, options=None):
    """Adds the Javascript built-in ``Riak.reduceSlice`` to the query
    as a reduce phase over [start, end].
    """
    options = dict() if options is None else options
    options['arg'] = [start, end]
    return self.reduce("Riak.reduceSlice", options=options)
Adds the Javascript built - in Riak . reduceSlice to the query as a reduce phase .
57
20
250,757
def to_array(self):
    """Convert the RiakMapReducePhase to a JSON-serializable dict.
    Used internally.

    The function may be a [bucket, key] pair, inline source
    (detected by a '{' for javascript), or a named function /
    [module, function] pair.
    """
    stepdef = {'keep': self._keep,
               'language': self._language,
               'arg': self._arg}
    if self._language == 'javascript':
        if isinstance(self._function, list):
            stepdef['bucket'] = self._function[0]
            stepdef['key'] = self._function[1]
        elif isinstance(self._function, string_types):
            if "{" in self._function:
                stepdef['source'] = self._function
            else:
                stepdef['name'] = self._function
    elif self._language == 'erlang':
        if isinstance(self._function, list):
            stepdef['module'] = self._function[0]
            stepdef['function'] = self._function[1]
        elif isinstance(self._function, string_types):
            stepdef['source'] = self._function
    return {self._type: stepdef}
Convert the RiakMapReducePhase to a format that can be output into JSON . Used internally .
248
22
250,758
def to_array(self):
    """Convert the RiakLinkPhase to a JSON-serializable dict.
    Used internally.
    """
    return {'link': {'bucket': self._bucket,
                     'tag': self._tag,
                     'keep': self._keep}}
Convert the RiakLinkPhase to a format that can be output into JSON . Used internally .
50
20
250,759
def last_written_resolver(riak_object):
    """A conflict-resolution function that keeps only the most
    recently modified sibling (by its last_modified timestamp).
    """
    newest = max(riak_object.siblings, key=lambda s: s.last_modified)
    riak_object.siblings = [newest]
A conflict - resolution function that resolves by selecting the most recently - modified sibling by timestamp .
44
18
250,760
def verify_cb(conn, cert, errnum, depth, ok):
    """The default OpenSSL certificate verification callback.

    :raises SecurityError: when OpenSSL reports the cert as invalid
    """
    if not ok:
        raise SecurityError("Could not verify CA certificate {0}"
                            .format(cert.get_subject()))
    return ok
The default OpenSSL certificate verification callback .
48
8
250,761
def next_page(self, timeout=None, stream=None):
    """Fetches the next page using the same parameters as the
    original query, via the stored continuation token.

    :raises ValueError: when there is no continuation
    """
    if not self.continuation:
        raise ValueError("Cannot get next index page, no continuation")
    if stream is not None:
        self.stream = stream
    args = {'bucket': self.bucket,
            'index': self.index,
            'startkey': self.startkey,
            'endkey': self.endkey,
            'return_terms': self.return_terms,
            'max_results': self.max_results,
            'continuation': self.continuation,
            'timeout': timeout,
            'term_regex': self.term_regex}
    if self.stream:
        return self.client.stream_index(**args)
    return self.client.get_index(**args)
Fetches the next page using the same parameters as the original query .
175
15
250,762
def _validate_timeout ( timeout , infinity_ok = False ) : if timeout is None : return if timeout == 'infinity' : if infinity_ok : return else : raise ValueError ( 'timeout must be a positive integer ' '("infinity" is not valid)' ) if isinstance ( timeout , six . integer_types ) and timeout > 0 : return raise ValueError ( 'timeout must be a positive integer' )
Raises an exception if the given timeout is an invalid value .
91
13
250,763
def stream_buckets(self, bucket_type=None, timeout=None):
    """Streams the list of buckets. This is a generator method that
    should be iterated over; it yields non-empty lists of bucket
    objects.
    """
    if not riak.disable_list_exceptions:
        raise ListError()
    _validate_timeout(timeout)
    if bucket_type:
        bucketfn = self._bucket_type_bucket_builder
    else:
        bucketfn = self._default_type_bucket_builder

    def make_op(transport):
        return transport.stream_buckets(bucket_type=bucket_type,
                                        timeout=timeout)

    for bucket_list in self._stream_with_retry(make_op):
        buckets = [bucketfn(bytes_to_str(name), bucket_type)
                   for name in bucket_list]
        if buckets:
            yield buckets
Streams the list of buckets . This is a generator method that should be iterated over .
170
19
250,764
def stream_index(self, bucket, index, startkey, endkey=None,
                 return_terms=None, max_results=None, continuation=None,
                 timeout=None, term_regex=None):
    """Queries a secondary index, streaming matching keys through an
    iterator attached to an acquired transport resource.
    """
    # TODO FUTURE: implement "retry on connection closed"
    # as in stream_mapred
    _validate_timeout(timeout, infinity_ok=True)
    page = IndexPage(self, bucket, index, startkey, endkey,
                     return_terms, max_results, term_regex)
    page.stream = True
    resource = self._acquire()
    transport = resource.object
    page.results = transport.stream_index(
        bucket, index, startkey, endkey, return_terms=return_terms,
        max_results=max_results, continuation=continuation,
        timeout=timeout, term_regex=term_regex)
    page.results.attach(resource)
    return page
Queries a secondary index streaming matching keys through an iterator .
196
12
250,765
def stream_keys(self, bucket, timeout=None):
    """List all keys in a bucket via a stream, yielding non-empty
    batches of keys. Generator method; iterate over the result.
    """
    if not riak.disable_list_exceptions:
        raise ListError()
    _validate_timeout(timeout)

    def make_op(transport):
        return transport.stream_keys(bucket, timeout=timeout)

    for batch in self._stream_with_retry(make_op):
        if not batch:
            continue
        if six.PY2:
            yield batch
        else:
            yield [bytes_to_str(key) for key in batch]
Lists all keys in a bucket via a stream . This is a generator method which should be iterated over .
116
23
250,766
def ts_stream_keys(self, table, timeout=None):
    """List all keys in a time series table via a stream, yielding
    non-empty batches of keys. Generator method; iterate over the
    result. The stream is always closed when iteration ends.
    """
    if not riak.disable_list_exceptions:
        raise ListError()
    # Accept either a table name or a Table object.
    if isinstance(table, six.string_types):
        tbl = Table(self, table)
    else:
        tbl = table
    _validate_timeout(timeout)
    resource = self._acquire()
    stream = resource.object.ts_stream_keys(tbl, timeout)
    stream.attach(resource)
    try:
        for batch in stream:
            if batch:
                yield batch
    finally:
        stream.close()
Lists all keys in a time series table via a stream . This is a generator method which should be iterated over .
126
25
250,767
def multiget(self, pairs, **params):
    """Fetch many keys in parallel via threads, using the shared
    multiget pool when one is configured.
    """
    pool = self._multiget_pool
    if pool:
        params['pool'] = pool
    return riak.client.multi.multiget(self, pairs, **params)
Fetches many keys in parallel via threads .
59
10
250,768
def multiput(self, objs, **params):
    """Store objects in parallel via threads, using the shared
    multiput pool when one is configured.
    """
    pool = self._multiput_pool
    if pool:
        params['pool'] = pool
    return riak.client.multi.multiput(self, objs, **params)
Stores objects in parallel via threads .
59
8
250,769
def fetch_datatype(self, bucket, key, r=None, pr=None, basic_quorum=None,
                   notfound_ok=None, timeout=None, include_context=None):
    """Fetch the value of a Riak Datatype and wrap it in the matching
    datatype class looked up in TYPES.
    """
    dtype, value, context = self._fetch_datatype(
        bucket, key, r=r, pr=pr, basic_quorum=basic_quorum,
        notfound_ok=notfound_ok, timeout=timeout,
        include_context=include_context)
    klass = TYPES[dtype]
    return klass(bucket=bucket, key=key, value=value, context=context)
Fetches the value of a Riak Datatype .
132
13
250,770
def update_datatype(self, datatype, w=None, dw=None, pw=None,
                    return_body=None, timeout=None, include_context=None):
    """Send an update to a Riak Datatype on the server.

    This operation is not idempotent and so is never retried
    automatically.
    """
    _validate_timeout(timeout)
    with self._transport() as transport:
        return transport.update_datatype(datatype, w=w, dw=dw, pw=pw,
                                         return_body=return_body,
                                         timeout=timeout,
                                         include_context=include_context)
Sends an update to a Riak Datatype to the server . This operation is not idempotent and so will not be retried automatically .
109
32
250,771
def _non_connect_send_recv ( self , msg_code , data = None ) : self . _non_connect_send_msg ( msg_code , data ) return self . _recv_msg ( )
Similar to self . _send_recv but doesn t try to initiate a connection thus preventing an infinite loop .
49
23
250,772
def _non_connect_send_msg ( self , msg_code , data ) : try : self . _socket . sendall ( self . _encode_msg ( msg_code , data ) ) except ( IOError , socket . error ) as e : if e . errno == errno . EPIPE : raise ConnectionClosed ( e ) else : raise
Similar to self . _send but doesn t try to initiate a connection thus preventing an infinite loop .
79
20
250,773
def _init_security ( self ) : if not self . _starttls ( ) : raise SecurityError ( "Could not start TLS connection" ) # _ssh_handshake() will throw an exception upon failure self . _ssl_handshake ( ) if not self . _auth ( ) : raise SecurityError ( "Could not authorize connection" )
Initialize a secure connection to the server .
74
9
250,774
def _starttls(self):
    """Exchange a STARTTLS message with Riak to initiate secure
    communications; return True if Riak responds with a STARTTLS
    response, False otherwise.
    """
    starttls = riak.pb.messages.MSG_CODE_START_TLS
    resp_code, _ = self._non_connect_send_recv(starttls)
    return resp_code == starttls
Exchange a STARTTLS message with Riak to initiate secure communications return True if Riak responds with a STARTTLS response False otherwise
76
28
250,775
def close(self):
    """Close the underlying socket of the PB connection, shutting it
    down first when the stdlib SSL path is in use.
    """
    if not self._socket:
        return
    if USE_STDLIB_SSL:
        # NB: Python 2.7.8 and earlier does not have a compatible
        # shutdown() method due to the SSL lib
        try:
            self._socket.shutdown(socket.SHUT_RDWR)
        except EnvironmentError:
            # NB: sometimes these exceptions are raised if the initial
            # connection didn't succeed correctly, or if shutdown() is
            # called after the connection dies
            logging.debug('Exception occurred while shutting '
                          'down socket.', exc_info=True)
    self._socket.close()
    del self._socket
Closes the underlying socket of the PB connection .
130
10
250,776
def content_property(name, doc=None):
    """Build a property that proxies attribute ``name`` to the sole
    sibling of a RiakObject, raising ConflictError when the object is
    in conflict (has more than one sibling).
    """
    def _getter(self):
        siblings = self.siblings
        if not siblings:
            return
        if len(siblings) != 1:
            raise ConflictError()
        return getattr(siblings[0], name)

    def _setter(self, value):
        if not self.siblings:
            # In this case, assume that what the user wants is to
            # create a new sibling inside an empty object.
            self.siblings = [RiakContent(self)]
        if len(self.siblings) != 1:
            raise ConflictError()
        setattr(self.siblings[0], name, value)

    return property(_getter, _setter, doc=doc)
Delegates a property to the first sibling in a RiakObject raising an error when the object is in conflict .
158
23
250,777
def content_method(name):
    """Build a method that forwards to the same-named method on the
    sole sibling of a RiakObject, raising ConflictError when the
    object is in conflict.
    """
    def _delegate(self, *args, **kwargs):
        if len(self.siblings) != 1:
            raise ConflictError()
        bound = getattr(self.siblings[0], name)
        return bound(*args, **kwargs)
    # Carry over the documentation from the RiakContent method.
    _delegate.__doc__ = getattr(RiakContent, name).__doc__
    return _delegate
Delegates a method to the first sibling in a RiakObject raising an error when the object is in conflict .
92
23
250,778
def store(self, w=None, dw=None, pw=None, return_body=True,
          if_none_match=False, timeout=None):
    """Store the object in Riak. On completion the object may carry
    new metadata and, if Riak holds a newer version per the vector
    clock, new data. Returns self for chaining. Raises ConflictError
    when the object still has unresolved siblings.
    """
    if len(self.siblings) != 1:
        raise ConflictError("Attempting to store an invalid object, "
                            "resolve the siblings first")
    self.client.put(self, w=w, dw=dw, pw=pw,
                    return_body=return_body,
                    if_none_match=if_none_match,
                    timeout=timeout)
    return self
Store the object in Riak . When this operation completes the object could contain new metadata and possibly new data if Riak contains a newer version of the object according to the object s vector clock .
117
39
250,779
def reload(self, r=None, pr=None, timeout=None, basic_quorum=None,
           notfound_ok=None, head_only=False):
    """Reload the object from Riak. On completion the object may carry
    new metadata and a new value if it was updated in Riak since it
    was last retrieved. Returns self for chaining.

    Fix: ``basic_quorum`` and ``notfound_ok`` were previously accepted
    but silently dropped; they are now forwarded to the client fetch,
    matching the transport-level ``get`` signature.
    """
    self.client.get(self, r=r, pr=pr, timeout=timeout,
                    basic_quorum=basic_quorum,
                    notfound_ok=notfound_ok,
                    head_only=head_only)
    return self
Reload the object from Riak . When this operation completes the object could contain new metadata and a new value if the object was updated in Riak since it was last retrieved .
68
36
250,780
def delete(self, r=None, w=None, dw=None, pr=None, pw=None,
           timeout=None):
    """Delete this object from Riak, then clear its local state.
    Returns self for chaining.
    """
    self.client.delete(self, r=r, w=w, dw=dw,
                       pr=pr, pw=pw, timeout=timeout)
    self.clear()
    return self
Delete this object from Riak .
72
7
250,781
def get_encoder(self, content_type):
    """Return the encoding function registered for ``content_type`` on
    this bucket, falling back to the client-level encoder.
    """
    try:
        return self._encoders[content_type]
    except KeyError:
        return self._client.get_encoder(content_type)
Get the encoding function for the provided content type for this bucket .
54
13
250,782
def get_decoder(self, content_type):
    """Return the decoding function registered for ``content_type`` on
    this bucket, falling back to the client-level decoder.
    """
    try:
        return self._decoders[content_type]
    except KeyError:
        return self._client.get_decoder(content_type)
Get the decoding function for the provided content type for this bucket .
54
13
250,783
def multiget(self, keys, r=None, pr=None, timeout=None,
             basic_quorum=None, notfound_ok=None, head_only=False):
    """Retrieve several keys belonging to this bucket in parallel."""
    type_name = self.bucket_type.name
    bkeys = [(type_name, self.name, key) for key in keys]
    return self._client.multiget(bkeys, r=r, pr=pr, timeout=timeout,
                                 basic_quorum=basic_quorum,
                                 notfound_ok=notfound_ok,
                                 head_only=head_only)
Retrieves a list of keys belonging to this bucket in parallel .
119
14
250,784
def stream_buckets(self, timeout=None):
    """Stream the list of buckets under this bucket-type. Generator
    method; iterate over the result.
    """
    client = self._client
    return client.stream_buckets(bucket_type=self, timeout=timeout)
Streams the list of buckets under this bucket - type . This is a generator method that should be iterated over .
34
24
250,785
def incr(self, d):
    """Increase the value by ``d`` while holding the lock."""
    with self.lock:
        updated = self.value() + d
        self.p = updated
Increases the value by the argument .
25
7
250,786
def make_random_client_id(self):
    """Return a random client identifier of the form ``py_<base64>``."""
    raw = str(random.randint(1, 0x40000000))
    if PY2:
        encoded = base64.b64encode(raw)
    else:
        encoded = base64.b64encode(bytes(raw, 'ascii'))
    return 'py_%s' % encoded
Returns a random client identifier
96
5
250,787
def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
        notfound_ok=None, head_only=False):
    """Fetches an object.

    Abstract method: concrete transports must override it.

    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError
Fetches an object .
47
6
250,788
def put(self, robj, w=None, dw=None, pw=None, return_body=None,
        if_none_match=None, timeout=None):
    """Stores an object.

    Abstract method: concrete transports must override it.

    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError
Stores an object .
46
5
250,789
def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None,
           pw=None, timeout=None):
    """Deletes an object.

    Abstract method: concrete transports must override it.

    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError
Deletes an object .
45
5
250,790
def update_counter(self, bucket, key, value, w=None, dw=None, pw=None,
                   returnvalue=False):
    """Updates a counter by the given value.

    Abstract method: concrete transports must override it.

    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError
Updates a counter by the given value .
38
9
250,791
def fetch_datatype(self, bucket, key, r=None, pr=None,
                   basic_quorum=None, notfound_ok=None, timeout=None,
                   include_context=None):
    """Fetches a Riak Datatype.

    Abstract method: concrete transports must override it.

    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError
Fetches a Riak Datatype .
52
10
250,792
def update_datatype(self, datatype, w=None, dw=None, pw=None,
                    return_body=None, timeout=None,
                    include_context=None):
    """Updates a Riak Datatype by sending local operations to the
    server.

    Abstract method: concrete transports must override it.

    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError
Updates a Riak Datatype by sending local operations to the server .
49
16
250,793
def _search_mapred_emu ( self , index , query ) : phases = [ ] if not self . phaseless_mapred ( ) : phases . append ( { 'language' : 'erlang' , 'module' : 'riak_kv_mapreduce' , 'function' : 'reduce_identity' , 'keep' : True } ) mr_result = self . mapred ( { 'module' : 'riak_search' , 'function' : 'mapred_search' , 'arg' : [ index , query ] } , phases ) result = { 'num_found' : len ( mr_result ) , 'max_score' : 0.0 , 'docs' : [ ] } for bucket , key , data in mr_result : if u'score' in data and data [ u'score' ] [ 0 ] > result [ 'max_score' ] : result [ 'max_score' ] = data [ u'score' ] [ 0 ] result [ 'docs' ] . append ( { u'id' : key } ) return result
Emulates a search request via MapReduce . Used in the case where the transport supports MapReduce but has no native search capability .
242
28
250,794
def _get_index_mapred_emu ( self , bucket , index , startkey , endkey = None ) : phases = [ ] if not self . phaseless_mapred ( ) : phases . append ( { 'language' : 'erlang' , 'module' : 'riak_kv_mapreduce' , 'function' : 'reduce_identity' , 'keep' : True } ) if endkey : result = self . mapred ( { 'bucket' : bucket , 'index' : index , 'start' : startkey , 'end' : endkey } , phases ) else : result = self . mapred ( { 'bucket' : bucket , 'index' : index , 'key' : startkey } , phases ) return [ key for resultbucket , key in result ]
Emulates a secondary index request via MapReduce . Used in the case where the transport supports MapReduce but has no native secondary index query capability .
180
31
250,795
def _parse_body(self, robj, response, expected_statuses):
    """Parse the body of an object response and populate the object.

    :param robj: the object to populate from the response
    :param response: a (status, headers, data) triple from the HTTP
        transport, or None
    :param expected_statuses: status codes accepted as successful
    :returns: the populated object, or None on empty response / 404
    :raises RiakError: when the server could not be contacted
    """
    # If no response given, then return.
    if response is None:
        return None

    status, headers, data = response

    # Check if the server is down(status==0)
    if not status:
        m = 'Could not contact Riak Server: http://{0}:{1}!'.format(
            self._node.host, self._node.http_port)
        raise RiakError(m)

    # Make sure expected code came back
    self.check_http_code(status, expected_statuses)

    if 'x-riak-vclock' in headers:
        robj.vclock = VClock(headers['x-riak-vclock'], 'base64')

    # If 404(Not Found), then clear the object.
    if status == 404:
        robj.siblings = []
        return None
    # If 201 Created, we need to extract the location and set the
    # key on the object.
    elif status == 201:
        robj.key = headers['location'].strip().split('/')[-1]
    # If 300(Siblings), apply the siblings to the object
    elif status == 300:
        ctype, params = parse_header(headers['content-type'])
        if ctype == 'multipart/mixed':
            if six.PY3:
                data = bytes_to_str(data)
            # Split the multipart body on its boundary; each MIME part
            # is one sibling.
            boundary = re.compile('\r?\n--%s(?:--)?\r?\n' %
                                  re.escape(params['boundary']))
            parts = [message_from_string(p)
                     for p in re.split(boundary, data)[1:-1]]
            robj.siblings = [self._parse_sibling(RiakContent(robj),
                                                 part.items(),
                                                 part.get_payload())
                             for part in parts]

            # Invoke sibling-resolution logic
            if robj.resolver is not None:
                robj.resolver(robj)

            return robj
        else:
            raise Exception('unexpected sibling response format: {0}'.
                            format(ctype))

    # Single-content response: headers and data describe one sibling.
    robj.siblings = [self._parse_sibling(RiakContent(robj),
                                         headers.items(),
                                         data)]

    return robj
Parse the body of an object response and populate the object .
505
13
250,796
def _parse_sibling ( self , sibling , headers , data ) : sibling . exists = True # Parse the headers... for header , value in headers : header = header . lower ( ) if header == 'content-type' : sibling . content_type , sibling . charset = self . _parse_content_type ( value ) elif header == 'etag' : sibling . etag = value elif header == 'link' : sibling . links = self . _parse_links ( value ) elif header == 'last-modified' : sibling . last_modified = mktime_tz ( parsedate_tz ( value ) ) elif header . startswith ( 'x-riak-meta-' ) : metakey = header . replace ( 'x-riak-meta-' , '' ) sibling . usermeta [ metakey ] = value elif header . startswith ( 'x-riak-index-' ) : field = header . replace ( 'x-riak-index-' , '' ) reader = csv . reader ( [ value ] , skipinitialspace = True ) for line in reader : for token in line : token = decode_index_value ( field , token ) sibling . add_index ( field , token ) elif header == 'x-riak-deleted' : sibling . exists = False sibling . encoded_data = data return sibling
Parses a single sibling out of a response .
297
11
250,797
def _to_link_header ( self , link ) : try : bucket , key , tag = link except ValueError : raise RiakError ( "Invalid link tuple %s" % link ) tag = tag if tag is not None else bucket url = self . object_path ( bucket , key ) header = '<%s>; riaktag="%s"' % ( url , tag ) return header
Convert the link tuple to a link header string . Used internally .
86
14
250,798
def _normalize_json_search_response ( self , json ) : result = { } if 'facet_counts' in json : result [ 'facet_counts' ] = json [ u'facet_counts' ] if 'grouped' in json : result [ 'grouped' ] = json [ u'grouped' ] if 'stats' in json : result [ 'stats' ] = json [ u'stats' ] if u'response' in json : result [ 'num_found' ] = json [ u'response' ] [ u'numFound' ] result [ 'max_score' ] = float ( json [ u'response' ] [ u'maxScore' ] ) docs = [ ] for doc in json [ u'response' ] [ u'docs' ] : resdoc = { } if u'_yz_rk' in doc : # Is this a Riak 2.0 result? resdoc = doc else : # Riak Search 1.0 Legacy assumptions about format resdoc [ u'id' ] = doc [ u'id' ] if u'fields' in doc : for k , v in six . iteritems ( doc [ u'fields' ] ) : resdoc [ k ] = v docs . append ( resdoc ) result [ 'docs' ] = docs return result
Normalizes a JSON search response so that PB and HTTP have the same return value
294
16
250,799
def _normalize_xml_search_response(self, xml):
    """Normalize an XML search response so that PB and HTTP produce
    the same return value.
    """
    handler = XMLSearchResult()
    parser = ElementTree.XMLParser(target=handler)
    parser.feed(xml)
    return parser.close()
Normalizes an XML search response so that PB and HTTP have the same return value
47
16