idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
17,800
def intraclass_corr(data=None, groups=None, raters=None, scores=None, ci=.95):
    """Intra-class correlation coefficient (one-way random effects).

    Parameters
    ----------
    data : pandas.DataFrame
        Long-format dataframe with one score per (group, rater) pair.
    groups : str
        Column identifying the rated targets/groups.
    raters : str
        Column identifying the raters/judges.
    scores : str
        Column holding the numeric ratings.
    ci : float
        Confidence level of the interval (default .95).

    Returns
    -------
    tuple
        ICC rounded to 6 decimals, and a numpy array with the lower/upper
        confidence bounds rounded to 3 decimals.
    """
    from pingouin import anova
    if any(v is None for v in [data, groups, raters, scores]):
        raise ValueError('Data, groups, raters and scores must be specified')
    assert isinstance(data, pd.DataFrame), 'Data must be a pandas dataframe.'
    # dtype.kind 'f' = float, 'i' = integer.
    assert data[scores].dtype.kind in 'fi', 'Scores must be numeric.'
    # Balanced design required: every rater must have the same score count.
    if data.groupby(raters)[scores].count().nunique() > 1:
        raise ValueError('Data must be balanced.')
    k = data[raters].nunique()
    # One-way ANOVA across groups: row 0 = between-groups, row 1 = residual.
    aov = anova(dv=scores, data=data, between=groups, detailed=True)
    icc = (aov.loc[0, 'MS'] - aov.loc[1, 'MS']) / \
        (aov.loc[0, 'MS'] + (k - 1) * aov.loc[1, 'MS'])
    alpha = 1 - ci
    df_num, df_den = aov.loc[0, 'DF'], aov.loc[1, 'DF']
    # F-based confidence bounds for the one-way ICC.
    # NOTE(review): `f` is presumably scipy.stats.f imported at module
    # level -- confirm against the file's imports.
    f_lower = aov.loc[0, 'F'] / f.isf(alpha / 2, df_num, df_den)
    f_upper = aov.loc[0, 'F'] * f.isf(alpha / 2, df_den, df_num)
    lower = (f_lower - 1) / (f_lower + k - 1)
    upper = (f_upper - 1) / (f_upper + k - 1)
    return round(icc, 6), np.round([lower, upper], 3)
Intra-class correlation coefficient.
17,801
def _func ( a , p , r , v ) : f = a [ 0 ] * math . log ( r - 1. ) + a [ 1 ] * math . log ( r - 1. ) ** 2 + a [ 2 ] * math . log ( r - 1. ) ** 3 + a [ 3 ] * math . log ( r - 1. ) ** 4 if r == 3 : f += - 0.002 / ( 1. + 12. * _phi ( p ) ** 2 ) if v <= 4.364 : f += 1. / 517. - 1. / ( 312. * ( v , 1e38 ) [ np . isinf ( v ) ] ) else : f += 1. / ( 191. * ( v , 1e38 ) [ np . isinf ( v ) ] ) return - f
Calculates f-hat for the coefficients a, at probability p, sample mean difference r, and degrees of freedom v.
17,802
def _select_ps ( p ) : if p >= .99 : return .990 , .995 , .999 elif p >= .975 : return .975 , .990 , .995 elif p >= .95 : return .950 , .975 , .990 elif p >= .9125 : return .900 , .950 , .975 elif p >= .875 : return .850 , .900 , .950 elif p >= .825 : return .800 , .850 , .900 elif p >= .7625 : return .750 , .800 , .850 elif p >= .675 : return .675 , .750 , .800 elif p >= .500 : return .500 , .675 , .750 else : return .100 , .500 , .675
returns the points to use for interpolating p
17,803
def _select_vs ( v , p ) : if v >= 120. : return 60 , 120 , inf elif v >= 60. : return 40 , 60 , 120 elif v >= 40. : return 30 , 40 , 60 elif v >= 30. : return 24 , 30 , 40 elif v >= 24. : return 20 , 24 , 30 elif v >= 19.5 : return 19 , 20 , 24 if p >= .9 : if v < 2.5 : return 1 , 2 , 3 else : if v < 3.5 : return 2 , 3 , 4 vi = int ( round ( v ) ) return vi - 1 , vi , vi + 1
returns the points to use for interpolating v
17,804
def _interpolate_v(p, r, v):
    """Interpolate the approximation across tabulated dof rows for scalar
    r and p, fitting a quadratic on the 1/v scale through the squared
    (f-hat + 1) values.
    """
    lo, mid, hi = _select_vs(v, p)

    def y_squared(dof):
        # Squared (f-hat + 1) at one tabulated dof row.
        return (_func(A[(p, dof)], p, r, dof) + 1.) ** 2.

    y0 = y_squared(lo)
    y1 = y_squared(mid)
    y2 = y_squared(hi)
    if hi > 1e38:
        hi = 1e38  # cap "infinite" dof so 1/hi stays finite
    x, x0, x1, x2 = 1. / v, 1. / lo, 1. / mid, 1. / hi
    # Quadratic through the three points, centred at x1.
    c2 = 2. * ((y2 - y1) / (x2 - x1) - (y0 - y1) / (x0 - x1)) / (x2 - x0)
    if (x2 + x0) >= (x1 + x1):
        c1 = (y2 - y1) / (x2 - x1) - 0.5 * c2 * (x2 - x1)
    else:
        c1 = (y1 - y0) / (x1 - x0) + 0.5 * c2 * (x1 - x0)
    c0 = y1
    return math.sqrt((c2 / 2.) * (x - x1) ** 2. + c1 * (x - x1) + c0)
Interpolates v based on the values in the A table for the scalar values of r and p.
17,805
def _qsturng(p, r, v):
    """Scalar version of qsturng.

    Approximate the p quantile of the studentized range distribution for
    r samples and v degrees of freedom, using the tabulated coefficient
    table A directly when (p, v) is tabulated and interpolating over p,
    over 1/v, or over both otherwise.

    Raises ValueError when p is outside [.1, .999] or v is below the
    tabulated range for that p.
    """
    global A, p_keys, v_keys
    if p < .1 or p > .999:
        raise ValueError('p must be between .1 and .999')
    if p < .9:
        if v < 2:
            raise ValueError('v must be > 2 when p < .9')
    else:
        if v < 1:
            raise ValueError('v must be > 1 when p >= .9')
    p = float(p)
    if isinstance(v, np.ndarray):
        v = v.item()
    if (p, v) in A:
        # Exact table entry available.
        y = _func(A[(p, v)], p, r, v) + 1.
    elif p not in p_keys and v not in v_keys + ([], [1])[p >= .90]:
        # Neither axis tabulated: interpolate over p at the three
        # bracketing dof rows, then fit a quadratic on the 1/v scale.
        v0, v1, v2 = _select_vs(v, p)
        r0_sq = _interpolate_p(p, r, v0) ** 2
        r1_sq = _interpolate_p(p, r, v1) ** 2
        r2_sq = _interpolate_p(p, r, v2) ** 2
        v_, v0_, v1_, v2_ = 1. / v, 1. / v0, 1. / v1, 1. / v2
        d2 = 2. * ((r2_sq - r1_sq) / (v2_ - v1_) -
                   (r0_sq - r1_sq) / (v0_ - v1_)) / (v2_ - v0_)
        if (v2_ + v0_) >= (v1_ + v1_):
            d1 = (r2_sq - r1_sq) / (v2_ - v1_) - 0.5 * d2 * (v2_ - v1_)
        else:
            d1 = (r1_sq - r0_sq) / (v1_ - v0_) + 0.5 * d2 * (v1_ - v0_)
        d0 = r1_sq
        y = math.sqrt((d2 / 2.) * (v_ - v1_) ** 2. + d1 * (v_ - v1_) + d0)
    elif v not in v_keys + ([], [1])[p >= .90]:
        y = _interpolate_v(p, r, v)
    elif p not in p_keys:
        y = _interpolate_p(p, r, v)
    # BUGFIX: this was max(v, 1e38), which clamped every *finite* dof up
    # to 1e38 so the t quantile was effectively the normal quantile.  The
    # cap exists to bound infinite dof (same intent as the
    # (v, 1e38)[np.isinf(v)] idiom used elsewhere), so min() is correct.
    return math.sqrt(2) * -y * scipy.stats.t.isf((1. + p) / 2., min(v, 1e38))
scalar version of qsturng
17,806
def qsturng(p, r, v):
    """Approximate the p quantile of the studentized range distribution
    for v degrees of freedom and r samples; dispatches to the vectorized
    implementation when any argument is array-like.
    """
    scalar_args = all(map(_isfloat, [p, r, v]))
    if not scalar_args:
        return _vqsturng(p, r, v)
    return _qsturng(p, r, v)
Approximates the quantile of a studentized range distribution having v degrees of freedom and r samples for probability p.
17,807
def _psturng(q, r, v):
    """Scalar upper-tail probability for a studentized range statistic q
    with r samples and v degrees of freedom, found by inverting _qsturng
    numerically.
    """
    if q < 0.:
        raise ValueError('q should be >= 0')

    def objective(p, r, v):
        return abs(_qsturng(p, r, v) - q)

    # v == 1 is only tabulated for p in [.9, .999]; clamp the search
    # bracket (and the returned extremes) accordingly.
    p_lo = .9 if v == 1 else .1
    if q < _qsturng(p_lo, r, v):
        return 1. - p_lo
    if q > _qsturng(.999, r, v):
        return .001
    return 1. - fminbound(objective, p_lo, .999, args=(r, v))
scalar version of psturng
17,808
def psturng(q, r, v):
    """Evaluate the upper-tail probability for a studentized range of q
    with v degrees of freedom and r samples; dispatches to the vectorized
    implementation when any argument is array-like.
    """
    scalar_args = all(map(_isfloat, [q, r, v]))
    if not scalar_args:
        return _vpsturng(q, r, v)
    return _psturng(q, r, v)
Evaluates the probability from 0 to q for a studentized range having v degrees of freedom and r samples .
17,809
def power_anova(eta=None, k=None, n=None, power=None, alpha=0.05):
    """Evaluate power, sample size, effect size or significance level of a
    one-way balanced ANOVA.

    Exactly one of ``eta`` (eta-squared effect size), ``k`` (number of
    groups), ``n`` (sample size per group), ``power`` and ``alpha`` must be
    None; that parameter is solved for and returned.

    Returns
    -------
    float
        The solved parameter, or np.nan when no root is found inside the
        search bracket.

    Raises
    ------
    ValueError
        If the number of None parameters is not exactly one.
    """
    n_none = sum(v is None for v in [eta, k, n, power, alpha])
    if n_none != 1:
        err = 'Exactly one of eta, k, n, power, and alpha must be None.'
        raise ValueError(err)
    f_sq = None
    if eta is not None:
        eta = abs(eta)
        # Convert eta-squared to Cohen's f-squared.
        f_sq = eta / (1 - eta)
    if alpha is not None:
        assert 0 < alpha <= 1
    if power is not None:
        assert 0 < power <= 1

    def achieved_power(f_sq, k, n, alpha):
        """Power of the one-way F test for the given configuration."""
        nc = (n * k) * f_sq          # non-centrality parameter
        dof1 = k - 1
        dof2 = (n * k) - k
        fcrit = stats.f.ppf(1 - alpha, dof1, dof2)
        return stats.ncf.sf(fcrit, dof1, dof2, nc)

    if power is None:
        return achieved_power(f_sq, k, n, alpha)
    if k is None:
        # FIX: the original named this parameter `eta` while it actually
        # receives f_sq through brenth's args tuple.
        def _eval_k(k, f_sq, n, power, alpha):
            return achieved_power(f_sq, k, n, alpha) - power
        try:
            return brenth(_eval_k, 2, 100, args=(f_sq, n, power, alpha))
        except ValueError:
            return np.nan
    if n is None:
        def _eval_n(n, f_sq, k, power, alpha):
            return achieved_power(f_sq, k, n, alpha) - power
        try:
            return brenth(_eval_n, 2, 1e+07, args=(f_sq, k, power, alpha))
        except ValueError:
            return np.nan
    if eta is None:
        def _eval_eta(f_sq, k, n, power, alpha):
            return achieved_power(f_sq, k, n, alpha) - power
        try:
            f_sq = brenth(_eval_eta, 1e-10, 1 - 1e-10, args=(k, n, power, alpha))
            # Convert the solved f-squared back to eta-squared.
            return f_sq / (f_sq + 1)
        except ValueError:
            return np.nan
    # alpha is None: solve for the significance level.
    def _eval_alpha(alpha, f_sq, k, n, power):
        return achieved_power(f_sq, k, n, alpha) - power
    try:
        return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(f_sq, k, n, power))
    except ValueError:
        return np.nan
Evaluate power sample size effect size or significance level of a one - way balanced ANOVA .
17,810
def consume(self, timeout=None, loop=None):
    """Start consuming the stream.

    Connects to the configured websocket URL and feeds every received
    message to the registered consumer coroutine until cancelled,
    disconnected, timed out or interrupted.

    :param timeout: optional overall timeout in seconds for the task.
    :param loop: optional asyncio event loop; a new loop is created and
        installed when omitted.
    :raises ValueError: if no consumer function has been registered.
    """
    if self._consumer_fn is None:
        raise ValueError('Consumer function is not defined yet')
    logger.info('Start consuming the stream')

    @asyncio.coroutine
    def worker(conn_url):
        # Explicit upgrade headers for the websocket handshake.
        extra_headers = {
            'Connection': 'upgrade',
            'Upgrade': 'websocket',
            'Sec-Websocket-Version': 13,
        }
        ws = yield from websockets.connect(conn_url, extra_headers=extra_headers)
        if ws is None:
            raise RuntimeError("Couldn't connect to the '%s'" % conn_url)
        try:
            # Pump messages to the consumer until the task ends.
            while True:
                message = yield from ws.recv()
                yield from self._consumer_fn(message)
        finally:
            # Always close the socket, even on cancellation/errors.
            yield from ws.close()

    if loop is None:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    try:
        task = worker(conn_url=self._conn_url)
        if timeout:
            logger.info('Running task with timeout %s sec', timeout)
            loop.run_until_complete(asyncio.wait_for(task, timeout=timeout))
        else:
            loop.run_until_complete(task)
    except asyncio.TimeoutError:
        logger.info('Timeout is reached. Closing the loop')
        loop.close()
    except KeyboardInterrupt:
        logger.info('Closing the loop')
        loop.close()
    # NOTE(review): the loop is only closed on timeout/interrupt; a normal
    # completion leaves it open -- confirm this is intentional.
Start consuming the stream
17,811
def add_rule(self, value, tag):
    """Add a new streaming rule and return the decoded JSON response."""
    payload = {'rule': {'value': value, 'tag': tag}}
    response = requests.post(
        url=self.REQUEST_URL.format(**self._params), json=payload)
    return response.json()
Add a new rule
17,812
def remove_rule(self, tag):
    """Remove a streaming rule by tag and return the decoded JSON response."""
    response = requests.delete(
        url=self.REQUEST_URL.format(**self._params), json={'tag': tag})
    return response.json()
Remove a rule by tag
17,813
def stringify_values(data):
    """Coerce iterable values in `data` to comma-joined strings.

    :param data: dict whose values may be strings, iterables or scalars.
    :return: new dict where iterable values become 'val1,val2,...,valN';
        other values are stored unchanged.
    :raises ValueError: if `data` is not a dict.
    """
    if not isinstance(data, dict):
        raise ValueError('Data must be dict. %r is passed' % data)
    values_dict = {}
    for key, value in data.items():
        items = []
        if isinstance(value, six.string_types):
            # Strings are iterable but must not be split into characters.
            # NOTE(review): `items` is populated here but never used for
            # strings -- the original value is stored as-is below.
            items.append(value)
        elif isinstance(value, Iterable):
            for v in value:
                if isinstance(v, int):
                    v = str(v)
                try:
                    # Ensure text type on Python 2 via six.u().
                    item = six.u(v)
                except TypeError:
                    item = v
                items.append(item)
            value = ','.join(items)
        values_dict[key] = value
    return values_dict
Coerce iterable values to comma-separated strings of the form 'val1,val2,...,valN'.
17,814
def parse_url_query_params(url, fragment=True):
    """Parse query parameters from a url into a dict.

    :param fragment: when True, read the parameters from the fragment part
        of the url; otherwise from the regular query string.
    """
    parsed = urlparse(url)
    raw_query = parsed.fragment if fragment else parsed.query
    return dict(parse_qsl(raw_query))
Parse url query params
17,815
def parse_masked_phone_number(html, parser=None):
    """Extract the masked phone number parts from a security-check page."""
    if parser is None:
        parser = bs4.BeautifulSoup(html, 'html.parser')
    fields = parser.find_all('span', {'class': 'field_prefix'})
    if not fields:
        raise VkParseError(
            'No <span class="field_prefix">...</span> in the \n%s' % html)
    # Strip the non-breaking spaces that pad each prefix/suffix span.
    return tuple(field.get_text().replace(six.u('\xa0'), '')
                 for field in fields)
Get masked phone number from security check html
17,816
def check_html_warnings(html, parser=None):
    """Raise VkPageWarningsError if the page carries service warnings;
    return True otherwise."""
    if parser is None:
        parser = bs4.BeautifulSoup(html, 'html.parser')
    warnings = parser.find_all('div', {'class': 'service_msg_warning'})
    if warnings:
        messages = [warning.get_text() for warning in warnings]
        raise VkPageWarningsError('; '.join(messages))
    return True
Check html warnings
17,817
def http_session(self):
    """Lazily create, configure and cache the HTTP session."""
    if self._http_session is None:
        new_session = VerboseHTTPSession()
        new_session.headers.update(self.DEFAULT_HTTP_HEADERS)
        self._http_session = new_session
    return self._http_session
HTTP Session property
17,818
def do_login(self, http_session):
    """Log into vk.com with the stored credentials.

    Fetches the login page, submits the form, then handles the possible
    follow-up challenges (captcha, 2FA auth-check, phone security check)
    before verifying that a session cookie was issued.

    :param http_session: session used for all login HTTP traffic.
    :return: True when a VK session cookie is present.
    :raises VkParseError: if the login form cannot be parsed.
    :raises VkAuthError: if no session cookie is obtained.
    """
    response = http_session.get(self.LOGIN_URL)
    action_url = parse_form_action_url(response.text)
    if not action_url:
        logger.debug(response.text)
        raise VkParseError("Can't parse form action url")
    login_form_data = {'email': self._login, 'pass': self._password}
    login_response = http_session.post(action_url, login_form_data)
    logger.debug('Cookies: %s', http_session.cookies)
    # The redirect URL's query string encodes which challenge (if any)
    # VK wants next.
    response_url_query = parse_url_query_params(
        login_response.url, fragment=False)
    logger.debug('response_url_query: %s', response_url_query)
    act = response_url_query.get('act')
    if 'sid' in response_url_query:
        # Captcha challenge.
        self.require_auth_captcha(
            response=login_response, query_params=response_url_query,
            login_form_data=login_form_data, http_session=http_session)
    elif act == 'authcheck':
        # Two-factor authentication challenge.
        self.require_2fa(html=login_response.text, http_session=http_session)
    elif act == 'security_check':
        # Phone-number security check.
        self.require_phone_number(html=login_response.text, session=http_session)
    # Either cookie name indicates an established session.
    session_cookies = ('remixsid' in http_session.cookies,
                       'remixsid6' in http_session.cookies)
    if any(session_cookies):
        logger.info('VK session is established')
        return True
    else:
        # Implicit string concatenation (single message, split for length).
        message = 'Authorization error: incorrect password or ' 'authentication code'
        logger.error(message)
        raise VkAuthError(message)
Do vk login
17,819
def require_auth_captcha(self, response, query_params, login_form_data,
                         http_session):
    """Resolve the login captcha challenge by re-submitting the form with
    a user-provided captcha key."""
    logger.info('Captcha is needed. Query params: %s', query_params)
    action_url = parse_form_action_url(response.text)
    logger.debug('form action url: %s', action_url)
    if not action_url:
        raise VkAuthError('Cannot find form action url')
    captcha_sid, captcha_url = parse_captcha_html(
        html=response.text, response_url=response.url)
    logger.info('Captcha url %s', captcha_url)
    login_form_data['captcha_sid'] = captcha_sid
    login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
    return http_session.post(action_url, login_form_data)
Resolve auth captcha case
17,820
def get_captcha_key(self, captcha_image_url):
    """Read the CAPTCHA solution from user input (interactive mode only)."""
    if not self.interactive:
        raise VkAuthError('Captcha is required. Use interactive mode to enter it '
                          'manually')
    print('Open CAPTCHA image url in your browser and enter it below: ',
          captcha_image_url)
    return raw_input('Enter CAPTCHA key: ')
Read CAPTCHA key from user input
17,821
def make_request(self, request, captcha_response=None):
    """Send an API request and unwrap the response payload.

    Transparently retries when the API demands a captcha (after asking
    the user for the key) or rejects the access token (after dropping the
    cached token so it is re-acquired).

    :param request: prepared API method request object.
    :param captcha_response: optional {'sid': ..., 'key': ...} dict.
    :return: the 'response' payload of a successful call.
    :raises VkAPIError: for unrecoverable API-level errors.
    """
    logger.debug('Prepare API Method request %r', request)
    response = self._send_api_request(request=request,
                                      captcha_response=captcha_response)
    response.raise_for_status()
    response_or_error = json.loads(response.text)
    logger.debug('response: %s', response_or_error)
    if 'error' in response_or_error:
        error_data = response_or_error['error']
        vk_error = VkAPIError(error_data)
        if vk_error.is_captcha_needed():
            captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
            if not captcha_key:
                raise vk_error
            captcha_response = {
                'sid': vk_error.captcha_sid,
                'key': captcha_key,
            }
            # Retry the same request with the solved captcha attached.
            return self.make_request(request, captcha_response=captcha_response)
        elif vk_error.is_access_token_incorrect():
            logger.info('Authorization failed. Access token will be dropped')
            # Drop the token and retry; it is re-obtained lazily.
            self._access_token = None
            return self.make_request(request)
        else:
            raise vk_error
    elif 'execute_errors' in response_or_error:
        # Only the first execute error is surfaced.
        raise VkAPIError(response_or_error['execute_errors'][0])
    elif 'response' in response_or_error:
        return response_or_error['response']
    # NOTE(review): a payload with none of these keys falls through and
    # returns None implicitly -- confirm this is intended.
Make api request helper function
17,822
def _send_api_request(self, request, captcha_response=None):
    """Build the POST parameters for an API method call and send it."""
    url = self.API_URL + request.method_name
    method_kwargs = {'v': self.api_version}
    method_kwargs.update(stringify_values(request.method_args))
    if self.is_token_required() or self._service_token:
        # Some methods work without a token; attach one only when needed
        # or when a service token is configured.
        method_kwargs['access_token'] = self.access_token
    if captcha_response:
        method_kwargs['captcha_sid'] = captcha_response['sid']
        method_kwargs['captcha_key'] = captcha_response['key']
    http_params = dict(url=url, data=method_kwargs, **request.http_params)
    logger.debug('send_api_request:http_params: %s', http_params)
    return self.http_session.post(**http_params)
Prepare and send HTTP API request
17,823
def create_api(app_id=None, login=None, password=None, phone_number=None,
               scope='offline', api_version='5.92', http_params=None,
               interactive=False, service_token=None, client_secret=None,
               two_fa_supported=False, two_fa_force_sms=False):
    """Factory: build a VKSession from explicit credentials and wrap it
    in an API object."""
    session_kwargs = dict(
        app_id=app_id,
        user_login=login,
        user_password=password,
        phone_number=phone_number,
        scope=scope,
        service_token=service_token,
        api_version=api_version,
        interactive=interactive,
        client_secret=client_secret,
        two_fa_supported=two_fa_supported,
        two_fa_force_sms=two_fa_force_sms,
    )
    return API(session=VKSession(**session_kwargs), http_params=http_params)
Factory method to explicitly create API with app_id login password and phone_number parameters .
17,824
def result(self, value):
    """Store the raw command result; apply the post-processing hook (if
    any) to populate the processed result."""
    process = self._process_result
    if process:
        self._result = process(value)
    self._raw_result = value
The result of the command .
17,825
def url(self, host):
    """Generate the coaps URL for this command's path on the given host."""
    joined_path = '/'.join(map(str, self._path))
    return 'coaps://{}:5684/{}'.format(host, joined_path)
Generate url for coap client .
17,826
def _merge ( self , a , b ) : for k , v in a . items ( ) : if isinstance ( v , dict ) : item = b . setdefault ( k , { } ) self . _merge ( v , item ) elif isinstance ( v , list ) : item = b . setdefault ( k , [ { } ] ) if len ( v ) == 1 and isinstance ( v [ 0 ] , dict ) : self . _merge ( v [ 0 ] , item [ 0 ] ) else : b [ k ] = v else : b [ k ] = v return b
Merges a into b .
17,827
def combine_data(self, command2):
    """Combine the data for this command with another command's data."""
    if command2 is not None:
        self._data = self._merge(command2._data, self._data)
Combines the data for this command with another .
17,828
def load_json(filename: str) -> Union[List, Dict]:
    """Load JSON data from a file and return it as a dict or list.

    Returns an empty dict when the file does not exist; wraps parse and
    I/O failures in PytradfriError.
    """
    try:
        with open(filename, encoding='utf-8') as fdesc:
            content = fdesc.read()
    except FileNotFoundError:
        # A missing file is not an error; fall back to the empty default.
        _LOGGER.debug('JSON file not found: %s', filename)
        return {}
    except OSError as error:
        _LOGGER.exception('JSON file reading failed: %s', filename)
        raise PytradfriError(error)
    try:
        return json.loads(content)
    except ValueError as error:
        _LOGGER.exception('Could not parse JSON content: %s', filename)
        raise PytradfriError(error)
Load JSON data from a file and return as dict or list .
17,829
def save_json(filename: str, config: Union[List, Dict]):
    """Serialize `config` to pretty-printed JSON and write it to `filename`.

    Returns True on success; wraps serialization and I/O failures in
    PytradfriError.
    """
    try:
        serialized = json.dumps(config, sort_keys=True, indent=4)
    except TypeError as error:
        _LOGGER.exception('Failed to serialize to JSON: %s', filename)
        raise PytradfriError(error)
    try:
        with open(filename, 'w', encoding='utf-8') as fdesc:
            fdesc.write(serialized)
    except OSError as error:
        _LOGGER.exception('Saving JSON file failed: %s', filename)
        raise PytradfriError(error)
    return True
Save JSON data to a file .
17,830
def get_selected_keys(self, selection):
    """Return the keys whose bitmask intersects the given selection."""
    selected = []
    for key, bitmask in self._lookup.items():
        if bitmask & selection:
            selected.append(key)
    return selected
Return a list of keys for the given selection .
17,831
def get_selected_values(self, selection):
    """Return the values whose bitmask intersects the given selection."""
    selected = []
    for bitmask, value in self._choices:
        if bitmask & selection:
            selected.append(value)
    return selected
Return a list of values for the given selection .
17,832
def retry_timeout(api, retries=3):
    """Decorator: retry the wrapped API call up to `retries` times when a
    RequestTimeout occurs; re-raise after the final attempt."""
    @wraps(api)
    def retry_api(*args, **kwargs):
        attempt = 0
        while True:
            attempt += 1
            try:
                return api(*args, **kwargs)
            except RequestTimeout:
                if attempt == retries:
                    raise
    return retry_api
Retry API call when a timeout occurs .
17,833
def request(self, api_commands, *, timeout=None):
    """Execute one command or a list of commands.  Timeout is in seconds."""
    if isinstance(api_commands, list):
        return [self._execute(command, timeout=timeout)
                for command in api_commands]
    return self._execute(api_commands, timeout=timeout)
Make a request . Timeout is in seconds .
17,834
def task_start_time(self):
    """Return the time the task starts as a datetime.time."""
    params = self.task_start_parameters
    return datetime.time(
        params[ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR],
        params[ATTR_SMART_TASK_TRIGGER_TIME_START_MIN])
Return the time the task starts .
17,835
def tasks(self):
    """Return task objects of the task control, one per raw entry."""
    items = []
    for index in range(len(self.raw)):
        items.append(
            StartActionItem(self._task, index, self.state, self.path, self.raw))
    return items
Return task objects of the task control .
17,836
def set_dimmer(self, dimmer):
    """Set the final dimmer value for this task's start action."""
    # Start-action entry for this item, keeping its id and transition time.
    action_item = {
        ATTR_ID: self.raw[ATTR_ID],
        ATTR_LIGHT_DIMMER: dimmer,
        ATTR_TRANSITION_TIME: self.raw[ATTR_TRANSITION_TIME],
    }
    start_action = {
        ATTR_DEVICE_STATE: self.state,
        ROOT_START_ACTION: [action_item, self.devices_dict],
    }
    return self.set_values({ATTR_START_ACTION: start_action})
Set final dimmer value for task .
17,837
def observe(self, callback, err_callback, duration=60):
    """Observe the resource; invoke `callback` with self on every update."""
    def on_update(value):
        # Refresh local state before notifying the observer.
        self.raw = value
        callback(self)
    return Command('get', self.path, process_result=on_update,
                   err_callback=err_callback, observe=True,
                   observe_duration=duration)
Observe resource and call callback when updated .
17,838
def update(self):
    """Return a command that refreshes this group's raw state."""
    def refresh(result):
        self.raw = result
    return Command('get', self.path, process_result=refresh)
Update the group .
17,839
def generate_psk(self, identity):
    """Return a command generating a pre-shared key on the gateway for
    the given identity."""
    return Command('post', [ROOT_GATEWAY, ATTR_AUTH],
                   {ATTR_IDENTITY: identity},
                   process_result=lambda result: result[ATTR_PSK])
Generates the PRE_SHARED_KEY from the gateway .
17,840
def get_endpoints(self):
    """Return all available endpoints on the gateway.

    The gateway answers in CoRE link format ("</path>;attrs,..."); each
    entry is stripped down to its bare path.
    """
    def process_result(result):
        endpoints = []
        for line in result.split(','):
            link = line.split(';')[0]
            # Drop the leading "</" and trailing ">".
            endpoints.append(link[2:-1])
        return endpoints
    return Command('get', ['.well-known', 'core'], parse_json=False,
                   process_result=process_result)
Return all available endpoints on the gateway .
17,841
def get_devices(self):
    """Return a command fetching the devices linked to the gateway."""
    def process_result(result):
        commands = []
        for device_id in result:
            commands.append(self.get_device(device_id))
        return commands
    return Command('get', [ROOT_DEVICES], process_result=process_result)
Return the devices linked to the gateway .
17,842
def get_device(self, device_id):
    """Return a command fetching the specified device."""
    return Command('get', [ROOT_DEVICES, device_id],
                   process_result=lambda result: Device(result))
Return specified device .
17,843
def get_groups(self):
    """Return a command fetching the groups linked to the gateway."""
    def process_result(result):
        commands = []
        for group in result:
            commands.append(self.get_group(group))
        return commands
    return Command('get', [ROOT_GROUPS], process_result=process_result)
Return the groups linked to the gateway .
17,844
def get_group(self, group_id):
    """Return a command fetching the specified group."""
    return Command('get', [ROOT_GROUPS, group_id],
                   process_result=lambda result: Group(self, result))
Return specified group .
17,845
def get_gateway_info(self):
    """Return a command fetching the gateway info."""
    return Command('get', [ROOT_GATEWAY, ATTR_GATEWAY_INFO],
                   process_result=lambda result: GatewayInfo(result))
Return the gateway info .
17,846
def get_moods(self):
    """Return a command fetching the moods defined on the gateway."""
    # Resolve the parent once so every per-mood command shares it.
    mood_parent = self._get_mood_parent()

    def process_result(result):
        commands = []
        for mood in result:
            commands.append(self.get_mood(mood, mood_parent=mood_parent))
        return commands
    return Command('get', [ROOT_MOODS, mood_parent],
                   process_result=process_result)
Return moods defined on the gateway .
17,847
def get_mood(self, mood_id, *, mood_parent=None):
    """Return a command fetching a single mood.

    :param mood_id: id of the mood to fetch.
    :param mood_parent: parent mood container id; resolved from the
        gateway when omitted.
    """
    if mood_parent is None:
        mood_parent = self._get_mood_parent()

    def process_result(result):
        return Mood(result, mood_parent)

    # NOTE(review): `mood_parent` is also passed as a third positional
    # argument to Command here (presumably bound to Command's `data`
    # parameter).  Sending a payload with a GET looks unintentional --
    # the other get_* helpers in this file do not do this; confirm
    # against Command's signature.
    return Command('get', [ROOT_MOODS, mood_parent, mood_id], mood_parent,
                   process_result=process_result)
Return a mood .
17,848
def get_smart_tasks(self):
    """Return a command fetching the smart tasks (transitions) linked to
    the gateway."""
    def process_result(result):
        commands = []
        for task in result:
            commands.append(self.get_smart_task(task))
        return commands
    return Command('get', [ROOT_SMART_TASKS], process_result=process_result)
Return the transitions linked to the gateway .
17,849
def get_smart_task(self, task_id):
    """Return a command fetching the specified smart task (transition)."""
    return Command('get', [ROOT_SMART_TASKS, task_id],
                   process_result=lambda result: SmartTask(self, result))
Return specified transition .
17,850
def first_setup(self):
    """Return the first-setup value as a naive UTC datetime, or None when
    absent.

    NOTE: the meaning of the raw value is a guess in the original project;
    it is treated as a unix timestamp here.
    """
    try:
        timestamp = self.raw[ATTR_FIRST_SETUP]
    except KeyError:
        return None
    return datetime.utcfromtimestamp(timestamp)
This is a guess of the meaning of this value .
17,851
def power_source_str(self):
    """String representation of the current power source, or None when
    the raw data has no power-source field."""
    if DeviceInfo.ATTR_POWER_SOURCE not in self.raw:
        return None
    known_sources = DeviceInfo.VALUE_POWER_SOURCES
    return known_sources.get(self.power_source, 'Unknown')
String representation of current power source .
17,852
def lights(self):
    """Return Light objects of the light control, one per raw entry."""
    found = []
    for index in range(len(self.raw)):
        found.append(Light(self._device, index))
    return found
Return light objects of the light control .
17,853
def set_state(self, state, *, index=0):
    """Set the on/off state of a light (truthy state maps to 1)."""
    values = {ATTR_DEVICE_STATE: int(state)}
    return self.set_values(values, index=index)
Set state of a light .
17,854
def set_color_temp(self, color_temp, *, index=0, transition_time=None):
    """Set the colour temperature (mireds) of a light."""
    self._value_validate(color_temp, RANGE_MIREDS, "Color temperature")
    values = {ATTR_LIGHT_MIREDS: color_temp}
    if transition_time is not None:
        values.update({ATTR_TRANSITION_TIME: transition_time})
    return self.set_values(values, index=index)
Set color temp a light .
17,855
def set_hex_color(self, color, *, index=0, transition_time=None):
    """Set the hex colour of the light."""
    values = {ATTR_LIGHT_COLOR_HEX: color}
    if transition_time is not None:
        values.update({ATTR_TRANSITION_TIME: transition_time})
    return self.set_values(values, index=index)
Set hex color of the light .
17,856
def set_xy_color(self, color_x, color_y, *, index=0, transition_time=None):
    """Set the CIE xy colour of the light."""
    self._value_validate(color_x, RANGE_X, "X color")
    self._value_validate(color_y, RANGE_Y, "Y color")
    values = {ATTR_LIGHT_COLOR_X: color_x, ATTR_LIGHT_COLOR_Y: color_y}
    if transition_time is not None:
        values.update({ATTR_TRANSITION_TIME: transition_time})
    return self.set_values(values, index=index)
Set xy color of the light .
17,857
def set_hsb(self, hue, saturation, brightness=None, *, index=0,
            transition_time=None):
    """Set the HSB colour settings of the light (brightness optional)."""
    self._value_validate(hue, RANGE_HUE, "Hue")
    self._value_validate(saturation, RANGE_SATURATION, "Saturation")
    values = {ATTR_LIGHT_COLOR_SATURATION: saturation,
              ATTR_LIGHT_COLOR_HUE: hue}
    if brightness is not None:
        values[ATTR_LIGHT_DIMMER] = brightness
        # Validation after insertion mirrors the original; a raise here
        # discards `values`, so the order is not observable.
        self._value_validate(brightness, RANGE_BRIGHTNESS, "Brightness")
    if transition_time is not None:
        values[ATTR_TRANSITION_TIME] = transition_time
    return self.set_values(values, index=index)
Set HSB color settings of the light .
17,858
def _value_validate ( self , value , rnge , identifier = "Given" ) : if value is not None and ( value < rnge [ 0 ] or value > rnge [ 1 ] ) : raise ValueError ( '%s value must be between %d and %d.' % ( identifier , rnge [ 0 ] , rnge [ 1 ] ) )
Make sure a value is within a given range
17,859
def set_values(self, values, *, index=0):
    """Set values on the light control.  Returns a Command.

    :param values: dict of light attributes to send in the PUT payload.
    :param index: accepted for signature symmetry with the other setters
        but currently unused -- only single-light devices are supported,
        as the assertion below enforces.
    """
    assert len(self.raw) == 1, 'Only devices with 1 light supported'
    return Command('put', self._device.path, {ATTR_LIGHT_CONTROL: [values]})
Set values on light control . Returns a Command .
17,860
def sockets(self):
    """Return Socket objects of the socket control, one per raw entry."""
    found = []
    for index in range(len(self.raw)):
        found.append(Socket(self._device, index))
    return found
Return socket objects of the socket control .
17,861
async def _get_protocol(self):
    """Return the CoAP client context, creating it lazily on first use."""
    if self._protocol is None:
        # Wrap creation in a Task so concurrent awaiters share one context.
        self._protocol = asyncio.Task(
            Context.create_client_context(loop=self._loop))
    return await self._protocol
Get the protocol for the request .
17,862
async def _reset_protocol(self, exc=None):
    """Shut the protocol down after an error and notify every observation
    error callback with the triggering exception."""
    protocol = await self._get_protocol()
    await protocol.shutdown()
    self._protocol = None
    # Observations were tied to the dead protocol; propagate the failure.
    for err_callback in self._observations_err_callbacks:
        err_callback(exc)
    self._observations_err_callbacks.clear()
Reset the protocol if an error occurs .
17,863
async def _get_response(self, msg):
    """Send a CoAP request and return the (request, response) pair.

    Maps the transport-level failure modes onto the library's exception
    hierarchy; any failure that can leave the protocol in a bad state
    resets it first (which also notifies observation error callbacks).

    :raises ClientError: when the request itself cannot be constructed.
    :raises RequestTimeout: when the request times out.
    :raises ServerError: for network/DNS/protocol errors.
    """
    try:
        protocol = await self._get_protocol()
        pr = protocol.request(msg)
        r = await pr.response
        return pr, r
    except ConstructionRenderableError as e:
        # Malformed request; the protocol is still usable, no reset.
        raise ClientError("There was an error with the request.", e)
    except RequestTimedOut as e:
        await self._reset_protocol(e)
        raise RequestTimeout('Request timed out.', e)
    except (OSError, socket.gaierror, Error) as e:
        # Network-level failures (including DNS) invalidate the protocol.
        await self._reset_protocol(e)
        raise ServerError("There was an error with the request.", e)
    except asyncio.CancelledError as e:
        # Clean up on cancellation but let it propagate unchanged.
        await self._reset_protocol(e)
        raise e
Perform the request get the response .
17,864
def member_ids(self):
    """Return the device ids of this group's members."""
    members = self.raw.get(ATTR_MEMBERS, {})
    if members and ROOT_DEVICES2 in members:
        return members[ROOT_DEVICES2].get(ATTR_ID, [])
    return []
Members of this group .
17,865
def set_dimmer(self, dimmer, transition_time=None):
    """Set the dimmer value of a group, optionally with a transition."""
    values = {ATTR_LIGHT_DIMMER: dimmer}
    if transition_time is not None:
        values.update({ATTR_TRANSITION_TIME: transition_time})
    return self.set_values(values)
Set dimmer value of a group .
17,866
def print_gateway():
    """Print gateway info as JSON."""
    print("Printing information about the Gateway")
    gateway_info = api(gateway.get_gateway_info()).raw
    print(jsonify(gateway_info))
Print gateway info as JSON
17,867
def print_all_devices():
    """Print all paired devices as JSON."""
    print("Printing information about all devices paired to the Gateway")
    if not devices:
        exit(bold("No devices paired"))
    print(jsonify([device.raw for device in devices]))
Print all devices as JSON
17,868
def print_lamps():
    """Print all paired lamp devices as JSON."""
    print("Printing information about all lamps paired to the Gateway")
    lights = [device for device in devices if device.has_light_control]
    if not lights:
        exit(bold("No lamps paired"))
    print(jsonify([lamp.raw for lamp in lights]))
Print all lamp devices as JSON
17,869
def print_smart_tasks():
    """Print smart tasks as JSON."""
    print("Printing information about smart tasks")
    tasks = api(gateway.get_smart_tasks())
    if not tasks:
        exit(bold("No smart tasks defined"))
    print(jsonify([api(task).task_control.raw for task in tasks]))
Print smart tasks as JSON
17,870
def print_groups():
    """Print all groups defined in the gateway as JSON."""
    print("Printing information about all groups defined in the Gateway")
    groups = api(gateway.get_groups())
    if not groups:
        exit(bold("No groups defined"))
    print(jsonify([api(group).raw for group in groups]))
Print all groups as JSON
17,871
def LoadGDAL(filename, no_data=None):
    """Read a GDAL-supported raster (band 1) into an rdarray.

    Parameters
    ----------
    filename : str
        Path of the raster to load.
    no_data : scalar, optional
        NoData value to use when the source does not define one.  Must
        not collide with any real data value.

    Returns
    -------
    rdarray
        Data with `projection`, `geotransform` and `metadata` attached.

    Raises
    ------
    Exception
        If GDAL is unavailable, the band's datatype is unsupported, or no
        NoData value can be determined.
    """
    if not GDAL_AVAILABLE:
        raise Exception("richdem.LoadGDAL() requires GDAL.")
    allowed_types = {gdal.GDT_Byte, gdal.GDT_Int16, gdal.GDT_Int32,
                     gdal.GDT_UInt16, gdal.GDT_UInt32, gdal.GDT_Float32,
                     gdal.GDT_Float64}
    src_ds = gdal.Open(filename)
    srcband = src_ds.GetRasterBand(1)
    # FIX: validate the datatype *before* paying for the (potentially
    # huge) ReadAsArray; the original only checked afterwards.
    if srcband.DataType not in allowed_types:
        raise Exception("This datatype is not supported. Please file a bug report on RichDEM.")
    if no_data is None:
        no_data = srcband.GetNoDataValue()
        if no_data is None:
            # FIX: corrected "If should" -> "It should" in the message.
            raise Exception("The source data did not have a NoData value. Please use the no_data argument to specify one. It should not be equal to any of the actual data values. If you are using all possible data values, then the situation is pretty hopeless - sorry.")
    srcdata = rdarray(srcband.ReadAsArray(), no_data=no_data)
    srcdata.projection = src_ds.GetProjectionRef()
    srcdata.geotransform = src_ds.GetGeoTransform()
    # Copy the dataset metadata into a plain dict.
    srcdata.metadata = dict(src_ds.GetMetadata())
    _AddAnalysis(srcdata, "LoadGDAL(filename={0}, no_data={1})".format(filename, no_data))
    return srcdata
Read a GDAL file .
17,872
def SaveGDAL(filename, rda):
    """Save an rdarray to *filename* as a single-band Float32 GeoTIFF.

    Parameters:
        filename -- output path for the GeoTIFF.
        rda      -- rdarray whose data, no_data value, geotransform,
                    projection and metadata are written out.

    Raises on non-rdarray input or when GDAL is unavailable.
    """
    if type(rda) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    if not GDAL_AVAILABLE:
        raise Exception("richdem.SaveGDAL() requires GDAL.")
    driver = gdal.GetDriverByName('GTiff')
    data_type = gdal.GDT_Float32
    data_set = driver.Create(filename, xsize=rda.shape[1], ysize=rda.shape[0], bands=1, eType=data_type)
    data_set.SetGeoTransform(rda.geotransform)
    data_set.SetProjection(rda.projection)
    band = data_set.GetRasterBand(1)
    band.SetNoDataValue(rda.no_data)
    band.WriteArray(np.array(rda))
    for k, v in rda.metadata.items():
        data_set.SetMetadataItem(str(k), str(v))
    # Fix: flush caches and release the dataset handle. GDAL only guarantees
    # the file is fully written once the dataset is flushed/closed; relying
    # on garbage collection can leave a truncated file.
    band.FlushCache()
    data_set.FlushCache()
    data_set = None
Save a GDAL file .
17,873
def FillDepressions(dem, epsilon=False, in_place=False, topology='D8'):
    """Fill all depressions in a DEM.

    With epsilon truthy, an epsilon gradient is imposed so filled cells
    drain; otherwise cells are filled flat. Topology is 'D8' or 'D4'.
    Returns the filled DEM unless in_place is True.
    """
    if type(dem) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    if topology not in ['D8', 'D4']:
        raise Exception("Unknown topology!")
    if not in_place:
        dem = dem.copy()
    _AddAnalysis(dem, "FillDepressions(dem, epsilon={0})".format(epsilon))
    wrapped = dem.wrap()
    # Dispatch on (epsilon, topology) rather than nested if/else.
    fillers = {
        (True, 'D8'): _richdem.rdPFepsilonD8,
        (True, 'D4'): _richdem.rdPFepsilonD4,
        (False, 'D8'): _richdem.rdFillDepressionsD8,
        (False, 'D4'): _richdem.rdFillDepressionsD4,
    }
    fillers[(bool(epsilon), topology)](wrapped)
    dem.copyFromWrapped(wrapped)
    if not in_place:
        return dem
Fills all depressions in a DEM .
17,874
def BreachDepressions(dem, in_place=False, topology='D8'):
    """Breach all depressions in a DEM using the given topology ('D8'/'D4').

    Returns the breached DEM unless in_place is True.
    """
    if type(dem) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    if topology not in ['D8', 'D4']:
        raise Exception("Unknown topology!")
    target = dem if in_place else dem.copy()
    _AddAnalysis(target, "BreachDepressions(dem)")
    wrapped = target.wrap()
    breachers = {'D8': _richdem.rdBreachDepressionsD8, 'D4': _richdem.rdBreachDepressionsD4}
    breachers[topology](wrapped)
    target.copyFromWrapped(wrapped)
    if not in_place:
        return target
Breaches all depressions in a DEM .
17,875
def ResolveFlats(dem, in_place=False):
    """Resolve flat regions by imposing a small local (epsilon) gradient.

    Returns the adjusted DEM unless in_place is True.
    """
    if type(dem) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    target = dem if in_place else dem.copy()
    _AddAnalysis(target, "ResolveFlats(dem, in_place={in_place})".format(in_place=in_place))
    wrapped = target.wrap()
    _richdem.rdResolveFlatsEpsilon(wrapped)
    target.copyFromWrapped(wrapped)
    if not in_place:
        return target
Attempts to resolve flats by imposing a local gradient
17,876
def FlowAccumulation(dem, method=None, exponent=None, weights=None, in_place=False):
    """Calculate flow accumulation over a DEM.

    Parameters:
        dem      -- rdarray elevation model.
        method   -- one of the keys below; "Freeman" and "Holmgren"
                    additionally require `exponent`.
        exponent -- flow-partition exponent for the exponent-based methods.
        weights  -- optional float64 array of per-cell weights; by default
                    every cell contributes 1.
        in_place -- when weights are given, accumulate directly into them
                    instead of a copy.

    Returns:
        rdarray of accumulated flow.
    """
    if type(dem) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    # Methods that take no extra parameter. "Dinf" is an alias of "Tarboton".
    facc_methods = {
        "Tarboton": _richdem.FA_Tarboton,
        "Dinf": _richdem.FA_Tarboton,
        "Quinn": _richdem.FA_Quinn,
        "FairfieldLeymarieD8": _richdem.FA_FairfieldLeymarieD8,
        "FairfieldLeymarieD4": _richdem.FA_FairfieldLeymarieD4,
        "Rho8": _richdem.FA_Rho8,
        "Rho4": _richdem.FA_Rho4,
        "OCallaghanD8": _richdem.FA_OCallaghanD8,
        "OCallaghanD4": _richdem.FA_OCallaghanD4,
        "D8": _richdem.FA_D8,
        "D4": _richdem.FA_D4
    }
    # Methods requiring an exponent argument.
    facc_methods_exponent = {"Freeman": _richdem.FA_Freeman, "Holmgren": _richdem.FA_Holmgren}
    if weights is not None and in_place:
        accum = rdarray(weights, no_data=-1)
    elif weights is not None and not in_place:
        accum = rdarray(weights, copy=True, meta_obj=dem, no_data=-1)
    elif weights is None:
        # Default: unit weight per cell.
        accum = rdarray(np.ones(shape=dem.shape, dtype='float64'), meta_obj=dem, no_data=-1)
    else:
        raise Exception("Execution should never reach this point!")
    if accum.dtype != 'float64':
        raise Exception("Accumulation array must be of type 'float64'!")
    accumw = accum.wrap()
    _AddAnalysis(accum, "FlowAccumulation(dem, method={method}, exponent={exponent}, weights={weights}, in_place={in_place})".format(method=method, exponent=exponent, weights='None' if weights is None else 'weights', in_place=in_place))
    if method in facc_methods:
        facc_methods[method](dem.wrap(), accumw)
    elif method in facc_methods_exponent:
        if exponent is None:
            raise Exception('FlowAccumulation method "' + method + '" requires an exponent!')
        facc_methods_exponent[method](dem.wrap(), accumw, exponent)
    else:
        raise Exception("Invalid FlowAccumulation method. Valid methods are: " + ', '.join(list(facc_methods.keys()) + list(facc_methods_exponent.keys())))
    accum.copyFromWrapped(accumw)
    return accum
Calculates flow accumulation . A variety of methods are available .
17,877
def FlowAccumFromProps(props, weights=None, in_place=False):
    """Calculate flow accumulation from precomputed flow proportions.

    Parameters:
        props    -- rd3array of flow proportions (e.g. from FlowProportions).
        weights  -- optional float64 per-cell weights; defaults to 1 per cell.
        in_place -- when weights are given, accumulate directly into them.

    Returns:
        rdarray of accumulated flow.
    """
    if type(props) is not rd3array:
        raise Exception("A richdem.rd3array or numpy.ndarray is required!")
    if weights is not None and in_place:
        accum = rdarray(weights, no_data=-1)
    elif weights is not None and not in_place:
        accum = rdarray(weights, copy=True, meta_obj=props, no_data=-1)
    elif weights is None:
        # 2-D accumulation grid matching the first two dims of the props array.
        accum = rdarray(np.ones(shape=props.shape[0:2], dtype='float64'), meta_obj=props, no_data=-1)
    else:
        raise Exception("Execution should never reach this point!")
    if accum.dtype != 'float64':
        raise Exception("Accumulation array must be of type 'float64'!")
    accumw = accum.wrap()
    _AddAnalysis(accum, "FlowAccumFromProps(dem, weights={weights}, in_place={in_place})".format(weights='None' if weights is None else 'weights', in_place=in_place))
    _richdem.FlowAccumulation(props.wrap(), accumw)
    accum.copyFromWrapped(accumw)
    return accum
Calculates flow accumulation from flow proportions .
17,878
def FlowProportions(dem, method=None, exponent=None):
    """Calculate flow proportions for a DEM using the chosen method.

    Parameters:
        dem      -- rdarray elevation model.
        method   -- one of the keys below; "Freeman" and "Holmgren"
                    additionally require `exponent`.
        exponent -- flow-partition exponent for the exponent-based methods.

    Returns:
        rd3array of shape dem.shape + (9,) — presumably one slot per flow
        direction per cell (TODO confirm layout against _richdem docs).
    """
    if type(dem) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    # Methods that take no extra parameter. "Dinf" is an alias of "Tarboton".
    fprop_methods = {
        "Tarboton": _richdem.FM_Tarboton,
        "Dinf": _richdem.FM_Tarboton,
        "Quinn": _richdem.FM_Quinn,
        "FairfieldLeymarieD8": _richdem.FM_FairfieldLeymarieD8,
        "FairfieldLeymarieD4": _richdem.FM_FairfieldLeymarieD4,
        "Rho8": _richdem.FM_Rho8,
        "Rho4": _richdem.FM_Rho4,
        "OCallaghanD8": _richdem.FM_OCallaghanD8,
        "OCallaghanD4": _richdem.FM_OCallaghanD4,
        "D8": _richdem.FM_D8,
        "D4": _richdem.FM_D4
    }
    # Methods requiring an exponent argument.
    fprop_methods_exponent = {"Freeman": _richdem.FM_Freeman, "Holmgren": _richdem.FM_Holmgren}
    fprops = rd3array(np.zeros(shape=dem.shape + (9,), dtype='float32'), meta_obj=dem, no_data=-2)
    fpropsw = fprops.wrap()
    _AddAnalysis(fprops, "FlowProportions(dem, method={method}, exponent={exponent})".format(method=method, exponent=exponent,))
    if method in fprop_methods:
        fprop_methods[method](dem.wrap(), fpropsw)
    elif method in fprop_methods_exponent:
        if exponent is None:
            raise Exception('FlowProportions method "' + method + '" requires an exponent!')
        fprop_methods_exponent[method](dem.wrap(), fpropsw, exponent)
    else:
        raise Exception("Invalid FlowProportions method. Valid methods are: " + ', '.join(list(fprop_methods.keys()) + list(fprop_methods_exponent.keys())))
    fprops.copyFromWrapped(fpropsw)
    return fprops
Calculates flow proportions . A variety of methods are available .
17,879
def TerrainAttribute(dem, attrib, zscale=1.0):
    """Compute a terrain attribute (slope variants, aspect, curvatures).

    `attrib` selects one of the supported attributes; `zscale` scales
    elevations before the computation. Returns a float32 rdarray.
    """
    if type(dem) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    terrain_attribs = {
        "slope_riserun": _richdem.TA_slope_riserun,
        "slope_percentage": _richdem.TA_slope_percentage,
        "slope_degrees": _richdem.TA_slope_degrees,
        "slope_radians": _richdem.TA_slope_radians,
        "aspect": _richdem.TA_aspect,
        "curvature": _richdem.TA_curvature,
        "planform_curvature": _richdem.TA_planform_curvature,
        "profile_curvature": _richdem.TA_profile_curvature,
    }
    if attrib not in terrain_attribs:
        raise Exception("Invalid TerrainAttributes attribute. Valid attributes are: " + ', '.join(terrain_attribs.keys()))
    out = rdarray(np.zeros(shape=dem.shape, dtype='float32'), meta_obj=dem, no_data=-9999)
    wrapped = out.wrap()
    _AddAnalysis(out, "TerrainAttribute(dem, attrib={0}, zscale={1})".format(attrib, zscale))
    terrain_attribs[attrib](dem.wrap(), wrapped, zscale)
    out.copyFromWrapped(wrapped)
    return out
Calculates terrain attributes . A variety of methods are available .
17,880
def _join(*args):
    """Join path segments with the module delimiter.

    Each segment has any leading delimiter stripped; segments that end up
    empty are dropped before joining.
    """
    stripped = (segment.lstrip(delimiter) for segment in args)
    return delimiter.join(segment for segment in stripped if segment != '')
Join S3 bucket args together .
17,881
def initialize(self):
    """Set up bookstore settings and an aiobotocore session for this handler."""
    # Settings are resolved from the handler's traitlets config object.
    self.bookstore_settings = BookstoreSettings(config=self.config)
    self.session = aiobotocore.get_session()
Initialize a helper to get bookstore settings and session information quickly
17,882
async def put(self, path=''):
    """Publish a notebook at the given path; 400 on empty path or body."""
    self.log.info("Attempt publishing to %s", path)
    # Guard clauses: reject an empty/root path and an empty request body.
    if path in ('', '/'):
        raise web.HTTPError(400, "Must provide a path for publishing")
    model = self.get_json_body()
    if not model:
        raise web.HTTPError(400, "Cannot publish an empty model")
    await self._publish(model, path.lstrip('/'))
Publish a notebook on a given path .
17,883
async def _publish(self, model, path):
    """Publish a notebook model to S3 under the published prefix at *path*.

    Raises 400 for non-notebook models. On success responds 201 with a
    JSON body containing the full S3 path and, when the put_object
    response carries one, the object's version id.
    """
    if model['type'] != 'notebook':
        raise web.HTTPError(400, "bookstore only publishes notebooks")
    content = model['content']
    full_s3_path = s3_path(self.bookstore_settings.s3_bucket, self.bookstore_settings.published_prefix, path)
    file_key = s3_key(self.bookstore_settings.published_prefix, path)
    self.log.info(
        "Publishing to %s",
        s3_display_path(self.bookstore_settings.s3_bucket, self.bookstore_settings.published_prefix, path),
    )
    async with self.session.create_client(
        's3',
        aws_secret_access_key=self.bookstore_settings.s3_secret_access_key,
        aws_access_key_id=self.bookstore_settings.s3_access_key_id,
        endpoint_url=self.bookstore_settings.s3_endpoint_url,
        region_name=self.bookstore_settings.s3_region_name,
    ) as client:
        self.log.info("Processing published write of %s", path)
        # Notebook content is serialized as JSON for the object body.
        obj = await client.put_object(Bucket=self.bookstore_settings.s3_bucket, Key=file_key, Body=json.dumps(content))
        self.log.info("Done with published write of %s", path)
    self.set_status(201)
    resp_content = {"s3path": full_s3_path}
    # 'VersionId' is presumably only present on versioned buckets — TODO confirm.
    if 'VersionId' in obj:
        resp_content["versionID"] = obj['VersionId']
    resp_str = json.dumps(resp_content)
    self.finish(resp_str)
Publish notebook model to the path
17,884
def setup_auth(self):
    """Record the notebook token and grab an XSRF cookie for later requests."""
    self.token = self.nb_record.token
    # Hitting /login sets the _xsrf cookie; default to "" when absent.
    login_response = requests.get(f"{self.url}/login")
    self.xsrf_token = login_response.cookies.get("_xsrf", "")
Sets up token access for authorizing requests to notebook server .
17,885
def setup_request_sessions(self):
    """Create a requests.Session so headers are shared across API requests."""
    self.req_session = requests.Session()
    # Apply the instance's common headers to every request made on the session.
    self.req_session.headers.update(self.headers)
Sets up a requests . Session object for sharing headers across API requests .
17,886
async def archive(self, record: ArchiveRecord):
    """Write one queued notebook record to S3 storage.

    A per-filepath asyncio Lock serializes writes to the same path; when a
    write for this path is already in flight, the record is skipped rather
    than queued behind it. Errors are logged and never raised, so a failed
    archive cannot break the caller.
    """
    async with self.path_lock_ready:
        # Get (or lazily create) the lock guarding this particular filepath.
        lock = self.path_locks.get(record.filepath)
        if lock is None:
            lock = Lock()
            self.path_locks[record.filepath] = lock
    # NOTE(review): the locked() test and the acquire below are not atomic —
    # a concurrent archive could slip between them; confirm this is intended.
    if lock.locked():
        self.log.info("Skipping archive of %s", record.filepath)
        return
    async with lock:
        try:
            async with self.session.create_client(
                's3',
                aws_secret_access_key=self.settings.s3_secret_access_key,
                aws_access_key_id=self.settings.s3_access_key_id,
                endpoint_url=self.settings.s3_endpoint_url,
                region_name=self.settings.s3_region_name,
            ) as client:
                self.log.info("Processing storage write of %s", record.filepath)
                file_key = s3_key(self.settings.workspace_prefix, record.filepath)
                await client.put_object(Bucket=self.settings.s3_bucket, Key=file_key, Body=record.content)
                self.log.info("Done with storage write of %s", record.filepath)
        except Exception as e:
            # Best-effort archiving: log and swallow every failure.
            self.log.error('Error while archiving file: %s %s', record.filepath, e, exc_info=True)
Process a record to write to storage .
17,887
def run_pre_save_hook(self, model, path, **kwargs):
    """Queue an asynchronous S3 archive of a notebook model on save."""
    # Only notebooks are archived; every other content type is ignored.
    if model["type"] != "notebook":
        return
    serialized = json.dumps(model["content"])
    record = ArchiveRecord(
        content=serialized,
        filepath=path,
        queued_time=ioloop.IOLoop.current().time(),
    )
    # Hand the record to the event loop so the save call is not blocked.
    ioloop.IOLoop.current().spawn_callback(self.archive, record)
Send request to store notebook to S3 .
17,888
def get(self, url):
    """Perform an HTTP GET against *url*.

    self.data is sent as the query string and self.config.HEADERS as the
    request headers. Returns the raw requests.Response.
    """
    return requests.get(url, params=self.data, headers=self.config.HEADERS)
do an HTTP GET request
17,889
def post(self, url):
    """Perform an HTTP POST against *url*.

    self.data is sent as the form body and self.config.HEADERS as the
    request headers. Returns the raw requests.Response.
    """
    return requests.post(url, data=self.data, headers=self.config.HEADERS)
do a post request
17,890
def checkout(self, transparent=False, **kwargs):
    """Create a PagSeguro checkout and return the parsed response.

    When `transparent` is True the transparent-checkout endpoint is used.
    """
    self.data['currency'] = self.config.CURRENCY
    self.build_checkout_params(**kwargs)
    endpoint = self.config.TRANSPARENT_CHECKOUT_URL if transparent else self.config.CHECKOUT_URL
    response = self.post(url=endpoint)
    return PagSeguroCheckoutResponse(response.content, config=self.config)
create a pagseguro checkout
17,891
def pre_approval_ask_payment(self, **kwargs):
    """Ask for a payment on a pre-approval (subscription).

    Builds the pre-approval payment params from **kwargs, POSTs to the
    configured endpoint and returns the wrapped response.
    """
    self.build_pre_approval_payment_params(**kwargs)
    response = self.post(url=self.config.PRE_APPROVAL_PAYMENT_URL)
    return PagSeguroPreApprovalPayment(response.content, self.config)
ask for a subscription payment
17,892
def pre_approval_cancel(self, code):
    """Cancel the pre-approval (subscription) identified by *code*."""
    # The cancel URL embeds the code via %-formatting.
    response = self.get(url=self.config.PRE_APPROVAL_CANCEL_URL % code)
    return PagSeguroPreApprovalCancel(response.content, self.config)
cancel a subscription
17,893
def check_transaction(self, code):
    """Look up a single transaction by its code and return the parsed response."""
    response = self.get(url=self.config.TRANSACTION_URL % code)
    return PagSeguroNotificationResponse(response.content, self.config)
check a transaction by its code
17,894
def query_transactions(self, initial_date, final_date, page=None, max_results=None):
    """Fetch every transaction in a date range, following pagination.

    Keeps requesting pages until the API reports no paging info or the
    current page equals the total page count. Returns the concatenated
    list of transactions.
    """
    results = []
    while True:
        chunk = self._consume_query_transactions(initial_date, final_date, page, max_results)
        results.extend(chunk.transactions)
        current, total = chunk.current_page, chunk.total_pages
        if current is None or total is None or current == total:
            return results
        page = current + 1
query transaction by date range
17,895
def query_pre_approvals(self, initial_date, final_date, page=None, max_results=None):
    """Fetch every pre-approval in a date range, following pagination.

    Keeps requesting pages until the API reports no paging info or the
    current page equals the total page count. Returns the concatenated
    list of pre-approvals.
    """
    results = []
    while True:
        chunk = self._consume_query_pre_approvals(initial_date, final_date, page, max_results)
        results.extend(chunk.pre_approvals)
        current, total = chunk.current_page, chunk.total_pages
        if current is None or total is None or current == total:
            return results
        page = current + 1
query pre - approvals by date range
17,896
def add_to_cart(item_id):
    """Add a product to the session cart, then render the product list."""
    cart = Cart(session['cart'])
    changed = cart.change_item(item_id, 'add')
    # Persist the cart back to the session only when the item was added.
    if changed:
        session['cart'] = cart.to_dict()
    return list_products()
Add a product to the cart
17,897
def to_dict(self):
    """Serialize the cart's totals, items and extra amount into a plain dict."""
    fields = ("total", "subtotal", "items", "extra_amount")
    return {name: getattr(self, name) for name in fields}
Serialize the cart's attribute values to a dict
17,898
def set_connection(self, url):
    """Set the connection URL and build the Neo4j driver from it.

    Expects a URL of the form bolt://user:password@host:port (scheme may
    also be bolt+routing); raises ValueError otherwise.
    """
    u = urlparse(url)
    # Credentials must be embedded in the netloc and the scheme must be bolt-based.
    if u.netloc.find('@') > -1 and (u.scheme == 'bolt' or u.scheme == 'bolt+routing'):
        credentials, hostname = u.netloc.rsplit('@', 1)
        # NOTE(review): a password containing ':' makes this unpack raise — confirm acceptable.
        username, password, = credentials.split(':')
    else:
        raise ValueError("Expecting url format: bolt://user:password@localhost:7687" " got {0}".format(url))
    # The driver is rebuilt from the stripped host with encryption/pool settings from config.
    self.driver = GraphDatabase.driver(u.scheme + '://' + hostname, auth=basic_auth(username, password), encrypted=config.ENCRYPTED_CONNECTION, max_pool_size=config.MAX_POOL_SIZE)
    self.url = url
    self._pid = os.getpid()
    # A fresh connection has no active transaction.
    self._active_transaction = None
Sets the connection URL to the address of a Neo4j server and creates the driver
17,899
def begin(self, access_mode=None):
    """Open a new transaction; raise SystemError if one is already active."""
    if self._active_transaction:
        raise SystemError("Transaction in progress")
    session = self.driver.session(access_mode=access_mode)
    self._active_transaction = session.begin_transaction()
Begins a new transaction; raises a SystemError exception if a transaction is in progress