idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
36,800
def describe_instances(self, *instance_ids):
    """Describe the given EC2 instances, or every instance when no ids are passed."""
    params = {}
    for index, instance_id in enumerate(instance_ids, start=1):
        params["InstanceId.%d" % index] = instance_id
    query = self.query_factory(
        action="DescribeInstances", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.describe_instances)
Describe current instances .
36,801
def run_instances(self, image_id, min_count, max_count, security_groups=None,
                  key_name=None, instance_type=None, user_data=None,
                  availability_zone=None, kernel_id=None, ramdisk_id=None,
                  subnet_id=None, security_group_ids=None):
    """Run new instances.

    Either ``subnet_id`` plus ``security_group_ids`` (VPC launch) or
    ``security_groups`` (classic launch) must be supplied; anything else
    raises ValueError.
    """
    params = {
        "ImageId": image_id,
        "MinCount": str(min_count),
        "MaxCount": str(max_count),
    }
    if key_name is not None:
        params["KeyName"] = key_name
    if subnet_id is not None:
        params["SubnetId"] = subnet_id
        if security_group_ids is None:
            raise ValueError(
                "You must specify the security_group_ids with the subnet_id")
        for index, group_id in enumerate(security_group_ids, start=1):
            params["SecurityGroupId.%d" % index] = group_id
    elif security_groups is not None:
        for index, group_name in enumerate(security_groups, start=1):
            params["SecurityGroup.%d" % index] = group_name
    else:
        raise ValueError(
            "You must specify either the subnet_id and "
            "security_group_ids or security_groups")
    # Optional simple parameters; user data is base64-encoded per the API.
    optional = [
        ("UserData", None if user_data is None else b64encode(user_data)),
        ("InstanceType", instance_type),
        ("Placement.AvailabilityZone", availability_zone),
        ("KernelId", kernel_id),
        ("RamdiskId", ramdisk_id),
    ]
    for key, value in optional:
        if value is not None:
            params[key] = value
    query = self.query_factory(
        action="RunInstances", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.run_instances)
Run new instances .
36,802
def get_console_output(self, instance_id):
    """Fetch the console output of a single instance."""
    params = {"InstanceId": instance_id}
    query = self.query_factory(
        action="GetConsoleOutput", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.get_console_output)
Get the console output for a single instance .
36,803
def describe_security_groups(self, *names):
    """Describe the named security groups (all groups when no names given)."""
    group_names = {}
    if names:
        group_names = dict(("GroupName.%d" % (position + 1), name)
                           for position, name in enumerate(names))
    query = self.query_factory(
        action="DescribeSecurityGroups", creds=self.creds,
        endpoint=self.endpoint, other_params=group_names)
    return query.submit().addCallback(self.parser.describe_security_groups)
Describe security groups .
36,804
def create_security_group(self, name, description, vpc_id=None):
    """Create a security group, optionally inside a VPC."""
    params = {"GroupName": name, "GroupDescription": description}
    if vpc_id:
        params["VpcId"] = vpc_id
    query = self.query_factory(
        action="CreateSecurityGroup", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.create_security_group)
Create security group .
36,805
def describe_volumes(self, *volume_ids):
    """Describe the given EBS volumes (all volumes when none given)."""
    params = {}
    for index, volume_id in enumerate(volume_ids, start=1):
        params["VolumeId.%d" % index] = volume_id
    query = self.query_factory(
        action="DescribeVolumes", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.describe_volumes)
Describe available volumes .
36,806
def create_volume(self, availability_zone, size=None, snapshot_id=None):
    """Create a new volume from either a size or a snapshot.

    Exactly one of ``size`` / ``snapshot_id`` must be given.
    """
    params = {"AvailabilityZone": availability_zone}
    # Reject "both" and "neither" in one shot.
    if (size is None) == (snapshot_id is None):
        raise ValueError("Please provide either size or snapshot_id")
    if size is not None:
        params["Size"] = str(size)
    else:
        params["SnapshotId"] = snapshot_id
    query = self.query_factory(
        action="CreateVolume", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.create_volume)
Create a new volume .
36,807
def describe_snapshots(self, *snapshot_ids):
    """Describe the given snapshots (all snapshots when none given)."""
    params = {}
    for index, snapshot_id in enumerate(snapshot_ids, start=1):
        params["SnapshotId.%d" % index] = snapshot_id
    query = self.query_factory(
        action="DescribeSnapshots", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.snapshots)
Describe available snapshots .
36,808
def delete_snapshot(self, snapshot_id):
    """Delete a previously created snapshot."""
    query = self.query_factory(
        action="DeleteSnapshot", creds=self.creds, endpoint=self.endpoint,
        other_params={"SnapshotId": snapshot_id})
    return query.submit().addCallback(self.parser.truth_return)
Remove a previously created snapshot .
36,809
def describe_keypairs(self, *keypair_names):
    """Describe the named key pairs (all key pairs when none given)."""
    params = {}
    for index, keypair_name in enumerate(keypair_names, start=1):
        params["KeyName.%d" % index] = keypair_name
    query = self.query_factory(
        action="DescribeKeyPairs", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.describe_keypairs)
Returns information about key pairs available .
36,810
def create_keypair(self, keypair_name):
    """Create a new key pair with the given name and return its parsed data."""
    query = self.query_factory(
        action="CreateKeyPair", creds=self.creds, endpoint=self.endpoint,
        other_params={"KeyName": keypair_name})
    return query.submit().addCallback(self.parser.create_keypair)
Create a new 2048 bit RSA key pair and return a unique ID that can be used to reference the created key pair when launching new instances .
36,811
def allocate_address(self):
    """Acquire a new elastic IP address for this account."""
    query = self.query_factory(
        action="AllocateAddress", creds=self.creds,
        endpoint=self.endpoint, other_params={})
    return query.submit().addCallback(self.parser.allocate_address)
Acquire an elastic IP address to be attached subsequently to EC2 instances .
36,812
def describe_addresses(self, *addresses):
    """Describe the given elastic IPs (all allocated IPs when none given)."""
    params = {}
    for index, address in enumerate(addresses, start=1):
        params["PublicIp.%d" % index] = address
    query = self.query_factory(
        action="DescribeAddresses", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.describe_addresses)
List the elastic IPs allocated in this account .
36,813
def describe_instances(self, xml_bytes):
    """Parse a DescribeInstances XML response into a flat list of instances."""
    root = XML(xml_bytes)
    results = []
    for reservation_data in root.find("reservationSet"):
        reservation = model.Reservation(
            reservation_id=reservation_data.findtext("reservationId"),
            owner_id=reservation_data.findtext("ownerId"))
        results.extend(self.instances_set(reservation_data, reservation))
    return results
Parse the reservations XML payload that is returned from an AWS describeInstances API call .
36,814
def run_instances(self, xml_bytes):
    """Parse a RunInstances XML response into the launched instances."""
    root = XML(xml_bytes)
    group_ids = [group_data.findtext("groupId")
                 for group_data in root.find("groupSet")]
    reservation = model.Reservation(
        reservation_id=root.findtext("reservationId"),
        owner_id=root.findtext("ownerId"),
        groups=group_ids)
    return self.instances_set(root, reservation)
Parse the reservations XML payload that is returned from an AWS RunInstances API call .
36,815
def compute(self):
    """Compute and return the request signature.

    Picks SignatureVersion 1 (concatenated-params text, SHA-1) or 2
    (canonical request text, hash taken from the "Hmac*" method name) and
    delegates the actual HMAC to ``self.creds.sign``.  Raises RuntimeError
    when a signature is already present or the version is unsupported.

    Fix: the local previously named ``bytes`` shadowed the builtin; renamed
    to ``payload``.
    """
    if "Signature" in self.params:
        raise RuntimeError("Existing signature in parameters")
    if self.signature_version is not None:
        version = self.signature_version
    else:
        version = self.params["SignatureVersion"]
    if str(version) == "1":
        payload = self.old_signing_text()
        hash_type = "sha1"
    elif str(version) == "2":
        payload = self.signing_text()
        if self.signature_method is not None:
            signature_method = self.signature_method
        else:
            signature_method = self.params["SignatureMethod"]
        # "HmacSHA256" -> "sha256", etc.
        hash_type = signature_method[len("Hmac"):].lower()
    else:
        raise RuntimeError("Unsupported SignatureVersion: '%s'" % version)
    return self.creds.sign(payload, hash_type)
Compute and return the signature according to the given data .
36,816
def old_signing_text(self):
    """Return the canonical text for SignatureVersion 1 signing.

    Parameters are sorted case-insensitively by key and concatenated as
    ``keyvalue`` pairs with no separators.
    """
    ordered = sorted(self.params.items(), key=lambda item: item[0].lower())
    return "".join("%s%s" % (key, value) for key, value in ordered)
Return the text needed for signing using SignatureVersion 1 .
36,817
def signing_text(self):
    """Return the newline-joined canonical request text to be signed."""
    pieces = (
        self.endpoint.method,
        self.endpoint.get_canonical_host(),
        self.endpoint.path,
        self.get_canonical_query_params(),
    )
    return "%s\n%s\n%s\n%s" % pieces
Return the text to be signed when signing the query .
36,818
def encode(self, string):
    """Percent-encode ``string`` per the canonicalisation encoding rules.

    Unicode text is first encoded as UTF-8; only ``~`` is left unquoted.
    Note: Python 2 only (relies on the ``unicode`` builtin).
    """
    if isinstance(string, unicode):
        string = string.encode("utf-8")
    return quote(string, safe="~")
Encode the given string according to the canonicalisation encoding rules .
36,819
def from_xml(cls, xml_bytes):
    """Build an instance from an XML payload carrying Bucket/Key/UploadId."""
    root = XML(xml_bytes)
    bucket = root.findtext('Bucket')
    key = root.findtext('Key')
    upload_id = root.findtext('UploadId')
    return cls(bucket, key, upload_id)
Create an instance of this from XML bytes .
36,820
def _initialize_with_array(self, data, rowBased=True):
    """Load the matrix values from a two dimensional list.

    ``data`` is row-major when ``rowBased`` is True, column-major otherwise;
    the internal representation is always column-major.  Raises ValueError
    when the dimensions do not match ``self._rows`` x ``self._columns``.
    """
    if rowBased:
        self.matrix = []
        if len(data) != self._rows:
            raise ValueError("Size of Matrix does not match")
        for col in range(self._columns):
            column = []
            for row in range(self._rows):
                if len(data[row]) != self._columns:
                    raise ValueError("Size of Matrix does not match")
                column.append(data[row][col])
            self.matrix.append(column)
    else:
        if len(data) != self._columns:
            raise ValueError("Size of Matrix does not match")
        for column in data:
            if len(column) != self._rows:
                raise ValueError("Size of Matrix does not match")
        self.matrix = copy.deepcopy(data)
Set the matrix values from a two dimensional list .
36,821
def from_timeseries(cls, timeSeries):
    """Build a Matrix from a TimeSeries or MultiDimensionalTimeSeries.

    Raises ValueError when the series contains no entries.
    """
    width = 1
    if isinstance(timeSeries, MultiDimensionalTimeSeries):
        width = timeSeries.dimension_count()
    columns = [[] for _ in range(width)]
    for entry in timeSeries:
        # entry[0] is skipped (presumably the timestamp — TODO confirm);
        # the remaining fields become matrix values.
        for position in range(1, len(entry)):
            columns[position - 1].append(entry[position])
    if not columns[0]:
        raise ValueError("Cannot create Matrix from empty Timeseries")
    return Matrix.from_two_dim_array(len(columns), len(columns[0]), columns)
Create a new Matrix instance from a TimeSeries or MultiDimensionalTimeSeries
36,822
def from_two_dim_array(cls, cols, rows, twoDimArray):
    """Build a Matrix of the given size from a two dimensional array
    (interpreted with rowBased=False)."""
    return Matrix(cols, rows, twoDimArray, rowBased=False, isOneDimArray=False)
Create a new Matrix instance from a two dimensional array .
36,823
def get_matrix_from_list(self, rows, columns, matrix_list, rowBased=True):
    """Build a new ``rows`` x ``columns`` Matrix from ``matrix_list``."""
    return Matrix(columns, rows, matrix_list, rowBased)
Create a new Matrix instance from a matrix_list .
36,824
def set_value(self, column, row, value):
    """Set the entry at (column, row) to ``value``.

    The internal storage is column-major: the first index selects a column.
    """
    self.matrix[column][row] = value
Set the value of the Matrix at the specified column and row .
36,825
def invers(self):
    """Return the inverse matrix, computed by Gauss-Jordan elimination.

    Raises ValueError for non-square matrices.
    """
    if self._columns != self._rows:
        raise ValueError("A square matrix is needed")
    # Build the augmented matrix [A | I], appending identity columns.
    augmented = self.get_array(False)
    zeros = [0] * self._columns
    for col in range(self._columns):
        augmented.append(zeros[:])
        augmented[self._columns + col][col] = 1
    extended = Matrix.from_two_dim_array(2 * self._columns, self._rows, augmented)
    reduced = extended.gauss_jordan()
    # After reduction, the right half holds the inverse.
    reduced.matrix = reduced.matrix[self._columns:]
    reduced._columns = len(reduced.matrix)
    return reduced
Return the inverse matrix if it can be calculated .
36,826
def flatten(self):
    """Flatten a block matrix (entries that are themselves Matrixes) into a
    single Matrix of the underlying scalar entries.

    Assumes every sub-matrix has the same width as the first one.
    """
    blocksize = self.get_array()[0][0].get_width()
    width = self.get_width() * blocksize
    new_columns = [[] for _ in range(width)]
    for block_row in self.get_array():
        position = 0
        for submatrix in block_row:
            for column in submatrix.get_array(False):
                new_columns[position] += column
                position += 1
    flat_values = sum(new_columns, [])
    return Matrix(width, len(new_columns[0]), flat_values, rowBased=False)
If the current Matrix consists of block matrices as elements , this method flattens the Matrix into a single Matrix consisting only of the 2nd-level elements .
36,827
def multiply(self, multiplicator):
    """Return a new Matrix scaled element-wise by ``multiplicator``."""
    product = Matrix(self.get_width(), self.get_height())
    for row in range(self.get_height()):
        for col in range(self.get_width()):
            product.set_value(col, row, self.get_value(col, row) * multiplicator)
    return product
Return a new Matrix with a multiple .
36,828
def transform(self):
    """Return the transpose of this matrix as a new Matrix."""
    transposed = Matrix(self._rows, self._columns)
    for col_index, column in enumerate(self.matrix):
        for row_index, entry in enumerate(column):
            transposed.set_value(row_index, col_index, entry)
    return transposed
Return a new transformed matrix .
36,829
def initialize_from_matrix(cls, matrix, column):
    """Build a Vector from the given column of ``matrix``."""
    vector = Vector(matrix.get_height())
    for row in range(matrix.get_height()):
        vector.set_value(0, row, matrix.get_value(column, row))
    return vector
Create vector from matrix
36,830
def unify(self):
    """Scale this vector in place to length 1 and return it."""
    length = float(self.norm())
    for row in range(self.get_height()):
        self.set_value(0, row, self.get_value(0, row) / length)
    return self
Unifies the vector . The length of the vector will be 1 .
36,831
def moving_frequency(self, data_frame):
    """Relative change of tap frequency over a sliding time window.

    Returns (relative frequency differences, session duration).
    ``self.window`` is the window width in the same units as ``data_frame.td``.
    """
    frequencies = []
    last_time = data_frame.td[-1].astype('int')
    for start in range(0, last_time - self.window):
        in_window = ((data_frame.td >= start) &
                     (data_frame.td < start + self.window))
        taps = sum(data_frame.action_type[in_window] == 1)
        frequencies.append(taps / float(self.window))
    diff_mov_freq = ((np.array(frequencies[1:-1]) - np.array(frequencies[0:-2]))
                     / np.array(frequencies[0:-2]))
    duration = math.ceil(data_frame.td[-1])
    return diff_mov_freq, duration
This method returns moving frequency
36,832
def continuous_frequency(self, data_frame):
    """Instantaneous tap frequency (1 / inter-tap interval) and duration."""
    taps = data_frame.td[data_frame.action_type == 1]
    intervals = np.array(taps[1:-1]) - np.array(taps[0:-2])
    cont_freq = 1.0 / intervals
    duration = math.ceil(data_frame.td[-1])
    return cont_freq, duration
This method returns continuous frequency
36,833
def incoordination_score(self, data_frame):
    """Variance of the inter-tap time interval (scaled by 1000) and duration."""
    intervals = data_frame.td[1:-1].values - data_frame.td[0:-2].values
    # Only every second interval is used — presumably rows alternate between
    # key-down and key-up events; TODO(review): confirm against the caller.
    selected = intervals[np.arange(1, len(intervals), 2)]
    score = np.var(selected, dtype=np.float64) * 1000.0
    duration = math.ceil(data_frame.td[-1])
    return score, duration
This method calculates the variance of the time interval in msec between taps
36,834
def kinesia_scores(self, data_frame):
    """Number of key taps (action_type == 1) and the session duration."""
    tap_count = sum(data_frame.action_type == 1)
    duration = math.ceil(data_frame.td[-1])
    return tap_count, duration
This method calculates the number of key taps
36,835
def akinesia_times(self, data_frame):
    """Mean dwell time per key press, and the session duration.

    Pairs key-raise (action_type == 1) with key-down (action_type == 0)
    timestamps, truncating the longer series so both have equal length,
    and returns the absolute mean difference.
    """
    raises = data_frame.td[data_frame.action_type == 1]
    downs = data_frame.td[data_frame.action_type == 0]
    if len(raises) > len(downs):
        dwell = np.mean(
            downs.values - raises.values[:-(len(raises) - len(downs))])
    elif len(raises) < len(downs):
        dwell = np.mean(
            downs.values[:-(len(downs) - len(raises))] - raises.values)
    else:
        dwell = np.mean(downs.values - raises.values)
    duration = math.ceil(data_frame.td[-1])
    return np.abs(dwell), duration
This method calculates akinesia times mean dwell time on each key in milliseconds
36,836
def dysmetria_score(self, data_frame):
    """Mean Euclidean distance between taps and their targets, and duration."""
    taps = data_frame[data_frame.action_type == 0]
    distances = np.sqrt((taps.x - taps.x_target) ** 2 +
                        (taps.y - taps.y_target) ** 2)
    score = np.mean(distances)
    duration = math.ceil(data_frame.td[-1])
    return score, duration
This method calculates accuracy of target taps in pixels
36,837
def extract_features(self, data_frame, pre=''):
    """Collect every finger-tapping feature into one dictionary.

    Keys are prefixed with ``pre``; each value is the first element of the
    corresponding (value, duration) feature tuple.  On failure the error is
    logged and None is returned (preserved behaviour).

    Fix: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``Exception`` and the full traceback is now logged.
    """
    try:
        return {
            pre + 'frequency': self.frequency(data_frame)[0],
            pre + 'mean_moving_time': self.mean_moving_time(data_frame)[0],
            pre + 'incoordination_score':
                self.incoordination_score(data_frame)[0],
            pre + 'mean_alnt_target_distance':
                self.mean_alnt_target_distance(data_frame)[0],
            pre + 'kinesia_scores': self.kinesia_scores(data_frame)[0],
            pre + 'akinesia_times': self.akinesia_times(data_frame)[0],
            pre + 'dysmetria_score': self.dysmetria_score(data_frame)[0],
        }
    except Exception:
        logging.exception(
            "Error on FingerTappingProcessor process, extract features")
This method extracts all the features available to the Finger Tapping Processor class .
36,838
def __get_features_for_observation(self, data_frame=None, observation='LA-LL',
                                   skip_id=None, last_column_is_id=False):
    """Extract the features for ``observation`` from a data frame.

    Each row contributes the columns whose names contain ``observation``
    plus the row id; rows whose id equals ``skip_id`` are skipped.  Returns
    (features, ids); with ``last_column_is_id`` the id column is stripped
    from the returned features.  On failure the error is logged and None
    is returned (preserved behaviour).

    Fix: narrowed the bare ``except:`` to ``Exception`` and log the
    traceback so failures are diagnosable.
    """
    try:
        features = np.array([])
        if data_frame is None:
            data_frame = self.data_frame
        for index, row in data_frame.iterrows():
            if not skip_id == row['id']:
                features_row = np.nan_to_num(
                    row[row.keys().str.contains(observation)].values)
                features_row = np.append(features_row, row['id'])
                features = (np.vstack([features, features_row])
                            if features.size else features_row)
        if last_column_is_id:
            if np.ndim(features) > 1:
                to_return = features[:, :-1]
            else:
                to_return = features[:-1]
        else:
            to_return = features
        return to_return, data_frame['id'].values
    except Exception:
        logging.exception(" observation not found in data frame")
Extract the features for a given observation from a data frame
36,839
def predict(self, measurement, output_format='array'):
    """Predict class labels for ``measurement`` across all observations.

    Returns an int array, or its string representation when
    ``output_format`` is anything other than 'array'.
    """
    scores = np.array([])
    for observation in self.observations:
        knn = self.__get_knn_by_observation(observation)
        features, ids = self.__get_features_for_observation(
            data_frame=measurement, observation=observation,
            skip_id=3497, last_column_is_id=True)
        prediction = knn.predict(pd.DataFrame(features).T)
        scores = np.append(scores, prediction, axis=0)
    if output_format == 'array':
        return scores.astype(int)
    return np.array_str(scores.astype(int))
Method to predict the class labels for the provided data
36,840
def _namify_arguments(mapping):
    """Assign each parameter its mapping key as its ``name`` attribute.

    Returns the (mutated) parameters as a list.
    """
    named = []
    for name, parameter in mapping.items():
        parameter.name = name
        named.append(parameter)
    return named
Ensure that a mapping of names to parameters has the parameters set to the correct name .
36,841
def coerce(self, value):
    """Coerce a single value according to this parameter's settings.

    Returns the parsed value, or the default for missing/empty optional
    input.  Raises MissingParameterError for a required empty value and
    InvalidParameterValueError when range checking, parsing or validation
    fails.
    """
    if value is None:
        # Parameter not supplied at all.
        if self.optional:
            return self.default
        else:
            value = ""
    if value == "":
        # Empty value: only acceptable when allow_none is set.
        if not self.allow_none:
            raise MissingParameterError(self.name, kind=self.kind)
        return self.default
    try:
        self._check_range(value)
        parsed = self.parse(value)
        if self.validator and not self.validator(parsed):
            raise ValueError(value)
        return parsed
    except ValueError:
        try:
            # Python 2 str.decode; the raw value may not be valid UTF-8,
            # in which case the value is omitted from the message.
            value = value.decode("utf-8")
            message = "Invalid %s value %s" % (self.kind, value)
        except UnicodeDecodeError:
            message = "Invalid %s value" % self.kind
        raise InvalidParameterValueError(message)
Coerce a single value according to this parameter s settings .
36,842
def parse(self, value):
    """Convert a dictionary of raw values to a dictionary of processed values.

    Fields absent from ``value`` are coerced from None (i.e. take their
    defaults); unknown keys raise UnknownParametersError.  Note: Python 2
    only (``iteritems``, list-returning ``values()``).
    """
    result = {}
    rest = {}
    for k, v in value.iteritems():
        if k in self.fields:
            if (isinstance(v, dict)
                    and not self.fields[k].supports_multiple):
                if len(v) == 1:
                    # Single numbered parameter: unwrap the lone value.
                    v = v.values()[0]
                else:
                    raise InvalidParameterCombinationError(k)
            result[k] = self.fields[k].coerce(v)
        else:
            rest[k] = v
    # Fill in defaults for fields that were not supplied.
    for k, v in self.fields.iteritems():
        if k not in result:
            result[k] = v.coerce(None)
    if rest:
        raise UnknownParametersError(result, rest)
    return result
Convert a dictionary of raw values to a dictionary of processed values .
36,843
def format(self, value):
    """Convert a dictionary of processed values back to raw values."""
    if not isinstance(value, Arguments):
        value = value.iteritems()
    formatted = {}
    for key, item in value:
        formatted[key] = self.fields[key].format(item)
    return formatted
Convert a dictionary of processed values to a dictionary of raw values .
36,844
def extend(self, *schema_items, **kwargs):
    """Return a new Schema based on this one plus extra items and overrides.

    ``parameters``/``result``/``errors`` keyword values are merged into
    copies of this schema's values; remaining keywords override directly.
    ``schema_items`` are old-style dotted items, converted before merging.
    """
    merged = {
        'name': self.name,
        'doc': self.doc,
        'parameters': self._parameters[:],
        'result': self.result.copy() if self.result else {},
        'errors': self.errors.copy() if self.errors else set(),
    }
    if 'parameters' in kwargs:
        merged['parameters'].extend(kwargs.pop('parameters'))
    merged['result'].update(kwargs.pop('result', {}))
    merged['errors'].update(kwargs.pop('errors', set()))
    merged.update(kwargs)
    if schema_items:
        merged['parameters'].extend(self._convert_old_schema(schema_items))
    return Schema(**merged)
Add any number of schema items to a new schema .
36,845
def _convert_old_schema(self, parameters):
    """Convert old dotted-name parameters into nested List/Structure ones."""
    merged = []
    for parameter in parameters:
        path = parameter.name.split('.')
        _merge_associative_list(merged, path, parameter)
    return [self._inner_convert_old_schema(node, 1) for node in merged]
Convert an ugly old schema using dotted names to the hot new schema using List and Structure .
36,846
def finished(self):
    """Mark the progress bar done and print the final progress label."""
    self.progress_bar.set_state(ProgressBar.STATE_DONE)
    self.progress_bar.show()
Must be called to print final progress label .
36,847
def start_waiting(self):
    """Enter the waiting state and show the waiting message.

    Does nothing when already waiting.
    """
    if self.waiting:
        return
    self.waiting = True
    message = "Waiting for project to become ready for {}".format(self.msg_verb)
    self.progress_bar.show_waiting(message)
Show waiting progress bar until done_waiting is called . Only has an effect if we are in waiting state .
36,848
def s3_url_context(service_endpoint, bucket=None, object_name=None):
    """Build an S3 URL context for the given bucket and/or object.

    ``bucket`` and ``object_name`` may be bytes (decoded as UTF-8); a query
    string embedded in ``object_name`` is split off and parsed into
    (name,) or (name, value) tuples.
    """
    def parse_query(text):
        pairs = []
        for piece in text.split(u"&"):
            bits = piece.split(u"=")
            if len(bits) == 1:
                pairs.append((unquote(bits[0]),))
            elif len(bits) == 2:
                pairs.append(tuple(map(unquote, bits)))
            else:
                raise Exception("oh no")
        return pairs

    query = []
    path = []
    if bucket is None:
        path.append(u"")
    else:
        if isinstance(bucket, bytes):
            bucket = bucket.decode("utf-8")
        path.append(bucket)
        if object_name is None:
            path.append(u"")
        else:
            if isinstance(object_name, bytes):
                object_name = object_name.decode("utf-8")
            if u"?" in object_name:
                object_name, query_text = object_name.split(u"?", 1)
                query = parse_query(query_text)
            segments = object_name.split(u"/")
            if segments[0] == u"":
                segments.pop(0)
            if segments:
                path.extend(segments)
            else:
                path.append(u"")
    return _S3URLContext(
        scheme=service_endpoint.scheme.decode("utf-8"),
        host=service_endpoint.get_host().decode("utf-8"),
        port=service_endpoint.port,
        path=path,
        query=query,
    )
Create a URL based on the given service endpoint and suitable for the given bucket or object .
36,849
def list_buckets(self):
    """List all buckets owned by the authenticated sender of the request."""
    details = self._details(
        method=b"GET",
        url_context=self._url_context(),
    )
    deferred = self._submit(self._query_factory(details))
    deferred.addCallback(self._parse_list_buckets)
    return deferred
List all buckets .
36,850
def _parse_list_buckets(self, (response, xml_bytes)):
    """Parse a ListAllMyBuckets XML response into a list of Bucket objects.

    Note: uses Python 2 tuple parameter unpacking for (response, xml_bytes).
    """
    root = XML(xml_bytes)
    buckets = []
    for bucket_data in root.find("Buckets"):
        name = bucket_data.findtext("Name")
        date_text = bucket_data.findtext("CreationDate")
        # CreationDate is a timestamp string parsed by parseTime.
        date_time = parseTime(date_text)
        bucket = Bucket(name, date_time)
        buckets.append(bucket)
    return buckets
Parse XML bucket list response .
36,851
def get_bucket(self, bucket, marker=None, max_keys=None, prefix=None):
    """List the objects in a bucket, with optional pagination/filtering."""
    query_args = []
    if marker is not None:
        query_args.append(("marker", marker))
    if max_keys is not None:
        query_args.append(("max-keys", "%d" % (max_keys,)))
    if prefix is not None:
        query_args.append(("prefix", prefix))
    object_name = "?" + urlencode(query_args) if query_args else None
    details = self._details(
        method=b"GET",
        url_context=self._url_context(bucket=bucket, object_name=object_name),
    )
    deferred = self._submit(self._query_factory(details))
    deferred.addCallback(self._parse_get_bucket)
    return deferred
Get a list of all the objects in a bucket .
36,852
def get_bucket_lifecycle(self, bucket):
    """Fetch the lifecycle configuration of a bucket."""
    details = self._details(
        method=b"GET",
        url_context=self._url_context(bucket=bucket, object_name="?lifecycle"),
    )
    deferred = self._submit(self._query_factory(details))
    deferred.addCallback(self._parse_lifecycle_config)
    return deferred
Get the lifecycle configuration of a bucket .
36,853
def get_bucket_website_config(self, bucket):
    """Fetch the website configuration of a bucket."""
    details = self._details(
        method=b"GET",
        url_context=self._url_context(bucket=bucket, object_name='?website'),
    )
    deferred = self._submit(self._query_factory(details))
    deferred.addCallback(self._parse_website_config)
    return deferred
Get the website configuration of a bucket .
36,854
def get_bucket_notification_config(self, bucket):
    """Fetch the notification configuration of a bucket."""
    details = self._details(
        method=b"GET",
        url_context=self._url_context(bucket=bucket, object_name="?notification"),
    )
    deferred = self._submit(self._query_factory(details))
    deferred.addCallback(self._parse_notification_config)
    return deferred
Get the notification configuration of a bucket .
36,855
def get_bucket_versioning_config(self, bucket):
    """Fetch the versioning configuration of a bucket."""
    details = self._details(
        method=b"GET",
        url_context=self._url_context(bucket=bucket, object_name="?versioning"),
    )
    deferred = self._submit(self._query_factory(details))
    deferred.addCallback(self._parse_versioning_config)
    return deferred
Get the versioning configuration of a bucket .
36,856
def get_bucket_acl(self, bucket):
    """Fetch the access control policy of a bucket."""
    details = self._details(
        method=b"GET",
        url_context=self._url_context(bucket=bucket, object_name="?acl"),
    )
    deferred = self._submit(self._query_factory(details))
    deferred.addCallback(self._parse_acl)
    return deferred
Get the access control policy for a bucket .
36,857
def put_object(self, bucket, object_name, data=None, content_type=None,
               metadata=None, amz_headers=None, body_producer=None):
    """Store an object in a bucket; fires with the response body.

    ``metadata`` becomes x-amz-meta-* headers and ``amz_headers`` other
    x-amz-* headers.

    Fix: the dict defaults were mutable objects shared across calls
    (``metadata={}``, ``amz_headers={}``); replaced with None sentinels.
    """
    if metadata is None:
        metadata = {}
    if amz_headers is None:
        amz_headers = {}
    details = self._details(
        method=b"PUT",
        url_context=self._url_context(bucket=bucket, object_name=object_name),
        headers=self._headers(content_type),
        metadata=metadata,
        amz_headers=amz_headers,
        body=data,
        body_producer=body_producer,
    )
    d = self._submit(self._query_factory(details))
    d.addCallback(itemgetter(1))
    return d
Put an object in a bucket .
36,858
def copy_object(self, source_bucket, source_object_name, dest_bucket=None,
                dest_object_name=None, metadata=None, amz_headers=None):
    """Copy an object stored in S3 from a source to a destination bucket.

    The destination bucket/name default to the source values.

    Fix: ``amz_headers`` defaulted to a shared mutable dict that this
    method mutates (adding "copy-source"), so state leaked between calls;
    both dict defaults are now None sentinels.
    """
    if metadata is None:
        metadata = {}
    if amz_headers is None:
        amz_headers = {}
    dest_bucket = dest_bucket or source_bucket
    dest_object_name = dest_object_name or source_object_name
    amz_headers["copy-source"] = "/%s/%s" % (source_bucket, source_object_name)
    details = self._details(
        method=b"PUT",
        url_context=self._url_context(
            bucket=dest_bucket, object_name=dest_object_name,
        ),
        metadata=metadata,
        amz_headers=amz_headers,
    )
    return self._submit(self._query_factory(details))
Copy an object stored in S3 from a source bucket to a destination bucket .
36,859
def get_object(self, bucket, object_name):
    """Download an object's content from a bucket; fires with the body."""
    details = self._details(
        method=b"GET",
        url_context=self._url_context(bucket=bucket, object_name=object_name),
    )
    deferred = self._submit(self._query_factory(details))
    deferred.addCallback(itemgetter(1))
    return deferred
Get an object from a bucket .
36,860
def head_object ( self , bucket , object_name ) : details = self . _details ( method = b"HEAD" , url_context = self . _url_context ( bucket = bucket , object_name = object_name ) , ) d = self . _submit ( self . _query_factory ( details ) ) d . addCallback ( lambda ( response , body ) : _to_dict ( response . responseHeaders ) ) return d
Retrieve object metadata only .
36,861
def delete_object(self, bucket, object_name):
    """Delete an object from a bucket."""
    details = self._details(
        method=b"DELETE",
        url_context=self._url_context(bucket=bucket, object_name=object_name),
    )
    return self._submit(self._query_factory(details))
Delete an object from a bucket .
36,862
def put_object_acl(self, bucket, object_name, access_control_policy):
    """Apply an access control policy to an object; fires with the parsed ACL."""
    body = access_control_policy.to_xml()
    details = self._details(
        method=b"PUT",
        url_context=self._url_context(
            bucket=bucket, object_name='%s?acl' % (object_name,),
        ),
        body=body,
    )
    deferred = self._submit(self._query_factory(details))
    deferred.addCallback(self._parse_acl)
    return deferred
Set access control policy on an object .
36,863
def put_request_payment(self, bucket, payer):
    """Set the request-payment configuration of a bucket to ``payer``."""
    body = RequestPayment(payer).to_xml()
    details = self._details(
        method=b"PUT",
        url_context=self._url_context(bucket=bucket,
                                      object_name="?requestPayment"),
        body=body,
    )
    return self._submit(self._query_factory(details))
Set request payment configuration on bucket to payer .
36,864
def get_request_payment(self, bucket):
    """Fetch the request-payment configuration of a bucket."""
    details = self._details(
        method=b"GET",
        url_context=self._url_context(bucket=bucket,
                                      object_name="?requestPayment"),
    )
    deferred = self._submit(self._query_factory(details))
    deferred.addCallback(self._parse_get_request_payment)
    return deferred
Get the request payment configuration on a bucket .
36,865
def init_multipart_upload ( self , bucket , object_name , content_type = None , amz_headers = { } , metadata = { } ) : objectname_plus = '%s?uploads' % object_name details = self . _details ( method = b"POST" , url_context = self . _url_context ( bucket = bucket , object_name = objectname_plus ) , headers = self . _headers ( content_type ) , metadata = metadata , amz_headers = amz_headers , ) d = self . _submit ( self . _query_factory ( details ) ) d . addCallback ( lambda ( response , body ) : MultipartInitiationResponse . from_xml ( body ) ) return d
Initiate a multipart upload to a bucket .
36,866
def upload_part ( self , bucket , object_name , upload_id , part_number , data = None , content_type = None , metadata = { } , body_producer = None ) : parms = 'partNumber=%s&uploadId=%s' % ( str ( part_number ) , upload_id ) objectname_plus = '%s?%s' % ( object_name , parms ) details = self . _details ( method = b"PUT" , url_context = self . _url_context ( bucket = bucket , object_name = objectname_plus ) , headers = self . _headers ( content_type ) , metadata = metadata , body = data , ) d = self . _submit ( self . _query_factory ( details ) ) d . addCallback ( lambda ( response , data ) : _to_dict ( response . responseHeaders ) ) return d
Upload a part of data corresponding to a multipart upload .
36,867
def set_content_type(self):
    """Guess and store the content type from the object name's extension.

    Does nothing when there is no object name or a content type is
    already set.
    """
    if self.object_name and not self.content_type:
        guessed_type, _encoding = mimetypes.guess_type(
            self.object_name, strict=False)
        self.content_type = guessed_type
Set the content type based on the file extension used in the object name .
36,868
def get_headers(self, instant):
    """Build the headers needed to perform S3 operations.

    :param instant: datetime used for the x-amz-date header and signing.
    :return: dict mapping header names to values.
    """
    headers = {'x-amz-date': _auth_v4.makeAMZDate(instant)}
    if self.body_producer is None:
        data = self.data
        if data is None:
            data = b""
        headers["x-amz-content-sha256"] = hashlib.sha256(data).hexdigest()
    else:
        # A streaming body cannot be hashed up front.
        data = None
        headers["x-amz-content-sha256"] = b"UNSIGNED-PAYLOAD"
    # dict.iteritems() does not exist on Python 3; items() is correct on
    # both Python 2 and 3 here since we only iterate once.
    for key, value in self.metadata.items():
        headers["x-amz-meta-" + key] = value
    for key, value in self.amz_headers.items():
        headers["x-amz-" + key] = value
    self.set_content_type()
    if self.content_type is not None:
        headers["Content-Type"] = self.content_type
    if self.creds is not None:
        headers["Authorization"] = self.sign(
            headers, data,
            s3_url_context(self.endpoint, self.bucket, self.object_name),
            instant, method=self.action)
    return headers
Build the list of headers needed in order to perform S3 operations .
36,869
def attributes(self):
    """Yield the PlaceholderAttribute objects applicable to this element:
    the node's own 'id'/'tei-tag' attributes first, then any nested
    <attributes> children.
    """
    node_attrib = self.node.attrib
    for name in ('id', 'tei-tag'):
        if name in node_attrib:
            yield PlaceholderAttribute(name, node_attrib[name])
    for attributes_node in self.node.iterchildren('attributes'):
        for attribute in self.__iter_attributes__(attributes_node):
            yield attribute
Contain attributes applicable to this element
36,870
def divisions(self):
    """Yield the text divisions directly part of this element.

    Runs of consecutive 'part' children are wrapped into a single
    PlaceholderDivision so callers always receive division-like objects,
    in document order.
    """
    from .placeholder_division import PlaceholderDivision
    # Accumulates the current run of consecutive parts, if any.
    placeholder = None
    for item in self.__parts_and_divisions:
        if item.tag == 'part':
            # Start (or extend) a synthetic division for this run of parts.
            if not placeholder:
                placeholder = PlaceholderDivision()
            placeholder.parts.append(item)
        else:
            # A real division ends any open run of parts: flush the
            # placeholder first so document order is preserved.
            if placeholder:
                yield placeholder
                placeholder = None
            yield item
    # Flush a trailing run of parts.
    if placeholder:
        yield placeholder
Recursively get all the text divisions directly part of this element . If an element contains parts or text without tag . Those will be returned in order and wrapped with a TextDivision .
36,871
def parts(self):
    """Yield the parts directly below this element, descending into any
    non-part children to collect their parts as well.
    """
    for child in self.__parts_and_divisions:
        if child.tag != 'part':
            # Divisions expose their own parts; flatten them in order.
            for nested in child.parts:
                yield nested
        else:
            yield child
Get the parts directly below this element .
36,872
def __parts_and_divisions(self):
    """Yield the Part and Division children of this node in document
    order, wrapping any non-blank bare text (node text and child tails)
    in PlaceholderPart objects.
    """
    from .division import Division
    from .part import Part
    from .placeholder_part import PlaceholderPart

    def text_part(raw):
        # Newlines are layout artefacts; only non-blank remnants become
        # placeholder parts.
        if raw:
            cleaned = raw.replace('\n', '')
            if cleaned.strip():
                return PlaceholderPart(cleaned)
        return None

    leading = text_part(self.node.text)
    if leading is not None:
        yield leading
    for child in self.node:
        if child.tag == 'part':
            yield Part(child)
        elif child.tag == 'div':
            yield Division(child)
        trailing = text_part(child.tail)
        if trailing is not None:
            yield trailing
The parts and divisions directly part of this element .
36,873
def _get_error_values ( self , startingPercentage , endPercentage , startDate , endDate ) : if startDate is not None : possibleDates = filter ( lambda date : date >= startDate , self . _errorDates ) if 0 == len ( possibleDates ) : raise ValueError ( "%s does not represent a valid startDate." % startDate ) startIdx = self . _errorDates . index ( min ( possibleDates ) ) else : startIdx = int ( ( startingPercentage * len ( self . _errorValues ) ) / 100.0 ) if endDate is not None : possibleDates = filter ( lambda date : date <= endDate , self . _errorDates ) if 0 == len ( possibleDates ) : raise ValueError ( "%s does not represent a valid endDate." % endDate ) endIdx = self . _errorDates . index ( max ( possibleDates ) ) + 1 else : endIdx = int ( ( endPercentage * len ( self . _errorValues ) ) / 100.0 ) return self . _errorValues [ startIdx : endIdx ]
Gets the defined subset of self . _errorValues .
36,874
def confidence_interval(self, confidenceLevel):
    """Calculate the bounds within which confidenceLevel of the errors
    are closer to zero.

    :param confidenceLevel: level in [0, 1].
    :return: (underestimation, overestimation) tuple of bounds.
    :raises ValueError: if confidenceLevel is outside [0, 1].
    """
    if not (confidenceLevel >= 0 and confidenceLevel <= 1):
        raise ValueError("Parameter percentage has to be in [0,1]")
    underestimations = []
    overestimations = []
    for error in self._errorValues:
        if error is None:
            continue  # gaps in the error series are ignored
        # An exact zero counts towards both bounds.
        if error >= 0:
            overestimations.append(error)
        if error <= 0:
            underestimations.append(error)
    # Sort each side towards zero so index i is the i-th closest error.
    overestimations.sort()
    underestimations.sort(reverse=True)
    overIdx = int(len(overestimations) * confidenceLevel) - 1
    underIdx = int(len(underestimations) * confidenceLevel) - 1
    overestimation = 0.0
    underestimation = 0.0
    if overIdx >= 0:
        overestimation = overestimations[overIdx]
    else:
        # Debug output; the Python 2 print statement was converted to the
        # print() function so the module parses on Python 3.
        print(len(overestimations), confidenceLevel)
    if underIdx >= 0:
        underestimation = underestimations[underIdx]
    return underestimation, overestimation
Calculates for which value confidenceLevel% of the errors are closer to 0 .
36,875
def load_segmented_data(filename):
    """Load segmented gait time-series data from a CSV file.

    The first column is parsed as a datetime index.

    :param filename: path to the CSV file.
    :return: pandas DataFrame indexed by timestamp.
    """
    data = pd.read_csv(filename, index_col=0)
    # pd.to_datetime instead of Index.astype(np.datetime64): modern
    # pandas rejects the unit-less 'datetime64' dtype with a TypeError.
    data.index = pd.to_datetime(data.index)
    return data
Helper function to load segmented gait time series data .
36,876
def load_finger_tapping_cloudupdrs_data(filename, convert_times=1000.0):
    """Load finger-tapping data stored in the cloudupdrs CSV format.

    :param filename: path to the CSV file (last line is a footer and is
        skipped).
    :param convert_times: divisor turning raw timestamps into seconds
        (defaults to 1000.0 for millisecond input).
    :return: DataFrame with columns td, action_type, x, y, x_target,
        y_target, indexed by timestamps relative to the first sample.
    """
    raw = np.genfromtxt(filename, delimiter=',', invalid_raise=False,
                        skip_footer=1)
    # Offsets from the first sample; also (unconverted) fed to
    # pd.to_datetime to build the index, as in the original format spec.
    elapsed = raw[:, 0] - raw[0, 0]
    date_times = pd.to_datetime(elapsed)
    columns = ['td', 'action_type', 'x', 'y', 'x_target', 'y_target']
    values = {
        'td': elapsed / convert_times,
        'action_type': raw[:, 2],
        'x': raw[:, 3],
        'y': raw[:, 4],
        'x_target': raw[:, 7],
        'y_target': raw[:, 8],
    }
    return pd.DataFrame(values, index=date_times, columns=columns)
This method loads data in the cloudupdrs format for the finger tapping processor
36,877
def numerical_integration(signal, sampling_frequency):
    """Numerically integrate a signal using the trapezoidal rule.

    :param signal: sequence of uniformly sampled values.
    :param sampling_frequency: samples per unit time (Hz).
    :return: 0-d numpy array holding the integral.
    """
    # Trapezoidal rule: interior samples counted twice, endpoints once,
    # all divided by 2 * fs.  The original divided the first partial sum
    # by the sampling frequency an extra time, making the two terms
    # dimensionally inconsistent.
    integrate = (np.sum(signal[1:]) + np.sum(signal[:-1])) / (sampling_frequency * 2.0)
    return np.array(integrate)
Numerically integrate a signal with its sampling frequency.
36,878
def compute_interpeak ( data , sample_rate ) : freqs = fftfreq ( data . size , d = 1.0 / sample_rate ) f_signal = rfft ( data ) imax_freq = np . argsort ( f_signal ) [ - 2 ] freq = np . abs ( freqs [ imax_freq ] ) interpeak = np . int ( np . round ( sample_rate / freq ) ) return interpeak
Compute number of samples between signal peaks using the real part of FFT .
36,879
def non_zero_row(arr):
    """Return True iff arr is non-empty and contains no zero entries.

    An empty row returns False.
    """
    return len(arr) > 0 and all(item != 0 for item in arr)
0 . Empty row returns False .
36,880
def get_pypi_version():
    """Return this app's latest version info as published on PyPI.

    :raises VersionException: when PyPI cannot be reached or times out.
    """
    try:
        response = requests.get(PYPI_URL, timeout=HALF_SECOND_TIMEOUT)
        response.raise_for_status()
        version_str = response.json()["info"]["version"]
        return _parse_version_str(version_str)
    except requests.exceptions.ConnectionError:
        raise VersionException(UNABLE_TO_ACCESS_PYPI + " Failed to connect.")
    except requests.exceptions.Timeout:
        raise VersionException(UNABLE_TO_ACCESS_PYPI + " Timeout")
Returns the version info from pypi for this app .
36,881
def start_tasks(self):
    """Launch queued tasks until the concurrency limit is reached or the
    queue is exhausted.
    """
    while len(self.pending_results) < self.tasks_at_once and self._has_more_tasks():
        next_task, parent_result = self.tasks.popleft()
        self.execute_task(next_task, parent_result)
Start however many tasks we can based on our limits and what we have left to finish .
36,882
def get_finished_results(self):
    """Collect (task, result) pairs for completed pending results and
    remove them from the pending list.

    Iterates over a snapshot of pending_results because completed entries
    are removed from the live list — mutating a list while iterating it
    skips the element after each removal.

    :return: list of (task, result) tuples for finished tasks.
    """
    task_and_results = []
    for pending_result in list(self.pending_results):
        if not pending_result.ready():
            continue
        task_id, result = pending_result.get()
        task = self.task_id_to_task[task_id]
        # Drain queued messages before notifying the task of completion.
        self.process_all_messages_in_queue()
        task.after_run(result)
        task_and_results.append((task, result))
        self.pending_results.remove(pending_result)
    return task_and_results
Go through pending results and retrieve the results if they are done . Then start child tasks for the task that finished .
36,883
def get_route53_client(agent, region, cooperator=None):
    """Get a non-registration Route53 client.

    :param agent: HTTP agent used to issue requests.
    :param region: region object supplying credentials and client factory.
    :param cooperator: optional cooperator; defaults to the global task.
    """
    chosen_cooperator = task if cooperator is None else cooperator
    return region.get_client(
        _Route53Client,
        agent=agent,
        creds=region.creds,
        region=REGION_US_EAST_1,
        endpoint=AWSServiceEndpoint(_OTHER_ENDPOINT),
        cooperator=chosen_cooperator,
    )
Get a non - registration Route53 client .
36,884
def add(self, method_class, action, version=None):
    """Register *method_class* as the handler for *action*/*version*.

    :raises RuntimeError: if a handler is already registered for the pair.
    """
    by_version = self._by_action.setdefault(action, {})
    if version in by_version:
        raise RuntimeError("A method was already registered for action"
                           " %s in version %s" % (action, version))
    by_version[version] = method_class
Add a method class to the registry.
36,885
def check(self, action, version=None):
    """Validate that *action* is supported in *version*.

    A registration under version ``None`` accepts every version.

    :raises APIError: 400 InvalidAction or 400 InvalidVersion.
    """
    if action not in self._by_action:
        raise APIError(400, "InvalidAction", "The action %s is not valid "
                       "for this web service." % action)
    by_version = self._by_action[action]
    # No catch-all (None) registration means the version must match.
    if None not in by_version and version not in by_version:
        raise APIError(400, "InvalidVersion", "Invalid API version.")
Check if the given action is supported in the given version .
36,886
def get(self, action, version=None):
    """Return the method class handling *action* in *version*, falling
    back to the version-agnostic (None) registration.
    """
    handlers = self._by_action[action]
    if version in handlers:
        return handlers[version]
    return handlers[None]
Get the method class handling the given action and version.
36,887
def calculate_parameters(self, independentTs, dependentTs):
    """Calculate and return (intercept, slope) for the regression line.

    :param independentTs: TimeSeries with the independent values.
    :param dependentTs: TimeSeries with the dependent values.
    :raises ValueError: if the series share no dates, contain duplicate
        dates, or the x values have no variance.
    """
    listX, listY = self.match_time_series(independentTs, dependentTs)
    if len(listX) == 0 or len(listY) == 0:
        raise ValueError("Lists need to have some equal dates or cannot be empty")
    if len(listX) != len(listY):
        raise ValueError("Each Timeseries need to have distinct dates")
    # Materialized as lists: Python 3's map() returns a one-shot
    # iterator, and these sequences are consumed more than once below.
    xValues = [entry[1] for entry in listX]
    yValues = [entry[1] for entry in listY]
    xMean = FusionMethods["mean"](xValues)
    yMean = FusionMethods["mean"](yValues)
    xDeviation = [value - xMean for value in xValues]
    yDeviation = [value - yMean for value in yValues]
    try:
        # Ordinary least squares: slope = cov(x, y) / var(x).
        parameter1 = (sum(x * y for x, y in zip(xDeviation, yDeviation))
                      / sum(x * x for x in xDeviation))
    except ZeroDivisionError:
        raise ValueError("Not enough distinct x values")
    parameter0 = yMean - (parameter1 * xMean)
    return (parameter0, parameter1)
Calculate and return the parameters for the regression line
36,888
def calculate_parameters_with_confidence(self, independentTs, dependentTs,
                                         confidenceLevel, samplePercentage=.1):
    """Like calculate_parameters, but additionally returns the confidence
    interval for *confidenceLevel*, estimated by holding out
    *samplePercentage* of the dependent series and validating the fitted
    line against it.

    :return: (intercept, slope, confidence_interval) tuple.
    """
    # Hold out a validation sample; fit on the remainder.
    sampleY, trainingY = dependentTs.sample(samplePercentage)
    sampleX = TimeSeries.from_twodim_list(
        self.match_time_series(sampleY, independentTs)[1])
    trainingX = TimeSeries.from_twodim_list(
        self.match_time_series(trainingY, independentTs)[1])
    n, m = self.calculate_parameters(trainingX, trainingY)
    # Validate: signed prediction errors drive the interval bounds.
    prediction = self.predict(sampleX, n, m)
    msd = MSD()
    msd.initialize(prediction, sampleY)
    return (n, m, msd.confidence_interval(confidenceLevel))
Same functionality as calculate_parameters just that additionally the confidence interval for a given confidenceLevel is calculated . This is done based on a sample of the dependentTs training data that is validated against the prediction . The signed error of the predictions and the sample is then used to calculate the bounds of the interval .
36,889
def match_time_series(self, timeseries1, timeseries2):
    """Return two lists restricted to the dates present in both series.

    :return: (listX, listY) — entries of timeseries1 and timeseries2
        whose dates occur in both, each in its original order.
    """
    entries1 = timeseries1.to_twodim_list()
    entries2 = timeseries2.to_twodim_list()
    # Sets instead of lists: O(1) membership, and the results are real
    # lists (Python 3's filter()/map() return one-shot iterators, which
    # would break callers that take len() of the return values).
    shared = (set(entry[0] for entry in entries1)
              & set(entry[0] for entry in entries2))
    listX = [entry for entry in entries1 if entry[0] in shared]
    listY = [entry for entry in entries2 if entry[0] in shared]
    return listX, listY
Return two lists of the two input time series with matching dates
36,890
def lstsq(cls, a, b):
    """Return the least-squares solution of ``a * x = b`` using the
    pseudoinverse of *a*.

    :raises ValueError: if the matrix heights differ or *b* has more than
        one column.
    """
    if a.get_height() != b.get_height():
        raise ValueError("Size of input matrices does not match")
    if b.get_width() != 1:
        raise ValueError("Matrix with dependent variable has more than 1 column")
    return a.pseudoinverse() * b
Return the least - squares solution to a linear matrix equation .
36,891
def _get_historic_means ( self , timeSeries ) : historyLength = self . _historyLength historicMeans = [ ] append = historicMeans . append for startIdx in xrange ( len ( timeSeries ) - historyLength - 1 ) : value = 0 for idx in xrange ( startIdx , startIdx + historyLength ) : value += abs ( timeSeries [ idx + 1 ] [ 1 ] - timeSeries [ idx ] [ 1 ] ) append ( value / float ( historyLength ) ) return historicMeans
Calculates the mean value for the history of the MeanAbsoluteScaledError .
36,892
def write_model(self, filename='scores', filepath='', output_format='csv'):
    """Calculate per-observation scores and write them to a file.

    If *output_format* is not 'csv' the scores are printed instead.

    :param filename: base name of the output file (without extension).
    :param filepath: directory the file is written into.
    :param output_format: 'csv' writes a file; anything else prints.
    """
    scores_array = np.array([])
    for obs in self.observations:
        c, sd = self.__get_centroids_sd(obs)
        points, ids = self.__get_features_for_observation(
            observation=obs, last_column_is_id=True)
        b = np.array([])
        for p in points:
            b = np.append(b, [self.get_single_score(p, centroids=c, sd=sd)])
        # First observation seeds the matrix; later ones stack as rows.
        scores_array = np.vstack([scores_array, b]) if scores_array.size else b
    # Prepend the id column; observations become the remaining columns.
    scores_array = np.concatenate(
        (ids[:, np.newaxis], scores_array.transpose()), axis=1)
    header = 'id,' + ','.join(self.observations)
    try:
        if output_format == 'csv':
            filename = join(filepath, filename) + '.' + output_format
            np.savetxt(filename, scores_array, delimiter=",", fmt='%i',
                       header=header, comments='')
        else:
            print(scores_array)
    except Exception:
        # 'except Exception' instead of a bare 'except:' so SystemExit
        # and KeyboardInterrupt are no longer swallowed; the best-effort
        # logging behaviour is kept.
        logging.error("Unexpected error on writing output")
This method calculates the scores and writes them to a file the data frame received . If the output format is other than csv it will print the scores .
36,893
def run(self):
    """Download the contents of the specified project name or id into
    dest_directory, reporting progress and surfacing any warnings.
    """
    files_to_download = self.get_files_to_download()
    total_size = self.get_total_files_size(files_to_download)
    if self.file_download_pre_processor:
        self.run_preprocessor(files_to_download)
    self.try_create_dir(self.dest_directory)
    progress = ProgressPrinter(total_size, msg_verb='downloading')
    self.download_files(files_to_download, progress)
    progress.finished()
    # Surface any per-file warnings after the progress line is closed.
    warnings = self.check_warnings()
    if warnings:
        progress.show_warning(warnings)
Download the contents of the specified project name or id to dest_directory .
36,894
def make_local_directories(self):
    """Create the directory tree under dest_directory needed to hold the
    files about to be downloaded.
    """
    for remote_path in self._get_parent_remote_paths():
        self._assure_dir_exists(
            os.path.join(self.dest_directory, remote_path))
Create directories necessary to download the files into dest_directory
36,895
def make_big_empty_files(self):
    """Pre-create each destination file at its final size so parallel
    workers can later seek to their chunk offset and write in place.
    """
    for file_url in self.file_urls:
        target = file_url.get_local_path(self.dest_directory)
        with open(target, "wb") as handle:
            if file_url.size > 0:
                # One NUL byte at (size - 1) extends the file to its full
                # length (sparse where the filesystem supports it).
                handle.seek(int(file_url.size) - 1)
                handle.write(b'\0')
Write out an empty file so the workers can seek to where they should write and write their data.
36,896
def check_downloaded_files_sizes(self):
    """Verify every downloaded file has its expected size.

    The files were pre-allocated, so this only catches overruns.
    Raises ValueError (via check_file_size) on a mismatch.
    """
    for file_url in self.file_urls:
        self.check_file_size(
            file_url.size, file_url.get_local_path(self.dest_directory))
Make sure the files sizes are correct . Since we manually create the files this will only catch overruns . Raises ValueError if there is a problematic file .
36,897
def _verify_download_complete ( self ) : if self . actual_bytes_read > self . bytes_to_read : raise TooLargeChunkDownloadError ( self . actual_bytes_read , self . bytes_to_read , self . local_path ) elif self . actual_bytes_read < self . bytes_to_read : raise PartialChunkDownloadError ( self . actual_bytes_read , self . bytes_to_read , self . local_path )
Make sure we received all the data
36,898
def optimize(self, timeSeries, forecastingMethods=None, startingPercentage=0.0, endPercentage=100.0):
    """Run the optimization of the given TimeSeries.

    Every method in *forecastingMethods* is optimized; the one with the
    smallest error on the configured error interval wins, gets its best
    parameters installed, and is returned as [method, error, parameters].

    :raises ValueError: if forecastingMethods is None or empty.
    """
    if forecastingMethods is None or len(forecastingMethods) == 0:
        raise ValueError("forecastingMethods cannot be empty.")
    # Remember the error interval for the comparison below.
    self._startingPercentage = startingPercentage
    self._endPercentage = endPercentage
    results = []
    for forecastingMethod in forecastingMethods:
        # Each entry: [method, error_measure, best_parameters_dict].
        results.append([forecastingMethod] + self.optimize_forecasting_method(timeSeries, forecastingMethod))
    # Smallest error on the configured interval wins.
    bestForecastingMethod = min(results, key=lambda item: item[1].get_error(self._startingPercentage, self._endPercentage))
    # Install the winning parameter set on the winning method instance.
    for parameter in bestForecastingMethod[2]:
        bestForecastingMethod[0].set_parameter(parameter, bestForecastingMethod[2][parameter])
    return bestForecastingMethod
Runs the optimization of the given TimeSeries .
36,899
def _generate_next_parameter_value ( self , parameter , forecastingMethod ) : interval = forecastingMethod . get_interval ( parameter ) precision = 10 ** self . _precison startValue = interval [ 0 ] endValue = interval [ 1 ] if not interval [ 2 ] : startValue += precision if interval [ 3 ] : endValue += precision while startValue < endValue : parameterValue = startValue yield parameterValue startValue += precision
Generator for a specific parameter of the given forecasting method .