signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def compute_index(self, axis, data_object, compute_diff=True):
    """Compute the index after a number of rows/columns have been removed.

    Note:
        In order for this to be used properly, the indexes must not be
        changed before you compute this.

    Args:
        axis: The axis to extract the index from (0 for index, 1 for
            columns).
        data_object: The new data object to extract the index from.
        compute_diff: True to use `self` to compute the index rather than
            data_object. This is used when the dimension of the index may
            have changed, but the deleted rows/columns are unknown.

    Returns:
        A new pandas.Index object.
    """
    def pandas_index_extraction(df, axis):
        # Pull the axis labels off a single partition frame; a partition
        # with no columns attribute yields an empty Index instead.
        if not axis:
            return df.index
        else:
            try:
                return df.columns
            except AttributeError:
                return pandas.Index([])
    index_obj = self.index if not axis else self.columns
    # Only pass the old partition blocks when we intend to diff against them.
    old_blocks = self.data if compute_diff else None
    new_indices = data_object.get_indices(
        axis=axis,
        index_func=lambda df: pandas_index_extraction(df, axis),
        old_blocks=old_blocks,
    )
    # With compute_diff the result is positional, so select from our own
    # labels; otherwise the extracted labels are already the answer.
    return index_obj[new_indices] if compute_diff else new_indices
def nameop_put_collision(cls, collisions, nameop):
    """Record a nameop as collided with another nameop in this block.

    :param collisions: dict mapping a history-ID key to the list of
        history IDs that collided on it; updated in place.
    :param nameop: nameop dict previously marked by nameop_set_collided
        (must carry '__collided_history_id_key__' and
        '__collided_history_id__').

    Aborts the process if the collision bookkeeping is inconsistent,
    since that indicates a bug in collision detection.
    """
    # these are supposed to have been put here by nameop_set_collided
    history_id_key = nameop.get('__collided_history_id_key__', None)
    history_id = nameop.get('__collided_history_id__', None)
    try:
        assert cls.nameop_is_collided(nameop), "Nameop not collided"
        assert history_id_key is not None, "Nameop missing collision info"
        assert history_id is not None, "Nameop missing collision info"
    except Exception as e:
        # FIX: `except Exception, e` is Python-2-only syntax (a
        # SyntaxError on Python 3); `as` works on 2.6+ and 3.x.
        log.exception(e)
        log.error("FATAL: BUG: bad collision info")
        os.abort()
    # FIX: dict.has_key() was removed in Python 3; setdefault expresses
    # the same "create list on first hit, then append" in one step.
    collisions.setdefault(history_id_key, []).append(history_id)
def rm_docs(self):
    """Delete every converted document recorded in ``self.created``.

    Files that no longer exist on disk are silently skipped.
    """
    existing = (name for name in self.created if os.path.exists(name))
    for name in existing:
        os.unlink(name)
def parse_args(self, argv):
    """Parse command-line arguments/options.

    :param argv: argument list to parse, usually ``sys.argv[1:]``
    :type argv: list
    :returns: parsed arguments
    :rtype: :py:class:`argparse.Namespace`
    """
    # desc/epilog rely on implicit adjacent-string concatenation.
    desc = 'Report on AWS service limits and usage via boto3, optionally ' 'warn about any services with usage nearing or exceeding their' ' limits. For further help, see ' '<http://awslimitchecker.readthedocs.org/>'
    # ###### IMPORTANT license notice ######
    # Pursuant to Sections 5(b) and 13 of the GNU Affero General Public
    # License, version 3, this notice MUST NOT be removed, and MUST be
    # displayed to ALL USERS of this software, even if they interact with
    # it remotely over a network.
    # See the "Development" section of the awslimitchecker documentation
    # (docs/source/development.rst or
    # <http://awslimitchecker.readthedocs.org/en/latest/development.html>)
    # for further information.
    # ###### IMPORTANT license notice ######
    epilog = 'awslimitchecker is AGPLv3-licensed Free Software. Anyone ' 'using this program, even remotely over a network, is ' 'entitled to a copy of the source code. Use `--version` for ' 'information on the source code location.'
    p = argparse.ArgumentParser(description=desc, epilog=epilog)
    p.add_argument('-S', '--service', action='store', nargs='*',
                   help='perform action for only the specified service name'
                        '; see -s|--list-services for valid names')
    p.add_argument('--skip-service', action='append', default=[],
                   dest='skip_service',
                   help='avoid performing actions for the specified service'
                        ' name; see -s|--list-services for valid names')
    p.add_argument('--skip-check', action='append', default=[],
                   dest='skip_check',
                   help='avoid performing actions for the specified check'
                        ' name')
    p.add_argument('-s', '--list-services', action='store_true',
                   default=False,
                   help='print a list of all AWS service types that '
                        'awslimitchecker knows how to check')
    p.add_argument('-l', '--list-limits', action='store_true',
                   default=False,
                   help='print all AWS effective limits in "service_name/'
                        'limit_name" format')
    p.add_argument('--list-defaults', action='store_true', default=False,
                   help='print all AWS default limits in "service_name/'
                        'limit_name" format')
    # StoreKeyValuePair parses "service_name/limit_name=value" pairs
    # (custom argparse action defined elsewhere in this package).
    p.add_argument('-L', '--limit', action=StoreKeyValuePair,
                   help='override a single AWS limit, specified in '
                        '"service_name/limit_name=value" format; can be '
                        'specified multiple times.')
    p.add_argument('-u', '--show-usage', action='store_true', default=False,
                   help='find and print the current usage of all AWS '
                        'services with known limits')
    p.add_argument('--iam-policy', action='store_true', default=False,
                   help='output a JSON serialized IAM Policy '
                        'listing the required permissions for '
                        'awslimitchecker to run correctly.')
    p.add_argument('-W', '--warning-threshold', action='store', type=int,
                   default=80,
                   help='default warning threshold (percentage of '
                        'limit); default: 80')
    p.add_argument('-C', '--critical-threshold', action='store', type=int,
                   default=99,
                   help='default critical threshold (percentage of '
                        'limit); default: 99')
    p.add_argument('-P', '--profile', action='store', dest='profile_name',
                   type=str, default=None,
                   help='Name of profile in the AWS cross-sdk credentials '
                        'file to use credentials from; similar to the '
                        'corresponding awscli option')
    p.add_argument('-A', '--sts-account-id', action='store', type=str,
                   default=None,
                   help='for use with STS, the Account ID of the '
                        'destination account (account to assume a role in)')
    p.add_argument('-R', '--sts-account-role', action='store', type=str,
                   default=None,
                   help='for use with STS, the name of the IAM role to '
                        'assume')
    p.add_argument('-E', '--external-id', action='store', type=str,
                   default=None,
                   help='External ID to use when assuming '
                        'a role via STS')
    p.add_argument('-M', '--mfa-serial-number', action='store', type=str,
                   default=None,
                   help='MFA Serial Number to use when '
                        'assuming a role via STS')
    p.add_argument('-T', '--mfa-token', action='store', type=str,
                   default=None,
                   help='MFA Token to use when assuming '
                        'a role via STS')
    p.add_argument('-r', '--region', action='store', type=str,
                   default=None,
                   help='AWS region name to connect to; required for STS')
    p.add_argument('--skip-ta', action='store_true', default=False,
                   help='do not attempt to pull *any* information on limits'
                        ' from Trusted Advisor')
    # The three Trusted Advisor refresh modes are mutually exclusive.
    g = p.add_mutually_exclusive_group()
    g.add_argument('--ta-refresh-wait', dest='ta_refresh_wait',
                   action='store_true', default=False,
                   help='If applicable, refresh all Trusted Advisor '
                        'limit-related checks, and wait for the refresh to'
                        ' complete before continuing.')
    g.add_argument('--ta-refresh-trigger', dest='ta_refresh_trigger',
                   action='store_true', default=False,
                   help='If applicable, trigger refreshes for all Trusted '
                        'Advisor limit-related checks, but do not wait for '
                        'them to finish refreshing; trigger the refresh '
                        'and continue on (useful to ensure checks are '
                        'refreshed before the next scheduled run).')
    g.add_argument('--ta-refresh-older', dest='ta_refresh_older',
                   action='store', type=int, default=None,
                   help='If applicable, trigger refreshes for all Trusted '
                        'Advisor limit-related checks with results more '
                        'than this number of seconds old. Wait for the '
                        'refresh to complete before continuing.')
    p.add_argument('--ta-refresh-timeout', dest='ta_refresh_timeout',
                   type=int, action='store', default=None,
                   help='If waiting for TA checks to refresh, wait up to '
                        'this number of seconds before continuing on '
                        'anyway.')
    p.add_argument('--no-color', action='store_true', default=False,
                   help='do not colorize output')
    p.add_argument('--no-check-version', action='store_false', default=True,
                   dest='check_version',
                   help='do not check latest version at startup')
    p.add_argument('-v', '--verbose', dest='verbose', action='count',
                   default=0,
                   help='verbose output. specify twice for debug-level '
                        'output.')
    p.add_argument('-V', '--version', dest='version', action='store_true',
                   default=False,
                   help='print version number and exit.')
    args = p.parse_args(argv)
    # Collapse the mutually-exclusive TA flags into one mode value:
    # 'wait', 'trigger', an integer age threshold in seconds, or None.
    args.ta_refresh_mode = None
    if args.ta_refresh_wait:
        args.ta_refresh_mode = 'wait'
    elif args.ta_refresh_trigger:
        args.ta_refresh_mode = 'trigger'
    elif args.ta_refresh_older is not None:
        args.ta_refresh_mode = args.ta_refresh_older
    return args
def camel_case(string):
    """Convert a string to camel case.

    For example::

        camel_case('one_two_three') -> 'oneTwoThree'

    Leading underscores are preserved ('_x_y' -> '_xY').
    """
    if not string:
        return string
    pieces = snake_case(string).split('_')
    # Consume pieces until the first non-empty one: empty pieces (from
    # leading underscores) contribute a literal '_', the first real piece
    # is kept as-is, and everything after it gets title-cased.
    head = ''
    consumed = 0
    for piece in pieces:
        consumed += 1
        head += piece if piece else '_'
        if piece:
            break
    tail = pieces[consumed:]
    return head + ''.join(p.title() for p in tail)
def over(self, window):
    """Attach a window clause to be applied to downstream analytic
    expressions, returning a new grouped expression.
    """
    clauses = dict(
        having=self._having,
        order_by=self._order_by,
        window=window,
    )
    return GroupedTableExpr(self.table, self.by, **clauses)
def add_empty_dataset(self, value=1):
    """Create an empty data set.

    Empty means: all elements share the same value.

    Parameters
    ----------
    value : float, optional
        which value to assign all element parameters. Default is one.

    Returns
    -------
    pid : identifier returned by ``self.add_data`` for the new data set
    """
    # One float per grid element, every entry set to `value`.
    filled = np.full(self.grid.nr_of_elements, value, dtype=float)
    return self.add_data(filled)
def on_focusout(self, event, a):
    """Restore placeholder text when the entry loses focus while empty.

    Called whenever anywhere except the entry is clicked; re-inserts the
    default text for key *a* in grey if the user left the field blank.
    """
    widget = event.widget
    if not widget.get():
        widget.insert(0, default_text[a])
        widget.config(fg='grey')
def _primary_input(self, index):
    '''Serialize the primary input for the zkproof of joinsplit *index*.

    Concatenates the joinsplit's fields in wire order and returns the
    raw bytes.
    '''
    joinsplit = self.tx_joinsplits[index]
    # Order matters: this is the canonical field ordering for the proof.
    pieces = (
        joinsplit.anchor,
        joinsplit.nullifiers,
        joinsplit.commitments,
        joinsplit.vpub_old,
        joinsplit.vpub_new,
        self.hsigs[index],
        joinsplit.vmacs,
    )
    accumulated = z.ZcashByteData()
    for piece in pieces:
        accumulated += piece
    return accumulated.to_bytes()
def choose_labels(alternatives):
    """Prompt the user to select several labels from the provided
    alternatives.

    At least one label must be selected.

    :param list alternatives: Sequence of options that are available to
        select from
    :return: set of the selected labels
    :raises ValueError: if *alternatives* is empty
    :raises TypeError: if *alternatives* is not a list
    """
    if not alternatives:
        raise ValueError
    if not isinstance(alternatives, list):
        raise TypeError
    # Number the options starting at 1; keys are the strings the user types.
    choice_map = OrderedDict(
        ('{}'.format(i), value) for i, value in enumerate(alternatives, 1)
    )
    # prepend a termination option
    input_terminator = '0'
    choice_map.update({input_terminator: '<done>'})
    choice_map.move_to_end('0', last=False)
    choice_indexes = choice_map.keys()
    choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
    prompt = '\n'.join((
        'Select labels:',
        '\n'.join(choice_lines),
        'Choose from {}'.format(', '.join(choice_indexes)),
    ))
    user_choices = set()
    user_choice = None
    # Keep prompting until the user enters the terminator ('0') with at
    # least one label already selected.
    while not user_choice == input_terminator:
        if user_choices:
            note('Selected labels: [{}]'.format(', '.join(user_choices)))
        user_choice = click.prompt(
            prompt, type=click.Choice(choice_indexes),
            default=input_terminator)
        done = user_choice == input_terminator
        new_selection = user_choice not in user_choices
        nothing_selected = not user_choices
        if not done and new_selection:
            user_choices.add(choice_map[user_choice])
        if done and nothing_selected:
            # Refuse to finish with an empty selection; loop again.
            error('Please select at least one label')
            user_choice = None
    return user_choices
def get_dedicated_package(self, ha_enabled=False):
    """Retrieve the dedicated firewall product package.

    :param bool ha_enabled: True to request the High Availability
        package, False for the standard (no HA) package
    :returns: A dictionary containing the dedicated virtual server
        firewall package
    """
    # Pick the product description to filter on, then query package id 0.
    description = (
        'Hardware Firewall (High Availability)'
        if ha_enabled
        else 'Hardware Firewall (Dedicated)'
    )
    _filter = utils.NestedDict({})
    _filter['items']['description'] = utils.query_filter(description)
    return self.prod_pkg.getItems(id=0, filter=_filter.to_dict())
def addAccount ( self , username , domain , password , avatars = None , protocol = u'email' , disabled = 0 , internal = False , verified = True ) : """Create a user account , add it to this LoginBase , and return it . This method must be called within a transaction in my store . @ param username : the user ' s name . @ param domain : the domain part of the user ' s name [ XXX TODO : this really ought to say something about whether it ' s a Q2Q domain , a SIP domain , an HTTP realm , or an email address domain - right now the assumption is generally that it ' s an email address domain , but not always ] @ param password : A shared secret . @ param avatars : ( Optional ) . A SubStore which , if passed , will be used by cred as the target of all adaptations for this user . By default , I will create a SubStore , and plugins can be installed on that substore using the powerUp method to provide implementations of cred client interfaces . @ raise DuplicateUniqueItem : if the ' avatars ' argument already contains a LoginAccount . @ return : an instance of a LoginAccount , with all attributes filled out as they are passed in , stored in my store ."""
# unicode ( None ) = = u ' None ' , kids . if username is not None : username = unicode ( username ) if domain is not None : domain = unicode ( domain ) if password is not None : password = unicode ( password ) if self . accountByAddress ( username , domain ) is not None : raise DuplicateUser ( username , domain ) if avatars is None : avatars = self . makeAvatars ( domain , username ) subStore = avatars . open ( ) # create this unconditionally ; as the docstring says , we must be run # within a transaction , so if something goes wrong in the substore # transaction this item ' s creation will be reverted . . . la = LoginAccount ( store = self . store , password = password , avatars = avatars , disabled = disabled ) def createSubStoreAccountObjects ( ) : LoginAccount ( store = subStore , password = password , disabled = disabled , avatars = subStore ) la . addLoginMethod ( localpart = username , domain = domain , protocol = protocol , internal = internal , verified = verified ) subStore . transact ( createSubStoreAccountObjects ) return la
def create_account_invitation(self, account_id, body, **kwargs):  # noqa: E501
    """Create a user invitation.  # noqa: E501

    An endpoint for inviting a new or an existing user to join the
    account.

    **Example usage:** `curl -X POST
    https://api.us-east-1.mbedcloud.com/v3/accouns/{account-id}/user-invitations
    -d {"email": "myemail@company.com"} -H 'content-type: application/json'
    -H 'Authorization: Bearer API_KEY'`  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.create_account_invitation(account_id, body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param UserInvitationReq body: A user invitation object with
        attributes. (required)
    :return: UserInvitationResp
             If the method is called asynchronously, returns the request
             thread.
    """
    # Always ask the lower layer for just the deserialized data.
    kwargs['_return_http_data_only'] = True
    call = self.create_account_invitation_with_http_info
    if kwargs.get('asynchronous'):
        # Asynchronous: hand the request thread back to the caller.
        return call(account_id, body, **kwargs)  # noqa: E501
    response = call(account_id, body, **kwargs)  # noqa: E501
    return response
def remove_credentials(self, credIDs):
    """Remove credential IDs from the database.

    :param credIDs: iterable of credential ``id`` values to delete from
        the ``users`` table
    """
    # IMPROVED: the original opened a fresh cursor on every loop
    # iteration; reuse one cursor and batch the deletes with
    # executemany, closing it even if a delete raises.
    cur = self.conn.cursor()
    try:
        cur.executemany("DELETE FROM users WHERE id=?",
                        [[credID] for credID in credIDs])
    finally:
        cur.close()
def getfile(self, project_id, file_path, ref):
    """Receive information about a file in a repository: name, size,
    content.  Note that file content is Base64 encoded.

    :param project_id: project_id
    :param file_path: Full path to file. Ex. lib/class.rb
    :param ref: The name of branch, tag or commit
    :return: decoded JSON dict on HTTP 200, ``False`` otherwise
    """
    endpoint = '{0}/{1}/repository/files'.format(self.projects_url,
                                                 project_id)
    payload = {'file_path': file_path, 'ref': ref}
    response = requests.get(endpoint,
                            headers=self.headers,
                            data=payload,
                            verify=self.verify_ssl,
                            auth=self.auth,
                            timeout=self.timeout)
    if response.status_code != 200:
        return False
    return response.json()
def get_system_info ( self , x = 255 , y = 255 ) : """Discover the integrity and resource availability of a whole SpiNNaker system . This command performs : py : meth : ` . get _ chip _ info ` on all working chips in the system returning an enhanced : py : class : ` dict ` ( : py : class : ` . SystemInfo ` ) containing a look - up from chip coordinate to : py : class : ` . ChipInfo ` . In addition to standard dictionary functionality , : py : class : ` . SystemInfo ` provides a number of convenience methods , which allow convenient iteration over various aspects of the information stored . . . note : : This method replaces the deprecated : py : meth : ` . get _ machine ` method . To build a : py : class : ` ~ rig . place _ and _ route . Machine ` for place - and - route purposes , the : py : func : ` rig . place _ and _ route . utils . build _ machine ` utility function may be used with : py : meth : ` . get _ system _ info ` like so : : > > from rig . place _ and _ route . utils import build _ machine > > sys _ info = mc . get _ system _ info ( ) > > machine = build _ machine ( sys _ info ) Parameters x : int y : int The coordinates of the chip from which system exploration should begin , by default ( 255 , 255 ) . Most users will not need to change these parameters . Returns : py : class : ` . SystemInfo ` An enhanced : py : class : ` dict ` object { ( x , y ) : : py : class : ` . ChipInfo ` , . . . } with a number of utility methods for accessing higher - level system information ."""
# A quick way of getting a list of working chips p2p_tables = self . get_p2p_routing_table ( x , y ) # Calculate the extent of the system max_x = max ( x_ for ( x_ , y_ ) , r in iteritems ( p2p_tables ) if r != consts . P2PTableEntry . none ) max_y = max ( y_ for ( x_ , y_ ) , r in iteritems ( p2p_tables ) if r != consts . P2PTableEntry . none ) sys_info = SystemInfo ( max_x + 1 , max_y + 1 ) for ( x , y ) , p2p_route in iteritems ( p2p_tables ) : if p2p_route != consts . P2PTableEntry . none : try : sys_info [ ( x , y ) ] = self . get_chip_info ( x , y ) except SCPError : # The chip was listed in the P2P table but is not # responding . Assume it is dead and don ' t include it in # the info returned . pass return sys_info
def solar_azimuth(self, dateandtime, latitude, longitude):
    """Calculate the azimuth angle of the sun.

    :param dateandtime: The date and time for which to calculate the
        angle.
    :type dateandtime: :class:`~datetime.datetime`
    :param latitude: Latitude - Northern latitudes should be positive
    :type latitude: float
    :param longitude: Longitude - Eastern longitudes should be positive
    :type longitude: float

    :return: The azimuth angle in degrees clockwise from North.
    :rtype: float

    If `dateandtime` is a naive Python datetime then it is assumed to be
    in the UTC timezone.
    """
    # Clamp latitude just short of the poles to avoid a singular azimuth.
    if latitude > 89.8:
        latitude = 89.8
    if latitude < -89.8:
        latitude = -89.8
    if dateandtime.tzinfo is None:
        # Naive datetime: treat as UTC (zone offset 0).
        zone = 0
        utc_datetime = dateandtime
    else:
        # zone is hours *west* of UTC (negative of the tz offset).
        zone = -dateandtime.utcoffset().total_seconds() / 3600.0
        utc_datetime = dateandtime.astimezone(pytz.utc)
    # Fraction of the day elapsed, in hours.
    timenow = (utc_datetime.hour +
               (utc_datetime.minute / 60.0) +
               (utc_datetime.second / 3600.0))
    JD = self._julianday(dateandtime)
    t = self._jday_to_jcentury(JD + timenow / 24.0)
    theta = self._sun_declination(t)
    eqtime = self._eq_of_time(t)
    solarDec = theta  # in degrees
    # Correct clock time to true solar time (minutes).
    solarTimeFix = eqtime - (4.0 * -longitude) + (60 * zone)
    trueSolarTime = (dateandtime.hour * 60.0 +
                     dateandtime.minute +
                     dateandtime.second / 60.0 +
                     solarTimeFix)  # in minutes
    while trueSolarTime > 1440:
        trueSolarTime = trueSolarTime - 1440
    # Hour angle: degrees of rotation from solar noon (4 min per degree).
    hourangle = trueSolarTime / 4.0 - 180.0
    # Thanks to Louis Schwarzmayr for the next line:
    if hourangle < -180:
        hourangle = hourangle + 360.0
    harad = radians(hourangle)
    # Cosine of the solar zenith angle.
    csz = sin(radians(latitude)) * sin(radians(solarDec)) + cos(radians(latitude)) * cos(radians(solarDec)) * cos(harad)
    if csz > 1.0:
        csz = 1.0
    elif csz < -1.0:
        csz = -1.0
    zenith = degrees(acos(csz))
    azDenom = cos(radians(latitude)) * sin(radians(zenith))
    if abs(azDenom) > 0.001:
        azRad = ((sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))) / azDenom
        # Clamp rounding error before acos.
        if abs(azRad) > 1.0:
            if azRad < 0:
                azRad = -1.0
            else:
                azRad = 1.0
        azimuth = 180.0 - degrees(acos(azRad))
        if hourangle > 0.0:
            azimuth = -azimuth
    else:
        # Near-degenerate case (sun close to zenith/nadir): point the
        # azimuth at the equator.
        if latitude > 0.0:
            azimuth = 180.0
        else:
            azimuth = 0.0
    # Normalize to [0, 360) clockwise from North.
    if azimuth < 0.0:
        azimuth = azimuth + 360.0
    return azimuth
def ParseBookmarkRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a bookmark row.

    Args:
      parser_mediator (ParserMediator): mediates interactions between
          parsers and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    rev_host = self._GetRowValue(query_hash, row, 'rev_host')
    bookmark_type = self._GetRowValue(query_hash, row, 'type')
    event_data = FirefoxPlacesBookmarkEventData()
    event_data.host = rev_host or 'N/A'
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.places_title = self._GetRowValue(
        query_hash, row, 'places_title')
    event_data.query = query
    event_data.title = self._GetRowValue(query_hash, row, 'bookmark_title')
    # Map the numeric type to its name; unknown types become 'N/A'.
    event_data.type = self._BOOKMARK_TYPES.get(bookmark_type, 'N/A')
    event_data.url = self._GetRowValue(query_hash, row, 'url')
    event_data.visit_count = self._GetRowValue(
        query_hash, row, 'visit_count')
    # dateAdded and lastModified each produce their own event (when set),
    # both sharing the same event_data; values are interpreted as POSIX
    # timestamps in microseconds.
    timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
            timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_ADDED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    timestamp = self._GetRowValue(query_hash, row, 'lastModified')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
            timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
def pack_args_by_32(holder,
                    maxlen,
                    arg,
                    typ,
                    context,
                    placeholder,
                    dynamic_offset_counter=None,
                    datamem_start=None,
                    zero_pad_i=None,
                    pos=None):
    """Copy necessary variables to pre-allocated memory section.

    :param holder: Complete holder for all args
    :param maxlen: Total length in bytes of the full arg section
        (static + dynamic).
    :param arg: Current arg to pack
    :param typ: Type of the current arg
    :param context: Context of arg
    :param placeholder: Static placeholder for static argument part.
    :param dynamic_offset_counter: position counter stored in static args.
    :param datamem_start: position where the whole datamem section starts.
    :param zero_pad_i: loop counter variable used when zero-padding.
    :param pos: source position, for error reporting.
    :returns: tuple of (holder, maxlen) after appending the LLL needed to
        pack this arg (recurses for list elements).
    """
    if isinstance(typ, BaseType):
        # Scalar: evaluate (or unwrap) the value and mstore it into the
        # static slot.
        if isinstance(arg, LLLnode):
            value = unwrap_location(arg)
        else:
            value = parse_expr(arg, context)
        value = base_type_conversion(value, value.typ, typ, pos)
        holder.append(LLLnode.from_list(
            ['mstore', placeholder, value], typ=typ, location='memory'))
    elif isinstance(typ, ByteArrayLike):
        if isinstance(arg, LLLnode):      # Is prealloacted variable.
            source_lll = arg
        else:
            source_lll = parse_expr(arg, context)
        # Set static offset, in arg slot.
        holder.append(LLLnode.from_list(
            ['mstore', placeholder, ['mload', dynamic_offset_counter]]))
        # Get the biginning to write the ByteArray to.
        dest_placeholder = LLLnode.from_list(
            ['add', datamem_start, ['mload', dynamic_offset_counter]],
            typ=typ, location='memory',
            annotation="pack_args_by_32:dest_placeholder")
        copier = make_byte_array_copier(dest_placeholder, source_lll,
                                        pos=pos)
        holder.append(copier)
        # Add zero padding up to the ceil32 boundary of the data length.
        new_maxlen = ceil32(source_lll.typ.maxlen)
        holder.append([
            'with', '_ceil32_end', ['ceil32', ['mload', dest_placeholder]], [
                'seq', ['with', '_bytearray_loc', dest_placeholder, [
                    'seq', ['repeat', zero_pad_i, ['mload', '_bytearray_loc'], new_maxlen, [
                        'seq',
                        # stay within allocated bounds
                        ['if', ['ge', ['mload', zero_pad_i], '_ceil32_end'], 'break'],
                        ['mstore8', ['add', ['add', '_bytearray_loc', 32], ['mload', zero_pad_i]], 0, ],
                    ]],
                ]],
            ]
        ])
        # Increment offset counter.
        increment_counter = LLLnode.from_list([
            'mstore', dynamic_offset_counter, [
                'add', [
                    'add', ['mload', dynamic_offset_counter],
                    ['ceil32', ['mload', dest_placeholder]],
                ],
                32,
            ],
        ], annotation='Increment dynamic offset counter')
        holder.append(increment_counter)
    elif isinstance(typ, ListType):
        # Lists occupy count static slots; recurse per element.
        maxlen += (typ.count - 1) * 32
        typ = typ.subtype
        def check_list_type_match(provided):  # Check list types match.
            if provided != typ:
                raise TypeMismatchException(
                    "Log list type '%s' does not match provided, expected '%s'" % (provided, typ)
                )
        # List from storage
        if isinstance(arg, ast.Attribute) and arg.value.id == 'self':
            stor_list = context.globals[arg.attr]
            check_list_type_match(stor_list.typ.subtype)
            size = stor_list.typ.count
            mem_offset = 0
            for i in range(0, size):
                storage_offset = i
                arg2 = LLLnode.from_list(
                    ['sload', ['add', ['sha3_32', Expr(arg, context).lll_node], storage_offset]],
                    typ=typ,
                )
                holder, maxlen = pack_args_by_32(
                    holder, maxlen, arg2, typ, context,
                    placeholder + mem_offset, pos=pos,
                )
                mem_offset += get_size_of_type(typ) * 32
        # List from variable.
        elif isinstance(arg, ast.Name):
            size = context.vars[arg.id].size
            pos = context.vars[arg.id].pos
            check_list_type_match(context.vars[arg.id].typ.subtype)
            mem_offset = 0
            for _ in range(0, size):
                arg2 = LLLnode.from_list(pos + mem_offset, typ=typ,
                                         location='memory')
                holder, maxlen = pack_args_by_32(
                    holder, maxlen, arg2, typ, context,
                    placeholder + mem_offset, pos=pos,
                )
                mem_offset += get_size_of_type(typ) * 32
        # List from list literal.
        else:
            mem_offset = 0
            for arg2 in arg.elts:
                holder, maxlen = pack_args_by_32(
                    holder, maxlen, arg2, typ, context,
                    placeholder + mem_offset, pos=pos,
                )
                mem_offset += get_size_of_type(typ) * 32
    return holder, maxlen
def escape_attr(s):
    '''Escape the given string such that it can be placed in an XML
    attribute, like: <foo bar='$value'>

    Args:
        s (str): the string to escape.

    Returns:
        str: the escaped string.
    '''
    quoted = xml.sax.saxutils.quoteattr(s)
    # Force pure ASCII, turning anything else into XML character refs.
    ascii_only = quoted.encode('ascii', 'xmlcharrefreplace').decode('ascii')
    # Strip characters disallowed in XML entirely.
    return RESTRICTED_CHARS.sub('', ascii_only)
def create(self, typ, data, return_response=False):
    """Create new type

    Valid arguments:
        skip: number of records to skip
        limit: number of records to limit request to
    """
    res = self._request(typ, method='POST', data=data)
    if res.status_code != 201:
        # Non-201: try to surface the server's error payload; if the
        # body isn't JSON, raise a generic error instead — but let an
        # InvalidRequestException raised by _throw propagate as-is.
        try:
            data = res.json()
            self._throw(res, data)
        except ValueError as e:
            if not isinstance(e, InvalidRequestException):
                self._throw(res, {})
            else:
                raise
    loc = res.headers.get("location", None)
    if loc and loc.startswith('/'):
        # Relative Location header: resolve against our base URL and
        # fetch the created resource.
        return self._load(self._request(None, url=self.url_update(path=loc)))
    if return_response:
        return res.json()
    # Absolute Location (or None): fetch it directly.
    return self._load(self._request(None, url=loc))
def add_output_arg(self, out):
    """Add *out* as a command-line argument and register it as an output."""
    dax_form = out._dax_repr()
    self.add_arg(dax_form)
    self._add_output(out)
def firstPass(ASTs, verbose):
    '''Return a dictionary of function definition nodes, a dictionary of
    imported object names, a dictionary of imported module names, and a
    dictionary of class definition nodes.  All four dictionaries use
    source file paths as keys.
    '''
    fdefs = dict()
    cdefs = dict()
    imp_obj_strs = dict()
    imp_mods = dict()
    for (root, path) in ASTs:
        # The module body itself is recorded as the first "function".
        fdefs[path] = []
        fdefs[path].append(formatBodyNode(root, path))
        imp_obj_strs[path] = []
        imp_mods[path] = []
        cdefs[path] = []
        for (node, stack) in traversal(root):
            if isinstance(node, ast.FunctionDef):
                fdefs[path].append(formatFunctionNode(node, path, stack))
            elif isinstance(node, ast.ImportFrom):
                # `from module import name, ...` — record one
                # (module, name) pair per imported object.
                module = ia.getImportFromModule(node, path, verbose)
                if module:
                    fn_names = ia.getImportFromObjects(node)
                    for fn_name in fn_names:
                        imp_obj_strs[path].append((module, fn_name))
                else:
                    if verbose:
                        print("No module found " + ast.dump(node))
            elif isinstance(node, ast.Import):
                module = ia.getImportModule(node, path, verbose)
                imp_mods[path].append(module)
            elif isinstance(node, ast.ClassDef):
                # Tag the node with its source path before keeping it.
                node.path = path
                cdefs[path].append(node)
    return fdefs, imp_obj_strs, imp_mods, cdefs
def _build_prior(self, unconstrained_tensor, constrained_tensor):
    """Build a tensorflow representation of the prior density.

    The log Jacobian of the transform is included, so the result is the
    prior log-density in the unconstrained space.
    """
    if not misc.is_tensor(unconstrained_tensor):
        raise GPflowError("Unconstrained input must be a tensor.")
    if not misc.is_tensor(constrained_tensor):
        raise GPflowError("Constrained input must be a tensor.")
    name = 'prior'
    prior = self.prior
    if prior is None:
        # No prior configured: contribute a constant zero log-density.
        return tf.constant(0.0, settings.float_type, name=name)
    jacobian_term = self.transform.log_jacobian_tensor(unconstrained_tensor)
    density_term = prior.logp(constrained_tensor)
    return tf.squeeze(tf.add(density_term, jacobian_term, name=name))
def update_file(self, path):
    '''Updates the file watcher and calls the appropriate method for
    results.

    @param path: the zookeeper node path to read (a watch is re-armed
        via ``self.watch_file``)
    @return: False if we need to keep trying the connection
    '''
    try:
        # grab the file
        result, stat = self.zoo_client.get(path, watch=self.watch_file)
    except ZookeeperError:
        # Could not read the node: mark config invalid and signal the
        # caller to retry the connection.
        self.set_valid(False)
        self.call_error(self.INVALID_GET)
        return False
    if self.pointer:
        if result is not None and len(result) > 0:
            self.pointed_at_expired = False
            # file is a pointer, go update and watch other file
            self.point_path = result
            if self.compare_pointer(result):
                self.update_pointed()
        else:
            # Pointer node is empty: the pointed-at config is gone.
            self.pointed_at_expired = True
            self.old_pointed = ''
            self.old_data = ''
            self.set_valid(False)
            self.call_error(self.INVALID_PATH)
    else:
        # file is not a pointer, return contents
        if self.compare_data(result):
            self.call_config(result)
        self.set_valid(True)
    return True
def is_valid(self, instance):
    '''Perform validation for *instance* and store serialized data,
    indexes and errors into the instance's local ``dbdata`` cache.

    Return ``True`` if the instance is ready to be saved to database.
    '''
    dbdata = instance.dbdata
    data = dbdata['cleaned_data'] = {}
    errors = dbdata['errors'] = {}
    # Loop over scalar fields first
    for field, value in instance.fieldvalue_pairs():
        name = field.attname
        try:
            svalue = field.set_get_value(instance, value)
        except Exception as e:
            errors[name] = str(e)
        else:
            # BUG FIX: the original tested ``svalue is ''`` -- an identity
            # comparison against a string literal, which is CPython
            # implementation-dependent; use equality instead.
            if (svalue is None or svalue == '') and field.required:
                errors[name] = ("Field '{0}' is required for '{1}'."
                                .format(name, self))
            else:
                if isinstance(svalue, dict):
                    # composite value: merge its items into the cleaned data
                    data.update(svalue)
                elif svalue is not None:
                    data[name] = svalue
    return len(errors) == 0
def modpath_last(module, entry_point):
    """Provide the raw ``__path__`` entry that was registered last.

    Incompatible with PEP 302-based import hooks and incompatible with
    zip_safe packages. Deprecated; will be removed by calmjs-4.0.
    """
    paths = modpath_all(module, entry_point)
    if len(paths) > 1:
        logger.info(
            "module '%s' has multiple paths, default selecting '%s' as base.",
            module.__name__,
            paths[-1],
        )
    # NOTE: return type is a (possibly empty) list -- a slice keeps the
    # single-element-list contract even when there are no paths at all.
    return paths[-1:]
def read(self, length=None):
    """Fill bytes and read some number of bytes (up to length if specified).

    Fewer than ``length`` bytes may be returned if the end of input is
    reached; at a buffer boundary the fill/read cycle repeats until the
    requested length has been read or input is exhausted.
    """
    chunks = []
    while length is None or length > 0:
        self._fillbuff()
        if self.empty():
            # input exhausted -- return whatever was accumulated
            break
        piece = self.buff.read(length)
        chunks.append(piece)
        if length:
            length -= len(piece)
    return b''.join(chunks)
def from_dict(data, ctx):
    """Instantiate a new AccountChangesState from a dict (generally from
    loading a JSON response). The data used to instantiate the
    AccountChangesState is a shallow copy of the dict passed in, with any
    complex child types instantiated appropriately.
    """
    data = data.copy()

    # All of these keys hold decimal-number strings that must be converted;
    # the original spelled each conversion out long-hand, which invited
    # copy/paste drift when fields were added.
    decimal_fields = (
        'unrealizedPL',
        'NAV',
        'marginUsed',
        'marginAvailable',
        'positionValue',
        'marginCloseoutUnrealizedPL',
        'marginCloseoutNAV',
        'marginCloseoutMarginUsed',
        'marginCloseoutPercent',
        'marginCloseoutPositionValue',
        'withdrawalLimit',
        'marginCallMarginUsed',
        'marginCallPercent',
    )
    for field in decimal_fields:
        if data.get(field) is not None:
            data[field] = ctx.convert_decimal_number(data.get(field))

    # Complex child collections each get their own from_dict conversion.
    if data.get('orders') is not None:
        data['orders'] = [
            ctx.order.DynamicOrderState.from_dict(d, ctx)
            for d in data.get('orders')
        ]
    if data.get('trades') is not None:
        data['trades'] = [
            ctx.trade.CalculatedTradeState.from_dict(d, ctx)
            for d in data.get('trades')
        ]
    if data.get('positions') is not None:
        data['positions'] = [
            ctx.position.CalculatedPositionState.from_dict(d, ctx)
            for d in data.get('positions')
        ]

    return AccountChangesState(**data)
def main():
    """Entrypoint for compare_layers.

    Parses CLI options and runs the caffe-to-mxnet layer-by-layer
    comparison.
    """
    parser = argparse.ArgumentParser(
        description='Tool for testing caffe to mxnet conversion layer by layer')
    parser.add_argument('--image_url', type=str,
                        default='https://github.com/dmlc/web-data/raw/master/mxnet/doc/'
                                'tutorials/python/predict_image/cat.jpg',
                        help='input image to test inference, can be either file path or url')
    parser.add_argument('--caffe_prototxt_path', type=str,
                        default='./model.prototxt',
                        help='path to caffe prototxt')
    parser.add_argument('--caffe_model_path', type=str,
                        default='./model.caffemodel',
                        help='path to caffe weights')
    parser.add_argument('--caffe_mean', type=str,
                        default='./model_mean.binaryproto',
                        help='path to caffe mean file')
    # BUG FIX: these thresholds are fractional (defaults 1e-03 / 1e-01) but
    # were declared ``type=int``, so any value supplied on the command line
    # either failed to parse ("0.001") or was silently truncated to 0.
    parser.add_argument('--mean_diff_allowed', type=float, default=1e-03,
                        help='mean difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--max_diff_allowed', type=float, default=1e-01,
                        help='max difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--gpu', type=int, default=-1,
                        help='the gpu id used for predict')
    args = parser.parse_args()
    convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu,
                                       args.caffe_prototxt_path,
                                       args.caffe_model_path,
                                       args.caffe_mean,
                                       args.mean_diff_allowed,
                                       args.max_diff_allowed)
def is_superset_of(self, other, soft_combinator=False):
    """Return True iff this selector matches the same elements as `other`,
    and perhaps others. That is, ``.foo`` is a superset of ``.foo.bar``,
    because the latter is more specific.

    Set `soft_combinator` true to ignore the specific case of this selector
    having a descendent combinator and `other` having anything else. This is
    for superset checking for ``@extend``, where a space combinator really
    means "none".
    """
    if soft_combinator and self.combinator == ' ':
        combinators_compatible = True
    else:
        # Combinators must match, OR be compatible -- space is a superset
        # of >, ~ is a superset of +
        allowed_pairs = (
            (self.combinator, self.combinator),
            (' ', '>'),
            ('~', '+'),
        )
        combinators_compatible = (
            (self.combinator, other.combinator) in allowed_pairs)

    if not combinators_compatible:
        return False
    return set(self.tokens).issubset(set(other.tokens))
def delete_max ( self ) : """Delete the right - most value from a tree ."""
# Attempt to rotate left - leaning reds to the right . if self . left . red : self = self . rotate_right ( ) # Base case : If there are no selfs greater than this self , then this is # the self to delete . if self . right is NULL : return NULL , self . value # Acquire more reds if necessary to continue the traversal . NULL is # red so this check doesn ' t need to check for NULL . if not self . right . red and not self . right . left . red : self = self . move_red_right ( ) # Recursive case : Delete the maximum self of all selfs greater than this # self . right , value = self . right . delete_max ( ) self = self . _replace ( right = right ) return self . balance ( ) , value
def RedirectDemo(handler, t):
    """Demonstration of redirecting to another number."""
    # t.say("One moment please.")
    t.redirect(SIP_PHONE)
    # avoid shadowing the json module name used elsewhere in this file
    rendered = t.RenderJson()
    logging.info("RedirectDemo json: %s" % rendered)
    handler.response.out.write(rendered)
def predict_log_partial_hazard(self, X):
    r"""Return the log partial hazard, :math:`(x - \bar{x})' \beta`.

    This is equivalent to R's ``linear.predictors``. It is "partial"
    because the baseline hazard is not included.

    Parameters
    ----------
    X : numpy array or DataFrame
        a (n, d) covariate numpy array or DataFrame. If a DataFrame,
        columns can be in any order. If a numpy array, columns must be in
        the same order as the training data.

    Returns
    -------
    DataFrame
    """
    if isinstance(X, pd.DataFrame):
        # align columns with the fitted coefficient ordering
        X = X[self.hazards_.index]
        check_for_numeric_dtypes_or_raise(X)
    X = X.astype(float)
    row_index = _get_index(X)
    centered = normalize(X, self._norm_mean.values, 1)
    return pd.DataFrame(np.dot(centered, self.hazards_), index=row_index)
def chunk_by(fn: Callable[[T], object]):
    """Group consecutive elements that map to the same key under ``fn``.

    >>> from Redy.Collections import Traversal, Flow
    >>> lst: Iterable[int] = [0, 1, 2, 3, 4, 5, 6]
    >>> x = Flow(lst)[Traversal.chunk_by(lambda x: x // 3)]
    >>> assert list(x.unbox) == [[0, 1, 2], [3, 4, 5], [6]]
    >>> x = Flow([])[Traversal.chunk_by(lambda x: x)]
    >>> assert list(x.unbox) == []
    """
    def inner(seq: ActualIterable[T]) -> ActualIterable[ActualIterable[T]]:
        it = iter(seq)
        try:
            first = next(it)
        except StopIteration:
            # empty input: yield nothing
            return iter(it)
        key = fn(first)
        bucket = [first]
        for item in it:
            item_key = fn(item)
            if item_key == key:
                bucket.append(item)
            else:
                # key changed: emit the finished run, start a new one
                yield bucket
                bucket = [item]
            key = item_key
        if bucket:
            yield bucket
    return inner
def on(self, event, handler=None, namespace=None):
    """Register an event handler.

    :param event: The event name. It can be any string. The event names
                  ``'connect'``, ``'message'`` and ``'disconnect'`` are
                  reserved and should not be used.
    :param handler: The function that should be invoked to handle the
                    event. When this parameter is not given, the method
                    acts as a decorator for the handler function.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the handler is associated with
                      the default namespace.

    The handler receives the client's session ID (``sid``) as first
    argument; ``'connect'`` handlers additionally receive the WSGI
    environment and may return ``False`` to reject the connection, while
    message-style handlers receive the message payload.
    """
    ns = namespace or '/'

    def register(fn):
        # lazily create the per-namespace handler table
        self.handlers.setdefault(ns, {})[event] = fn
        return fn

    if handler is None:
        # decorator usage: @socket_io.on('event', namespace='/chat')
        return register
    register(handler)
def _validate_inputs ( self , inputdict ) : """Validate input links ."""
# Check inputdict try : parameters = inputdict . pop ( self . get_linkname ( 'parameters' ) ) except KeyError : raise InputValidationError ( "No parameters specified for this " "calculation" ) if not isinstance ( parameters , RipsDistanceMatrixParameters ) : raise InputValidationError ( "parameters not of type " "RipsDistanceMatrixParameters" ) # Check code try : code = inputdict . pop ( self . get_linkname ( 'code' ) ) except KeyError : raise InputValidationError ( "No code specified for this " "calculation" ) # Check input files try : distance_matrix = inputdict . pop ( self . get_linkname ( 'distance_matrix' ) ) if not isinstance ( distance_matrix , SinglefileData ) : raise InputValidationError ( "distance_matrix not of type SinglefileData" ) symlink = None except KeyError : distance_matrix = None try : remote_folder = inputdict . pop ( self . get_linkname ( 'remote_folder' ) ) if not isinstance ( remote_folder , RemoteData ) : raise InputValidationError ( "remote_folder is not of type RemoteData" ) comp_uuid = remote_folder . get_computer ( ) . uuid remote_path = remote_folder . get_remote_path ( ) symlink = ( comp_uuid , remote_path , self . _REMOTE_FOLDER_LINK ) except KeyError : raise InputValidationError ( "Need to provide either distance_matrix or remote_folder" ) # Check that nothing is left unparsed if inputdict : raise ValidationError ( "Unrecognized inputs: {}" . format ( inputdict ) ) return parameters , code , distance_matrix , symlink
def on_close(self, stats, previous_stats):
    """Print the extended JSON report to this reporter's output stream.

    :param dict stats: Metrics for the current pylint run
    :param dict previous_stats: Metrics for the previous pylint run
    """
    payload = {
        'messages': self._messages,
        'stats': stats,
        'previous': previous_stats,
    }
    serialized = json.dumps(payload, cls=JSONSetEncoder, indent=4)
    print(serialized, file=self.out)
def present(name, clients=None, hosts=None, options=None, exports='/etc/exports'):
    '''
    Ensure that the named export is present with the given options

    name
        The export path to configure

    clients
        A list of hosts and the options applied to them.
        This option may not be used in combination with
        the 'hosts' or 'options' shortcuts.

    .. code-block:: yaml

        - clients:
          # First export
          - hosts: '10.0.2.0/24'
            options:
              - 'rw'
          # Second export
          - hosts: '*.example.com'
            options:
              - 'ro'
              - 'subtree_check'

    hosts
        A string matching a number of hosts, for example:

    .. code-block:: yaml

        hosts: '10.0.2.123'
        hosts: '10.0.2.0/24'
        hosts: 'minion1.example.com'
        hosts: '*.example.com'
        hosts: '*'

    options
        A list of NFS options, for example:

    .. code-block:: yaml

        options:
          - 'rw'
          - 'subtree_check'
    '''
    path = name
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if not clients:
        if not hosts:
            ret['result'] = False
            ret['comment'] = 'Either \'clients\' or \'hosts\' must be defined'
            return ret
        # options being None is handled by add_export()
        clients = [{'hosts': hosts, 'options': options}]

    old = __salt__['nfs3.list_exports'](exports)
    if path in old:
        if old[path] == clients:
            # already in the desired state: nothing to do
            ret['result'] = True
            ret['comment'] = 'Export {0} already configured'.format(path)
            return ret

        ret['changes']['new'] = clients
        ret['changes']['old'] = old[path]
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Export {0} would be changed'.format(path)
            return ret
        # drop the stale entry; the desired one is (re-)added below
        __salt__['nfs3.del_export'](exports, path)
    else:
        ret['changes']['old'] = None
        ret['changes']['new'] = clients
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Export {0} would be added'.format(path)
            return ret

    add_export = __salt__['nfs3.add_export']
    for exp in clients:
        add_export(exports, path, exp['hosts'], exp['options'])

    ret['changes']['new'] = clients

    # result/comment reflect the outcome of reloading the export table
    try_reload = __salt__['nfs3.reload_exports']()
    ret['comment'] = try_reload['stderr']
    ret['result'] = try_reload['result']

    return ret
def destroy(name, call=None):
    '''
    Destroy a node.

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # NOTE: when no VM with this name is found, the function falls through
    # and implicitly returns None.
    vmobj = _get_vm_by_name(name)
    if vmobj is not None:
        # stop the vm
        if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped':
            stop(name, vmobj['vmid'], 'action')

        # wait until stopped
        if not wait_for_state(vmobj['vmid'], 'stopped'):
            return {'Error': 'Unable to stop {0}, command timed out'.format(name)}

        # required to wait a bit here, otherwise the VM is sometimes
        # still locked and destroy fails.
        time.sleep(3)

        query('delete', 'nodes/{0}/{1}'.format(vmobj['node'], vmobj['id']))
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )

        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name, __active_provider_name__.split(':')[0], __opts__)

        return {'Destroyed': '{0} was destroyed.'.format(name)}
def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs):
    """Makes forecast with the estimated model and plots it.

    Parameters
    ----------
    h : int (default : 5)
        How many steps ahead would you like to forecast?
    past_values : int (default : 20)
        How many past observations to show on the forecast graph?
    intervals : bool
        Would you like to show prediction intervals for the forecast?

    Returns
    -------
    Plot of the forecast (shown via matplotlib; nothing is returned).
    """
    import matplotlib.pyplot as plt
    import seaborn as sns

    figsize = kwargs.get('figsize', (10, 7))
    nsims = kwargs.get('nsims', 200)

    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        # Retrieve data, dates and (transformed) latent variables
        if self.latent_variables.estimation_method in ['M-H']:
            # Posterior sampling: average forecasts over nsims draws
            lower_final = 0
            upper_final = 0
            plot_values_final = 0
            date_index = self.shift_dates(h)
            plot_index = date_index[-h - past_values:]

            for i in range(nsims):
                t_params = self.draw_latent_variables(nsims=1).T[0]
                a, P = self._forecast_model(t_params, h)
                plot_values = a[0][-h - past_values:]
                forecasted_values = a[0][-h:]
                # 95% interval: forecast +/- 1.96 * sqrt(state var + obs var)
                lower = forecasted_values - 1.96 * np.power(
                    P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]), 0.5)
                upper = forecasted_values + 1.96 * np.power(
                    P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]), 0.5)
                # prepend the last observed value so the band joins the data
                lower_final += np.append(plot_values[-h - 1], lower)
                upper_final += np.append(plot_values[-h - 1], upper)
                plot_values_final += plot_values

            plot_values_final = plot_values_final / nsims
            lower_final = lower_final / nsims
            upper_final = upper_final / nsims

            plt.figure(figsize=figsize)
            if intervals == True:
                plt.fill_between(date_index[-h - 1:], lower_final, upper_final, alpha=0.2)
            plt.plot(plot_index, plot_values_final)
            plt.title("Forecast for " + self.data_name)
            plt.xlabel("Time")
            plt.ylabel(self.data_name)
            plt.show()
        else:
            # Point-estimate path: one forecast from the fitted values
            a, P = self._forecast_model(self.latent_variables.get_z_values(), h)
            date_index = self.shift_dates(h)
            plot_values = a[0][-h - past_values:]
            forecasted_values = a[0][-h:]
            lower = forecasted_values - 1.96 * np.power(
                P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(
                    self.latent_variables.get_z_values()[0]), 0.5)
            upper = forecasted_values + 1.96 * np.power(
                P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(
                    self.latent_variables.get_z_values()[0]), 0.5)
            lower = np.append(plot_values[-h - 1], lower)
            upper = np.append(plot_values[-h - 1], upper)
            plot_index = date_index[-h - past_values:]

            plt.figure(figsize=figsize)
            if intervals == True:
                plt.fill_between(date_index[-h - 1:], lower, upper, alpha=0.2)
            plt.plot(plot_index, plot_values)
            plt.title("Forecast for " + self.data_name)
            plt.xlabel("Time")
            plt.ylabel(self.data_name)
            plt.show()
def read(self, len=1024, buffer=None):
    """Read data from connection

    Read up to len bytes and return them.

    Arguments:
    len -- maximum number of bytes to read

    Return value:
    string containing read bytes
    """
    try:
        return self._wrap_socket_library_call(
            lambda: SSL_read(self._ssl.value, len, buffer), ERR_READ_TIMEOUT)
    except openssl_error() as err:
        # A syscall-level error with result -1 is how OpenSSL surfaces an
        # ICMP port-unreachable from the peer; translate it accordingly.
        if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:
            raise_ssl_error(ERR_PORT_UNREACHABLE, err)
        raise
def get_frame_list():
    """Create the list of stack frames, oldest first.

    Returns a ``(frame_list, frame_info_list, frame_info_str_list)`` tuple.
    """
    # TODO: use this function in IPS below (less code duplication)
    frames = []
    infos = []
    current = inspect.currentframe()
    while current is not None:
        frames.append(current)
        infos.append(inspect.getframeinfo(current))
        current = current.f_back
    # walk produced innermost-first; callers expect outermost-first
    frames.reverse()
    infos.reverse()
    info_strings = [format_frameinfo(fi) for fi in infos]
    return frames, infos, info_strings
def getQueryEngineDescriptionResponse(self, queryEngine, vendorSpecific=None, **kwargs):
    """Issue a ``getQueryEngineDescription`` request and return the raw
    response.

    CNRead.getQueryEngineDescription(session, queryEngine) and
    MNQuery.getQueryEngineDescription(session, queryEngine) both map to a
    GET on the ``/query/{queryEngine}`` endpoint.

    Args:
      queryEngine: name of the query engine to describe.
      vendorSpecific: optional vendor-specific request headers.
      **kwargs: passed through as query parameters.
    """
    endpoint = ['query', queryEngine]
    return self.GET(endpoint, query=kwargs, headers=vendorSpecific)
def Validate(self, win):
    """Return True if the associated window's value is all digits,
    False otherwise. An empty value validates as True.
    """
    text = self.GetWindow().GetValue()
    return all(ch in string.digits for ch in text)
def send_notification(self, method, params):
    """Build a notification message for *method* with *params* and send it
    (no response is expected)."""
    self._send_message(self._encoder.create_notification(method, params))
def cut_edges(image, numPix):
    """Cut out the edges of a 2d image and return the re-sized image.

    The center is well defined for odd pixel sizes.

    :param image: 2d numpy array
    :param numPix: square size of cut out image
    :return: cutout image with size numPix (the input image, unchanged,
        when it is smaller than numPix)
    """
    nx, ny = image.shape
    if nx < numPix or ny < numPix:
        print('WARNING: image can not be resized, in routine cut_edges.')
        return image
    if nx % 2 == 0 or ny % 2 == 0 or numPix % 2 == 0:
        print("WARNING: image or cutout side are even number. This routine only works for odd numbers %s %s %s" % (nx, ny, numPix))
    # center pixel of each axis; for even sizes the center is shifted up one
    cx = nx // 2 if nx % 2 == 0 else (nx - 1) // 2
    cy = ny // 2 if ny % 2 == 0 else (ny - 1) // 2
    half = (numPix - 1) // 2
    window = image[cx - half: cx + half + 1, cy - half: cy + half + 1]
    return copy.deepcopy(window)
def save_var ( self , key , value , ** kwargs ) : 'Save one variable to the database .'
# Check whether Highwall ' s variables table exists self . __check_or_create_vars_table ( ) column_type = get_column_type ( value ) tmp = quote ( self . __vars_table_tmp ) self . execute ( u'DROP TABLE IF EXISTS %s' % tmp , commit = False ) # This is vulnerable to injection self . execute ( u'CREATE TABLE %s (`value` %s)' % ( tmp , column_type ) , commit = False ) # This is ugly self . execute ( u'INSERT INTO %s (`value`) VALUES (?)' % tmp , [ value ] , commit = False ) table = ( quote ( self . __vars_table ) , tmp ) params = [ key , column_type ] self . execute ( u''' INSERT OR REPLACE INTO %s (`key`, `type`, `value`) SELECT ? AS key, ? AS type, value FROM %s ''' % table , params ) self . execute ( u'DROP TABLE %s' % tmp , commit = False ) self . __commit_if_necessary ( kwargs )
def _iter_channels(framefile):
    """Yield the name and type of each channel in a GWF file TOC.

    **Requires:** |LDAStools.frameCPP|_

    Parameters
    ----------
    framefile : `str`, `LDAStools.frameCPP.IFrameFStream`
        path of GWF file, or open file stream, to read
    """
    from LDAStools import frameCPP
    if not isinstance(framefile, frameCPP.IFrameFStream):
        # accept a path and open it lazily
        framefile = open_gwf(framefile, 'r')
    toc = framefile.GetTOC()
    for kind in ('Sim', 'Proc', 'ADC'):
        lowered = kind.lower()
        getter = getattr(toc, 'Get{0}'.format(kind))
        for channel in getter():
            yield channel, lowered
def plot_places(self):
    '''Plot places (in the parameter space) of all the generated artifacts
    and the artifacts accepted to the domain.'''
    from matplotlib import pyplot as plt
    fig, ax = plt.subplots()
    title = "Agent places, artifacts and env artifacts ({} env artifacts)".format(len(self.artifacts))
    # 1) every position each agent has visited (faint dots)
    x = []
    y = []
    for a in self.get_agents():
        args = a.arg_history
        x = x + [e[0] for e in args]
        y = y + [e[1] for e in args]
    sc = ax.scatter(x, y, marker='.', color=(0, 0, 1, 0.1), label='agent place')
    # 2) artifacts that passed each agent's own self-criticism (faint x)
    x = []
    y = []
    for a in self.get_agents():
        arts = a.A
        for ar in arts:
            if ar.self_criticism == 'pass':
                args = ar.framings[ar.creator]['args']
                x.append(args[0])
                y.append(args[1])
    sc = ax.scatter(x, y, marker="x", color=(0, 0, 1, 0.3), label='agent artifact')
    # 3) artifacts accepted into the environment's domain (red x)
    x = []
    y = []
    for a in self.artifacts:
        args = a.framings[a.creator]['args']
        x.append(args[0])
        y.append(args[1])
    sc = ax.scatter(x, y, marker="x", color='red', label='env artifact', s=40)
    ax.set_xlim([-200, 200])
    ax.set_ylim([-200, 200])
    ax.set_xlabel('r')
    ax.set_ylabel('r_')
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=10)
    ax.set_title(title)
    plt.tight_layout(rect=(0, 0, 0.8, 1))
    # save to the logger's folder when available, otherwise show on screen
    if self.logger is not None and self.logger.folder is not None:
        imname = os.path.join(self.logger.folder, 'arts_a{}_i{}_v{}.png'.format(len(self.get_agents()), self.age, self.voting_method))
        plt.savefig(imname)
        plt.close()
    else:
        plt.show()
def moving_average_smooth(t, y, dy, span=None, cv=True,
                          t_out=None, span_out=None, period=None):
    """Perform a moving-average smooth of the data

    Parameters
    ----------
    t, y, dy : array_like
        time, value, and error in value of the input data
    span : array_like
        the integer spans of the data
    cv : boolean (default=True)
        if True, treat the problem as a cross-validation, i.e. don't use
        each point in the evaluation of its own smoothing.
    t_out : array_like (optional)
        the output times for the moving averages
    span_out : array_like (optional)
        the spans associated with the output times t_out
    period : float
        if provided, then consider the inputs periodic with the given period

    Returns
    -------
    y_smooth : array_like
        smoothed y values at each time t (or t_out)
    """
    prep = _prep_smooth(t, y, dy, span, t_out, span_out, period)
    t, y, dy, span, t_out, span_out, indices = prep
    # inverse-variance weights
    w = 1. / (dy ** 2)
    # windowed sums of weights and weighted values; subtract_mid=cv excludes
    # each point from its own window (leave-one-out cross-validation)
    w, yw = windowed_sum([w, y * w], t=t, span=span, subtract_mid=cv,
                         indices=indices, period=period)
    if t_out is None or span_out is not None:
        # result already aligned with the requested output grid
        return yw / w
    else:
        # map each t_out onto its nearest-following input time (clipped)
        i = np.minimum(len(t) - 1, np.searchsorted(t, t_out))
        return yw[i] / w[i]
def base64_bytes(x):
    """Decode a base64-encoded value and return the raw bytes."""
    if not six.PY2:
        return base64.decodebytes(bytes_encode(x))
    # Python 2 path: decodestring accepts str directly
    return base64.decodestring(x)
def get_all_attributes(klass_or_instance):
    """Get all attribute members (attribute, property style method).

    Dunder names are excluded; routines (functions/methods) are excluded.
    Returns a list of ``(name, value)`` pairs.
    """
    members = inspect.getmembers(
        klass_or_instance, lambda x: not inspect.isroutine(x))
    return [
        (attr, value)
        for attr, value in members
        if not (attr.startswith("__") or attr.endswith("__"))
    ]
def score(self, eval_instances, verbosity=0):
    '''Return scores (negative log likelihoods) assigned to each testing
    instance in `eval_instances`.

    :param eval_instances: The data to use to evaluate the model.
        Instances should have at least the `input` and `output` fields
        populated; `output` defines which score is returned.
    :param verbosity: The level of diagnostic output, relative to the
        global --verbosity option.
    :type eval_instances: list(instance.Instance)
    :returns: list(float)
    '''
    # guard against mutual recursion with the default predict_and_score
    if getattr(self, '_using_default_combined', False):
        raise NotImplementedError
    self._using_default_separate = True
    return self.predict_and_score(eval_instances, verbosity=verbosity)[1]
def get_name(model_id):
    """Get the name for a model.

    :returns str: The model's name. If the id has no associated name, then
        "id = {ID} (no name)" is returned.
    """
    registered = _names.get(model_id)
    if registered is not None:
        return registered
    return 'id = %s (no name)' % str(model_id)
def getReadNoise(self, exten):
    """Method for returning the readnoise of a detector (in counts).

    Returns
    -------
    readnoise : float
        The readnoise of the detector in **units of counts/electrons**,
        divided by the extension's gain when ``proc_unit`` is 'native'.
    """
    rn = self._image[exten]._rdnoise
    if self.proc_unit == 'native':
        # BUG FIX: the original divided ``self._rdnoise`` -- an attribute
        # never read from this extension (and not set in this method) --
        # instead of the per-extension value just fetched into ``rn``.
        rn = rn / self.getGain(exten)
    return rn
def to_array(self):
    """Serializes this PhotoSize to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    serialized = super(PhotoSize, self).to_array()
    # mandatory fields; u() yields unicode on py2 and str on py3
    serialized['file_id'] = u(self.file_id)
    serialized['width'] = int(self.width)
    serialized['height'] = int(self.height)
    # optional field, only emitted when present
    if self.file_size is not None:
        serialized['file_size'] = int(self.file_size)
    return serialized
def update_selection_sm_prior(self):
    """State machine prior update of tree selection."""
    # re-entrancy guard: ignore selection events triggered by our own update
    if self._do_selection_update:
        return
    self._do_selection_update = True
    tree_selection, selected_model_list, sm_selection, sm_selected_model_list = self.get_selections()
    if tree_selection is not None:
        # walk the tree and reconcile each row against the state-machine
        # selection via the condition callback
        self.iter_tree_with_handed_function(self.update_selection_sm_prior_condition,
                                            selected_model_list,
                                            sm_selected_model_list)
    self.check_selection_consistency()
    self._do_selection_update = False
def add_latlonalt(self, lat, lon, altitude, terrain_alt=False):
    '''add a point via latitude/longitude/altitude'''
    # altitude is relative to home unless terrain-following is requested
    frame = (mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
             if terrain_alt
             else mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT)
    item = mavutil.mavlink.MAVLink_mission_item_message(
        self.target_system,
        self.target_component,
        0,
        frame,
        mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
        0, 0, 0, 0, 0, 0,
        lat, lon, altitude)
    self.add(item)
async def unmount(self, device):
    """Unmount a Device if it is mounted.

    :param device: device object, block device path or mount path
    :returns: whether the device ended up unmounted
    """
    device = self._find_device(device)
    # refuse devices we don't manage or that aren't filesystems
    if not (self.is_handleable(device) and device.is_filesystem):
        self._log.warn(_('not unmounting {0}: unhandled device', device))
        return False
    if not device.is_mounted:
        # nothing to do: report success
        self._log.info(_('not unmounting {0}: not mounted', device))
        return True
    self._log.debug(_('unmounting {0}', device))
    await device.unmount()
    self._log.info(_('unmounted {0}', device))
    return True
def set_exception(self, exc_info):
    """This method allows you to set an exception in the future without
    requiring that exception to be raised from the futures worker. This
    method can be called on an unbound future.

    :param exc_info: Either an exception info tuple or an exception value.
        In the latter case, the traceback will be automatically generated
        from the parent frame.
    :raise TypeError: If *exc_info* is neither a tuple nor an exception.
    :raise RuntimeError: If the future is already enqueued.
    """
    if not isinstance(exc_info, tuple):
        if not isinstance(exc_info, BaseException):
            raise TypeError('expected BaseException instance')
        try:
            # TODO: Fill the traceback so it appears as if the exception
            #       was actually raised by the caller? (Not sure if possible)
            raise exc_info
        except BaseException:
            # Narrowed from a bare `except:`; the exception raised above is
            # re-captured immediately to obtain its info tuple.
            exc_info = sys.exc_info()
    # Normalize to a plain (type, value, traceback) tuple.
    exc_info = (exc_info[0], exc_info[1], exc_info[2])
    with self._lock:
        if self._enqueued:
            raise RuntimeError('can not set exception of enqueued Future')
        self._exc_info = exc_info
        self._completed = True
        callbacks = self._prepare_done_callbacks()
    # Callbacks run after the lock is released (as in the original ordering).
    callbacks()
def make_output_layers ( self ) : """Extract the ordering of output layers ."""
self . output_layers = [ ] # import pytest ; pytest . set _ trace ( ) if hasattr ( self . model , 'output_layers' ) : # find corresponding output layers in CoreML model # assume output layers are not shared # Helper function to recursively extract output layers # even if the model has a layer which is a nested model def extract_output_layers ( keras_model ) : output_layers = [ ] for layer in keras_model . output_layers : if hasattr ( layer , 'output_layers' ) : output_layers . extend ( extract_output_layers ( layer ) ) else : output_layers . append ( layer ) return output_layers for kl in extract_output_layers ( self . model ) : coreml_layers = self . get_coreml_layers ( kl ) if len ( coreml_layers ) > 0 : for cl in coreml_layers : self . output_layers . append ( cl ) elif len ( self . model . outputs ) > 0 : for model_output in self . model . outputs : for l in self . layer_list : k_layer = self . keras_layer_map [ l ] in_nodes = k_layer . _inbound_nodes if hasattr ( k_layer , '_inbound_nodes' ) else k_layer . inbound_nodes for idx in range ( len ( in_nodes ) ) : out_tensor = k_layer . get_output_at ( idx ) if out_tensor == model_output or ( out_tensor . name in model_output . name ) : self . output_layers . append ( l ) if len ( self . output_layers ) == 0 : raise ValueError ( "No outputs can be identified" )
def reset_permission_factories(self):
    """Remove cached permission factories.

    Drops any ``<action>_permission_factory`` entries cached on the
    instance so they will be rebuilt on next access.
    """
    cached = self.__dict__
    for action in ('read', 'create', 'update', 'delete'):
        # pop() with a default removes the entry only if present.
        cached.pop('{0}_permission_factory'.format(action), None)
def obo(self):
    """str: the `Relationship` serialized in an ``[Typedef]`` stanza.

    Note:
        The following guide was used:
        ftp://ftp.geneontology.org/pub/go/www/GO.format.obo-1_4.shtml
    """
    stanza = ["[Typedef]",
              "id: {}".format(self.obo_name),
              "name: {}".format(self.obo_name)]
    if self.complementary is not None:
        stanza.append("inverse_of: {}".format(self.complementary))
    # Boolean flags are rendered lowercase (true/false) by lowercasing the
    # whole formatted line.
    for template, flag in (("is_symmetric: {}", self.symmetry),
                           ("is_transitive: {}", self.transitivity),
                           ("is_reflexive: {}", self.reflexivity)):
        if flag is not None:
            stanza.append(template.format(flag).lower())
    if self.comment:
        stanza.append("comment: {}".format(self.comment))
    return "\n".join(stanza)
def do_visualize(self, line):
    """Visualize an ontology - ie wrapper for export command"""
    # An ontology must be loaded before anything can be visualized.
    if not self.current:
        self._help_noontology()
        return
    line = line.split()
    try:
        # from ..viz.builder import action_visualize
        from ..ontodocs.builder import action_visualize
    except:
        self._print("This command requires the ontodocs package: `pip install ontodocs`")
        return
    import webbrowser
    # Build the visualization for the currently loaded ontology file and
    # open the resulting URL (if any) in the default browser.
    url = action_visualize(args=self.current['file'], fromshell=True)
    if url:
        webbrowser.open(url)
    return
def print_probabilities(state: State, ndigits: int = 4, file: TextIO = None) -> None:
    """Pretty print state probabilities.

    Args:
        state: State whose probabilities are printed, one ket per line.
        ndigits: Number of digits of accuracy
        file: Output stream (Defaults to stdout)
    """
    probs = bk.evaluate(state.probabilities())
    for index, raw_prob in np.ndenumerate(probs):
        value = round(raw_prob, ndigits)
        # Entries that round to zero are omitted to keep the output compact.
        if value == 0.0:
            continue
        ket = "".join(str(n) for n in index)
        print(ket, ":", value, file=file)
def make_event(event: Callable) -> Callable:
    """Create an event from a method signature.

    Wraps *event* in a read-only property which, on access, builds an
    ``Event`` named after the method with its first three characters
    stripped.
    """
    @property  # type: ignore
    @wraps(event)
    def actualevent(self):  # pylint: disable=missing-docstring
        # Drop the three-character prefix from the method name.
        event_name = event.__name__[3:]
        try:
            # the getter post processing function
            # is preserved with an underscore
            getter_name = event(self).__name__
        except AttributeError:
            getter_name = None
        return Event(event_name, self._uuid, getter_name)  # pylint: disable=protected-access

    return actualevent
def iter(self):
    """Extract DIAMOND records and yield C{ReadAlignments} instances.

    @return: A generator that yields C{ReadAlignments} instances.
    """
    # Note that self._reader is already initialized (in __init__) for the
    # first input file, so the first iteration reuses it instead of calling
    # open() a second time (which also keeps testing simpler).
    reads = iter(self.reads)
    for fileNumber, filename in enumerate(self.filenames):
        if fileNumber == 0:
            reader = self._reader
        else:
            reader = self._getReader(filename, self.scoreClass)
        for readAlignments in reader.readAlignments(reads):
            yield readAlignments
    # Any remaining query reads must have had no subject matches.
    for read in reads:
        yield ReadAlignments(read, [])
def _compile_signature(self, iexec, call_name):
    """Compiles the signature for the specified executable and returns as
    a dictionary.

    :param iexec: executable (Function/Subroutine/Interface or similar) to
        summarize; may be None.
    :param call_name: name under which the executable is invoked.
    :return: dict with 'params', 'index', 'call_name' and 'description'
        keys, or an empty list when *iexec* is None.
    """
    if iexec is not None:
        summary = iexec.summary
        if isinstance(iexec, Function):
            # Functions get their return type prefixed to the summary.
            summary = iexec.returns + "| " + iexec.summary
        elif isinstance(iexec, Subroutine) and len(iexec.modifiers) > 0:
            summary = ", ".join(iexec.modifiers) + " | " + iexec.summary
        elif isinstance(iexec, Interface):
            summary = iexec.describe()
        else:
            summary = iexec.summary
        # Add the name of the module who owns the method. Useful in case the
        # same executable is defined in multiple modules, but only one is
        # referenced in the current context.
        if iexec.parent is not None:
            summary += " | MODULE: {}".format(iexec.module.name)
        else:
            summary += " | BUILTIN"
        return dict(params=[p.name for p in iexec.ordered_parameters],
                    index=0,
                    call_name=call_name,
                    description=summary,
                    )
    else:
        # NOTE(review): this branch returns an empty *list* while the branch
        # above returns a *dict*. Callers relying on truthiness are fine,
        # but the return types are inconsistent -- consider `{}` instead.
        return []
def get_filters(self):
    """Returns the result filters

    :rtype: str or None
    """
    if not self._filters:
        return None
    active = self._filters
    # A trailing Enum entry is dropped before rendering.
    if isinstance(active[-1], Enum):
        active = active[:-1]
    # Enum entries contribute their .value; other entries their second item.
    rendered = [item.value if isinstance(item, Enum) else item[1]
                for item in active]
    return ' '.join(rendered).strip()
def get_crystal_field_spin(self, coordination: str = "oct",
                           spin_config: str = "high"):
    """Calculate the crystal field spin based on coordination and spin
    configuration. Only works for transition metal species.

    Args:
        coordination (str): Only oct and tet are supported at the moment.
        spin_config (str): Supported keywords are "high" or "low".

    Returns:
        Crystal field spin in Bohr magneton.

    Raises:
        AttributeError if species is not a valid transition metal or has
            an invalid oxidation state.
        ValueError if invalid coordination or spin_config.
    """
    if coordination not in ("oct", "tet") or spin_config not in ("high", "low"):
        raise ValueError("Invalid coordination or spin config.")
    elec = self.full_electronic_structure
    # Expect the configuration to end with a d subshell followed by an
    # s subshell (transition metal pattern).
    if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
        raise AttributeError(
            "Invalid element {} for crystal field calculation.".format(self.symbol))
    nelectrons = elec[-1][2] + elec[-2][2] - self.oxi_state
    if nelectrons < 0 or nelectrons > 10:
        raise AttributeError(
            "Invalid oxidation state {} for element {}".format(
                self.oxi_state, self.symbol))
    if spin_config == "high":
        return nelectrons if nelectrons <= 5 else 10 - nelectrons
    # Low-spin case: the unpaired-electron count depends on how the d
    # levels fill under the given coordination.
    if coordination == "oct":
        bands = ((3, lambda n: n), (6, lambda n: 6 - n),
                 (8, lambda n: n - 6), (10, lambda n: 10 - n))
    else:  # "tet"
        bands = ((2, lambda n: n), (4, lambda n: 4 - n),
                 (7, lambda n: n - 4), (10, lambda n: 10 - n))
    for limit, unpaired in bands:
        if nelectrons <= limit:
            return unpaired(nelectrons)
def _init_vocab(self, token_generator, add_reserved_tokens=True):
    """Initialize vocabulary with tokens from token_generator."""
    self._id_to_token = {}
    start_index = 0
    if add_reserved_tokens:
        # Reserved tokens occupy the lowest ids.
        self._id_to_token.update(enumerate(RESERVED_TOKENS))
        start_index = len(RESERVED_TOKENS)
    self._id_to_token.update(enumerate(token_generator, start=start_index))
    # _token_to_id is the reverse of _id_to_token
    self._token_to_id = {token: token_id
                         for token_id, token in self._id_to_token.items()}
def stmts_from_path(path, model, stmts):
    """Return source Statements corresponding to a path in a model.

    Parameters
    ----------
    path : list[tuple[str, int]]
        A list of tuples where the first element of the tuple is the name
        of a rule, and the second is the associated polarity along a path.
    model : pysb.core.Model
        A PySB model which contains the rules along the path.
    stmts : list[indra.statements.Statement]
        A list of INDRA Statements from which the model was assembled.

    Returns
    -------
    path_stmts : list[indra.statements.Statement]
        The Statements from which the rules along the path were obtained.
    """
    path_stmts = []
    for rule_name, _polarity in path:
        # One statement is recovered per model rule whose name matches the
        # rule named on the path.
        for model_rule in model.rules:
            if model_rule.name == rule_name:
                stmt = stmt_from_rule(rule_name, model, stmts)
                assert stmt is not None
                path_stmts.append(stmt)
    return path_stmts
def add_key(pub):
    """Adds a new public key to be used when encrypting new data is needed"""
    global _server_keys
    public_key = rsa.PublicKey.load_pkcs1(pub)
    # Index the key by its fingerprint for later lookup.
    _server_keys[_compute_fingerprint(public_key)] = public_key
def graphql_mutation_from_summary(summary):
    """This function returns a graphql mutation corresponding to the
    provided summary.
    """
    # get the name of the mutation from the summary
    mutation_name = summary['name']
    # build the input field dictionary from the summary
    input_name = mutation_name + "Input"
    input_fields = build_native_type_dictionary(summary['inputs'],
                                                name=input_name,
                                                respect_required=True)
    # the inputs for the mutation are defined by a class record
    inputs = type('Input', (object,), input_fields)
    # the outputs for the mutation are attributes to the class record
    output_name = mutation_name + "Output"
    outputs = build_native_type_dictionary(summary['outputs'], name=output_name)
    # a no-op in order to satisfy the introspection query
    mutate = classmethod(lambda *_, **__: 'hello')
    # assemble the attribute dict; output entries are applied last, as in a
    # literal with trailing **outputs.
    attributes = {'Input': inputs, 'mutate': mutate}
    attributes.update(outputs)
    # create and return the appropriate mutation class record
    return type(mutation_name, (graphene.Mutation,), attributes)
def set_mrt(self, s, mrt: str):
    """accepts a statement and adds a qualifer setting the mrt
    modifies s in place

    :param s: a WDBaseDataType statement
    :param mrt: one of {'close', 'broad', 'exact', 'related', 'narrow'}
    :return: s
    """
    # Accept either the abbreviation or the full URI form of the match type.
    if mrt in self.ABV_MRT.keys():
        mrt_uri = self.ABV_MRT[mrt]
    elif mrt in self.ABV_MRT.values():
        mrt_uri = mrt
    else:
        raise ValueError("mrt must be one of {}, found {}".format(
            self.ABV_MRT.keys(), mrt))
    qualifier = wdi_core.WDItemID(self.mrt_qids[mrt_uri], self.mrt_pid,
                                  is_qualifier=True)
    s.qualifiers.append(qualifier)
    return s
def terminate(name, call=None):
    '''
    To do an immediate power off of a VM using its name. A ``SIGKILL``
    is issued to the vmx process of the VM

    CLI Example:

    .. code-block:: bash

        salt-cloud -a terminate vmname
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The terminate action must be called with '
            '-a or --action.'
        )
    vm_properties = ["name", "summary.runtime.powerState"]
    vm_list = salt.utils.vmware.get_mors_with_properties(
        _get_si(), vim.VirtualMachine, vm_properties)
    # Locate the VM by name and hard power it off.
    for vm_ref in vm_list:
        if vm_ref["name"] != name:
            continue
        if vm_ref["summary.runtime.powerState"] == "poweredOff":
            ret = 'already powered off'
            log.info('VM %s %s', name, ret)
            return ret
        try:
            log.info('Terminating VM %s', name)
            vm_ref["object"].Terminate()
        except Exception as exc:
            log.error('Error while terminating VM %s: %s', name, exc,
                      # Show the traceback if the debug logging level is enabled
                      exc_info_on_loglevel=logging.DEBUG)
            return 'failed to terminate'
        return 'terminated'
def insert_child(self, child_pid, index=-1):
    """Insert a new child into a PID concept.

    Argument 'index' can take the following values:
        0,1,2,... - insert child PID at the specified position
        -1 - insert the child PID at the last position
        None - insert child without order (no re-ordering is done)

    NOTE: If 'index' is specified, all sibling relations should have
    PIDRelation.index information.

    NOTE(review): the docstring says index=None performs no re-ordering,
    but the code below maps None to -1, which appends and then re-indexes
    all siblings -- confirm which behavior is intended.
    """
    self._check_child_limits(child_pid)
    if index is None:
        index = -1
    try:
        with db.session.begin_nested():
            # Accept either a PersistentIdentifier or something resolvable
            # to one.
            if not isinstance(child_pid, PersistentIdentifier):
                child_pid = resolve_pid(child_pid)
            # Current children of this relation type, ordered by their
            # stored index.
            child_relations = self._resolved_pid.child_relations.filter(
                PIDRelation.relation_type == self.relation_type.id).order_by(
                    PIDRelation.index).all()
            relation_obj = PIDRelation.create(
                self._resolved_pid, child_pid, self.relation_type.id, None)
            if index == -1:
                child_relations.append(relation_obj)
            else:
                child_relations.insert(index, relation_obj)
            # Re-number all siblings so indexes stay dense and ordered.
            for idx, c in enumerate(child_relations):
                c.index = idx
    except IntegrityError:
        raise PIDRelationConsistencyError("PID Relation already exists.")
def plotAAClusters(sequence, propertyNames, showLines=True, showFigure=True):
    """
    Plot amino acid property cluster numbers for a sequence.

    @param sequence: An C{AARead} (or a subclass) instance.
    @param propertyNames: An iterable of C{str} property names (each of which
        must be a key of a key in the C{dark.aa.PROPERTY_CLUSTERS} C{dict}).
    @param showLines: If C{True}, lines will be drawn between successive AA
        property values. If not, just the values will be plotted as a scatter
        plot (this greatly reduces visual clutter if the sequence is long and
        AA property values are variable).
    @param showFigure: If C{True}, display the plot. Passing C{False} is
        useful in testing.
    @raise ValueError: If an unknown property is given in C{propertyNames}.
    @return: The return value from calling dark.aa.clustersForSequence: a
        C{dict} keyed by (lowercase) property name, with values that are
        C{list}s of the corresponding property value according to sequence
        position.
    """
    MISSING_AA_VALUE = 0
    propertyClusters = clustersForSequence(
        sequence, propertyNames, missingAAValue=MISSING_AA_VALUE)
    if showFigure:
        # Track the overall cluster range across all plotted properties so
        # the y axis can be sized to fit every series.
        minCluster = 1
        maxCluster = -1
        legend = []
        x = np.arange(0, len(sequence))
        # Either a connected line plot or a scatter plot, per showLines.
        plot = plt.plot if showLines else plt.scatter
        for index, propertyName in enumerate(propertyClusters):
            color = TABLEAU20[index]
            clusterNumbers = propertyClusters[propertyName]
            plot(x, clusterNumbers, color=color)
            legend.append(patches.Patch(color=color, label=propertyName))
            propertyMinCluster = min(clusterNumbers)
            if propertyMinCluster < minCluster:
                minCluster = propertyMinCluster
            propertyMaxCluster = max(clusterNumbers)
            if propertyMaxCluster > maxCluster:
                maxCluster = propertyMaxCluster
        plt.legend(handles=legend, loc=(0, 1.1))
        plt.xlim(-0.2, len(sequence) - 0.8)
        plt.ylim(minCluster - 0.5, maxCluster + 0.5)
        plt.yticks(range(maxCluster + 1))
        plt.xlabel('Sequence index')
        plt.ylabel('Property cluster number')
        plt.title(sequence.id)
        plt.show()
    return propertyClusters
def opacity(self, value):
    """Setter for **self.__opacity** attribute.

    :param value: Attribute value.
    :type value: float
    """
    # NOTE(review): the collapsed source makes the exact nesting of the
    # statements below ambiguous; this layout keeps everything guarded by
    # the None check (float(None) would raise) -- confirm against upstream.
    if value is not None:
        assert type(value) in (int, float), \
            "'{0}' attribute: '{1}' type is not 'int' or 'float'!".format("opacity", value)
        # Clamp the opacity into the valid [0, 1] range.
        if value > 1:
            value = 1
        elif value < 0:
            value = 0
        self.__opacity = float(value)
        self.__set_style_sheet()
def take_percentile(arr, percent):
    """take the top `percent` items in a list rounding up

    :param arr: sequence to slice.
    :param percent: fraction in [0, 1] of items to keep.
    :return: the leading ``ceil(len(arr) * percent)`` items of *arr*.
    """
    from math import ceil
    size = len(arr)
    # Round *up* as the docstring promises -- int() would truncate down --
    # while never taking more items than exist.
    stop = min(ceil(size * percent), size)
    return arr[0:stop]
def num_listeners(self, event=None):
    """Return the number of listeners for ``event``.

    Return the total number of listeners for all events on this object
    if ``event`` is :class:`None`.
    """
    if event is None:
        # Total across every registered event.
        return sum(map(len, self._listeners.values()))
    return len(self._listeners[event])
def create_poi_gdf ( polygon = None , amenities = None , north = None , south = None , east = None , west = None ) : """Parse GeoDataFrames from POI json that was returned by Overpass API . Parameters polygon : shapely Polygon or MultiPolygon geographic shape to fetch the POIs within amenities : list List of amenities that will be used for finding the POIs from the selected area . See available amenities from : http : / / wiki . openstreetmap . org / wiki / Key : amenity north : float northern latitude of bounding box south : float southern latitude of bounding box east : float eastern longitude of bounding box west : float western longitude of bounding box Returns Geopandas GeoDataFrame with POIs and the associated attributes ."""
responses = osm_poi_download ( polygon = polygon , amenities = amenities , north = north , south = south , east = east , west = west ) # Parse coordinates from all the nodes in the response coords = parse_nodes_coords ( responses ) # POI nodes poi_nodes = { } # POI ways poi_ways = { } # A list of POI relations relations = [ ] for result in responses [ 'elements' ] : if result [ 'type' ] == 'node' and 'tags' in result : poi = parse_osm_node ( response = result ) # Add element _ type poi [ 'element_type' ] = 'node' # Add to ' pois ' poi_nodes [ result [ 'id' ] ] = poi elif result [ 'type' ] == 'way' : # Parse POI area Polygon poi_area = parse_polygonal_poi ( coords = coords , response = result ) if poi_area : # Add element _ type poi_area [ 'element_type' ] = 'way' # Add to ' poi _ ways ' poi_ways [ result [ 'id' ] ] = poi_area elif result [ 'type' ] == 'relation' : # Add relation to a relation list ( needs to be parsed after all nodes and ways have been parsed ) relations . append ( result ) # Create GeoDataFrames gdf_nodes = gpd . GeoDataFrame ( poi_nodes ) . T gdf_nodes . crs = settings . default_crs gdf_ways = gpd . GeoDataFrame ( poi_ways ) . T gdf_ways . crs = settings . default_crs # Parse relations ( MultiPolygons ) from ' ways ' gdf_ways = parse_osm_relations ( relations = relations , osm_way_df = gdf_ways ) # Combine GeoDataFrames gdf = gdf_nodes . append ( gdf_ways , sort = False ) return gdf
def get_targets(self, raw_string):
    # type: (Optional[Text]) -> Dict[Text, Text]
    """Extract targets from a string in 'key:value' format."""
    mapping = {}
    if raw_string is None:
        return mapping
    # One 'name:directory' pair per non-empty line; only the first colon
    # separates the key from the value.
    for entry in filter(None, raw_string.splitlines()):
        name, directory = entry.split(':', 1)
        mapping[name.strip()] = directory.strip()
    return mapping
def parse_digest_challenge(authentication_header):
    '''
    Parses the value of a 'WWW-Authenticate' header. Returns an object
    with properties corresponding to each of the recognized parameters
    in the header.
    '''
    if not is_digest_challenge(authentication_header):
        return None
    # Skip the leading scheme token (7 characters) before parsing params.
    parts = parse_parts(authentication_header[7:],
                        defaults={'algorithm': 'MD5', 'stale': 'false'})
    if not _check_required_parts(parts, _REQUIRED_DIGEST_CHALLENGE_PARTS):
        return None
    parts['stale'] = parts['stale'].lower() == 'true'
    challenge = _build_object_from_parts(parts, _REQUIRED_DIGEST_CHALLENGE_PARTS)
    # Only the MD5 algorithm with qop=auth is supported.
    if (challenge.algorithm, challenge.qop) != ('MD5', 'auth'):
        return None
    return challenge
def send_request(**kwargs):
    """Return a data frame from a web service request to cBio portal.

    Sends a web service requrest to the cBio portal with arguments given
    in the dictionary data and returns a Pandas data frame on success.

    More information about the service here:
    http://www.cbioportal.org/web_api.jsp

    Parameters
    ----------
    kwargs : dict
        A dict of parameters for the query. Entries map directly to web
        service calls with the exception of the optional 'skiprows' entry,
        whose value is used as the number of rows to skip when reading the
        result data frame.

    Returns
    -------
    df : pandas.DataFrame
        Response from cBioPortal as a Pandas DataFrame.
    """
    skiprows = kwargs.pop('skiprows', None)
    res = requests.get(cbio_url, params=kwargs)
    if res.status_code != 200:
        logger.error('Request returned with code %d' % res.status_code)
        return None
    # Adaptively skip rows based on number of comment lines
    if skiprows == -1:
        skiprows = 0
        for line in res.text.split('\n'):
            if not line.startswith('#'):
                break
            skiprows += 1
    return pandas.read_csv(StringIO(res.text), sep='\t', skiprows=skiprows)
def set_json(self, config_json):
    """Permanently set the JSON configuration

    Unable to call twice.
    """
    # Guard against reconfiguration: this may only ever run once.
    if self.configuration_dict is not None:
        raise RuntimeError("Can only set configuration once",
                           self.configuration_dict)
    # Validate against the bundled schema before accepting anything.
    schema = fetch_config('ConfigurationSchema.json')
    validictory.validate(config_json, schema)
    # Stamp runtime metadata onto the validated configuration.
    config_json.update(name=self.name,
                       run_number=self.run,
                       src_dir=get_source_dir(),
                       data_dir=get_data_dir(),
                       log_dir=get_log_dir())
    self.configuration_dict = config_json
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params=None):
    """Returns a tree from alignment

    Will check MolType of aln object
    """
    if params is None:
        params = {}
    # FastTree needs to know whether the input is nucleotide or protein.
    if moltype in (DNA, RNA):
        params['-nt'] = True
    elif moltype == PROTEIN:
        params['-nt'] = False
    else:
        raise ValueError("FastTree does not support moltype: %s" % moltype.label)
    if best_tree:
        params['-slow'] = True
    # Create mapping between abbreviated IDs and full IDs
    int_map, int_keys = aln.getIntMap()
    # Create SequenceCollection from int_map.
    int_map = SequenceCollection(int_map, MolType=moltype)
    app = FastTree(params=params)
    result = app(int_map.toFasta())
    tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
    # remap tip names
    for tip in tree.tips():
        tip.Name = int_keys[tip.Name]
    return tree
def to_xml(self):
    '''
    Returns an XMLi representation of the shipping details.
    @return: Element
    '''
    # Validate the required attribute before serializing.
    if is_empty_or_none(self.recipient):
        raise ValueError("'%s' attribute cannot be empty or None." % "recipient")
    document = Document()
    root = document.createElement("shipping")
    root.appendChild(self.recipient.to_xml("recipient"))
    return root
def fetch_internal ( item , request ) : """Fetches the given request by using the local Flask context ."""
# Break client dependence on Flask if internal fetches aren ' t being used . from flask import make_response from werkzeug . test import EnvironBuilder # Break circular dependencies . from dpxdt . server import app # Attempt to create a Flask environment from a urllib2 . Request object . environ_base = { 'REMOTE_ADDR' : '127.0.0.1' , } # The data object may be a generator from poster . multipart _ encode , so we # need to convert that to raw bytes here . Unfortunately EnvironBuilder # only works with the whole request buffered in memory . data = request . get_data ( ) if data and not isinstance ( data , str ) : data = '' . join ( list ( data ) ) builder = EnvironBuilder ( path = request . get_selector ( ) , base_url = '%s://%s' % ( request . get_type ( ) , request . get_host ( ) ) , method = request . get_method ( ) , data = data , headers = request . header_items ( ) , environ_base = environ_base ) with app . request_context ( builder . get_environ ( ) ) : response = make_response ( app . dispatch_request ( ) ) LOGGER . info ( '"%s" %s via internal routing' , request . get_selector ( ) , response . status_code ) item . status_code = response . status_code item . content_type = response . mimetype if item . result_path : # TODO : Is there a better way to access the response stream ? with open ( item . result_path , 'wb' ) as result_file : for piece in response . iter_encoded ( ) : result_file . write ( piece ) else : item . data = response . get_data ( ) return item
def from_values_indices ( cls , values , indices , populate = False , structure = None , voigt_rank = None , vsym = True , verbose = False ) : """Creates a tensor from values and indices , with options for populating the remainder of the tensor . Args : values ( floats ) : numbers to place at indices indices ( array - likes ) : indices to place values at populate ( bool ) : whether to populate the tensor structure ( Structure ) : structure to base population or fit _ to _ structure on voigt _ rank ( int ) : full tensor rank to indicate the shape of the resulting tensor . This is necessary if one provides a set of indices more minimal than the shape of the tensor they want , e . g . Tensor . from _ values _ indices ( ( 0 , 0 ) , 100) vsym ( bool ) : whether to voigt symmetrize during the optimization procedure verbose ( bool ) : whether to populate verbosely"""
# auto - detect voigt notation # TODO : refactor rank inheritance to make this easier indices = np . array ( indices ) if voigt_rank : shape = ( [ 3 ] * ( voigt_rank % 2 ) + [ 6 ] * ( voigt_rank // 2 ) ) else : shape = np . ceil ( np . max ( indices + 1 , axis = 0 ) / 3. ) * 3 base = np . zeros ( shape . astype ( int ) ) for v , idx in zip ( values , indices ) : base [ tuple ( idx ) ] = v if 6 in shape : obj = cls . from_voigt ( base ) else : obj = cls ( base ) if populate : assert structure , "Populate option must include structure input" obj = obj . populate ( structure , vsym = vsym , verbose = verbose ) elif structure : obj = obj . fit_to_structure ( structure ) return obj
def _temporary_filenames(total):
    """Context manager to create temporary files and remove them after use.

    :param total: number of temporary file names to generate.
    :yields: a list of temporary file paths.
    """
    temp_files = [_get_temporary_filename('optimage-') for i in range(total)]
    try:
        yield temp_files
    finally:
        # Clean up even when the managed block raises; without try/finally
        # an exception in the caller would skip removal and leak the files.
        for temp_file in temp_files:
            try:
                os.remove(temp_file)
            except OSError:
                # Continue in case we could not remove the file. One reason
                # is that the file was never created.
                pass
def decode(value, strip=False):
    """Python 2/3 friendly decoding of output.

    Args:
        value (str | unicode | bytes | None): The value to decode.
        strip (bool): If True, `strip()` the returned string.
            (Default value = False)

    Returns:
        str: Decoded value, if applicable.
    """
    if value is None:
        return None
    # Only decode genuine byte strings (on py2, str *is* bytes, so the
    # extra unicode check matters there).
    if isinstance(value, bytes) and not isinstance(value, unicode):
        value = value.decode("utf-8")
    result = unicode(value)
    return result.strip() if strip else result
def unregister_project(self, project_node, raise_exception=False):
    """
    Unregisters given
    :class:`umbra.components.factory.scriptProject.nodes.ProjectNode`
    class Node from the Model.

    :param project_node: ProjectNode to unregister.
    :type project_node: ProjectNode
    :param raise_exception: Raise the exception.
    :type raise_exception: bool
    :return: ProjectNode.
    :rtype: ProjectNode
    """
    # Optionally enforce that the node is actually registered.
    if raise_exception:
        if not project_node in self.list_project_nodes():
            raise foundations.exceptions.ProgrammingError(
                "{0} | '{1}' project 'ProjectNode' isn't registered!".format(
                    self.__class__.__name__, project_node))
    LOGGER.debug("> Unregistering '{0}' project 'ProjectNode'.".format(project_node))
    parent = project_node.parent
    row = project_node.row()
    # Qt model contract: structural removals must be wrapped in
    # beginRemoveRows()/endRemoveRows() so attached views stay consistent.
    self.beginRemoveRows(self.get_node_index(parent), row, row)
    parent.remove_child(row)
    self.endRemoveRows()
    self.project_unregistered.emit(project_node)
    return project_node
def get_snapshot_closest_to_state_change(
        self,
        state_change_identifier: int,
) -> Tuple[int, Any]:
    """Get snapshots earlier than state_change with provided ID."""
    row = super().get_snapshot_closest_to_state_change(state_change_identifier)
    serialized_snapshot = row[1]
    if not serialized_snapshot:
        # No stored snapshot: signal "start from scratch".
        return (0, None)
    # Pair the id of the last applied state change with the deserialized
    # snapshot payload.
    return (row[0], self.serializer.deserialize(serialized_snapshot))
def _is_mwtab(string):
    """Test if input string is in `mwtab` format.

    :param string: Input string.
    :type string: :py:class:`str` or :py:class:`bytes`
    :return: Input string if in mwTab format or False otherwise.
    :rtype: :py:class:`str` or :py:obj:`False`
    """
    if isinstance(string, str):
        lines = string.split("\n")
    elif isinstance(string, bytes):
        lines = string.decode("utf-8").split("\n")
    else:
        raise TypeError("Expecting <class 'str'> or <class 'bytes'>, but {} was passed".format(type(string)))
    # Drop blank lines before inspecting the header.
    lines = [line for line in lines if line]
    # Guard against empty/blank-only input, which previously raised
    # IndexError on lines[0].
    if not lines:
        return False
    header = lines[0]
    if header.startswith("#METABOLOMICS WORKBENCH"):
        return "\n".join(lines)
    return False
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information"""
    cmd = ['relation-get', '--format=json']
    if rid:
        cmd.extend(['-r', rid])
    cmd.append(attribute or '-')
    if unit:
        cmd.append(unit)
    try:
        raw = subprocess.check_output(cmd).decode('UTF-8')
        return json.loads(raw)
    except ValueError:
        # Output was not valid JSON.
        return None
    except CalledProcessError as e:
        # Exit code 2 is treated as "no data available".
        if e.returncode == 2:
            return None
        raise
def _rd_segment(file_name, dir_name, pb_dir, fmt, n_sig, sig_len, byte_offset,
                samps_per_frame, skew, sampfrom, sampto, channels,
                smooth_frames, ignore_skew):
    """Read the digital samples from a single segment record's associated
    dat file(s).

    Parameters
    ----------
    file_name : list
        The names of the dat files to be read.
    dir_name : str
        The full directory where the dat file(s) are located, if the dat
        file(s) are local.
    pb_dir : str
        The physiobank directory where the dat file(s) are located, if the
        dat file(s) are remote.
    fmt : list
        The formats of the dat files.
    n_sig : int
        The number of signals contained in the dat file.
    sig_len : int
        The signal length (per channel) of the dat file.
    byte_offset : list
        The byte offset of each dat file. Entries may be None (default 0).
    samps_per_frame : list
        The samples/frame for each signal. Entries may be None (default 1).
    skew : list
        The skew for each signal. Entries may be None (default 0).
    sampfrom : int
        The starting sample number to be read from the signals.
    sampto : int
        The final sample number to be read from the signals.
    channels : list
        The record channel indices to read.
    smooth_frames : bool
        Whether to smooth channels with multiple samples/frame.
    ignore_skew : bool
        Whether to ignore the skew field and load in all values contained
        in the dat files unaligned (True), or apply the skew to align the
        signals (False).

    Returns
    -------
    signals : numpy array, or list
        The signals read from the dat file(s). A 2d numpy array is
        returned if the signals have uniform samples/frame or if
        `smooth_frames` is True. Otherwise a list of 1d numpy arrays is
        returned.

    Notes
    -----
    'channels', 'sampfrom', 'sampto', 'smooth_frames', and 'ignore_skew'
    are user desired input fields. All other parameters are specifications
    of the segment.
    """
    # Copy the per-signal spec lists to avoid changing the caller's
    # outer variables.
    byte_offset = byte_offset[:]
    samps_per_frame = samps_per_frame[:]
    skew = skew[:]

    # Set defaults for empty fields. Use `is None` (not `== None`) so a
    # legitimate 0 entry is never mistaken for "unset".
    for i in range(n_sig):
        if byte_offset[i] is None:
            byte_offset[i] = 0
        if samps_per_frame[i] is None:
            samps_per_frame[i] = 1
        if skew[i] is None:
            skew[i] = 0

    # If skew is to be ignored, set all to 0.
    if ignore_skew:
        skew = [0] * n_sig

    # Get the set of dat files, and the channels that belong to each file.
    file_name, datchannel = describe_list_indices(file_name)

    # Some files will not be read depending on input channels.
    # Get the wanted fields only.
    w_file_name = []        # one scalar per dat file
    w_fmt = {}              # one scalar per dat file
    w_byte_offset = {}      # one scalar per dat file
    w_samps_per_frame = {}  # one list per dat file
    w_skew = {}             # one list per dat file
    w_channel = {}          # one list per dat file

    for fn in file_name:
        # Intersecting dat channels between the input channels and the
        # channels of the file.
        idc = [c for c in datchannel[fn] if c in channels]

        # There is at least one wanted channel in the dat file.
        if idc:
            w_file_name.append(fn)
            w_fmt[fn] = fmt[datchannel[fn][0]]
            w_byte_offset[fn] = byte_offset[datchannel[fn][0]]
            w_samps_per_frame[fn] = [samps_per_frame[c] for c in datchannel[fn]]
            w_skew[fn] = [skew[c] for c in datchannel[fn]]
            w_channel[fn] = idc

    # Wanted dat channels, relative to the dat file itself.
    r_w_channel = {}
    # The channels in the final output array that correspond to the read
    # channels in each dat file.
    out_dat_channel = {}
    for fn in w_channel:
        r_w_channel[fn] = [c - min(datchannel[fn]) for c in w_channel[fn]]
        out_dat_channel[fn] = [channels.index(c) for c in w_channel[fn]]

    # Signals with multiple samples/frame are smoothed, or all signals
    # have 1 sample/frame. Return a uniform numpy array.
    if smooth_frames or sum(samps_per_frame) == n_sig:
        # Figure out the largest required dtype for the segment to
        # minimize memory usage.
        max_dtype = _np_dtype(_fmt_res(fmt, max_res=True), discrete=True)
        # Allocate signal array. Minimize dtype.
        signals = np.zeros([sampto - sampfrom, len(channels)],
                           dtype=max_dtype)

        # Read each wanted dat file and store signals.
        for fn in w_file_name:
            signals[:, out_dat_channel[fn]] = _rd_dat_signals(
                fn, dir_name, pb_dir, w_fmt[fn], len(datchannel[fn]),
                sig_len, w_byte_offset[fn], w_samps_per_frame[fn],
                w_skew[fn], sampfrom, sampto,
                smooth_frames)[:, r_w_channel[fn]]

    # Return each sample in signals with multiple samples/frame, without
    # smoothing. Return a list of numpy arrays for each signal.
    else:
        signals = [None] * len(channels)

        for fn in w_file_name:
            # Get the list of all signals contained in the dat file.
            datsignals = _rd_dat_signals(
                fn, dir_name, pb_dir, w_fmt[fn], len(datchannel[fn]),
                sig_len, w_byte_offset[fn], w_samps_per_frame[fn],
                w_skew[fn], sampfrom, sampto, smooth_frames)

            # Copy over the wanted signals.
            for cn in range(len(out_dat_channel[fn])):
                signals[out_dat_channel[fn][cn]] = \
                    datsignals[r_w_channel[fn][cn]]

    return signals
def _buckets_nearly_equal(a_dist, b_dist):
    """Determines whether two `Distributions` are nearly equal.

    Args:
       a_dist (:class:`Distribution`): an instance
       b_dist (:class:`Distribution`): another instance

    Return:
       boolean: `True` if the two instances are approximately equal,
         otherwise False
    """
    kind_a, buckets_a = _detect_bucket_option(a_dist)
    kind_b, buckets_b = _detect_bucket_option(b_dist)
    # Distributions with different bucket options can never match.
    if kind_a != kind_b:
        return False

    # Dispatch to the comparator for this bucket option; unknown
    # options compare as not-equal.
    comparators = {
        u'linearBuckets': _linear_buckets_nearly_equal,
        u'exponentialBuckets': _exponential_buckets_nearly_equal,
        u'explicitBuckets': _explicit_buckets_nearly_equal,
    }
    compare = comparators.get(kind_a)
    if compare is None:
        return False
    return compare(buckets_a, buckets_b)
def SplitV(a, splits, axis):
    """Split op with multiple split sizes.

    Splits a copy of `a` along `axis` at the running totals of `splits`.
    Note that since the cumulative sum includes the grand total, the
    result contains one more piece than len(splits); the final piece is
    empty when the sizes sum to the axis length.
    """
    split_points = np.cumsum(splits)
    pieces = np.split(np.copy(a), split_points, axis=axis)
    return tuple(pieces)