signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def get_sqlserver_product_version(engine: "Engine") -> Tuple[int, ...]:
    """Gets SQL Server version information as a tuple of integers.

    We attempted to use ``dialect.server_version_info``:

    .. code-block:: python

        from sqlalchemy import create_engine
        url = "mssql+pyodbc://USER:PASSWORD@ODBC_NAME"
        engine = create_engine(url)
        vi = engine.dialect.server_version_info

    Unfortunately ``vi == ()`` for an SQL Server 2014 instance via
    ``mssql+pyodbc``, and ``None`` for a ``mysql+pymysql`` connection, so
    ``server_version_info`` is a badly supported feature. The only other way
    is to ask the database directly, which requires an :class:`Engine` or
    similar.

    We could use either:

    .. code-block:: sql

        SELECT @@version;  -- returns a human-readable string
        SELECT SERVERPROPERTY('ProductVersion');  -- better

    However, ``pyodbc`` falls over with ``ODBC SQL type -150 is not yet
    supported`` on the latter (a ``VARIANT`` comes back), so we ``CAST`` it
    to VARCHAR first.

    :param engine: a SQLAlchemy engine connected to a Microsoft SQL Server
        instance.
    :return: the dotted product version as a tuple of ints,
        e.g. ``(12, 0, 5203, 0)``.
    """
    # BUG FIX: annotation was ``Tuple[int]`` (a 1-tuple); the function
    # returns a variable-length tuple, which is spelled ``Tuple[int, ...]``.
    assert is_sqlserver(engine), (
        "Only call get_sqlserver_product_version() for Microsoft SQL Server "
        "instances."
    )
    sql = "SELECT CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR)"
    rp = engine.execute(sql)  # type: ResultProxy
    row = rp.fetchone()
    dotted_version = row[0]  # type: str  # e.g. '12.0.5203.0'
    return tuple(int(x) for x in dotted_version.split("."))
def dump_service ( self , sc ) : """Read all data blocks of a given service . : meth : ` dump _ service ` reads all data blocks from the service with service code * sc * and returns a list of strings suitable for printing . The number of strings returned does not necessarily reflect the number of data blocks because a range of data blocks with equal content is reduced to fewer lines of output ."""
def lprint ( fmt , data , index ) : ispchr = lambda x : x >= 32 and x <= 126 # noqa : E731 def print_bytes ( octets ) : return ' ' . join ( [ '%02x' % x for x in octets ] ) def print_chars ( octets ) : return '' . join ( [ chr ( x ) if ispchr ( x ) else '.' for x in octets ] ) return fmt . format ( index , print_bytes ( data ) , print_chars ( data ) ) data_line_fmt = "{0:04X}: {1} |{2}|" same_line_fmt = "{0:<4s} {1} |{2}|" lines = list ( ) last_data = None same_data = 0 for i in itertools . count ( ) : # pragma : no branch assert i < 0x10000 try : this_data = self . read_without_encryption ( [ sc ] , [ BlockCode ( i ) ] ) except Type3TagCommandError : i = i - 1 break if this_data == last_data : same_data += 1 else : if same_data > 1 : lines . append ( lprint ( same_line_fmt , last_data , "*" ) ) lines . append ( lprint ( data_line_fmt , this_data , i ) ) last_data = this_data same_data = 0 if same_data > 1 : lines . append ( lprint ( same_line_fmt , last_data , "*" ) ) if same_data > 0 : lines . append ( lprint ( data_line_fmt , this_data , i ) ) return lines
def hook_output(module: nn.Module, detach: bool = True, grad: bool = False) -> Hook:
    "Return a `Hook` that stores activations of `module` in `self.stored`"
    # A forward hook captures activations; passing grad=True registers a
    # backward hook instead, capturing gradients.
    forward = not grad
    return Hook(module, _hook_inner, detach=detach, is_forward=forward)
def set_components(self, params):
    """Set the value of exogenous model elements.

    Element values are passed as a dict of ``name: value`` pairs. Values
    may be numeric, callable, or pandas Series; Series are interpolated
    by the integrator.

    Examples
    --------
    >>> model.set_components({'birth_rate': 10})
    >>> model.set_components({'Birth Rate': 10})
    >>> br = pandas.Series(index=range(30), data=np.sin(range(30)))
    >>> model.set_components({'birth_rate': br})
    """
    # NOTE: accepting a pandas Series of parameters (indexed by element
    # name) could ease DataFrame.apply() workflows, but would be ambiguous
    # with a Series passed as a single element's value, so only dicts are
    # handled here.
    for name, new_value in params.items():
        # Build the replacement callable according to the value's type.
        if isinstance(new_value, pd.Series):
            replacement = self._timeseries_component(new_value)
        elif callable(new_value):
            replacement = new_value
        else:
            replacement = self._constant_component(new_value)

        # Resolve the user-supplied name case-insensitively against the
        # model namespace.
        func_name = utils.get_value_by_insensitive_key_or_value(
            name, self.components._namespace)
        if func_name is None:
            raise NameError('%s is not recognized as a model component' % name)

        # Warn when overwriting a stock, since that bypasses integration.
        # (This won't handle other statefuls...)
        if '_integ_' + func_name in dir(self.components):
            warnings.warn(
                "Replacing the equation of stock {} with params".format(name),
                stacklevel=2)

        setattr(self.components, func_name, replacement)
def write_config(filename, config, mode="w"):
    '''Serialize a configparser config object to ``filename``.

    :param filename: path of the file to write.
    :param config: a configparser-style object exposing ``write(fileobj)``.
    :param mode: file open mode, ``"w"`` by default.
    :return: the filename that was written.
    '''
    with open(filename, mode) as destination:
        config.write(destination)
    return filename
def _get_phi ( self , C , mag ) : """Returns the magnitude dependent intra - event standard deviation ( phi ) ( equation 15)"""
if mag < 5.5 : return C [ "phi1" ] elif mag < 5.75 : return C [ "phi1" ] + ( C [ "phi2" ] - C [ "phi1" ] ) * ( ( mag - 5.5 ) / 0.25 ) else : return C [ "phi2" ]
def combined(cls, code, path=None, extra_args=None):
    """Compile combined-json with abi, bin, devdoc, userdoc.

    :param code: literal solidity code as a string.
    :param path: absolute path to a solidity file.
        Note: code & path are mutually exclusive!
    :param extra_args: either a space-separated string or a list of extra
        arguments to be passed to the solidity compiler.
    :return: list of ``(contract_name, contract_data)`` tuples, in source
        declaration order.
    """
    if code and path:
        raise ValueError('sourcecode and path are mutually exclusive.')

    if path:
        contracts = compile_file(path, extra_args=extra_args)
        # The ordering step below needs the literal source text, so read
        # the file back in.
        with open(path) as handler:
            code = handler.read()
    elif code:
        contracts = compile_code(code, extra_args=extra_args)
    else:
        raise ValueError('either code or path needs to be supplied.')

    # Pair each declared contract name with its compiled data, preserving
    # the order the names appear in the source.
    return [
        (match[1], solidity_get_contract_data(contracts, path, match[1]))
        for match in solidity_names(code)
    ]
def get_series_by_name(self, name):
    """Returns the first :py:class:`.Series` of a given name, or ``None``.

    :param str name: The name to search by.
    """
    if not isinstance(name, str):
        raise TypeError(
            "Can only search series by str name, not '%s'" % str(name))
    # Lazily scan all series and return the first whose name matches.
    matches = (s for s in self.all_series() if s.name() == name)
    return next(matches, None)
def get_content(self):
    """performs es search and gets content objects"""
    # Unwrap a nested {"query": ...} payload when present; otherwise the
    # stored query is used directly.
    q = self.query["query"] if "query" in self.query else self.query
    field_map = {
        "feature-type": "feature_type.slug",
        "tag": "tags.slug",
        "content-type": "_type",
    }
    return custom_search_model(Content, q, field_map=field_map)
def detect ( ) : """Detect available plugins and return enabled / configured stats Yields tuples of the form ( section , statsgroup ) sorted by the default StatsGroup order which maybe overriden in the config file . The ' section ' is the name of the configuration section as well as the option used to enable those particular stats ."""
# Load plugins and config plugins = load ( ) config = Config ( ) # Make sure that all sections have a valid plugin type defined for section in config . sections ( ) : if section == 'general' : continue try : type_ = config . item ( section , 'type' ) except ConfigError : raise ConfigError ( "Plugin type not defined in section '{0}'." . format ( section ) ) if type_ not in plugins : raise ConfigError ( "Invalid plugin type '{0}' in section '{1}'." . format ( type_ , section ) ) # Detect classes inherited from StatsGroup and return them sorted stats = [ ] for plugin in plugins : module = getattr ( PLUGINS , plugin ) for object_name in dir ( module ) : statsgroup = getattr ( module , object_name ) # Filter out anything except for StatsGroup descendants if ( not isinstance ( statsgroup , ( type , types . ClassType ) ) or not issubclass ( statsgroup , StatsGroup ) or statsgroup is StatsGroup or statsgroup is EmptyStatsGroup ) : continue # Search config for sections with type matching the plugin , # use order provided there or class default otherwise for section in config . sections ( kind = plugin ) : try : order = int ( config . item ( section , "order" ) ) except ConfigError : order = statsgroup . order except ValueError : log . warn ( "Invalid {0} stats order: '{1}'" . format ( section , config . item ( section , "order" ) ) ) order = statsgroup . order stats . append ( ( section , statsgroup , order ) ) log . info ( "Found {0}, an instance of {1}, order {2}" . format ( section , statsgroup . __name__ , order ) ) # Custom stats are handled with a single instance if statsgroup . __name__ == "CustomStats" : break for section , statsgroup , _ in sorted ( stats , key = lambda x : x [ 2 ] ) : yield section , statsgroup
def integrate_converge ( self , crit = 1e-4 , verbose = True ) : """Integrates the model until model states are converging . : param crit : exit criteria for difference of iterated solutions [ default : 0.0001] : type crit : float : param bool verbose : information whether total elapsed time should be printed [ default : True ] : Example : > > > import climlab > > > model = climlab . EBM ( ) > > > model . global _ mean _ temperature ( ) Field ( 11.997968598413685) > > > model . integrate _ converge ( ) Total elapsed time is 10.0 years . > > > model . global _ mean _ temperature ( ) Field ( 14.288155406577301)"""
# implemented by m - kreuzer for varname , value in self . state . items ( ) : value_old = copy . deepcopy ( value ) self . integrate_years ( 1 , verbose = False ) while np . max ( np . abs ( value_old - value ) ) > crit : value_old = copy . deepcopy ( value ) self . integrate_years ( 1 , verbose = False ) if verbose == True : print ( "Total elapsed time is %s years." % str ( self . time [ 'days_elapsed' ] / const . days_per_year ) )
def join(cls, root, subkey):
    """Rebuild a full declaration name from its components.

    For every string ``x``, we have ``join(split(x)) == x``.
    """
    # A missing subkey means the root already is the full name.
    return root if subkey is None else enums.SPLITTER.join((root, subkey))
def nl_family(self, value):
    """Family setter."""
    # Encode the family as a C unsigned int (None is stored as zero) and
    # splice it into the backing buffer at the first field's slice.
    encoded = bytearray(c_uint(value or 0))
    self.bytearray[self._get_slicers(0)] = encoded
def all(self, query, **kwargs):
    """See https://api.slack.com/methods/search.all"""
    # Point this instance at the search.all endpoint, then defer to the
    # shared search implementation for the actual request.
    self.url = 'https://slack.com/api/search.all'
    return super(Search, self).search_from_url(query, **kwargs)
def _parse_and_sort_accept_header(accept_header):
    """Parse and sort the accept header items.

    >>> _parse_and_sort_accept_header('application/json; q=0.5, text/*')
    [('text/*', 1.0), ('application/json', 0.5)]
    """
    # Split the header on commas, parse each item into (mimetype, q),
    # then order by priority, highest first.
    items = [_split_into_mimetype_and_priority(part)
             for part in accept_header.split(',')]
    items.sort(key=lambda pair: pair[1], reverse=True)
    return items
def via(self, *args):
    """Creates an empty error to record in the stack trace"""
    # Only record a "via" marker when at least one error has already
    # been captured; otherwise there is nothing to trace through.
    if not self.errors:
        return None
    return self._err("via", *args)
def _scons_user_error(e):
    """Handle user errors.

    Print out a message and a description of the error, along with the
    line number and routine where it occured. The file and line number
    will be the deepest stack frame that is not part of SCons itself.
    Exits the process with status 2.
    """
    global print_stacktrace
    # The active exception, not ``e``, drives the report below.
    etype, value, tb = sys.exc_info()
    if print_stacktrace:
        traceback.print_exception(etype, value, tb)
    # Locate the deepest frame belonging to the user's files (not SCons).
    filename, lineno, routine, dummy = find_deepest_user_frame(traceback.extract_tb(tb))
    sys.stderr.write("\nscons: *** %s\n" % value)
    sys.stderr.write('File "%s", line %d, in %s\n' % (filename, lineno, routine))
    # User error: terminate with a conventional non-zero exit status.
    sys.exit(2)
def is_valid_coll(self, coll):
    """Determines if the collection name for a request is valid (exists)

    :param str coll: The name of the collection to check
    :return: True if the collection is valid, false otherwise
    :rtype: bool
    """
    # (An "all collections" pseudo-name is deliberately not treated as
    # valid here.)
    if coll in self.warcserver.list_fixed_routes():
        return True
    return coll in self.warcserver.list_dynamic_routes()
def rowgroupmap(table, key, mapper, header=None, presorted=False,
                buffersize=None, tempdir=None, cache=True):
    """Group rows under the given key then apply `mapper` to yield zero
    or more output rows for each input group of rows."""
    # Thin factory: all the work happens lazily inside the view.
    view = RowGroupMapView(table, key, mapper,
                           header=header,
                           presorted=presorted,
                           buffersize=buffersize,
                           tempdir=tempdir,
                           cache=cache)
    return view
def from_array(array):
    """Deserialize a new Message from a given dictionary.

    :param array: the dict to deserialize; ``None`` or empty yields ``None``.
    :return: new Message instance.
    :rtype: Message
    """
    if array is None or not array:
        return None
    # end if
    assert_type_or_raise(array, dict, parameter_name="array")
    # Local imports avoid circular dependencies between receivable modules.
    from ..receivable.peer import User, Chat
    from ..receivable.media import Animation, Audio, Contact, Document, Game, Location, MessageEntity, PhotoSize
    from ..receivable.media import Sticker, Venue, Video, VideoNote, Voice
    from ..receivable.payments import Invoice, SuccessfulPayment
    from ..receivable.passport import PassportData
    data = {}
    # Required fields (absent keys would make int()/from_array fail here).
    data['message_id'] = int(array.get('message_id'))
    data['date'] = int(array.get('date'))
    data['chat'] = Chat.from_array(array.get('chat'))
    # Optional sender / forwarding metadata. Note the Telegram key 'from'
    # maps to the attribute 'from_peer' ('from' is a Python keyword).
    data['from_peer'] = User.from_array(array.get('from')) if array.get('from') is not None else None
    data['forward_from'] = User.from_array(array.get('forward_from')) if array.get('forward_from') is not None else None
    data['forward_from_chat'] = Chat.from_array(array.get('forward_from_chat')) if array.get('forward_from_chat') is not None else None
    data['forward_from_message_id'] = int(array.get('forward_from_message_id')) if array.get('forward_from_message_id') is not None else None
    data['forward_signature'] = u(array.get('forward_signature')) if array.get('forward_signature') is not None else None
    data['forward_date'] = int(array.get('forward_date')) if array.get('forward_date') is not None else None
    data['reply_to_message'] = Message.from_array(array.get('reply_to_message')) if array.get('reply_to_message') is not None else None
    data['edit_date'] = int(array.get('edit_date')) if array.get('edit_date') is not None else None
    data['media_group_id'] = u(array.get('media_group_id')) if array.get('media_group_id') is not None else None
    data['author_signature'] = u(array.get('author_signature')) if array.get('author_signature') is not None else None
    # Optional text content and entity annotations.
    data['text'] = u(array.get('text')) if array.get('text') is not None else None
    data['entities'] = MessageEntity.from_array_list(array.get('entities'), list_level=1) if array.get('entities') is not None else None
    data['caption_entities'] = MessageEntity.from_array_list(array.get('caption_entities'), list_level=1) if array.get('caption_entities') is not None else None
    # Optional media attachments (at most one is normally present).
    data['audio'] = Audio.from_array(array.get('audio')) if array.get('audio') is not None else None
    data['document'] = Document.from_array(array.get('document')) if array.get('document') is not None else None
    data['animation'] = Animation.from_array(array.get('animation')) if array.get('animation') is not None else None
    data['game'] = Game.from_array(array.get('game')) if array.get('game') is not None else None
    data['photo'] = PhotoSize.from_array_list(array.get('photo'), list_level=1) if array.get('photo') is not None else None
    data['sticker'] = Sticker.from_array(array.get('sticker')) if array.get('sticker') is not None else None
    data['video'] = Video.from_array(array.get('video')) if array.get('video') is not None else None
    data['voice'] = Voice.from_array(array.get('voice')) if array.get('voice') is not None else None
    data['video_note'] = VideoNote.from_array(array.get('video_note')) if array.get('video_note') is not None else None
    data['caption'] = u(array.get('caption')) if array.get('caption') is not None else None
    data['contact'] = Contact.from_array(array.get('contact')) if array.get('contact') is not None else None
    data['location'] = Location.from_array(array.get('location')) if array.get('location') is not None else None
    data['venue'] = Venue.from_array(array.get('venue')) if array.get('venue') is not None else None
    # Optional chat service messages (membership, titles, photos, flags).
    data['new_chat_members'] = User.from_array_list(array.get('new_chat_members'), list_level=1) if array.get('new_chat_members') is not None else None
    data['left_chat_member'] = User.from_array(array.get('left_chat_member')) if array.get('left_chat_member') is not None else None
    data['new_chat_title'] = u(array.get('new_chat_title')) if array.get('new_chat_title') is not None else None
    data['new_chat_photo'] = PhotoSize.from_array_list(array.get('new_chat_photo'), list_level=1) if array.get('new_chat_photo') is not None else None
    data['delete_chat_photo'] = bool(array.get('delete_chat_photo')) if array.get('delete_chat_photo') is not None else None
    data['group_chat_created'] = bool(array.get('group_chat_created')) if array.get('group_chat_created') is not None else None
    data['supergroup_chat_created'] = bool(array.get('supergroup_chat_created')) if array.get('supergroup_chat_created') is not None else None
    data['channel_chat_created'] = bool(array.get('channel_chat_created')) if array.get('channel_chat_created') is not None else None
    data['migrate_to_chat_id'] = int(array.get('migrate_to_chat_id')) if array.get('migrate_to_chat_id') is not None else None
    data['migrate_from_chat_id'] = int(array.get('migrate_from_chat_id')) if array.get('migrate_from_chat_id') is not None else None
    data['pinned_message'] = Message.from_array(array.get('pinned_message')) if array.get('pinned_message') is not None else None
    # Optional payment / passport / website payloads.
    data['invoice'] = Invoice.from_array(array.get('invoice')) if array.get('invoice') is not None else None
    data['successful_payment'] = SuccessfulPayment.from_array(array.get('successful_payment')) if array.get('successful_payment') is not None else None
    data['connected_website'] = u(array.get('connected_website')) if array.get('connected_website') is not None else None
    data['passport_data'] = PassportData.from_array(array.get('passport_data')) if array.get('passport_data') is not None else None
    # Keep the raw dict around for debugging / round-tripping.
    data['_raw'] = array
    return Message(**data)
def debug_option(f):
    """Configures --debug option for CLI

    :param f: Callback Function to be passed to Click
    """
    def callback(ctx, param, value):
        # Persist the flag on the shared Context object so every command
        # in the invocation can see it.
        state = ctx.ensure_object(Context)
        state.debug = value
        return value

    decorator = click.option(
        '--debug',
        expose_value=False,
        is_flag=True,
        envvar="SAM_DEBUG",
        help='Turn on debug logging to print debug message generated by SAM CLI.',
        callback=callback)
    return decorator(f)
def _delete(self, **kwargs):
    """wrapped with delete, override that in a subclass to customize"""
    # Extract transport-level parameters first, while kwargs is intact.
    requests_params = self._handle_requests_params(kwargs)

    delete_uri = self._meta_data['uri']
    session = self._meta_data['bigip']._meta_data['icr_session']

    # Unless force (default True), verify the resource generation matches
    # before deleting.
    force = self._check_force_arg(kwargs.pop('force', True))
    if not force:
        self._check_generation()

    response = session.delete(delete_uri, **requests_params)
    if response.status_code == 200:
        # Wipe all instance state; only the deletion marker remains.
        self.__dict__ = {'deleted': True}
def pop(self, index=-1):
    """Retrieve the value at *index*, remove it from the collection,
    and return it."""
    # Dispatch to the two O(1) end operations; anything else is a
    # middle removal.
    if index == -1:
        return self._pop_right()
    if index == 0:
        return self._pop_left()
    return self._pop_middle(index)
def _quantize_symbol(sym, excluded_symbols=None, offline_params=None,
                     quantized_dtype='int8'):
    """Quantize an FP32 network symbol into an INT8 network.

    Parameters
    ----------
    sym : Symbol
        FP32 neural network symbol.
    excluded_symbols : list of str, optional
        Names of symbols to exclude from quantization.
    offline_params : list of str, optional
        Names of parameters to quantize offline, so quantizing them during
        inference can be avoided.
    quantized_dtype : str
        The quantized destination type for input data.
    """
    if excluded_symbols is None:
        excluded_symbols = []
    else:
        assert isinstance(excluded_symbols, list)
    num_excluded_symbols = len(excluded_symbols)

    # Offline parameter names must be passed to the C API as C strings.
    offline = []
    if offline_params is not None:
        offline = [c_str(param_name) for param_name in offline_params]
    num_offline = len(offline)

    out = SymbolHandle()
    check_call(_LIB.MXQuantizeSymbol(sym.handle,
                                     ctypes.byref(out),
                                     mx_uint(num_excluded_symbols),
                                     c_str_array(excluded_symbols),
                                     mx_uint(num_offline),
                                     c_array(ctypes.c_char_p, offline),
                                     c_str(quantized_dtype),
                                     ctypes.c_bool(True)))
    return Symbol(out)
def handle_annotation_list(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
    """Handle statements like ``DEFINE ANNOTATION X AS LIST {"Y", "Z", ...}``.

    :raises: RedefinedAnnotationError
    """
    name = tokens['name']
    # Refuse to silently overwrite an annotation defined earlier.
    self.raise_for_redefined_annotation(line, position, name)
    # Store the allowed values as a set for O(1) membership checks.
    self.annotation_to_local[name] = set(tokens['values'])
    return tokens
def convert_separable_convolution ( builder , layer , input_names , output_names , keras_layer ) : """Convert separable convolution layer from keras to coreml . Parameters keras _ layer : layer A keras layer object . builder : NeuralNetworkBuilder A neural network builder object ."""
_check_data_format ( keras_layer ) # Get input and output names input_name , output_name = ( input_names [ 0 ] , output_names [ 0 ] ) has_bias = keras_layer . use_bias # Get the weights from _ keras . weight_list = keras_layer . get_weights ( ) output_blob_shape = list ( filter ( None , keras_layer . output_shape ) ) output_channels = output_blob_shape [ - 1 ] # D : depth mutliplier # w [ 0 ] is ( H , W , Cin , D ) # w [ 1 ] is ( 1,1 , Cin * D , Cout ) W0 = weight_list [ 0 ] W1 = weight_list [ 1 ] height , width , input_channels , depth_mult = W0 . shape b = weight_list [ 2 ] if has_bias else None W0 = _np . reshape ( W0 , ( height , width , 1 , input_channels * depth_mult ) ) stride_height , stride_width = keras_layer . strides # Dilations if ( type ( keras_layer . dilation_rate ) is list ) or ( type ( keras_layer . dilation_rate ) is tuple ) : dilations = [ keras_layer . dilation_rate [ 0 ] , keras_layer . dilation_rate [ 1 ] ] else : dilations = [ keras_layer . dilation_rate , keras_layer . dilation_rate ] intermediate_name = output_name + '_intermin_' builder . add_convolution ( name = layer + '_step_1' , kernel_channels = 1 , output_channels = input_channels * depth_mult , height = height , width = width , stride_height = stride_height , stride_width = stride_width , border_mode = keras_layer . padding , groups = input_channels , W = W0 , b = None , has_bias = False , is_deconv = False , output_shape = None , input_name = input_name , output_name = intermediate_name , dilation_factors = dilations ) builder . add_convolution ( name = layer + '_step_2' , kernel_channels = input_channels * depth_mult , output_channels = output_channels , height = 1 , width = 1 , stride_height = 1 , stride_width = 1 , border_mode = keras_layer . padding , groups = 1 , W = W1 , b = b , has_bias = has_bias , is_deconv = False , output_shape = None , input_name = intermediate_name , output_name = output_name , dilation_factors = [ 1 , 1 ] )
def WriteFileHeader(self, arcname=None, compress_type=None, st=None):
    """Writes a file header for a new archive member.

    Resets the per-file counters (size, compressed size, CRC), sets up the
    compressor for this member, and writes the (still incomplete) local
    file header; the CRC and sizes are written later in the data
    descriptor.

    :param arcname: name of the member inside the archive.
    :param compress_type: zipfile compression constant, or None.
    :param st: stat-like object describing the source file, or None.
    :return: bytes accumulated in the underlying stream so far.
    :raises ArchiveAlreadyClosedError: if the archive stream was closed.
    """
    # BUG FIX: the closed-archive guard was duplicated verbatim later in
    # the body; the second (unreachable-in-practice) copy is removed.
    if not self._stream:
        raise ArchiveAlreadyClosedError(
            "Attempting to write to a ZIP archive that was already closed.")

    self.cur_zinfo = self._GenerateZipInfo(
        arcname=arcname, compress_type=compress_type, st=st)
    self.cur_file_size = 0
    self.cur_compress_size = 0

    if self.cur_zinfo.compress_type == zipfile.ZIP_DEFLATED:
        # -15 gives a raw deflate stream (no zlib header), as ZIP requires.
        self.cur_cmpr = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
    else:
        self.cur_cmpr = None

    self.cur_crc = 0
    self.cur_zinfo.header_offset = self._stream.tell()
    # Call _writecheck(self.cur_zinfo) to do sanity checking on the zinfo
    # structure that we've constructed.
    self._zip_fd._writecheck(self.cur_zinfo)  # pylint: disable=protected-access
    # Mark ZipFile as dirty. We have to keep self._zip_fd's internal state
    # coherent so that it behaves correctly when close() is called.
    self._zip_fd._didModify = True  # pylint: disable=protected-access

    # Write FileHeader now. It's incomplete, but CRC and
    # uncompressed/compressed sizes will be written later in the data
    # descriptor.
    self._stream.write(self.cur_zinfo.FileHeader())

    return self._stream.GetValueAndReset()
def add_service(service, zone=None, permanent=True):
    '''Add a service for zone. If zone is omitted, default zone will be used.

    CLI Example:

    .. code-block:: bash

        salt '*' firewalld.add_service ssh

    To assign a service to a specific zone:

    .. code-block:: bash

        salt '*' firewalld.add_service ssh my_zone
    '''
    # Assemble the firewall-cmd arguments, zone first when given.
    parts = []
    if zone:
        parts.append('--zone={0}'.format(zone))
    parts.append('--add-service={0}'.format(service))
    if permanent:
        parts.append('--permanent')
    return __firewall_cmd(' '.join(parts))
def string_to_version(verstring):
    """Return a tuple of (epoch, version, release) from a version string.

    This function replaces rpmUtils.miscutils.stringToVersion, see
    https://bugzilla.redhat.com/1364504

    Accepted shapes: ``VERSION``, ``VERSION-RELEASE``,
    ``EPOCH:VERSION-RELEASE``. The epoch defaults to ``0`` (int) when
    absent and is kept as a string when present, matching the previous
    behaviour for epoch-less input; release is ``None`` when absent.

    BUG FIX: the original indexed ``components[:2][0]``, which with an
    epoch present selected the EPOCH component and split *it* on '-'
    (yielding a wrong version and an IndexError for the release); it also
    raised IndexError whenever no '-RELEASE' suffix was present.
    """
    # Split off an optional epoch prefix ("EPOCH:...") — split at most
    # once so a ':' can never leak into the version.
    components = verstring.split(':', 1)
    if len(components) > 1:
        epoch = components[0]
        rest = components[1]
    else:
        epoch = 0
        rest = components[0]
    # Separate the version from an optional release suffix at the FIRST
    # '-'; everything after it belongs to the release.
    version, _, release = rest.partition('-')
    return (epoch, version, release if release else None)
def transformChildrenFromNative(self, clearBehavior=True):
    """Recursively transform native children to vanilla representations."""
    for siblings in self.contents.values():
        for native_child in siblings:
            # Transform this child, then recurse into its own children.
            transformed = native_child.transformFromNative()
            transformed.transformChildrenFromNative(clearBehavior)
            if clearBehavior:
                # Drop behavior links on the transformed node.
                transformed.behavior = None
                transformed.parentBehavior = None
def predict(self, pairs):
    """Predicts the learned metric between input pairs.

    (For now it just calls decision function.) Returns the learned metric
    value between samples in every pair. It should ideally be low for
    similar samples and high for dissimilar samples.

    Parameters
    ----------
    pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
        3D array of pairs to predict, with each row corresponding to two
        points, or 2D array of indices of pairs if the metric learner
        uses a preprocessor.

    Returns
    -------
    y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,)
        The predicted learned metric value between samples in every pair.
    """
    check_is_fitted(self, ['threshold_', 'transformer_'])
    # Pairs scoring at or below the threshold are labelled similar (+1),
    # the rest dissimilar (-1).
    similar = -self.decision_function(pairs) <= self.threshold_
    return 2 * similar - 1
def _compile_type ( self , schema ) : """Compile type schema : plain type matching"""
# Prepare self self . compiled_type = const . COMPILED_TYPE . TYPE self . name = get_type_name ( schema ) # Error partials err_type = self . Invalid ( _ ( u'Wrong type' ) , self . name ) # Type check function if six . PY2 and schema is basestring : # Relaxed rule for Python2 basestring typecheck = lambda v : isinstance ( v , schema ) else : # Strict type check for everything else typecheck = lambda v : type ( v ) == schema # Matcher if self . matcher : def match_type ( v ) : return typecheck ( v ) , v return match_type # Validator def validate_type ( v ) : # Type check if not typecheck ( v ) : # expected = < type > , provided = < type > raise err_type ( get_type_name ( type ( v ) ) ) # Fine return v return validate_type
def grant_winsta_and_desktop ( th ) : '''Grant the token ' s user access to the current process ' s window station and desktop .'''
current_sid = win32security . GetTokenInformation ( th , win32security . TokenUser ) [ 0 ] # Add permissions for the sid to the current windows station and thread id . # This prevents windows error 0xC0000142. winsta = win32process . GetProcessWindowStation ( ) set_user_perm ( winsta , WINSTA_ALL , current_sid ) desktop = win32service . GetThreadDesktop ( win32api . GetCurrentThreadId ( ) ) set_user_perm ( desktop , DESKTOP_ALL , current_sid )
def _read_track ( chunk ) : """Retuns a list of midi events and tempo change events"""
TEMPO , MIDI = range ( 2 ) # Deviations : The running status should be reset on non midi events , but # some files contain meta events inbetween . # TODO : Offset and time signature are not considered . tempos = [ ] events = [ ] chunk = bytearray ( chunk ) deltasum = 0 status = 0 off = 0 while off < len ( chunk ) : delta , off = _var_int ( chunk , off ) deltasum += delta event_type = chunk [ off ] off += 1 if event_type == 0xFF : meta_type = chunk [ off ] off += 1 num , off = _var_int ( chunk , off ) # TODO : support offset / time signature if meta_type == 0x51 : data = chunk [ off : off + num ] if len ( data ) != 3 : raise SMFError tempo = struct . unpack ( ">I" , b"\x00" + bytes ( data ) ) [ 0 ] tempos . append ( ( deltasum , TEMPO , tempo ) ) off += num elif event_type in ( 0xF0 , 0xF7 ) : val , off = _var_int ( chunk , off ) off += val else : if event_type < 0x80 : # if < 0x80 take the type from the previous midi event off += 1 event_type = status elif event_type < 0xF0 : off += 2 status = event_type else : raise SMFError ( "invalid event" ) if event_type >> 4 in ( 0xD , 0xC ) : off -= 1 events . append ( ( deltasum , MIDI , delta ) ) return events , tempos
def rg ( self ) : """Brazilian RG , return plain numbers . Check : https : / / www . ngmatematica . com / 2014/02 / como - determinar - o - digito - verificador - do . html"""
digits = self . generator . random . sample ( range ( 0 , 9 ) , 8 ) checksum = sum ( i * digits [ i - 2 ] for i in range ( 2 , 10 ) ) last_digit = 11 - ( checksum % 11 ) if last_digit == 10 : digits . append ( 'X' ) elif last_digit == 11 : digits . append ( 0 ) else : digits . append ( last_digit ) return '' . join ( map ( str , digits ) )
def get_margin_requirement(self):
    """Get margin requirements for instrument (futures only).

    :return: dict of margin requirements for the instrument (all values
        are ``None`` for non-futures instruments).
    """
    contract = self.get_contract()
    if contract.m_secType == "FUT":
        return futures.get_ib_futures(contract.m_symbol, contract.m_exchange)
    # Non-futures instruments have no margin data: return the same keys,
    # all mapped to None.
    return dict.fromkeys([
        "exchange", "symbol", "description", "class",
        "intraday_initial", "intraday_maintenance",
        "overnight_initial", "overnight_maintenance",
        "currency",
    ], None)
def count(self):
    """Count the number of rows of the main dataframe.

    On success, reports the row count via ``self.ok`` and returns its
    result; on failure, reports via ``self.err`` and returns ``None``.
    (``ok``/``err`` are the project's reporting helpers -- presumably they
    print/log; confirm against their definitions.)
    """
    try:
        num = len(self.df.index)
    except Exception as e:
        self.err(e, "Can not count data")
        # BUGFIX: without this early return, execution fell through to the
        # self.ok(...) call below and raised NameError on the unbound `num`.
        return
    return self.ok("Found", num, "rows in the dataframe")
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Unjoin a computer from an Active Directory Domain. Requires a restart.

    Args:

        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this
            can be ``None``.

        password (str):
            The password of the specified user

        domain (str):
            The domain from which to unjoin the computer. Can be ``None``.

        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``.

            .. versionadded:: 2015.8.2/2015.5.7

        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``.

        restart (bool):
            ``True`` will restart the computer after successful unjoin.
            Default is ``False``.

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.unjoin_domain restart=True

        salt 'minion-id' system.unjoin_domain username='unjoinuser' \
                         password='unjoinpassword' disable=True \
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    # On Python 2, WMI wants unicode strings, not bytes.
    if six.PY2:
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)

    status = get_domain_workgroup()
    if 'Workgroup' in status:
        # Already in a workgroup (i.e. not domain-joined): nothing to unjoin.
        if status['Workgroup'] == workgroup:
            return 'Already joined to {0}'.format(workgroup)

    # Qualify a bare username with the domain so AD accepts it.
    if username and '\\' not in username and '@' not in username:
        if domain:
            username = '{0}@{1}'.format(username, domain)
        else:
            return 'Must specify domain if not supplied in username'

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # NetSetup API flag: delete/disable the computer account on unjoin.
    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name

    unjoin_options = 0x0
    if disable:
        unjoin_options |= NETSETUP_ACCT_DELETE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)

        # you have to do this because UnjoinDomainOrWorkgroup returns a
        # strangely formatted value that looks like (0,) -- index 0 is the
        # Win32 error code, 0 meaning success.
        if not err[0]:
            # Unjoin succeeded; now place the machine into the workgroup.
            err = comp.JoinDomainOrWorkgroup(Name=workgroup)
            if not err[0]:
                ret = {'Workgroup': workgroup,
                       'Restart': False}
                if restart:
                    ret['Restart'] = reboot()

                return ret
            else:
                log.error(win32api.FormatMessage(err[0]).rstrip())
                log.error('Failed to join the computer to %s', workgroup)
                return False
        else:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to unjoin computer from %s', status['Domain'])
            return False
def dataset_filepath(filename, dataset_name=None, task=None, **kwargs):
    """Get the path of the corresponding dataset file.

    Parameters
    ----------
    filename : str
        The name of the file.
    dataset_name : str, optional
        The name of the dataset; used to define a containing sub-directory.
        If not given, no dataset-specific directory is added.
    task : str, optional
        The task the dataset is used for. If not given, the task-agnostic
        directory is used.
    **kwargs : extra keyword arguments
        Additional dataset attributes; each adds a sub-folder on the path
        (e.g. 'lang=en' -> '.../lang_en/...'), ordered lexicographically by
        keyword name.

    Returns
    -------
    str
        The path to the desired dataset file.
    """
    parent_dir = dataset_dirpath(dataset_name=dataset_name, task=task, **kwargs)
    return os.path.join(parent_dir, filename)
def notblocked(page):
    """Determine if given url is a page that should be in sitemap."""
    # Each entry in PAGES_TO_BLOCK is a glob-ish pattern; ensure a leading
    # '*' so it matches anywhere, then translate '*' to a regex wildcard.
    # ('[^$]*' matches any run of characters except a literal '$' -- kept
    # as-is to preserve the original matching behavior.)
    return not any(
        re.match(
            ('*' + pattern if pattern[0] != '*' else pattern).replace('*', '[^$]*'),
            page,
        )
        for pattern in PAGES_TO_BLOCK
    )
def enable_backups(self):
    """Enable Backups for this Instance.

    When enabled, the Instance's data is automatically backed up so it can
    be restored at a later date. For service details and pricing, see our
    `Backups Page`_.

    .. _Backups Page: https://www.linode.com/backups
    """
    endpoint = "{}/backups/enable".format(Instance.api_endpoint)
    self._client.post(endpoint, model=self)
    # Drop any cached state so the next read reflects the new backup status.
    self.invalidate()
    return True
def coombs_winners(self, profile):
    """Returns an integer list that represents all possible winners of a
    profile under Coombs rule.

    :ivar Profile profile: A Profile object that represents an election
        profile.
    """
    kind = profile.getElecType()
    if kind in ("soc", "csv"):
        return self.coombssoc_winners(profile)
    if kind == "toc":
        return self.coombstoc_winners(profile)
    # Unsupported election type: report and abort (terminates the process).
    print("ERROR: unsupported profile type")
    exit()
def get_all():
    '''
    Return all available boot services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Every script under /etc/init.d/ counts as an available service.
    services = {
        path.split('/etc/init.d/')[1]
        for path in glob.glob('/etc/init.d/*')
    }
    # Drop README. If it's an enabled service, get_enabled() adds it back.
    services.discard('README')
    return sorted(services | set(get_enabled()))
def copy_file(self, filepath):
    """Returns flag which says to copy rather than link a file."""
    try:
        return self.data[filepath]['copy']
    except KeyError:
        # Missing entry or missing 'copy' key: default to linking.
        return False
def close(self):
    '''Releasing hardware resources.'''
    closed_cleanly = True
    try:
        self.dut.close()
    except Exception:
        closed_cleanly = False
    # Log the outcome; a failed close is tolerated, only warned about.
    if closed_cleanly:
        logging.debug('Closed DUT')
    else:
        logging.warning('Closing DUT was not successful')
def cmd_link_attributes(self, args):
    '''change optional link attributes'''
    # First two positional args: the link name and its attribute string.
    link, attributes = args[0], args[1]
    print("Setting link %s attributes (%s)" % (link, attributes))
    self.link_attributes(link, attributes)
def plot_sampler(
    sampler, suptitle=None, labels=None, bins=50,
    plot_samples=False, plot_hist=True, plot_chains=True,
    burn=0, chain_mask=None, temp_idx=0, weights=None, cutoff_weight=None,
    cmap='gray_r', hist_color='k', chain_alpha=0.1,
    points=None, covs=None, colors=None, ci=[0.95],
    max_hist_ticks=None, max_chain_ticks=6,
    label_chain_y=False, hide_chain_yticklabels=False, chain_ytick_pad=2.0,
    label_fontsize=None, ticklabel_fontsize=None, chain_label_fontsize=None,
    chain_ticklabel_fontsize=None, xticklabel_angle=90.0,
    bottom_sep=0.075, suptitle_space=0.1, fixed_height=None, fixed_width=None,
    l=0.1, r=0.9, t1=None, b1=None, t2=0.2, b2=0.1, ax_space=0.1
):
    """Plot the results of an MCMC sampler: a triangle of marginal
    posteriors (1d histograms on the diagonal, 2d histograms below it) and,
    optionally, a bottom row of per-parameter chain traces.

    Loosely based on triangle.py.

    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array
        The sampler to plot. May also be a raw samples array shaped like a
        sampler's `chain` attribute: (`n_temps`, `n_chains`, `n_samp`,
        `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`).
    suptitle : str, optional
        Figure title. Default is no title.
    labels : list of str, optional
        Labels for each free parameter. Default is unlabeled axes.
    bins : int, optional
        Number of histogram bins. Default is 50.
    plot_samples, plot_hist, plot_chains : bool, optional
        Toggle plotting of individual sample points, histograms, and the
        chain-trace row. Defaults: False, True, True.
    burn : int, optional
        Number of samples to discard from the start of each chain before
        histogramming. Default is 0.
    chain_mask : (index) array, optional
        Mask selecting which chains to keep (e.g. to drop bad chains).
    temp_idx : int, optional
        Temperature index used for PTSampler / 4d-array input. Default 0.
    weights : array, optional
        Per-sample weights (e.g. for MultiNest output); shape must match
        the sample layout of `sampler`.
    cutoff_weight : float, optional
        If given together with `weights`, samples with
        ``weights < cutoff_weight * weights.max()`` are excluded.
    cmap, hist_color : str, optional
        Colormap for the 2d histograms / color for the 1d histograms.
    chain_alpha : float, optional
        Transparency for individual chain traces. Default is 0.1.
    points : array, (`D`,) or (`N`, `D`), optional
        Point(s) overplotted onto each marginal and chain.
    covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional
        Covariance matrix (or one per point) drawn as confidence ellipses;
        a `None` entry skips the ellipse for that point.
    colors : array of str, (`N`,), optional
        Colors for `points`. Defaults to the RGBCMYK cycle.
    ci : array, (`num_ci`,), optional
        Confidence intervals to draw per covariance. Default is [0.95].
    max_hist_ticks, max_chain_ticks : int, optional
        Tick-count limits for the histogram / chain axes.
    label_chain_y, hide_chain_yticklabels, chain_ytick_pad : optional
        Chain-axis labeling controls.
    label_fontsize, ticklabel_fontsize, chain_label_fontsize,
    chain_ticklabel_fontsize, xticklabel_angle : optional
        Font-size and rotation controls; default to matplotlib rcParams.
    bottom_sep, suptitle_space, fixed_height, fixed_width,
    l, r, t1, b1, t2, b2, ax_space : float, optional
        Figure geometry (relative figure units / inches). `t1`/`b1`
        override `suptitle_space`/`bottom_sep` when given; when only one of
        `fixed_height`/`fixed_width` is set, the other is chosen to make
        the histogram panels square.

    Returns
    -------
    matplotlib.figure.Figure
        The assembled figure.

    .. note:: This code uses ``xrange`` and ``c_cycle.next()``: it is
       Python 2 only as written. ``normed=True`` in ``hist`` is the
       pre-matplotlib-3 normalization keyword.
    """
    masked_weights = None
    if points is not None:
        points = scipy.atleast_2d(points)
        if covs is not None and len(covs) != len(points):
            raise ValueError(
                "If covariance matrices are provided, len(covs) must equal len(points)!"
            )
        elif covs is None:
            covs = [None,] * len(points)
        if colors is None:
            c_cycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
            colors = [c_cycle.next() for p in points]

    # Create axes:
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain"
        # attribute.
        k = sampler.shape[-1]

    if labels is None:
        labels = [''] * k

    # Set up geometry. We retain support for the original suptitle_space
    # keyword, but can override with t1 as needed:
    if t1 is None:
        t1 = 1 - suptitle_space

    # We retain support for the original bottom_sep keyword, but can override
    # with b1 as needed:
    if b1 is None:
        if plot_chains:
            b1 = t2 + bottom_sep
        else:
            b1 = 0.1

    if fixed_height is None and fixed_width is None:
        # Default: use matplotlib's default width, handle remaining parameters
        # with the fixed width case below:
        fixed_width = matplotlib.rcParams['figure.figsize'][0]
    if fixed_height is None and fixed_width is not None:
        # Only width specified, compute height to yield square histograms:
        fixed_height = fixed_width * (r - l) / (t1 - b1)
    elif fixed_height is not None and fixed_width is None:
        # Only height specified, compute width to yield square histograms:
        fixed_width = fixed_height * (t1 - b1) / (r - l)
    # Otherwise width and height are fixed, and we may not have square
    # histograms, at the user's discretion.

    wspace = ax_space
    hspace = ax_space

    # gs1 is the histograms, gs2 is the chains:
    f = plt.figure(figsize=(fixed_width, fixed_height))
    gs1 = mplgs.GridSpec(k, k)
    gs1.update(bottom=b1, top=t1, left=l, right=r, wspace=wspace, hspace=hspace)
    if plot_chains:
        gs2 = mplgs.GridSpec(1, k)
        gs2.update(bottom=b2, top=t2, left=l, right=r, wspace=wspace, hspace=hspace)

    axes = []
    # j is the row, i is the column. When plot_chains is on, an extra row
    # (j == k) holds the chain traces; above-diagonal cells stay None.
    for j in xrange(0, k + int(plot_chains)):
        row = []
        for i in xrange(0, k):
            if i > j:
                row.append(None)
            else:
                # Share axes so panning/zooming one marginal moves the others:
                sharey = row[-1] if i > 0 and i < j and j < k else None
                sharex = axes[-1][i] if j > i and j < k else (row[-1] if i > 0 and j == k else None)
                gs = gs1[j, i] if j < k else gs2[:, i]
                row.append(f.add_subplot(gs, sharey=sharey, sharex=sharex))
                if j < k and ticklabel_fontsize is not None:
                    row[-1].tick_params(labelsize=ticklabel_fontsize)
                elif j >= k and chain_ticklabel_fontsize is not None:
                    row[-1].tick_params(labelsize=chain_ticklabel_fontsize)
        axes.append(row)
    axes = scipy.asarray(axes)

    # Update axes with the data. Flatten the requested chains/temperature
    # into a (n_samples, k) array, applying burn-in and optional weights:
    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
        if cutoff_weight is not None and weights is not None:
            mask = weights >= cutoff_weight * weights.max()
            flat_trace = flat_trace[mask, :]
            masked_weights = weights[mask]
        else:
            masked_weights = weights
    else:
        raise ValueError("Unknown sampler class: %s" % (type(sampler),))

    # j is the row, i is the column.
    for i in xrange(0, k):
        # Diagonal: univariate marginal of parameter i.
        axes[i, i].clear()
        if plot_hist:
            axes[i, i].hist(flat_trace[:, i], bins=bins, color=hist_color,
                            weights=masked_weights, normed=True,
                            histtype='stepfilled')
        if plot_samples:
            axes[i, i].plot(flat_trace[:, i],
                            scipy.zeros_like(flat_trace[:, i]),
                            ',', alpha=0.1)
        if points is not None:
            # axvline can only take a scalar x, so we have to loop:
            for p, c, cov in zip(points, colors, covs):
                axes[i, i].axvline(x=p[i], linewidth=3, color=c)
                if cov is not None:
                    # Overlay the 1d Gaussian implied by the covariance:
                    xlim = axes[i, i].get_xlim()
                    i_grid = scipy.linspace(xlim[0], xlim[1], 100)
                    axes[i, i].plot(
                        i_grid,
                        scipy.stats.norm.pdf(i_grid, loc=p[i],
                                             scale=scipy.sqrt(cov[i, i])),
                        c, linewidth=3.0
                    )
                    axes[i, i].set_xlim(xlim)
        if i == k - 1:
            axes[i, i].set_xlabel(labels[i], fontsize=label_fontsize)
            plt.setp(axes[i, i].xaxis.get_majorticklabels(),
                     rotation=xticklabel_angle)
        if i < k - 1:
            plt.setp(axes[i, i].get_xticklabels(), visible=False)
        plt.setp(axes[i, i].get_yticklabels(), visible=False)

        for j in xrange(i + 1, k):
            # Below diagonal: joint marginal of parameters (i, j).
            axes[j, i].clear()
            if plot_hist:
                ct, x, y, im = axes[j, i].hist2d(flat_trace[:, i],
                                                 flat_trace[:, j],
                                                 bins=bins, cmap=cmap,
                                                 weights=masked_weights)
            if plot_samples:
                axes[j, i].plot(flat_trace[:, i], flat_trace[:, j],
                                ',', alpha=0.1)
            if points is not None:
                for p, c, cov in zip(points, colors, covs):
                    axes[j, i].plot(p[i], p[j], 'o', color=c)
                    if cov is not None:
                        # Draw confidence ellipse(s) from the 2x2 sub-block of
                        # the covariance: eigen-decompose, scale axes by the
                        # chi-square quantile for each requested CI.
                        Sigma = scipy.asarray(
                            [[cov[i, i], cov[i, j]], [cov[j, i], cov[j, j]]],
                            dtype=float
                        )
                        lam, v = scipy.linalg.eigh(Sigma)
                        chi2 = [-scipy.log(1.0 - cival) * 2.0 for cival in ci]
                        a = [2.0 * scipy.sqrt(chi2val * lam[-1]) for chi2val in chi2]
                        b = [2.0 * scipy.sqrt(chi2val * lam[-2]) for chi2val in chi2]
                        ang = scipy.arctan2(v[1, -1], v[0, -1])
                        for aval, bval in zip(a, b):
                            ell = mplp.Ellipse(
                                [p[i], p[j]], aval, bval,
                                angle=scipy.degrees(ang),
                                facecolor='none', edgecolor=c, linewidth=3
                            )
                            axes[j, i].add_artist(ell)
            if j < k - 1:
                plt.setp(axes[j, i].get_xticklabels(), visible=False)
            if i != 0:
                plt.setp(axes[j, i].get_yticklabels(), visible=False)
            if i == 0:
                axes[j, i].set_ylabel(labels[j], fontsize=label_fontsize)
            if j == k - 1:
                axes[j, i].set_xlabel(labels[i], fontsize=label_fontsize)
                plt.setp(axes[j, i].xaxis.get_majorticklabels(),
                         rotation=xticklabel_angle)

        if plot_chains:
            # Bottom row: raw chain traces for parameter i.
            axes[-1, i].clear()
            if isinstance(sampler, emcee.EnsembleSampler):
                axes[-1, i].plot(sampler.chain[:, :, i].T, alpha=chain_alpha)
            elif isinstance(sampler, emcee.PTSampler):
                axes[-1, i].plot(sampler.chain[temp_idx, :, :, i].T,
                                 alpha=chain_alpha)
            else:
                if sampler.ndim == 4:
                    axes[-1, i].plot(sampler[temp_idx, :, :, i].T,
                                     alpha=chain_alpha)
                elif sampler.ndim == 3:
                    axes[-1, i].plot(sampler[:, :, i].T, alpha=chain_alpha)
                elif sampler.ndim == 2:
                    axes[-1, i].plot(sampler[:, i].T, alpha=chain_alpha)
            # Plot the weights on top of the chains:
            if weights is not None:
                a_wt = axes[-1, i].twinx()
                a_wt.plot(weights, alpha=chain_alpha, linestyle='--', color='r')
                plt.setp(a_wt.yaxis.get_majorticklabels(), visible=False)
                a_wt.yaxis.set_ticks_position('none')
                # Plot the cutoff weight as a horizontal line and the first
                # sample which is included as a vertical bar. Note that this
                # won't be quite the right behavior if the weights are not
                # roughly monotonic.
                if cutoff_weight is not None:
                    a_wt.axhline(cutoff_weight * weights.max(),
                                 linestyle='-', color='r')
                    wi, = scipy.where(weights >= cutoff_weight * weights.max())
                    a_wt.axvline(wi[0], linestyle='-', color='r')
            if burn > 0:
                axes[-1, i].axvline(burn, color='r', linewidth=3)
            if points is not None:
                for p, c in zip(points, colors):
                    axes[-1, i].axhline(y=p[i], linewidth=3, color=c)
            # Reset the xlim since it seems to get messed up:
            axes[-1, i].set_xlim(left=0)
            if label_chain_y:
                axes[-1, i].set_ylabel(labels[i],
                                       fontsize=chain_label_fontsize)
            axes[-1, i].set_xlabel('step', fontsize=chain_label_fontsize)
            plt.setp(axes[-1, i].xaxis.get_majorticklabels(),
                     rotation=xticklabel_angle)
            for tick in axes[-1, i].get_yaxis().get_major_ticks():
                tick.set_pad(chain_ytick_pad)
                tick.label1 = tick._get_text1()

    # Apply the tick-count limits after all data is in place:
    for i in xrange(0, k):
        if max_hist_ticks is not None:
            axes[k - 1, i].xaxis.set_major_locator(
                plt.MaxNLocator(nbins=max_hist_ticks - 1)
            )
            axes[i, 0].yaxis.set_major_locator(
                plt.MaxNLocator(nbins=max_hist_ticks - 1)
            )
        if plot_chains and max_chain_ticks is not None:
            axes[k, i].yaxis.set_major_locator(
                plt.MaxNLocator(nbins=max_chain_ticks - 1)
            )
            axes[k, i].xaxis.set_major_locator(
                plt.MaxNLocator(nbins=max_chain_ticks - 1)
            )
        if plot_chains and hide_chain_yticklabels:
            plt.setp(axes[k, i].get_yticklabels(), visible=False)

    if suptitle is not None:
        f.suptitle(suptitle)
    f.canvas.draw()
    return f
def remove_fact(self, fact_id):
    """Remove a fact from storage by its ID."""
    self.start_transaction()
    # Only delete (and notify listeners) when the fact actually exists.
    if self.__get_fact(fact_id):
        self.__remove_fact(fact_id)
        self.facts_changed()
    self.end_transaction()
def inherited_labels(cls):
    """Return list of labels from nodes class hierarchy.

    :return: list
    """
    labels = []
    for ancestor in cls.mro():
        # hasattr sees inherited attributes, so subclasses of an abstract
        # node are excluded as well.
        if hasattr(ancestor, '__label__') and not hasattr(ancestor, '__abstract_node__'):
            labels.append(ancestor.__label__)
    return labels
def get_translated_file(fapi, file_uri, locale, retrieval_type,
                        include_original_strings, use_cache, cache_dir=None):
    """Returns a translated file from smartling.

    When ``use_cache`` is true, the translation is read from the local cache
    file (keyed by uri/locale/type/flags) if present. When ``use_cache`` is
    false, the file is fetched via ``fapi.get`` and, if a ``cache_dir`` is
    configured and the fetch succeeded, written into the cache.

    Returns the translation text, or ``None`` if nothing was found.
    """
    file_data = None
    cache_name = str(file_uri) + "." + str(locale) + "." + str(retrieval_type) + "." + str(include_original_strings)
    cache_file = os.path.join(cache_dir, sha1(cache_name)) if cache_dir else None
    # BUGFIX: guard on cache_file -- os.path.exists(None) raised TypeError
    # when use_cache was requested without a cache_dir.
    if use_cache and cache_file and os.path.exists(cache_file):
        print("Using cache file %s for %s translation file: %s" % (cache_file, locale, file_uri))
        file_data = read_from_file(cache_file)
    elif not use_cache:
        (file_data, code) = fapi.get(file_uri, locale,
                                     retrievalType=retrieval_type,
                                     includeOriginalStrings=include_original_strings)
        file_data = str(file_data).strip()
        if cache_file and code == 200 and len(file_data) > 0:
            # BUGFIX: log message typo ("Chaching" -> "Caching").
            print("Caching to %s for %s translation file: %s" % (cache_file, locale, file_uri))
            write_to_file(cache_file, file_data)
    # NOTE(review): when use_cache is True but the cache file is missing we
    # fall through and return None instead of fetching from the API --
    # confirm this is the intended cache-miss behavior.
    if not file_data or len(file_data) == 0:
        print("%s translation not found for %s" % (locale, file_uri))
        return None
    return file_data
def insertRow(self, row, item, parent):
    """Insert a single item before the given row in the child items of the
    parent specified.

    :param row: the index where the rows get inserted
    :type row: int
    :param item: the item to insert. When creating the item, make sure
                 its parent is None. If not it will defeat the purpose
                 of this function.
    :type item: :class:`TreeItem`
    :param parent: the parent
    :type parent: :class:`QtCore.QModelIndex`
    :returns: Returns true if the row is inserted; otherwise returns false.
    :rtype: bool
    :raises: None
    """
    item.set_model(self)
    # An invalid index means the item goes under the (invisible) root.
    parentitem = parent.internalPointer() if parent.isValid() else self._root
    self.beginInsertRows(parent, row, row)
    item._parent = parentitem
    if parentitem:
        parentitem.childItems.insert(row, item)
    self.endInsertRows()
    return True
def get_specific_nodes(self, node, names):
    """Given a node and a sequence of strings in `names`, return a
    dictionary containing the names as keys and child `ELEMENT_NODEs`,
    that have a `tagName` equal to the name.
    """
    # Later duplicates overwrite earlier ones, matching dict(pairs).
    return {
        child.tagName: child
        for child in node.childNodes
        if child.nodeType == child.ELEMENT_NODE and child.tagName in names
    }
def remove_access(self, **kwargs):  # noqa: E501
    """Removes the specified ids from the given dashboards' ACL  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_access(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param list[ACL] body:
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async both delegate to the same call; the async_req flag in
    # kwargs makes the delegate return a thread instead of the data.
    return self.remove_access_with_http_info(**kwargs)  # noqa: E501
def universe(self):
    """Data universe available at the current time.

    Universe contains the data passed in when creating a Backtest.
    Use this data to determine strategy logic.
    """
    # Avoid re-windowing on every access: if we're still on the same date
    # as the last check, serve the cached slice.
    if self.now == self._last_chk:
        return self._funiverse
    self._last_chk = self.now
    self._funiverse = self._universe.loc[:self.now]
    return self._funiverse
def get_attribute(self, obj, attribute):
    """Returns single object attribute.

    :param obj: requested object.
    :param attribute: requested attribute to query.
    :returns: returned value.
    :rtype: str
    """
    reply = self.send_command_return(obj, attribute, '?')
    # Strip surrounding double quotes from quoted replies (len > 2 keeps
    # the bare '""' reply untouched, matching the original behavior).
    is_quoted = len(reply) > 2 and reply.startswith('"') and reply.endswith('"')
    return reply[1:-1] if is_quoted else reply
def run_role(self, name, options=None, content=None):
    """Generate a role node.

    options : dict
        key value arguments.
    content : content
        content of the directive.

    Returns
    -------
    node : docutil Node
        Node generated by the arguments.
    """
    if options is None:
        options = {}
    if content is None:
        content = []
    # Resolve the role function for this name in the current language.
    role_fn, _ = role(name, self.language, self.node.line, self.reporter)
    generated, _ = role_fn(
        name,
        rawtext=str(content),
        text=str(content),
        lineno=self.node.line,
        inliner=self.memo.inliner,
        options=options,
        content=content,
    )
    assert len(generated) == 1, 'only support one list in role'
    return generated[0]
def set_topic_partitions(self, *topics):
    """
    Set the topic/partitions to consume.

    Optionally specify offsets to start from.

    Accepts types:

    * str (utf-8): topic name (will consume all available partitions)
    * tuple: (topic, partition)
    * dict:
      - { topic: partition }
      - { topic: [partition list] }
      - { topic: (partition tuple,) }

    Optionally, offsets can be specified directly:

    * tuple: (topic, partition, offset)
    * dict: { (topic, partition): offset, ... }

    Example:

    .. code:: python

        kafka = KafkaConsumer()

        # Consume topic1-all; topic2-partition2; topic3-partition0
        kafka.set_topic_partitions("topic1", ("topic2", 2), {"topic3": 0})

        # Consume topic1-0 starting at offset 12, and topic2-1 at offset 45
        # using tuples --
        kafka.set_topic_partitions(("topic1", 0, 12), ("topic2", 1, 45))

        # using dict --
        kafka.set_topic_partitions({("topic1", 0): 12, ("topic2", 1): 45})
    """
    # Rebuild the subscription from scratch against fresh cluster metadata.
    self._topics = []
    self._client.load_metadata_for_topics()

    # Setup offsets
    self._offsets = OffsetsStruct(fetch=dict(),
                                  commit=dict(),
                                  highwater=dict(),
                                  task_done=dict())

    # Handle different topic types
    for arg in topics:

        # Topic name str -- all partitions
        if isinstance(arg, (six.string_types, six.binary_type)):
            topic = kafka_bytestring(arg)

            for partition in self._client.get_partition_ids_for_topic(topic):
                self._consume_topic_partition(topic, partition)

        # (topic, partition [, offset]) tuple
        elif isinstance(arg, tuple):
            topic = kafka_bytestring(arg[0])
            partition = arg[1]
            self._consume_topic_partition(topic, partition)
            if len(arg) == 3:
                offset = arg[2]
                self._offsets.fetch[(topic, partition)] = offset

        # { topic: partitions, ... } dict
        elif isinstance(arg, dict):
            for key, value in six.iteritems(arg):

                # key can be string (a topic)
                if isinstance(key, (six.string_types, six.binary_type)):
                    topic = kafka_bytestring(key)

                    # topic: partition
                    if isinstance(value, int):
                        self._consume_topic_partition(topic, value)

                    # topic: [ partition1, partition2, ... ]
                    elif isinstance(value, (list, tuple)):
                        for partition in value:
                            self._consume_topic_partition(topic, partition)
                    else:
                        raise KafkaConfigurationError(
                            'Unknown topic type '
                            '(dict key must be int or list/tuple of ints)'
                        )

                # (topic, partition): offset
                elif isinstance(key, tuple):
                    topic = kafka_bytestring(key[0])
                    partition = key[1]
                    self._consume_topic_partition(topic, partition)
                    self._offsets.fetch[(topic, partition)] = value

        else:
            raise KafkaConfigurationError('Unknown topic type (%s)' % type(arg))

    # If we have a consumer group, try to fetch stored offsets
    if self._config['group_id']:
        self._get_commit_offsets()

    # Update missing fetch/commit offsets
    for topic_partition in self._topics:

        # Commit offsets default is None
        if topic_partition not in self._offsets.commit:
            self._offsets.commit[topic_partition] = None

        # Skip if we already have a fetch offset from user args
        if topic_partition not in self._offsets.fetch:

            # Fetch offsets default is (1) commit
            if self._offsets.commit[topic_partition] is not None:
                self._offsets.fetch[topic_partition] = self._offsets.commit[topic_partition]

            # or (2) auto reset
            else:
                self._offsets.fetch[topic_partition] = self._reset_partition_offset(topic_partition)

    # highwater marks (received from server on fetch response)
    # and task_done (set locally by user)
    # should always get initialized to None
    self._reset_highwater_offsets()
    self._reset_task_done_offsets()

    # Reset message iterator in case we were in the middle of one
    self._reset_message_iterator()
def query_starts_with(query, prefixes):
    """Return True if *query*'s first token is one of *prefixes*.

    Comments are stripped via sqlparse and the comparison is
    case-insensitive. An empty (or comment-only) query yields False.
    """
    wanted = {prefix.lower() for prefix in prefixes}
    cleaned = sqlparse.format(query.lower(), strip_comments=True)
    if not cleaned:
        return False
    return cleaned.split()[0] in wanted
def make_splice_junction_df(fn, type='gene'):
    """Read a Gencode GTF file and build a dataframe of splice junctions.

    Parameters
    ----------
    fn : str
        Filename of the Gencode gtf file.
    type : str
        Unused; kept for backward compatibility.
        NOTE(review): this parameter is never read and shadows the builtin.

    Returns
    -------
    df : pandas.DataFrame
        Dataframe of splice junctions indexed by 'chrom:start-end:strand'
        with columns 'gene', 'chrom', 'start', 'end', 'strand',
        'chrom:start', 'chrom:end', 'donor', 'acceptor', 'intron'.
    """
    import itertools as it
    import HTSeq
    import numpy as np
    # GFF_Reader has an option for end_included. However, I think it is
    # backwards. So if your gtf is end-inclusive, you want the default
    # (end_included=False). With this, one will NOT be subtracted from the
    # end coordinate.
    # NOTE(review): this code uses the Python 2-only iterator protocol
    # (gffI.next()); under Python 3 it would need next(gffI).
    gffI = it.islice(HTSeq.GFF_Reader(fn), None)
    juncL = []
    eof = False
    entry = gffI.next()
    # count/last_count appear to be leftover debugging counters; neither
    # affects the returned dataframe.
    count = 1
    last_count = 1
    while not eof:
        if entry.type == 'transcript':
            # Collect all exon entries belonging to this transcript.
            exonL = []
            entry = gffI.next()
            count += 1
            gene = entry.attr['gene_id']
            strand = entry.iv.strand
            while not eof and entry.type != 'transcript':
                if entry.type == 'exon':
                    exonL.append(entry)
                try:
                    entry = gffI.next()
                    count += 1
                except StopIteration:
                    eof = True
            # The gencode gtf file has one based, end inclusive coordinates
            # for exons. HTSeq represents intervals as zero based, end
            # exclusive. We need one-based, end inclusive to compare with
            # STAR output.
            if len(exonL) > 1:
                chrom = exonL[0].iv.chrom
                # On the minus strand, order of exons in gtf file is reversed.
                if strand == '-':
                    exonL.reverse()
                # We take the exclusive end of the exon intervals and add one
                # to make the one-based start of the intron.
                startL = [x.iv.end + 1 for x in exonL[:-1]]
                # The zero-based inclusive start of the exon is the one-based
                # inclusive end of the intron.
                endL = [x.iv.start for x in exonL[1:]]
                for i in range(len(startL)):
                    start = startL[i]
                    end = endL[i]
                    jxn = '{0}:{1}-{2}:{3}'.format(chrom, start, end, strand)
                    chrstart = '{}:{}'.format(chrom, start)
                    chrend = '{}:{}'.format(chrom, end)
                    donor = _gencode_donor(chrom, start, end, strand)
                    acceptor = _gencode_acceptor(chrom, start, end, strand)
                    intron = '{}:{}-{}'.format(chrom, start, end)
                    juncL.append([jxn, gene, chrom, str(start), str(end),
                                  strand, chrstart, chrend, donor, acceptor,
                                  intron])
        else:
            # Skip non-transcript entries until the next transcript record.
            try:
                entry = gffI.next()
                count += 1
            except StopIteration:
                eof = True
        last_count += 1
    header = ['gene', 'chrom', 'start', 'end', 'strand', 'chrom:start',
              'chrom:end', 'donor', 'acceptor', 'intron']
    juncA = np.array(juncL)
    # Column 0 (the junction string) becomes the index; the remaining 10
    # columns line up with `header`.
    df = pd.DataFrame(juncA[:, 1:], index=juncA[:, 0],
                      columns=header).drop_duplicates()
    df['start'] = df.start.astype(int)
    df['end'] = df.end.astype(int)
    return df
def upstream(self, f, n=1):
    """Find the *n* upstream features of the query Feature *f*.

    "Upstream" is determined by the strand of *f*: minus-strand features
    look to the right, all others to the left. Overlapping features are
    not considered.

    f: a Feature object
    n: the number of features to return
    """
    return self.right(f, n) if f.strand == -1 else self.left(f, n)
def combine_hex(data):
    '''Combine list of integer values to one big integer.

    The first element ends up in the most significant byte position.
    An empty list yields 0.
    '''
    result = 0
    for byte_value in data:
        result = (result << 8) | byte_value
    return result
def getfield(self, pkt, s):
    """Dissect the decrypted payload of a TLS record into a message list.

    If the record's ciphers are ready (session keys known), the cleartext
    is parsed in a loop so that several messages packed into one record
    (e.g. server_hello + certificate + server_key_exchange +
    server_hello_done) are all recovered. Each parsed message may update
    the TLS context through its post_dissection_tls_session_update()
    method.

    If decryption failed with a CipherError (presumably because the
    session keys were missed), the ciphered bytes are returned wrapped in
    a _TLSEncryptedContent packet instead.
    """
    tmp_len = self.length_from(pkt)
    lst = []
    ret = b""
    remain = s
    if tmp_len is not None:
        # Split the record payload from any trailing bytes of the stream.
        remain, ret = s[:tmp_len], s[tmp_len:]
    if remain == b"":
        # Empty payload: application_data records (type 23) may legitimately
        # be empty on TLS >= 1.0; anything else becomes an empty Raw layer.
        # NOTE(review): tls_version falls back to 0x0303 (TLS 1.2) when the
        # session has not negotiated one yet.
        if (((pkt.tls_session.tls_version or 0x0303) > 0x0200) and
                hasattr(pkt, "type") and pkt.type == 23):
            return ret, [TLSApplicationData(data=b"")]
        else:
            return ret, [Raw(load=b"")]
    if False in six.itervalues(pkt.tls_session.rcs.cipher.ready):
        # Some cipher attribute is not ready: decryption failed, so signal
        # it by handing the ciphertext back untouched.
        return ret, _TLSEncryptedContent(remain)
    else:
        while remain:
            raw_msg = remain
            p = self.m2i(pkt, remain)
            if Padding in p:
                # Bytes left over after one message feed the next iteration;
                # trim raw_msg so the session update only sees the bytes of
                # the message just parsed.
                pad = p[Padding]
                remain = pad.load
                del (pad.underlayer.payload)
                if len(remain) != 0:
                    raw_msg = raw_msg[:-len(remain)]
            else:
                remain = b""
            if isinstance(p, _GenericTLSSessionInheritance):
                # Let the message mutate the session state unless frozen.
                if not p.tls_session.frozen:
                    p.post_dissection_tls_session_update(raw_msg)
            lst.append(p)
        return remain + ret, lst
def process_app_config_section(config, app_config):
    """Processes the app section from a configuration data dict.

    Each recognized key in *app_config* is copied into the ``config.app``
    dict; unknown keys are ignored.

    :param config: The config reference of the object that will hold the
        configuration data from the config_data.
    :param app_config: App section from a config data dict.
    """
    if 'addresses' in app_config:
        config.app['addresses'] = app_config['addresses']
    if 'component' in app_config:
        config.app['component'] = app_config['component']
    if 'data' in app_config:
        if 'sources' in app_config['data']:
            config.app['data']['sources'] = app_config['data']['sources']
    if 'id' in app_config:
        config.app['id'] = app_config['id']
    if 'login' in app_config:
        if 'urls' in app_config['login']:
            for url in app_config['login']['urls']:
                config.app['login']['urls'][url['name']] = url['value']
    if 'pythonpath' in app_config:
        config.app['pythonpath'] = app_config['pythonpath']
    if 'port' in app_config:
        config.app['port'] = app_config['port']
    if 'process' in app_config:
        if 'num_processes' in app_config['process']:
            config.app['process']['num_processes'] = (
                app_config['process']['num_processes'])
    if 'url_root_path' in app_config:
        root_url = app_config['url_root_path'].strip()
        # Bug fix: indexing root_url[0] raised IndexError when the value
        # stripped down to an empty string; startswith handles that safely.
        if root_url.startswith("/"):
            root_url = root_url[1:]
        # An empty root path is normalized to None.
        if root_url == "":
            root_url = None
        config.app['url_root_path'] = root_url
    if 'settings' in app_config:
        config.app['settings'] = app_config['settings']
    if 'socket' in app_config:
        config.app['socket'] = app_config['socket']
    if 'static_path' in app_config:
        config.app['static_path'] = app_config['static_path']
    if 'static_url_prefix' in app_config:
        config.app['static_url_prefix'] = app_config['static_url_prefix']
    if 'type' in app_config:
        config.app['type'] = app_config['type']
    if 'types' in app_config:
        for app_type in app_config['types']:
            # Resolve the launcher reference into a config structure.
            app_type['launcher'] = get_config_from_package(
                app_type['launcher'])
            config.app['types'][app_type['name']] = app_type
    if 'wait_before_shutdown' in app_config:
        config.app['wait_before_shutdown'] = app_config['wait_before_shutdown']
def getAllReadGroups(self):
    """Yield the id of every read group across all datasets."""
    for dataset in self.getAllDatasets():
        summaries = self._client.search_read_group_sets(dataset_id=dataset.id)
        for summary in summaries:
            # Re-fetch the read group set to get its full representation.
            full_set = self._client.get_read_group_set(summary.id)
            for read_group in full_set.read_groups:
                yield read_group.id
def make_driveritem_deviceitem_devicename(device_name, condition='is',
                                          negate=False, preserve_case=False):
    """Create a node for DriverItem/DeviceItem/DeviceName.

    :return: A IndicatorItem represented as an Element node
    """
    return ioc_api.make_indicatoritem_node(
        condition,
        'DriverItem',
        'DriverItem/DeviceItem/DeviceName',
        'string',
        device_name,
        negate=negate,
        preserve_case=preserve_case,
    )
def _setup_schema(Base, session):
    """Create a function which adds `__marshmallow__` attribute to all
    signal models."""

    def build_schema_class(model_class):
        # Signal models should not use the SQLAlchemy session, so their
        # Meta omits sqla_session.
        if hasattr(model_class, 'send_signalbus_message'):
            meta = type('Meta', (object,), {'model': model_class})
        else:
            meta = type('Meta', (object,),
                        {'model': model_class, 'sqla_session': session})
        return type('%sSchema' % model_class.__name__, (ModelSchema,),
                    {'Meta': meta})

    def setup_schema_fn():
        for model in Base._decl_class_registry.values():
            if not hasattr(model, '__tablename__'):
                continue
            if model.__name__.endswith("Schema"):
                raise ModelConversionError(
                    'Unexpected model name: "{}". '
                    'For safety, _setup_schema() can not be used when a '
                    'model class ends with "Schema".'.format(model.__name__))
            schema_class = getattr(model, '__marshmallow__', None)
            if schema_class is None:
                schema_class = model.__marshmallow__ = build_schema_class(model)
            if hasattr(model, 'send_signalbus_message'):
                setattr(model, '__marshmallow_schema__', schema_class())

    return setup_schema_fn
def create_job(self, phases, name=None, input=None):
    """CreateJob
    https://apidocs.joyent.com/manta/api.html#CreateJob
    """
    log.debug('CreateJob')
    body = {"phases": phases}
    if name:
        body["name"] = name
    if input:
        body["input"] = input
    res, content = self._request(
        '/%s/jobs' % self.account,
        "POST",
        body=json.dumps(body),
        headers={"Content-Type": "application/json"})
    if res["status"] != '201':
        raise errors.MantaAPIError(res, content)
    # The job id is the last path component of the Location header.
    assert res["location"]
    return res["location"].rsplit('/', 1)[-1]
def remap_file_lines(from_path, to_path, line_map_list):
    """Adds line_map list to the list of association of from_file to
    to_file."""
    from_path = pyc2py(from_path)
    cache_file(to_path)
    existing = file2file_remap_lines.get(to_path)
    if existing:
        # Merge the new pairs with the ones already registered.
        combined = list(existing.from_to_pairs) + list(line_map_list)
    else:
        combined = line_map_list
    # FIXME: look for duplicates?
    file2file_remap_lines[to_path] = RemapLineEntry(
        from_path,
        tuple(sorted(combined, key=lambda pair: pair[0])))
def write_tile_if_changed(store, tile_data, coord, format):
    """Only write tile data if different from existing.

    Try to read the tile data from the store first. If the existing data
    matches, don't write.

    Returns whether the tile was written.
    """
    existing = store.read_tile(coord, format)
    if existing and tiles_are_equal(existing, tile_data, format):
        return False
    store.write_tile(tile_data, coord, format)
    return True
def from_json(cls, json_info):
    """Build a Result instance from a json string.

    Returns None when *json_info* is None. 'trial_id' and
    'timesteps_total' are required keys; the rest default to None.
    """
    if json_info is None:
        return None
    get = json_info.get
    # NOTE(review): the 'trainning_iteration' keyword spelling matches the
    # ResultRecord field it populates — do not "fix" it here alone.
    return ResultRecord(
        trial_id=json_info["trial_id"],
        timesteps_total=json_info["timesteps_total"],
        done=get("done", None),
        episode_reward_mean=get("episode_reward_mean", None),
        mean_accuracy=get("mean_accuracy", None),
        mean_loss=get("mean_loss", None),
        trainning_iteration=get("training_iteration", None),
        timesteps_this_iter=get("timesteps_this_iter", None),
        time_this_iter_s=get("time_this_iter_s", None),
        time_total_s=get("time_total_s", None),
        date=get("date", None),
        hostname=get("hostname", None),
        node_ip=get("node_ip", None),
        config=get("config", None),
    )
def connect ( self , cback , subscribers = None , instance = None ) : """Add a function or a method as an handler of this signal . Any handler added can be a coroutine . : param cback : the callback ( or * handler * ) to be added to the set : returns : ` ` None ` ` or the value returned by the corresponding wrapper"""
if subscribers is None : subscribers = self . subscribers # wrapper if self . _fconnect is not None : def _connect ( cback ) : self . _connect ( subscribers , cback ) notify = partial ( self . _notify_one , instance ) if instance is not None : result = self . _fconnect ( instance , cback , subscribers , _connect , notify ) else : result = self . _fconnect ( cback , subscribers , _connect , notify ) if inspect . isawaitable ( result ) : result = pull_result ( result ) else : self . _connect ( subscribers , cback ) result = None return result
def _evaluate ( self , R , phi = 0. , t = 0. ) : """NAME : _ evaluate PURPOSE : evaluate the potential at R , phi , t INPUT : R - Galactocentric cylindrical radius phi - azimuth t - time OUTPUT : Phi ( R , phi , t ) HISTORY : 2011-03-27 - Started - Bovy ( NYU )"""
return self . _A * math . exp ( - ( t - self . _to ) ** 2. / 2. / self . _sigma2 ) / self . _alpha * math . cos ( self . _alpha * math . log ( R ) - self . _m * ( phi - self . _omegas * t - self . _gamma ) )
def add_slot_ports(self, slot):
    """Add the ports to be added for a adapter card.

    :param str slot: Slot name (the 5th character is the slot number,
        e.g. 'slot1')
    """
    slot_nb = int(slot[4])
    slot_adapter = self.node['properties'][slot]
    adapter_info = ADAPTER_MATRIX[slot_adapter]
    prefix = PORT_TYPES[adapter_info['type']]
    new_ports = []
    for port_number in range(adapter_info['ports']):
        new_ports.append({
            'name': '%s%s/%s' % (prefix, slot_nb, port_number),
            'id': self.port_id,
            'port_number': port_number,
            'slot_number': slot_nb,
        })
        # Port ids are globally sequential across slots.
        self.port_id += 1
    self.node['ports'].extend(new_ports)
def populate_from_trace_graph ( self , graph : TraceGraph ) -> None : """Populates this graph from the given one based on affected _ files"""
# Track which trace frames have been visited as we populate the full # traces of the graph . self . _visited_trace_frame_ids : Set [ int ] = set ( ) self . _populate_affected_issues ( graph ) if not self . _affected_issues_only : # Finds issues from the conditions and saves them . # Also saves traces that have been trimmed to the affected # conditions . self . _populate_issues_from_affected_trace_frames ( graph ) # Traces populated above may be missing all traces because # _ populate _ issues _ from _ affected _ trace _ frames only populates # traces that reach the affected conditions in one direction . We # may need to populate traces in other directions too . # For example : # Issue _ x reaches affected _ file _ x via postcondition _ x ( forward # trace , i . e . trace leading to source ) . None of its backward # traces ( leading to sinks ) reach the affected files . # _ populate _ issues _ from _ affected _ trace _ frames would have copied its # forward traces and trimmed it to those reaching postcondition _ x . # We cannot blindly populate all forward traces in this case as # branches not leading to postcondition _ x are unnecessary . # However , in this specific example , all backward traces are needed # to give a complete picture of which sinks the issue reaches . # The following ensures that . for instance_id in self . _issue_instances . keys ( ) : first_hop_ids = self . _issue_instance_trace_frame_assoc [ instance_id ] fwd_trace_ids = { tf_id for tf_id in first_hop_ids if self . _trace_frames [ tf_id ] . kind == TraceKind . POSTCONDITION } bwd_trace_ids = { tf_id for tf_id in first_hop_ids if self . _trace_frames [ tf_id ] . kind == TraceKind . PRECONDITION } if len ( fwd_trace_ids ) == 0 : self . _populate_issue_trace ( graph , instance_id , TraceKind . POSTCONDITION ) if len ( bwd_trace_ids ) == 0 : self . _populate_issue_trace ( graph , instance_id , TraceKind . PRECONDITION )
def update_properties(self, new_properties):
    """Update config properties values.

    Property name must be equal to 'Section_option' of config property.

    :param new_properties: dict with new properties values
    """
    # A plain loop instead of a throwaway list comprehension:
    # _update_property_from_dict is called only for its side effects,
    # so building (and discarding) a list of None results was wasteful
    # and unidiomatic.
    for section in self.sections():
        for option in self.options(section):
            self._update_property_from_dict(section, option, new_properties)
def signed_token_generator(private_pem, **kwargs):
    """Create a token generator that signs tokens with *private_pem*.

    :param private_pem: private key (PEM) handed to
        ``common.generate_signed_token`` for signing.
    :param kwargs: extra claims attached to each request before signing.
    :return: a callable taking a ``request`` and returning a signed token.
    """
    # NOTE(review): the inner function intentionally(?) shares the outer
    # name; renaming it would change the returned function's __name__.
    def signed_token_generator(request):
        # Attach the fixed claims to this request, then delegate signing.
        request.claims = kwargs
        return common.generate_signed_token(private_pem, request)

    return signed_token_generator
def get_blob_hash(self, h=hashlib.md5):
    """Get hash instance of blob content.

    :param h: callable hash generator (e.g. ``hashlib.md5``)
    :type h: builtin_function_or_method
    :rtype: _hashlib.HASH
    :return: hash instance
    """
    assert callable(h)
    blob = self.get_blob_data()
    return h(blob)
def role_update(self, role_id, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/chat/roles#update-role"
    # PUT the new role definition to the role's endpoint.
    endpoint = "/api/v2/roles/{role_id}".format(role_id=role_id)
    return self.call(endpoint, method="PUT", data=data, **kwargs)
def _validate_monotonic ( self ) : """Validate on is _ monotonic ."""
if not self . _on . is_monotonic : formatted = self . on or 'index' raise ValueError ( "{0} must be " "monotonic" . format ( formatted ) )
def _normalize_subplot_ids ( fig ) : """Make sure a layout subplot property is initialized for every subplot that is referenced by a trace in the figure . For example , if a figure contains a ` scatterpolar ` trace with the ` subplot ` property set to ` polar3 ` , this function will make sure the figure ' s layout has a ` polar3 ` property , and will initialize it to an empty dict if it does not Note : This function mutates the input figure dict Parameters fig : dict A plotly figure dict"""
layout = fig . setdefault ( 'layout' , { } ) for trace in fig . get ( 'data' , None ) : trace_type = trace . get ( 'type' , 'scatter' ) subplot_types = _trace_to_subplot . get ( trace_type , [ ] ) for subplot_type in subplot_types : subplot_prop_name = _get_subplot_prop_name ( subplot_type ) subplot_val_prefix = _get_subplot_val_prefix ( subplot_type ) subplot_val = trace . get ( subplot_prop_name , subplot_val_prefix ) # extract trailing number ( if any ) subplot_number = _get_subplot_number ( subplot_val ) if subplot_number > 1 : layout_prop_name = subplot_type + str ( subplot_number ) else : layout_prop_name = subplot_type if layout_prop_name not in layout : layout [ layout_prop_name ] = { }
def items(self):
    """Returns a depth-first flat list of all items in the document"""
    collected = []
    for element in self.data:
        collected.extend(element.items())
    return collected
def field_values(self):
    """Access the field_values (lazily built and cached).

    :returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList
    """
    cached = self._field_values
    if cached is None:
        cached = FieldValueList(
            self._version,
            assistant_sid=self._solution['assistant_sid'],
            field_type_sid=self._solution['sid'],
        )
        self._field_values = cached
    return cached
def construct_url(self):
    """Construct a full plex request URI, with `params`."""
    segments = [self.path] + [str(p) for p in self.params]
    # Empty segments are dropped so no double slashes appear.
    url = self.client.base_url + '/'.join(s for s in segments if s)
    query = self.kwargs.get('query')
    if query:
        # Dict -> list of pairs (exact-type check kept deliberately).
        if type(query) is dict:
            query = query.items()
        # Drop pairs whose value is None, then encode.
        filtered = [(key, value) for (key, value) in query
                    if value is not None]
        url += '?' + urlencode(filtered)
    return url
def _html_image(page):
    """Returns an HTML img tag for the page's image, or None if the page
    has no image."""
    source = _image(page)
    if not source:
        return
    alt = page.data.get('label') or page.data.get('title')
    parts = [
        "<img src=\"%s\"" % source,
        " alt=\"%s\" title=\"%s\" " % (alt, alt),
        "align=\"right\" width=\"240\">",
    ]
    return "".join(parts)
def has_item(self, hash_key, range_key=None, consistent_read=False):
    """Check whether the Item with the given ``hash_key`` (and optional
    ``range_key``) exists in the table.

    Saves a little bandwidth over a plain :py:meth:`get_item` when the
    data itself is not needed, since only the Item's key is requested.

    :type hash_key: int|long|float|str|unicode
    :param hash_key: The HashKey of the requested item; type must match
        the table schema.
    :type range_key: int|long|float|str|unicode
    :param range_key: Optional RangeKey of the requested item.
    :type consistent_read: bool
    :param consistent_read: If True, issue a consistent read; otherwise
        an eventually consistent one.
    :rtype: bool
    :returns: ``True`` if the Item exists, ``False`` if not.
    """
    try:
        self.get_item(hash_key, range_key=range_key,
                      # This minimizes the size of the response body.
                      attributes_to_get=[hash_key],
                      consistent_read=consistent_read)
    except dynamodb_exceptions.DynamoDBKeyNotFoundError:
        # Key doesn't exist.
        return False
    return True
def configure(name, host, port, auth, current):
    '''Configure is used to add various ES ports you are working on.

    The user can add as many es ports as the one wants, but one will
    remain active at one point.
    '''
    # NOTE(review): accesses the module-global `filename` for the config
    # path and `change_current()` to demote the previous active entry —
    # both must be defined elsewhere in this module.
    # NOTE(review): `ConfigParser.ConfigParser()` is the Python 2 module
    # naming; under Python 3 the module is `configparser`.
    Config = ConfigParser.ConfigParser()
    # Make sure the config directory exists before writing.
    if not os.path.exists(os.path.dirname(filename)):
        try:
            os.makedirs(os.path.dirname(filename))
        except Exception as e:
            click.echo(e)
            return
    section_name = None
    if (current.lower() == 'y'):
        # This entry becomes the active one.
        section_name = 'Current'
        change_current()
    else:
        section_name = name.capitalize()
    # NOTE(review): the file is opened in append mode, so re-configuring
    # the same section writes a duplicate section to the file.
    cfgfile = open(filename, 'a')
    Config.add_section(section_name)
    Config.set(section_name, 'host', host)
    Config.set(section_name, 'port', port)
    Config.set(section_name, 'auth', auth)
    Config.set(section_name, 'name', name.capitalize())
    Config.write(cfgfile)
    cfgfile.close()
def read_rows ( self , read_position , retry = google . api_core . gapic_v1 . method . DEFAULT , timeout = google . api_core . gapic_v1 . method . DEFAULT , metadata = None , ) : """Reads rows from the table in the format prescribed by the read session . Each response contains one or more table rows , up to a maximum of 10 MiB per response ; read requests which attempt to read individual rows larger than this will fail . Each request also returns a set of stream statistics reflecting the estimated total number of rows in the read stream . This number is computed based on the total table size and the number of active streams in the read session , and may change as other streams continue to read data . Example : > > > from google . cloud import bigquery _ storage _ v1beta1 > > > client = bigquery _ storage _ v1beta1 . BigQueryStorageClient ( ) > > > # TODO : Initialize ` read _ position ` : > > > read _ position = { } > > > for element in client . read _ rows ( read _ position ) : . . . # process element . . . pass Args : read _ position ( Union [ dict , ~ google . cloud . bigquery _ storage _ v1beta1 . types . StreamPosition ] ) : Required . Identifier of the position in the stream to start reading from . The offset requested must be less than the last row read from ReadRows . Requesting a larger offset is undefined . If a dict is provided , it must be of the same form as the protobuf message : class : ` ~ google . cloud . bigquery _ storage _ v1beta1 . types . StreamPosition ` retry ( Optional [ google . api _ core . retry . Retry ] ) : A retry object used to retry requests . If ` ` None ` ` is specified , requests will not be retried . timeout ( Optional [ float ] ) : The amount of time , in seconds , to wait for the request to complete . Note that if ` ` retry ` ` is specified , the timeout applies to each individual attempt . metadata ( Optional [ Sequence [ Tuple [ str , str ] ] ] ) : Additional metadata that is provided to the method . 
Returns : Iterable [ ~ google . cloud . bigquery _ storage _ v1beta1 . types . ReadRowsResponse ] . Raises : google . api _ core . exceptions . GoogleAPICallError : If the request failed for any reason . google . api _ core . exceptions . RetryError : If the request failed due to a retryable error and retry attempts failed . ValueError : If the parameters are invalid ."""
# Wrap the transport method to add retry and timeout logic . if "read_rows" not in self . _inner_api_calls : self . _inner_api_calls [ "read_rows" ] = google . api_core . gapic_v1 . method . wrap_method ( self . transport . read_rows , default_retry = self . _method_configs [ "ReadRows" ] . retry , default_timeout = self . _method_configs [ "ReadRows" ] . timeout , client_info = self . _client_info , ) request = storage_pb2 . ReadRowsRequest ( read_position = read_position ) if metadata is None : metadata = [ ] metadata = list ( metadata ) try : routing_header = [ ( "read_position.stream.name" , read_position . stream . name ) ] except AttributeError : pass else : routing_metadata = google . api_core . gapic_v1 . routing_header . to_grpc_metadata ( # pragma : no cover routing_header ) metadata . append ( routing_metadata ) # pragma : no cover return self . _inner_api_calls [ "read_rows" ] ( request , retry = retry , timeout = timeout , metadata = metadata )
def _construct_jbor ( job_id , field_name_and_maybe_index ) : ''': param job _ id : Job ID : type job _ id : string : param field _ name _ and _ maybe _ index : Field name , plus possibly " . N " where N is an array index : type field _ name _ and _ maybe _ index : string : returns : dict of JBOR'''
link = { "$dnanexus_link" : { "job" : job_id } } if '.' in field_name_and_maybe_index : split_by_dot = field_name_and_maybe_index . rsplit ( '.' , 1 ) link [ "$dnanexus_link" ] [ "field" ] = split_by_dot [ 0 ] link [ "$dnanexus_link" ] [ "index" ] = int ( split_by_dot [ 1 ] ) else : link [ "$dnanexus_link" ] [ "field" ] = field_name_and_maybe_index return link
async def _send_sack(self):
    """Build and send a selective acknowledgement (SACK) chunk."""
    # Compute gap-ack blocks from the misordered TSNs: each block is a
    # [start, end] pair of offsets relative to the cumulative TSN, and
    # consecutive TSNs are merged into one block.
    gaps = []
    gap_next = None
    for tsn in sorted(self._sack_misordered):
        pos = (tsn - self._last_received_tsn) % SCTP_TSN_MODULO
        if tsn == gap_next:
            # Contiguous with the previous TSN: extend the current block.
            gaps[-1][1] = pos
        else:
            gaps.append([pos, pos])
        gap_next = tsn_plus_one(tsn)
    sack = SackChunk()
    sack.cumulative_tsn = self._last_received_tsn
    # The advertised receive window is clamped to be non-negative.
    sack.advertised_rwnd = max(0, self._advertised_rwnd)
    # Copy the duplicates so clearing the list below can't affect the chunk.
    sack.duplicates = self._sack_duplicates[:]
    sack.gaps = [tuple(x) for x in gaps]
    await self._send_chunk(sack)
    # Reset SACK bookkeeping now that the acknowledgement has gone out.
    self._sack_duplicates.clear()
    self._sack_needed = False
def focusOutEvent(self, event):
    """Overloads the focus out event to cancel editing when the widget
    loses focus.

    :param event | <QFocusEvent>
    """
    # Let the base line-edit handle the focus change first, then abort
    # any in-progress edit so the widget doesn't stay in edit mode.
    super(XNavigationEdit, self).focusOutEvent(event)
    self.cancelEdit()
def setImportDataInterface(self, values):
    """Set the import data interfaces, keeping only recognized interface
    ids from *values*."""
    available = self.getImportDataInterfacesList()
    accepted = [value for value in values if value in available]
    if len(accepted) < len(values):
        # Some requested interfaces were not in the available list.
        logger.warn("Some Interfaces weren't added...")
    self.Schema().getField('ImportDataInterface').set(self, accepted)
def post(self, path='', retry=0, **data):
    """Post an item to the Graph API.

    :param path: A string describing the path to the item.
    :param retry: An integer describing how many times the request may
        be retried.
    :param data: Graph API parameters such as 'message' or 'source'.
        See `Facebook's Graph API documentation
        <http://developers.facebook.com/docs/reference/api/>`_ for an
        exhaustive list of options.
    """
    result = self._query(method='POST', path=path, data=data, retry=retry)
    if result is not False:
        return result
    raise FacebookError('Could not post to "%s"' % path)
def Parse(self, stat, file_object, knowledge_base):
    """Parse the sshd configuration.

    Assembles an sshd_config file into configuration results keyed by
    the configuration keyword, with the settings as value(s).

    Args:
        stat: unused
        file_object: An open configuration file object.
        knowledge_base: unused

    Yields:
        The configuration as an rdfvalue.
    """
    _, _ = stat, knowledge_base
    # Clean out any residual state.
    self._field_parser.Flush()
    contents = utils.ReadFileBytesAsUnicode(file_object)
    for raw_line in contents.splitlines():
        # Remove comments (will break if a quoted/escaped '#' appears).
        stripped = raw_line.strip().split("#")[0].strip()
        if stripped:
            self._field_parser.ParseLine(stripped)
    for result in self._field_parser.GenerateResults():
        yield result
def tile_to_quadkey(tile, level):
    """Transform tile coordinates to a quadkey.

    For each zoom level (most significant first) one digit 0-3 is
    emitted, encoding the x bit (+1) and y bit (+2) of the tile
    coordinates, per the Bing Maps tile-system scheme.

    :param tile: (x, y) tile coordinates.
    :param level: zoom level; the returned quadkey has `level` digits.
    :return: quadkey string.
    """
    tile_x, tile_y = tile
    digits = []
    # Bug fixes: Python 2-only `xrange` replaced with `range`, and the
    # `(value & mask) is not 0` identity comparisons (which relied on
    # CPython small-int caching and are a SyntaxWarning on 3.8+) replaced
    # with plain truthiness tests.
    for bit in range(level, 0, -1):
        digit = 0
        mask = 1 << (bit - 1)
        if tile_x & mask:
            digit += 1
        if tile_y & mask:
            digit += 2
        digits.append(str(digit))
    return "".join(digits)
def upTo(self, key):
    """Returns the urn up to given level using URN Constants.

    :param key: Identifier of the wished resource using URN constants
    :type key: int
    :returns: String representation of the partial URN requested
    :rtype: str
    :raises KeyError: if *key* is unrecognized, or the requested
        component is not present in this URN (each branch below also
        checks that its component was parsed; a falsy component falls
        through the chain to the final ``raise``).

    :Example:
        >>> a = URN(urn="urn:cts:latinLit:phi1294.phi002.perseus-lat2:1.1")
        >>> a.upTo(URN.TEXTGROUP) == "urn:cts:latinLit:phi1294"
    """
    # textgroup.work.version joined by '.' with missing parts dropped.
    middle = [
        component
        for component in [
            self.__parsed["textgroup"],
            self.__parsed["work"],
            self.__parsed["version"],
        ]
        if component is not None
    ]
    if key == URN.COMPLETE:
        return self.__str__()
    elif key == URN.NAMESPACE:
        return ":".join([
            "urn", self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"]])
    elif key == URN.TEXTGROUP and self.__parsed["textgroup"]:
        return ":".join([
            "urn", self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            self.__parsed["textgroup"]])
    elif key == URN.WORK and self.__parsed["work"]:
        return ":".join([
            "urn", self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join([self.__parsed["textgroup"], self.__parsed["work"]])])
    elif key == URN.VERSION and self.__parsed["version"]:
        return ":".join([
            "urn", self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join(middle)])
    elif key == URN.NO_PASSAGE and self.__parsed["work"]:
        # Same shape as VERSION, but only requires a work to be present.
        return ":".join([
            "urn", self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join(middle)])
    elif key == URN.PASSAGE and self.__parsed["reference"]:
        return ":".join([
            "urn", self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join(middle),
            str(self.reference)])
    elif key == URN.PASSAGE_START and self.__parsed["reference"]:
        return ":".join([
            "urn", self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join(middle),
            str(self.reference.start)])
    elif key == URN.PASSAGE_END and self.__parsed["reference"] \
            and self.reference.end is not None:
        return ":".join([
            "urn", self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join(middle),
            str(self.reference.end)])
    else:
        raise KeyError("Provided key is not recognized.")
def post(self, resource):
    """Creates a new instance of the resource.

    Args:
        resource - gophish.models.Model - The resource instance

    Raises:
        Error: when the API response indicates failure
    """
    resp = self.api.execute("POST", self.endpoint, json=(resource.as_dict()))
    if resp.ok:
        return self._cls.parse(resp.json())
    raise Error.parse(resp.json())
def generate_barcodes(nIds, codeLen=12):
    """Generate ``nIds`` unique DNA barcodes of length ``codeLen``.

    Each barcode is a random string over A/T/C/G containing no homopolymer
    run of four or more identical bases. Note that only 4**codeLen unique
    barcodes are possible (fewer once the homopolymer filter is applied).

    Bug fix vs. the previous implementation: codes were deduplicated only
    after generation (``list(set(codes))``), so the function could return
    FEWER than ``nIds`` barcodes, and the periodic reseed path skipped the
    homopolymer filter. Generating into a set guarantees exactly ``nIds``
    unique, filtered codes.

    :param nIds: number of barcodes required (one per sample ID)
    :param codeLen: barcode length in bases (default 12)
    :returns: list of ``nIds`` unique barcode strings
    :raises ValueError: if ``nIds`` exceeds 4**codeLen, a hard upper bound
        on the number of distinct codes
    """
    if nIds > 4 ** codeLen:
        raise ValueError("Cannot generate %d unique barcodes of length %d"
                         % (nIds, codeLen))
    # homopolymer filter regex: match if 4 identical bases in a row
    hpf = re.compile('aaaa|cccc|gggg|tttt', re.IGNORECASE)
    codes = set()
    while len(codes) < nIds:
        candidate = ''.join(random.choice('ATCG') for _ in range(codeLen))
        if hpf.search(candidate) is None:
            codes.add(candidate)
    return list(codes)
def next(self):
    """Return the next page.

    The page label is defined in ``settings.NEXT_LABEL``. Return an empty
    string if current page is the last.
    """
    if not self._page.has_next():
        return ''
    return self._endless_page(
        self._page.next_page_number(), label=settings.NEXT_LABEL)
def run(items):
    """Run MetaSV if we have enough supported callers, adding output to the set of calls.

    Expects exactly one sample dict in ``items``. Collects VCF outputs from
    previously-run structural variant callers listed under ``data["sv"]``;
    when at least ``MIN_CALLERS`` supported callers produced a VCF, merges
    them with MetaSV (with local assembly validation via spades/age), filters
    low-confidence merged calls, annotates effects with snpEff, and appends
    the result to ``data["sv"]`` as caller "metasv".
    """
    assert len(items) == 1, "Expect one input to MetaSV ensemble calling"
    data = items[0]
    work_dir = _sv_workdir(data)
    out_file = os.path.join(work_dir, "variants.vcf.gz")
    # Base MetaSV command line: sample identity, reference, alignments, output dir.
    cmd = _get_cmd() + ["--sample", dd.get_sample_name(data), "--reference", dd.get_ref_file(data),
                        "--bam", dd.get_align_bam(data), "--outdir", work_dir]
    # One --<caller>_vcf argument per supported caller with a usable VCF,
    # skipping repeated callers.
    methods = []
    for call in data.get("sv", []):
        # Callers store their output under either "vcf_file" or "vrn_file".
        vcf_file = call.get("vcf_file", call.get("vrn_file", None))
        if call["variantcaller"] in SUPPORTED and call["variantcaller"] not in methods and vcf_file is not None:
            methods.append(call["variantcaller"])
            cmd += ["--%s_vcf" % call["variantcaller"], vcf_file]
    if len(methods) >= MIN_CALLERS:
        if not utils.file_exists(out_file):
            tx_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
            # Paired-end insert size statistics (cached as YAML) feed MetaSV's
            # breakpoint refinement.
            ins_stats = shared.calc_paired_insert_stats_save(dd.get_align_bam(data),
                                                             os.path.join(tx_work_dir, "insert-stats.yaml"))
            cmd += ["--workdir", tx_work_dir, "--num_threads", str(dd.get_num_cores(data))]
            # Local assembly (spades) and breakpoint alignment (age) binaries.
            cmd += ["--spades", utils.which("spades.py"), "--age", utils.which("age_align")]
            cmd += ["--assembly_max_tools=1", "--assembly_pad=500"]
            # NOTE(review): ins_stats["mean"]/["std"] look numeric while the other
            # cmd entries are strings — presumably do.run stringifies; confirm.
            cmd += ["--boost_sc", "--isize_mean", ins_stats["mean"], "--isize_sd", ins_stats["std"]]
            do.run(cmd, "Combine variant calls with MetaSV")
        # Remove weakly-supported calls: single-caller events that are very
        # large, or small events failing the breakpoint-assembly (BA_*) checks.
        filters = ("(NUM_SVTOOLS = 1 && ABS(SVLEN)>50000) || "
                   "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>80) || "
                   "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || "
                   "(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>2)")
        filter_file = vfilter.cutoff_w_expression(out_file, filters, data, name="ReassemblyStats",
                                                  limit_regions=None)
        effects_vcf, _ = effects.add_to_vcf(filter_file, data, "snpeff")
        # Fall back to the unannotated filtered VCF if effect annotation was skipped.
        data["sv"].append({"variantcaller": "metasv",
                           "vrn_file": effects_vcf or filter_file})
    return [data]
def flip(self):
    """Swap the major/minor designations, regardless of frequency truth.

    This is intended for forcing one of two populations to relate correctly
    to the same genotype definitions. When flipped, Ps and Qs will be
    backward, and the maf will no longer relate to the "minor" allele
    frequency. However, it does allow clients to use the same calls for each
    population without having to perform checks during those calculations.
    """
    # Exchange the counts in a single tuple assignment.
    self.maj_allele_count, self.min_allele_count = (
        self.min_allele_count, self.maj_allele_count)
    # Swap the two allele labels to match the exchanged counts.
    self.alleles = [self.alleles[1], self.alleles[0]]