signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
async def kickban(self, channel, target, reason=None, range=0):
    """Ban *target* from *channel*, then kick them.

    The ban is placed first so the target cannot rejoin immediately
    after being kicked.  ``range`` is forwarded to :meth:`ban`.
    """
    await self.ban(channel, target, range)
    await self.kick(channel, target, reason)
def _check_once(self):
    """Perform one ismaster check against the server.

    Returns a ``ServerDescription`` built from the response, or raises
    whatever exception the socket/command layer produced.  Heartbeat
    listeners are notified before and after the attempt when publishing
    is enabled.
    """
    address = self._server_description.address
    if self._publish:
        self._listeners.publish_server_heartbeat_started(address)

    # Run ismaster on a pooled socket and fold the timing into the
    # moving average used for server selection.
    with self._pool.get_socket({}) as sock_info:
        response, round_trip_time = self._check_with_socket(sock_info)
        self._avg_round_trip_time.add_sample(round_trip_time)
        sd = ServerDescription(
            address=address,
            ismaster=response,
            round_trip_time=self._avg_round_trip_time.get())
        if self._publish:
            self._listeners.publish_server_heartbeat_succeeded(
                address, round_trip_time, response)
        return sd
def check(table='filter', chain=None, rule=None, family='ipv4'):
    '''Check for the existence of a rule in the table and chain

    This function accepts a rule in a standard nftables command format,
    starting with the chain. Trying to force users to adapt to a new
    method of creating rules would be irritating at best, and we already
    have a parser that can handle it.

    CLI Example:

    .. code-block:: bash

        salt '*' nftables.check filter input \\
            rule='tcp dport 22 log accept'

        IPv6:
        salt '*' nftables.check filter input \\
            rule='tcp dport 22 log accept' \\
            family=ipv6
    '''
    ret = {'comment': '', 'result': False}

    # Guard clauses: both a chain and a rule are mandatory.
    if not chain:
        ret['comment'] = 'Chain needs to be specified'
        return ret
    if not rule:
        ret['comment'] = 'Rule needs to be specified'
        return ret

    # The table and chain must already exist; propagate their error dicts.
    res = check_table(table, family=family)
    if not res['result']:
        return res
    res = check_chain(table, chain, family=family)
    if not res['result']:
        return res

    nft_family = _NFTABLES_FAMILIES[family]
    cmd = '{0} --handle --numeric --numeric --numeric list chain {1} {2} {3}'.format(
        _nftables_cmd(), nft_family, table, chain)
    # A listed rule is followed by its handle marker, hence "rule #".
    search_rule = '{0} #'.format(rule)
    position = __salt__['cmd.run'](cmd, python_shell=False).find(search_rule)

    if position == -1:
        ret['comment'] = ('Rule {0} in chain {1} in table {2} in family {3} does not exist'
                          .format(rule, chain, table, family))
    else:
        ret['comment'] = ('Rule {0} in chain {1} in table {2} in family {3} exists'
                          .format(rule, chain, table, family))
        ret['result'] = True
    return ret
def list_npm_modules(collector, no_print=False):
    """List the npm modules that get installed in a docker image for the react server.

    Starts from the react server's default dependencies and layers each
    active module's npm deps on top.  Prints the merged mapping as JSON
    unless *no_print* is set; the mapping is returned either way.
    """
    deps = ReactServer().default_npm_deps()
    modules = collector.configuration["__active_modules__"]
    for _, module in sorted(modules.items()):
        deps.update(module.npm_deps())
    if not no_print:
        print(json.dumps(deps, indent=4, sort_keys=True))
    return deps
def _JzStaeckelIntegrandSquared(v, E, Lz, I3V, delta, u0, cosh2u0, sinh2u0,
                                potu0pi2, pot):
    # potu0pi2 = potentialStaeckel(u0, nu.pi / 2., pot, delta)
    """The J_z integrand: p_v(v)/2/delta^2."""
    sin2v = nu.sin(v) ** 2.
    # Potential difference term evaluated along the v coordinate at fixed u0.
    dV = (cosh2u0 * potu0pi2
          - (sinh2u0 + sin2v) * potentialStaeckel(u0, v, pot, delta))
    return E * sin2v + I3V + dV - Lz ** 2. / 2. / delta ** 2. / sin2v
def CreateDefaultPartition(client, ad_group_id):
    """Creates a default partition.

    Args:
      client: an AdWordsClient instance.
      ad_group_id: an integer ID for an ad group.
    """
    service = client.GetService('AdGroupCriterionService', version='v201809')

    operations = [{
        'operator': 'ADD',
        'operand': {
            'xsi_type': 'BiddableAdGroupCriterion',
            'adGroupId': ad_group_id,
            # Make sure that caseValue and parentCriterionId are left
            # unspecified. This makes this partition as generic as possible
            # to use as a fallback when others don't match.
            'criterion': {
                'xsi_type': 'ProductPartition',
                'partitionType': 'UNIT'
            },
            'biddingStrategyConfiguration': {
                'bids': [{
                    'xsi_type': 'CpcBid',
                    'bid': {'microAmount': 500000}
                }]
            }
        }
    }]

    criterion = service.mutate(operations)['value'][0]
    print('Ad group criterion with ID "%d" in ad group with ID "%d" was added.'
          % (criterion['criterion']['id'], criterion['adGroupId']))
def get_themes(urls):
    '''Download each theme in *urls* and save it under THEMEDIR.

    :param urls: mapping of theme name -> dotshare.it url slug.
                 Each theme is fetched from the site's raw endpoint and
                 written to ``THEMEDIR + name``.
    A progress bar is shown while downloading.
    '''
    widgets = ['Fetching themes:', Percentage(), ' ', Bar(marker='-'),
               ' ', ETA()]
    pbar = ProgressBar(widgets=widgets, maxval=len(urls)).start()
    # enumerate from 1 so the bar reflects completed downloads
    for counter, (name, slug) in enumerate(urls.items(), start=1):
        href = 'http://dotshare.it/dots/%s/0/raw/' % slug
        theme = urllib.urlopen(href).read()
        # Context manager guarantees the file is closed even if the
        # write fails (the original leaked the handle on error).
        with open(THEMEDIR + name, 'w') as f:
            f.write(theme)
        pbar.update(counter)
    pbar.finish()
def update(self, *args, **kwargs):
    """Update the dictionary with the key/value pairs from *other*,
    overwriting existing keys.

    *update* accepts either another dictionary object or an iterable of
    key/value pairs (as a tuple or other iterable of length two). If
    keyword arguments are specified, the dictionary is then updated with
    those key/value pairs: ``d.update(red=1, blue=2)``.
    """
    # Empty container: bulk-load everything and rebuild the key list.
    if not self:
        self._dict_update(*args, **kwargs)
        self._list_update(self._iter())
        return

    if not kwargs and len(args) == 1 and isinstance(args[0], dict):
        new_items = args[0]
    else:
        new_items = dict(*args, **kwargs)

    # For large updates it is cheaper to merge into the dict and rebuild
    # the ordering list than to insert keys one by one.
    if (10 * len(new_items)) > len(self):
        self._dict_update(new_items)
        self._list_clear()
        self._list_update(self._iter())
    else:
        for key in new_items:
            self[key] = new_items[key]
def createProfile(self, profile=None, clearLayout=True):
    """Prompts the user to create a new profile.

    An explicit *profile* is used as-is; otherwise a blank profile is
    created (when there is no view widget or *clearLayout* is set) or
    the current view state is captured as the starting point.
    """
    if profile:
        prof = profile
    elif not self.viewWidget() or clearLayout:
        prof = XViewProfile()
    else:
        prof = self.viewWidget().saveProfile()

    # Force signals on while the user edits, then restore the old state.
    blocked = self.signalsBlocked()
    self.blockSignals(False)
    changed = self.editProfile(prof)
    self.blockSignals(blocked)

    if not changed:
        return

    act = self.addProfile(prof)
    act.setChecked(True)

    # update the interface
    if self.viewWidget() and (profile or clearLayout):
        self.viewWidget().restoreProfile(prof)

    if not self.signalsBlocked():
        self.profileCreated.emit(prof)
        self.profilesChanged.emit()
def runLateralDisambiguation(noiseLevel=None, profile=False):
    """Runs a simple experiment where two objects share a (location, feature) pair.

    At inference, one column sees that ambiguous pair, and the other sees
    a unique one. We should see the first column rapidly converge to a
    unique representation.

    Parameters:
    @param noiseLevel (float) Noise level to add to the locations and
                              features during inference
    @param profile    (bool)  If True, the network will be profiled after
                              learning and inference
    """
    exp = L4L2Experiment(
        "lateral_disambiguation",
        numCorticalColumns=2,
    )

    objects = createObjectMachine(
        machineType="simple",
        numInputBits=20,
        sensorInputSize=1024,
        externalInputSize=1024,
        numCorticalColumns=2,
    )
    # Two objects sharing the (1, 1) pair; the second differs on (3, 2).
    objects.addObject([(1, 1), (2, 2)])
    objects.addObject([(1, 1), (3, 2)])

    exp.learnObjects(objects.provideObjectsToLearn())
    if profile:
        exp.printProfile()

    inferConfig = {
        "noiseLevel": noiseLevel,
        "numSteps": 6,
        "pairs": {
            # this should activate 0 and 1
            0: [(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
            # this should activate 1
            1: [(3, 2), (3, 2), (3, 2), (3, 2), (3, 2), (3, 2)],
        }
    }

    exp.infer(objects.provideObjectToInfer(inferConfig), objectName=1)
    if profile:
        exp.printProfile()

    exp.plotInferenceStats(
        fields=["L2 Representation",
                "Overlap L2 with object",
                "L4 Representation"],
        onePlot=False,
    )
def get_location(self, obj):
    """Return the user's location as "city, country", a single part, or None."""
    if obj.city and obj.country:
        return '%s, %s' % (obj.city, obj.country)
    if obj.city or obj.country:
        return obj.city or obj.country
    # Neither field is set.
    return None
def role(self, role_name):
    """Set role of current column.

    :param role_name: name of the role to be selected.
    :return: the column with the role applied.
    """
    translated = MLField.translate_role_name(role_name)
    field_roles = {self.name: translated}
    # NOTE(review): field_roles always holds one entry, so the else
    # branch below appears unreachable; kept for behavioral parity.
    if field_roles:
        return _change_singleton_roles(self, field_roles, True)
    else:
        return self
def _process_cascaded_category_contents(records):
    """Travel from categories to subcontributions, flattening the whole
    event structure.

    Yields everything that it finds (except for elements whose protection
    has changed but are not inheriting their protection settings from
    anywhere).

    :param records: queue records to process
    """
    category_prot_records = {rec.category_id for rec in records
                             if rec.type == EntryType.category
                             and rec.change == ChangeType.protection_changed}
    category_move_records = {rec.category_id for rec in records
                             if rec.type == EntryType.category
                             and rec.change == ChangeType.moved}

    changed_events = set()

    category_prot_records -= category_move_records  # A move already implies sending the whole record

    # Protection changes are handled differently, as there may not be the
    # need to re-generate the record
    if category_prot_records:
        for categ in Category.find(Category.id.in_(category_prot_records)):
            cte = categ.get_protection_parent_cte()
            # Update only children that inherit
            inheriting_categ_children = (Event.query
                                         .join(cte, db.and_((Event.category_id == cte.c.id),
                                                            (cte.c.protection_parent == categ.id))))
            inheriting_direct_children = Event.find((Event.category_id == categ.id) & Event.is_inheriting)
            changed_events.update(itertools.chain(inheriting_direct_children,
                                                  inheriting_categ_children))

    # Add move operations and explicitly-passed event records
    if category_move_records:
        changed_events.update(Event.find(Event.category_chain_overlaps(category_move_records)))

    yield from _process_cascaded_event_contents(records, additional_events=changed_events)
def validate_config(conf_dict):
    """Validate configuration.

    :param conf_dict: test configuration.
    :type conf_dict: {}
    :raise InvalidConfigurationError: when a required section is missing.
    """
    # TASK improve validation
    if APPLICATIONS not in conf_dict:
        raise InvalidConfigurationError('Missing application configuration.')
    if SEED_FILES not in conf_dict:
        raise InvalidConfigurationError('Missing seed file configuration.')
    # Optional settings fall back to their defaults.
    conf_dict.setdefault(RUNS, DEFAULT_RUNS)
    conf_dict.setdefault(PROCESSES, DEFAULT_PROCESSES)
    conf_dict.setdefault(PROCESSORS, DEFAULT_PROCESSORS)
def add_scan_alarm(self, scan_id, host='', name='', value='', port='',
                   test_id='', severity='', qod=''):
    """Record an ALARM result against the scan identified by *scan_id*."""
    self.scan_collection.add_result(scan_id, ResultType.ALARM, host, name,
                                    value, port, test_id, severity, qod)
def next_object(self):
    """Get next GridOut object from cursor."""
    grid_out = super(self.__class__, self).next_object()
    if not grid_out:
        # Exhausted.
        return None
    # Wrap the delegate GridOut in the framework-specific class.
    grid_out_class = create_class_with_framework(
        AgnosticGridOut, self._framework, self.__module__)
    return grid_out_class(self.collection, delegate=grid_out)
def is_ancestor_of_bank(self, id_, bank_id):
    """Tests if an ``Id`` is an ancestor of a bank.

    arg:    id (osid.id.Id): an ``Id``
    arg:    bank_id (osid.id.Id): the ``Id`` of a bank
    return: (boolean) - ``true`` if this ``id`` is an ancestor of
            ``bank_id,`` ``false`` otherwise
    raise:  NotFound - ``bank_id`` is not found
    raise:  NullArgument - ``bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` not found return ``false``.
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_ancestor_of_bin
    if self._catalog_session is not None:
        # Delegate to the catalog session when one is configured.
        return self._catalog_session.is_ancestor_of_catalog(
            id_=id_, catalog_id=bank_id)
    return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=bank_id)
def setxattr(self, req, ino, name, value, flags):
    """Set an extended attribute.

    Valid replies: reply_err

    Not implemented: always answers with ENOSYS.
    """
    self.reply_err(req, errno.ENOSYS)
def refresh_menu(self):
    """Refresh context menu."""
    index = self.currentIndex()
    has_selection = index.isValid()
    # Edit/remove only make sense with a valid selection.
    self.edit_action.setEnabled(has_selection)
    self.remove_action.setEnabled(has_selection)
    self.refresh_plot_entries(index)
def set_cell(self, column_family_id, column, value, timestamp=None):
    """Sets a value in this row.

    The cell is determined by the ``row_key`` of this :class:`DirectRow`
    and the ``column``. The ``column`` must be in an existing
    :class:`.ColumnFamily` (as determined by ``column_family_id``).

    .. note::

        This method adds a mutation to the accumulated mutations on this
        row, but does not make an API request. To actually send an API
        request (with the mutations) to the Google Cloud Bigtable API,
        call :meth:`commit`.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_row_set_cell]
        :end-before: [END bigtable_row_set_cell]

    :type column_family_id: str
    :param column_family_id: The column family that contains the column.
                             Must be of the form
                             ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

    :type column: bytes
    :param column: The column within the column family where the cell
                   is located.

    :type value: bytes or :class:`int`
    :param value: The value to set in the cell. If an integer is used,
                  will be interpreted as a 64-bit big-endian signed
                  integer (8 bytes).

    :type timestamp: :class:`datetime.datetime`
    :param timestamp: (Optional) The timestamp of the operation.
    """
    # state=None marks an unconditional (non-check-and-mutate) mutation.
    self._set_cell(column_family_id, column, value,
                   timestamp=timestamp, state=None)
def temperature_data_from_csv(filepath_or_buffer, tz=None, date_col="dt",
                              temp_col="tempF", gzipped=False, freq=None,
                              **kwargs):
    """Load temperature data from a CSV file.

    Default format::

        dt,tempF
        2017-01-01T00:00:00+00:00,21
        2017-01-01T01:00:00+00:00,22.5
        2017-01-01T02:00:00+00:00,23.5

    Parameters
    ----------
    filepath_or_buffer : :any:`str` or file-handle
        File path or object.
    tz : :any:`str`, optional
        E.g., ``'UTC'`` or ``'US/Pacific'``
    date_col : :any:`str`, optional, default ``'dt'``
        Date period start column.
    temp_col : :any:`str`, optional, default ``'tempF'``
        Temperature column.
    gzipped : :any:`bool`, optional
        Whether file is gzipped.
    freq : :any:`str`, optional
        If given, apply frequency to data using
        :any:`pandas.Series.resample`.
    **kwargs
        Extra keyword arguments to pass to :any:`pandas.read_csv`,
        such as ``sep='|'``.
    """
    read_csv_kwargs = {
        "usecols": [date_col, temp_col],
        "dtype": {temp_col: np.float64},
        "parse_dates": [date_col],
        "index_col": date_col,
    }
    if gzipped:
        read_csv_kwargs.update({"compression": "gzip"})

    # allow passing extra kwargs
    read_csv_kwargs.update(kwargs)

    if tz is None:
        tz = "UTC"
    df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize(tz)

    if freq == "hourly":
        # NOTE(review): .sum() aggregates temperatures when downsampling to
        # hourly buckets — .mean() may be intended for temperature data;
        # confirm against callers before changing.
        df = df.resample("H").sum()

    return df[temp_col]
def disconnect(self, name=None):
    """Clear internal Channel cache, allowing currently unused channels
    to be implictly closed.

    :param str name: None, to clear the entire cache, or a name string
                     to clear only a certain entry.
    """
    if name is None:
        self._channels = {}
    else:
        # Raises KeyError if the entry is not cached, matching dict.pop.
        self._channels.pop(name)
    if self._ctxt is not None:
        self._ctxt.disconnect(name)
def addSubEditor(self, subEditor, isFocusProxy=False):
    """Adds a sub editor to the layout (at the right but before the
    reset button).

    Will add the necessary event filter to handle tabs and sets the
    strong focus so that events will not propagate to the tree view.

    If isFocusProxy is True the sub editor will be the focus proxy of
    the CTI.
    """
    # Insert before the reset button, i.e. after the current sub editors.
    self.hBoxLayout.insertWidget(len(self._subEditors), subEditor)
    self._subEditors.append(subEditor)

    subEditor.installEventFilter(self)
    subEditor.setFocusPolicy(Qt.StrongFocus)

    if isFocusProxy:
        self.setFocusProxy(subEditor)

    return subEditor
def xpathNextChild(self, cur):
    """Traversal function for the "child" direction.

    The child axis contains the children of the context node in document
    order.
    """
    cur__o = None if cur is None else cur._o
    ret = libxml2mod.xmlXPathNextChild(self._o, cur__o)
    if ret is None:
        raise xpathError('xmlXPathNextChild() failed')
    return xmlNode(_obj=ret)
def import_data_from_uris(
        self,
        source_uris,
        dataset,
        table,
        schema=None,
        job=None,
        source_format=None,
        create_disposition=None,
        write_disposition=None,
        encoding=None,
        ignore_unknown_values=None,
        max_bad_records=None,
        allow_jagged_rows=None,
        allow_quoted_newlines=None,
        field_delimiter=None,
        quote=None,
        skip_leading_rows=None,
        project_id=None,
):
    """Imports data into a BigQuery table from cloud storage.

    Optional arguments that are not specified are determined by BigQuery
    as described:
    https://developers.google.com/bigquery/docs/reference/v2/jobs

    Parameters
    ----------
    source_uris : list
        A ``list`` of ``str`` objects representing the urls on cloud
        storage of the form: gs://bucket/filename
    dataset : str
        String id of the dataset
    table : str
        String id of the table
    schema : list, optional
        Represents the BigQuery schema
    job : str, optional
        Identifies the job (a unique job id is automatically generated
        if not provided)
    source_format : str, optional
        One of the JOB_SOURCE_FORMAT_* constants
    create_disposition : str, optional
        One of the JOB_CREATE_* constants
    write_disposition : str, optional
        One of the JOB_WRITE_* constants
    encoding : str, optional
        One of the JOB_ENCODING_* constants
    ignore_unknown_values : bool, optional
        Whether or not to ignore unknown values
    max_bad_records : int, optional
        Maximum number of bad records
    allow_jagged_rows : bool, optional
        For csv only
    allow_quoted_newlines : bool, optional
        For csv only
    field_delimiter : str, optional
        For csv only
    quote : str, optional
        Quote character for csv only
    skip_leading_rows : int, optional
        For csv only
    project_id : str, optional
        String id of the project

    Returns
    -------
    dict
        A BigQuery job response

    Raises
    ------
    JobInsertException
        on http/auth failures or error in result
    """
    # A single uri may be passed; normalize to a list.
    if not isinstance(source_uris, list):
        source_uris = [source_uris]
    project_id = self._get_project_id(project_id)

    configuration = {
        "destinationTable": {
            "projectId": project_id,
            "tableId": table,
            "datasetId": dataset,
        },
        "sourceUris": source_uris,
    }

    if max_bad_records:
        configuration['maxBadRecords'] = max_bad_records
    if ignore_unknown_values:
        configuration['ignoreUnknownValues'] = ignore_unknown_values
    if create_disposition:
        configuration['createDisposition'] = create_disposition
    if write_disposition:
        configuration['writeDisposition'] = write_disposition
    if encoding:
        configuration['encoding'] = encoding
    if schema:
        configuration['schema'] = {'fields': schema}
    if source_format:
        configuration['sourceFormat'] = source_format

    if not job:
        # Derive a deterministic job id from the source uris.
        uris_digest = self._generate_hex_for_uris(source_uris)
        job = "{dataset}-{table}-{digest}".format(
            dataset=dataset, table=table, digest=uris_digest)

    if source_format == JOB_SOURCE_FORMAT_CSV:
        if field_delimiter:
            configuration['fieldDelimiter'] = field_delimiter
        if allow_jagged_rows:
            configuration['allowJaggedRows'] = allow_jagged_rows
        if allow_quoted_newlines:
            configuration['allowQuotedNewlines'] = allow_quoted_newlines
        if quote:
            configuration['quote'] = quote
        if skip_leading_rows:
            configuration['skipLeadingRows'] = skip_leading_rows
    elif (field_delimiter or allow_jagged_rows or allow_quoted_newlines
          or quote or skip_leading_rows):
        # CSV-only options with a non-CSV source format are a caller error.
        all_values = dict(field_delimiter=field_delimiter,
                          allow_jagged_rows=allow_jagged_rows,
                          allow_quoted_newlines=allow_quoted_newlines,
                          skip_leading_rows=skip_leading_rows,
                          quote=quote)
        non_null_values = dict((k, v) for k, v in list(all_values.items())
                               if v)
        raise Exception("Parameters field_delimiter, allow_jagged_rows, "
                        "allow_quoted_newlines, quote and "
                        "skip_leading_rows are only allowed when "
                        "source_format=JOB_SOURCE_FORMAT_CSV: %s"
                        % non_null_values)

    body = {
        "configuration": {'load': configuration},
        "jobReference": self._get_job_reference(job),
    }

    logger.debug("Creating load job %s" % body)
    job_resource = self._insert_job(body)
    self._raise_insert_exception_if_error(job_resource)
    return job_resource
def text(self):
    """Get value of output stream (StringIO)."""
    if self.out:
        self.out.close()  # pragma: nocover
    return self.fp.getvalue()
def get_wikidata_qnum(wikiarticle, wikisite):
    """Retrieve the Query number for a wikidata database of metadata
    about a particular article.

    >>> print(get_wikidata_qnum(wikiarticle="Andromeda Galaxy",
    ...                         wikisite="enwiki"))
    Q2469
    """
    params = {
        'action': 'wbgetentities',
        'titles': wikiarticle,
        'sites': wikisite,
        'props': '',
        'format': 'json',
    }
    resp = requests.get('https://www.wikidata.org/w/api.php',
                        timeout=5, params=params).json()
    # The single entity key in the response is the Q-number.
    return list(resp['entities'])[0]
def disconnect(self):
    """Disconnect from the device that we are currently connected to.

    Raises HardwareError when called while not connected.
    """
    if not self.connected:
        raise HardwareError("Cannot disconnect when we are not connected")

    # Close the streaming and tracing interfaces when we disconnect
    self._reports = None
    self._traces = None

    self._loop.run_coroutine(self.adapter.disconnect(0))

    self.connected = False
    self.connection_interrupted = False
    self.connection_string = None
def build_elastic_query(doc):
    """Build a query which follows ElasticSearch syntax from doc.

    1. Converts ``{"q": "cricket"}`` into a filtered ``query_string``
       query with ``lenient=false`` and ``default_operator=AND``.
    2. Any other key becomes a filter: list values produce a ``terms``
       filter, scalars a ``term`` filter, e.g.
       ``{"q": "cricket", "type": ['text'], "source": "AAP"}``.

    :param doc: A document object which is inline with the syntax
                specified in the examples. It's the developer
                responsibility to pass right object.
    :returns ElasticSearch query
    """
    elastic_query = {"query": {"filtered": {}}}
    filters = []

    for key in doc.keys():
        if key == 'q':
            elastic_query['query']['filtered']['query'] = \
                _build_query_string(doc['q'])
        else:
            _value = doc[key]
            if isinstance(_value, list):
                filters.append({"terms": {key: _value}})
            else:
                filters.append({"term": {key: _value}})

    set_filters(elastic_query, filters)
    return elastic_query
async def dict(self, full):
    '''Open a HiveDict at the given full path.'''
    # NOTE: method name shadows the builtin ``dict`` by API design.
    node = await self.open(full)
    return await HiveDict.anit(self, node)
def _is_declaration ( self , name , value ) : """Determines if a class attribute is a field value declaration . Based on the name and value of the class attribute , return ` ` True ` ` if it looks like a declaration of a default field value , ` ` False ` ` if it is private ( name starts with ' _ ' ) or a classmethod or staticmethod ."""
if isinstance ( value , ( classmethod , staticmethod ) ) : return False elif enums . get_builder_phase ( value ) : # All objects with a defined ' builder phase ' are declarations . return True return not name . startswith ( "_" )
def register(class_=None, **kwargs):
    """Registers a dataset with segment specific hyperparameters.

    When passing keyword arguments to `register`, they are checked to be
    valid keyword arguments for the registered Dataset class constructor
    and are saved in the registry. Registered keyword arguments can be
    retrieved with the `list_datasets` function.

    All arguments that result in creation of separate datasets should be
    registered. Examples are datasets divided in different segments or
    categories, or datasets containing multiple languages.

    Once registered, an instance can be created by calling
    :func:`~gluonnlp.data.create` with the class name.

    Parameters
    ----------
    **kwargs : list or tuple of allowed argument values
        For each keyword argument, it's value must be a list or tuple of
        the allowed argument values.

    Examples
    --------
    >>> @gluonnlp.data.register(segment=['train', 'test', 'dev'])
    ... class MyDataset(gluon.data.Dataset):
    ...     def __init__(self, segment='train'):
    ...         pass
    >>> my_dataset = gluonnlp.data.create('MyDataset')
    >>> print(type(my_dataset))
    <class 'MyDataset'>
    """
    def _real_register(class_):
        # Assert that the passed kwargs are meaningful
        for kwarg_name, values in kwargs.items():
            try:
                real_args = inspect.getfullargspec(class_).args
            except AttributeError:
                # Python 2 fallback.
                # pylint: disable=deprecated-method
                real_args = inspect.getargspec(class_.__init__).args

            if kwarg_name not in real_args:
                raise RuntimeError(
                    ('{} is not a valid argument for {}. '
                     'Only valid arguments can be registered.').format(
                         kwarg_name, class_.__name__))

            if not isinstance(values, (list, tuple)):
                raise RuntimeError(('{} should be a list of '
                                    'valid arguments for {}. ').format(
                                        values, kwarg_name))

        # Save the kwargs associated with this class
        _REGSITRY_NAME_KWARGS[class_] = kwargs

        register_ = registry.get_register_func(Dataset, 'dataset')
        return register_(class_)

    if class_ is not None:
        # Decorator was called without arguments
        return _real_register(class_)

    return _real_register
def _group_changes ( cur , wanted , remove = False ) : '''Determine if the groups need to be changed'''
old = set ( cur ) new = set ( wanted ) if ( remove and old != new ) or ( not remove and not new . issubset ( old ) ) : return True return False
def get_objective_query_session(self, proxy):
    """Gets the ``OsidSession`` associated with the objective query service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.learning.ObjectiveQuerySession) - an
            ``ObjectiveQuerySession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_objective_query()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_objective_query()`` is ``true``.*
    """
    if not self.supports_objective_query():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.ObjectiveQuerySession(proxy=proxy, runtime=self._runtime)
def search(self):
    """Search srt in project for cells matching list of terms.

    Returns the de-duplicated matches sorted numerically.
    """
    matches = [m
               for pattern in Config.patterns
               for m in self.termfinder(pattern)]
    return sorted(set(matches), key=int)
def failUnlessWarns(self, category, message, filename, f, *args, **kwargs):
    """Fail if the given function doesn't generate the specified warning
    when called.

    It calls the function, checks the warning, and forwards the result
    of the function if everything is fine.

    @param category: the category of the warning to check.
    @param message: the output message of the warning to check.
    @param filename: the filename where the warning should come from.
    @param f: the function which is supposed to generate the warning.
    @type f: any callable.
    @param args: the arguments to C{f}.
    @param kwargs: the keywords arguments to C{f}.

    @return: the result of the original function C{f}.
    """
    warningsShown = []

    def warnExplicit(*warnArgs):
        warningsShown.append(warnArgs)

    origExplicit = warnings.warn_explicit
    try:
        warnings.warn_explicit = warnExplicit
        result = f(*args, **kwargs)
    finally:
        warnings.warn_explicit = origExplicit

    if not warningsShown:
        self.fail("No warnings emitted")
    first = warningsShown[0]
    for other in warningsShown[1:]:
        if other[:2] != first[:2]:
            self.fail("Can't handle different warnings")
    gotMessage, gotCategory, gotFilename, lineno = first[:4]
    self.assertEqual(gotMessage, message)
    self.assertIdentical(gotCategory, category)

    # Use starts with because of .pyc/.pyo issues.
    self.failUnless(
        filename.startswith(gotFilename),
        'Warning in %r, expected %r' % (gotFilename, filename))

    # It would be nice to be able to check the line number as well, but
    # different configurations actually end up reporting different line
    # numbers (generally the variation is only 1 line, but that's enough
    # to fail the test erroneously...).
    # self.assertEqual(lineno, xxx)

    return result
def clean_expired_tokens(opts):
    '''Clean expired tokens from the master.

    Removes every token whose expiry is missing or already in the past.
    '''
    loadauth = salt.auth.LoadAuth(opts)
    now = time.time()
    for tok in loadauth.list_tokens():
        token_data = loadauth.get_tok(tok)
        # Missing 'expire' is treated the same as an expired token.
        if 'expire' not in token_data or token_data.get('expire', 0) < now:
            loadauth.rm_token(tok)
def log(self, msg, level="info"):
    """Log this information to syslog or user provided logfile."""
    if not self.config.get("log_file"):
        # If level was given as a str then convert to actual level
        level = LOG_LEVELS.get(level, level)
        syslog(level, u"{}".format(msg))
        return

    # Binary mode so fs encoding setting is not an issue
    with open(self.config["log_file"], "ab") as f:
        log_time = time.strftime("%Y-%m-%d %H:%M:%S")
        # nice formating of data structures using pretty print
        if isinstance(msg, (dict, list, set, tuple)):
            msg = pformat(msg)
        # if multiline then start the data output on a fresh line
        # to aid readability.
        if "\n" in msg:
            msg = u"\n" + msg
        out = u"{} {} {}\n".format(log_time, level.upper(), msg)
        try:
            # Encode unicode strings to bytes
            f.write(out.encode("utf-8"))
        except (AttributeError, UnicodeDecodeError):
            # Write any byte strings straight to log
            f.write(out)
def body(self, data, data_type, **kwargs):
    """Serialize data intended for a request body.

    :param data: The data to be serialized.
    :param str data_type: The type to be serialized from.
    :rtype: dict
    :raises: SerializationError if serialization fails.
    :raises: ValueError if data is None
    """
    if data is None:
        raise ValidationError("required", "body", True)

    # Just in case this is a dict
    internal_data_type = data_type.strip('[]{}')
    internal_data_type = self.dependencies.get(internal_data_type, None)
    if internal_data_type and not isinstance(internal_data_type, Enum):
        try:
            deserializer = Deserializer(self.dependencies)
            # Since it's on serialization, it's almost sure that format is not JSON REST
            # We're not able to deal with additional properties for now.
            deserializer.additional_properties_detection = False
            if (issubclass(internal_data_type, Model)
                    and internal_data_type.is_xml_model()):
                deserializer.key_extractors = [
                    attribute_key_case_insensitive_extractor,
                ]
            else:
                deserializer.key_extractors = [
                    rest_key_case_insensitive_extractor,
                    attribute_key_case_insensitive_extractor,
                    last_rest_key_case_insensitive_extractor,
                ]
            data = deserializer._deserialize(data_type, data)
        except DeserializationError as err:
            raise_with_traceback(
                SerializationError,
                "Unable to build a model: " + str(err), err)

    if self.client_side_validation:
        errors = _recursive_validate(data_type, data_type, data)
        if errors:
            raise errors[0]
    return self._serialize(data, data_type, **kwargs)
def get_instances(self):
    """Collect the live instance objects of all configured nodes.

    Nodes whose configuration lacks an ``instance`` entry (or holds a
    falsy one) are skipped.

    :return: list of instance objects, one per node that has one
    """
    instances = []
    for node_config in self.runtime._nodes.values():
        instance = node_config.get('instance')
        if instance:
            instances.append(instance)
    return instances
def _load(self):
    """Fetch this resource's data and attach it as instance attributes.

    Called automatically when an attribute is first requested; not
    normally invoked directly by the user.

    :return: None
    """
    data = get_data(self.endpoint, self.id_, force_lookup=self.__force_lookup)

    for key, val in data.items():
        # Pokemon encounter locations arrive as a URL that must be
        # resolved with a follow-up request before objects can be built.
        if key == 'location_area_encounters' and self.endpoint == 'pokemon':
            ep, id_, subr = val.split('/')[-3:]
            encounters = get_data(ep, int(id_), subr)
            data[key] = [_make_obj(enc) for enc in encounters]
        elif isinstance(val, dict):
            data[key] = _make_obj(val)
        elif isinstance(val, list):
            data[key] = [_make_obj(item) for item in val]

    self.__dict__.update(data)
    return None
def refreshResults(self):
    """Refresh the browser display according to the current view mode.

    Dispatches to the detail, card or thumbnail refresh routine built
    from the fixed system, search and query-builder queries.
    """
    mode = self.currentMode()
    if mode == XOrbBrowserWidget.Mode.Detail:
        self.refreshDetails()
    elif mode == XOrbBrowserWidget.Mode.Card:
        self.refreshCards()
    else:
        self.refreshThumbnails()
def subset_vcf(in_file, region, out_file, config):
    """Subset VCF in the given region, handling bgzip and indexing of input.

    :param in_file: path to the input VCF; bgzipped/indexed on demand
    :param region: region to extract (project region representation)
    :param out_file: path for the subset VCF output
    :param config: configuration dict used to locate bcftools
    :return: path to the subset VCF (out_file)
    """
    work_file = vcfutils.bgzip_and_index(in_file, config)
    if not file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            bcftools = config_utils.get_program("bcftools", config)
            region_str = bamprep.region_to_gatk(region)
            # NOTE: cmd.format(**locals()) substitutes the local variable
            # names above; renaming any of them would break the command.
            cmd = "{bcftools} view -r {region_str} {work_file} > {tx_out_file}"
            do.run(cmd.format(**locals()),
                   "subset %s: %s" % (os.path.basename(work_file), region_str))
    return out_file
def process_event(self, event):
    """User input for the main map view.

    Handles keyboard events only: Enter opens the location-entry dialog,
    q/Q/Ctrl-C quits, t toggles satellite view, ? shows help, +/-/0/9
    adjust the desired zoom, and the arrow keys pan. Any handled key
    triggers a tile reload and a screen redraw; everything else is
    ignored.
    """
    if isinstance(event, KeyboardEvent):
        if event.key_code in [Screen.ctrl("m"), Screen.ctrl("j")]:
            # Enter: prompt for a new location.
            self._scene.add_effect(
                EnterLocation(self._screen, self._longitude, self._latitude, self._on_new_location))
        elif event.key_code in [ord('q'), ord('Q'), Screen.ctrl("c")]:
            raise StopApplication("User quit")
        elif event.key_code in [ord('t'), ord('T')]:
            # Toggle satellite imagery; reset tile size when turning it on.
            self._satellite = not self._satellite
            if self._satellite:
                self._size = _START_SIZE
        elif event.key_code == ord("?"):
            self._scene.add_effect(PopUpDialog(self._screen, _HELP, ["OK"]))
        elif event.key_code == ord("+") and self._zoom <= 20:
            if self._desired_zoom < 20:
                self._desired_zoom += 1
        elif event.key_code == ord("-") and self._zoom >= 0:
            if self._desired_zoom > 0:
                self._desired_zoom -= 1
        elif event.key_code == ord("0"):
            self._desired_zoom = 0
        elif event.key_code == ord("9"):
            self._desired_zoom = 20
        elif event.key_code == Screen.KEY_LEFT:
            # Pan by ~10 screen columns' worth of longitude at this zoom.
            self._desired_longitude -= 360 / 2 ** self._zoom / self._size * 10
        elif event.key_code == Screen.KEY_RIGHT:
            self._desired_longitude += 360 / 2 ** self._zoom / self._size * 10
        elif event.key_code == Screen.KEY_UP:
            self._desired_latitude = self._inc_lat(self._desired_latitude, -self._size / 10)
        elif event.key_code == Screen.KEY_DOWN:
            self._desired_latitude = self._inc_lat(self._desired_latitude, self._size / 10)
        else:
            return
        # Trigger a reload of the tiles and redraw map
        self._updated.set()
        self._screen.force_update()
def wait_for_parent_image_build(self, nvr):
    """Poll koji until the build that produced the given image NVR appears.

    :param nvr: NVR of the parent image to look up
    :return: build info dict with 'nvr' and 'id' keys
    :raises KojiParentBuildMissing: if the build does not show up within
        the configured timeout, or shows up in a non-COMPLETE state
    """
    self.log.info('Waiting for Koji build for parent image %s', nvr)
    deadline = time.time() + self.poll_timeout
    while time.time() < deadline:
        build = self.koji_session.getBuild(nvr)
        if not build:
            time.sleep(self.poll_interval)
            continue
        self.log.info('Parent image Koji build found with id %s', build.get('id'))
        if build['state'] != koji.BUILD_STATES['COMPLETE']:
            exc_msg = ('Parent image Koji build for {} with id {} state is not COMPLETE.')
            raise KojiParentBuildMissing(exc_msg.format(nvr, build.get('id')))
        return build
    raise KojiParentBuildMissing('Parent image Koji build NOT found for {}!'.format(nvr))
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
                    quoting=None, **kwargs):
    """Convert to our native types format, slicing if desired.

    :param slicer: optional slice applied along the last axis
    :param na_rep: representation to use for missing values
    :param date_format: explicit datetime format; when None a format is
        inferred from the values
    :param quoting: accepted for interface compatibility; unused here
    :return: 2-D object ndarray of formatted strings
    """
    values = self.values
    # int64 view of the datetimes for the C-level formatter.
    i8values = self.values.view('i8')

    if slicer is not None:
        values = values[..., slicer]
        i8values = i8values[..., slicer]

    from pandas.io.formats.format import _get_format_datetime64_from_values
    fmt = _get_format_datetime64_from_values(values, date_format)

    # Format the flattened i8 values, then restore the original shape.
    result = tslib.format_array_from_datetime(
        i8values.ravel(),
        tz=getattr(self.values, 'tz', None),
        format=fmt,
        na_rep=na_rep).reshape(i8values.shape)
    return np.atleast_2d(result)
def optimize_layout_handler(rule, handler):
    """Pre-bind *rule* and *handler* into a runner for the dispatcher.

    The returned generator function captures both in its closure, so the
    dispatcher avoids repeated reference lookups per invocation.

    :return: generator function taking (walk, dispatcher, node)
    """
    def bound_runner(walk, dispatcher, node):
        # rule/handler are closure-captured: no per-call lookups needed.
        yield LayoutChunk(rule, handler, node)

    return bound_runner
def _comparator_lt(filter_value, tested_value):
    """Return True when tested_value < filter_value.

    A string filter value is first coerced to the tested value's type;
    for int targets a float fallback is attempted. Values that cannot be
    converted or compared yield False.
    """
    if is_string(filter_value):
        target_type = type(tested_value)
        try:
            # Try a conversion to the tested value's own type.
            filter_value = target_type(filter_value)
        except (TypeError, ValueError):
            if target_type is not int:
                # Incompatible type
                return False
            # Integer/float comparison trick
            try:
                filter_value = float(filter_value)
            except (TypeError, ValueError):
                # Non-float value
                return False

    try:
        return tested_value < filter_value
    except TypeError:
        # Incompatible type
        return False
def __stream_request(self, method, url, request_args, headers=None):
    """Make a 'stream' request and yield each response line as JSON.

    Called by the 'request' method once it has determined that a
    streaming (rather than regular) call applies.
    """
    headers = headers or {}
    response = self.__request(method, url, request_args, headers=headers, stream=True)
    for raw_line in response.iter_lines(ITER_LINES_CHUNKSIZE):
        # Keep-alive chunks arrive as empty lines; skip them.
        if not raw_line:
            continue
        yield json.loads(raw_line.decode("utf-8"))
def render_string(self, template_name, **kwargs):
    """Render a template with the engine selected by `TEMPLATE_ENGINE`.

    Supports `tornado` and `jinja2`. Only the template rendering process
    is affected; the ui-modules feature exposed via `render` keeps
    working as normal.
    """
    engine = settings['TEMPLATE_ENGINE']
    if engine == 'tornado':
        return super(BaseHandler, self).render_string(template_name, **kwargs)
    if engine == 'jinja2':
        return jinja2_render(template_name, **kwargs)
    raise errors.SettingsError(
        '%s is not a supported TEMPLATE_ENGINE, should be `tornado` or `jinja2`'
        % settings['TEMPLATE_ENGINE'])
def _is_json_serialized_jws(self, json_jws):
    """Check whether *json_jws* looks like a JSON-serialized signed JWT.

    Accepts both the general serialization ("payload" + "signatures")
    and the flattened one ("payload" + "signature").

    :param json_jws: The message
    :return: True/False
    """
    present = set(json_jws.keys())
    general = {"payload", "signatures"} <= present
    flattened = {"payload", "signature"} <= present
    return general or flattened
def _fetch(self):
    """Fetch the ring from Redis, keeping only active nodes/replicas.

    Entries whose score is older than NODE_TIMEOUT seconds are excluded
    by the range query. Returns a list of (start, replica) tuples sorted
    by start (see _fetch_all docs for details).
    """
    cutoff = time.time() - NODE_TIMEOUT
    raw_entries = self.conn.zrangebyscore(self.key, cutoff, 'INF')

    ring = []
    for entry in raw_entries:
        start_str, replica = entry.decode().split(':', 1)
        ring.append((int(start_str), replica))
    ring.sort(key=operator.itemgetter(0))
    return ring
def run(self, statement, parameters=None, **kwparameters):
    """Run a Cypher statement within the context of this transaction.

    The statement is sent to the server lazily, when its result is
    consumed; use :meth:`.Transaction.sync` to force it. Parameters may
    be supplied as a dictionary, as keyword arguments, or both, e.g.::

        >>> tx.run("CREATE (a:Person {name: {name}})", name="Alice")

    :param statement: template Cypher statement
    :param parameters: dictionary of parameters
    :param kwparameters: additional keyword parameters
    :returns: :class:`.StatementResult` object
    :raise TransactionError: if the transaction is closed
    """
    self._assert_open()
    result = self.session.run(statement, parameters, **kwparameters)
    return result
def get(self, run_id):
    """Get a single run from the database.

    :param run_id: The ID of the run.
    :return: The whole object from the database.
    :raise NotFoundError: when not found
    """
    # Renamed from `id` to avoid shadowing the builtin `id()`.
    record_id = self._parse_id(run_id)
    run = self.generic_dao.find_record(self.collection_name, {"_id": record_id})
    if run is None:
        raise NotFoundError("Run %s not found." % run_id)
    return run
def _adjust_rowcol(self, insertion_point, no_to_insert, axis, tab=None):
    """Adjusts row and column sizes on insertion/deletion.

    :param insertion_point: index after which rows/cols were inserted;
        presumably negative no_to_insert expresses deletion -- TODO
        confirm against callers
    :param axis: 0 for rows, 1 for columns, 2 for tables
    :param tab: if given, only sizes on this table are adjusted
    """
    if axis == 2:
        # Table-axis changes are handled by a dedicated shift routine.
        self._shift_rowcol(insertion_point, no_to_insert)
        return

    assert axis in (0, 1)

    # Pick the size mapping and its setter for the affected axis.
    cell_sizes = self.col_widths if axis else self.row_heights
    set_cell_size = self.set_col_width if axis else self.set_row_height

    new_sizes = {}
    del_sizes = []

    # Collect sizes past the insertion point that must move, remembering
    # their old positions so they can be cleared afterwards.
    for pos, table in cell_sizes:
        if pos > insertion_point and (tab is None or tab == table):
            if 0 <= pos + no_to_insert < self.shape[axis]:
                new_sizes[(pos + no_to_insert, table)] = cell_sizes[(pos, table)]
                del_sizes.append((pos, table))

    # Apply the shifted sizes first ...
    for pos, table in new_sizes:
        set_cell_size(pos, table, new_sizes[(pos, table)])

    # ... then clear old positions that were not overwritten.
    for pos, table in del_sizes:
        if (pos, table) not in new_sizes:
            set_cell_size(pos, table, None)
def reset(self):
    """Reset the environment and return its initial state.

    Returns:
        tuple or dict: For a single-agent environment the return matches
        `step`; for a multi-agent environment it matches `tick` (a dict
        mapping agent name to state).
    """
    self._reset_ptr[0] = True
    self._commands.clear()

    # Tick through the pre-start steps plus one to settle the world.
    ticks_needed = self._pre_start_steps + 1
    for _ in range(ticks_needed):
        self.tick()

    return self._default_state_fn()
def edit(cls, record, parent=None, uifile='', commit=True):
    """Prompts the user to edit the inputed record.

    :param record: <orb.Table> record to edit
    :param parent: <QWidget> parent for the dialog
    :param uifile: optional path to a ui file for the editor widget
    :param commit: whether to commit the record on accept
    :return: <bool> | accepted
    """
    # create the dialog
    dlg = QDialog(parent)
    dlg.setWindowTitle('Edit %s' % record.schema().name())

    # create the widget, honoring a schema-specific widget class override
    cls = record.schema().property('widgetClass', cls)
    widget = cls(dlg)
    if uifile:
        widget.setUiFile(uifile)
    widget.setRecord(record)
    widget.layout().setContentsMargins(0, 0, 0, 0)

    # create buttons
    opts = QDialogButtonBox.Save | QDialogButtonBox.Cancel
    btns = QDialogButtonBox(opts, Qt.Horizontal, dlg)

    # create layout
    layout = QVBoxLayout()
    layout.addWidget(widget)
    layout.addWidget(btns)
    dlg.setLayout(layout)
    dlg.adjustSize()

    # create connections
    btns.rejected.connect(dlg.reject)
    widget.saved.connect(dlg.accept)

    if dlg.exec_():
        if commit:
            result = widget.record().commit()
            if 'errored' in result:
                # BUG FIX: this is a classmethod, so `self` was undefined
                # here (NameError on the error path); parent the message
                # box on the dialog instead.
                QMessageBox.information(dlg,
                                        'Error Committing to Database',
                                        result['errored'])
                return False
        return True
    return False
def do_check_pep8(files, status):
    """Run the python pep8 tool (flake8) against the supplied files.

    Any linting errors are appended to the returned status list.

    Args:
        files (str): list of files to run pep8 against
        status (list): list of pre-receive check failures to eventually
            print to the user

    Returns:
        status list of current pre-receive check failures. Might be an
        empty list.
    """
    for file_name in files:
        output = run('flake8', '--max-line-length=120', '{0}'.format(file_name))
        if output:
            status.append("Python PEP8/Flake8: {0}: {1}".format(file_name, output))
    return status
def is_url(path):
    """Test if path represents a valid URL.

    A string only counts as a URL when it has a scheme, a network
    location and a non-empty path component.

    :param str path: Path to file.
    :return: True if path is valid url string, False otherwise.
    :rtype: :py:obj:`True` or :py:obj:`False`
    """
    try:
        parsed = urlparse(path)
    except ValueError:
        return False
    return bool(parsed.scheme and parsed.netloc and parsed.path)
def group_min(groups, data):
    """Given a list of groups, find the minimum element of data within
    each group.

    Parameters
    ----------
    groups : (n,) sequence of (q,) int
        Indexes of each group corresponding to each element in data
    data : (m,)
        The data that groups indexes reference

    Returns
    -------
    minimums : (n,)
        Minimum value of data per group
    """
    # lexsort treats its *last* key as primary: order by group first,
    # breaking ties by value, so each group run starts at its minimum.
    order = np.lexsort((data, groups))
    sorted_groups = groups[order]  # only needed if groups is unsorted
    sorted_data = data[order]

    # Flag positions where a new group run begins.
    run_starts = np.empty(len(sorted_groups), dtype=bool)
    run_starts[0] = True
    run_starts[1:] = sorted_groups[1:] != sorted_groups[:-1]
    return sorted_data[run_starts]
def to_python(self, value):
    """Validates that the input can be converted to a datetime.

    Accepts a datetime (returned as-is), a date (midnight assumed), or a
    4-element list from two SplitDateTimeWidgets: [start date, start
    time, end date, end time], for which a (start_datetime,
    end_datetime) tuple is returned.

    NOTE(review): when a list component matches no input format it is
    silently left as None rather than raising, and a value of any other
    type falls off the end returning None implicitly -- confirm both are
    intended.
    """
    if value in validators.EMPTY_VALUES:
        return None
    if isinstance(value, datetime.datetime):
        return value
    if isinstance(value, datetime.date):
        # Date only: promote to a midnight datetime.
        return datetime.datetime(value.year, value.month, value.day)
    if isinstance(value, list):
        # Input comes from a 2 SplitDateTimeWidgets, for example. So,
        # it's four components: start date and time, and end date and time.
        if len(value) != 4:
            raise ValidationError(self.error_messages['invalid'])
        if value[0] in validators.EMPTY_VALUES and value[1] in validators.EMPTY_VALUES and value[2] in validators.EMPTY_VALUES and value[3] in validators.EMPTY_VALUES:
            return None
        start_value = '%s %s' % tuple(value[:2])
        end_value = '%s %s' % tuple(value[2:])
        start_datetime = None
        end_datetime = None
        # Try each configured input format until one parses the start.
        for format in self.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'):
            try:
                start_datetime = datetime.datetime(*time.strptime(start_value, format)[:6])
            except ValueError:
                continue
        # Same for the end component.
        for format in self.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'):
            try:
                end_datetime = datetime.datetime(*time.strptime(end_value, format)[:6])
            except ValueError:
                continue
        return (start_datetime, end_datetime)
def adrinverter(v_dc, p_dc, inverter, vtol=0.10):
    r'''Converts DC power and voltage to AC power using Anton Driesse's
    Grid-Connected PV Inverter efficiency model.

    Parameters
    ----------
    v_dc : numeric
        DC voltage input to the inverter, >= 0 (V). Must match p_dc in
        size when both are vectors.
    p_dc : numeric
        DC power input to the inverter, >= 0 (W). Must match v_dc in
        size when both are vectors.
    inverter : dict-like
        Inverter performance parameters per the model developed in [1].
        Required keys: 'Pnom', 'Vnom', 'Pacmax', 'Pnt',
        'ADRCoefficients' (list of 9 loss coefficients), 'Vmax', 'Vmin',
        'Vdcmax', 'MPPTHi', 'MPPTLow'.
    vtol : numeric, default 0.1
        Unit-less fraction in [0, 1] determining how far the efficiency
        model may extrapolate beyond the inverter's normal input voltage
        operating range.

    Returns
    -------
    ac_power : numeric
        Modeled AC power output. Values above pac_max are clipped to
        represent inverter "clipping"; night-time consumption is
        represented as -p_nt; voltages outside the (tolerance-widened)
        limits yield NaN. Not adjusted for MPPT windows or current
        limits.

    References
    ----------
    [1] Beyond the Curves: Modeling the Electrical Efficiency of
    Photovoltaic Inverters, PVSC 2008, Anton Driesse et. al.

    See also
    --------
    sapm
    singlediode
    '''
    p_nom = inverter['Pnom']
    v_nom = inverter['Vnom']
    pac_max = inverter['Pacmax']
    p_nt = inverter['Pnt']
    ce_list = inverter['ADRCoefficients']
    v_max = inverter['Vmax']
    v_min = inverter['Vmin']
    vdc_max = inverter['Vdcmax']
    mppt_hi = inverter['MPPTHi']
    mppt_low = inverter['MPPTLow']

    # Voltage limits widened by the extrapolation tolerance.
    v_lim_upper = float(np.nanmax([v_max, vdc_max, mppt_hi]) * (1 + vtol))
    v_lim_lower = float(np.nanmax([v_min, mppt_low]) * (1 - vtol))

    # Normalize inputs to per-unit quantities.
    pdc = p_dc / p_nom
    vdc = v_dc / v_nom
    # zero voltage will lead to division by zero, but since power is
    # set to night time value later, these errors can be safely ignored
    with np.errstate(invalid='ignore', divide='ignore'):
        poly = np.array([pdc ** 0,  # replace with np.ones_like?
                         pdc,
                         pdc ** 2,
                         vdc - 1,
                         pdc * (vdc - 1),
                         pdc ** 2 * (vdc - 1),
                         1. / vdc - 1,  # divide by 0
                         pdc * (1. / vdc - 1),  # invalid 0./0. --> nan
                         pdc ** 2 * (1. / vdc - 1)])  # divide by 0
        p_loss = np.dot(np.array(ce_list), poly)
        ac_power = p_nom * (pdc - p_loss)
        p_nt = -1 * np.absolute(p_nt)

    # set output to nan where input is outside of limits
    # errstate silences case where input is nan
    with np.errstate(invalid='ignore'):
        invalid = (v_lim_upper < v_dc) | (v_dc < v_lim_lower)
    ac_power = np.where(invalid, np.nan, ac_power)

    # set night values
    ac_power = np.where(vdc == 0, p_nt, ac_power)
    ac_power = np.maximum(ac_power, p_nt)

    # set max ac output
    ac_power = np.minimum(ac_power, pac_max)

    if isinstance(p_dc, pd.Series):
        ac_power = pd.Series(ac_power, index=pdc.index)

    return ac_power
def reproduce(self, survivors, p_crossover, two_point_crossover=False, target_size=None):
    """Breed offspring from surviving chromosomes until the population
    reaches a target size.

    Parents are drawn from *survivors* with probability proportional to
    fitness. Each offspring may undergo a crossover event (1- or
    2-point) with a uniformly chosen surviving mate; crossover mutates
    only the offspring copies, never the parents. Without crossover an
    offspring is an exact copy of its parent.

    survivors: pool of parent chromosomes to reproduce from
    p_crossover: probability in [0, 1] of a crossover per offspring
    two_point_crossover (default=False): use 2-point crossover instead
        of the default 1-point
    target_size (default=original population size): target population
        size
    return: list of survivors plus any offspring
    """
    assert 0 <= p_crossover <= 1
    target_size = target_size or self.orig_pop_size

    num_survivors = len(survivors)
    # Cumulative fitness distribution: the weakest member gets p=0 but
    # can still be crossed-over with.
    cdf = compute_fitness_cdf(survivors, self)

    offspring = []
    while num_survivors + len(offspring) < target_size:
        # Pick a survivor to reproduce, weighted by fitness.
        child = weighted_choice(survivors, cdf).copy()

        if random.random() < p_crossover:
            # The mate is uniform over survivors and may equal the parent.
            mate = random.choice(survivors).copy()
            point1 = random.randrange(0, child.length)
            if two_point_crossover:
                point2 = random.randrange(point1 + 1, child.length + 1)
            else:
                point2 = None
            child.crossover(mate, point1, point2)

        offspring.append(child)

    return survivors + offspring
def pull(self, image=None, name=None, pull_folder='', ext="simg",
         force=False, capture=False, name_by_commit=False,
         name_by_hash=False, stream=False):
    '''pull will pull a singularity hub or Docker image.

    Parameters
    ==========
    image: the complete image uri. If not provided, the client loaded is
        used.
    pull_folder: if not defined, pulls to $PWD (''). If defined, pulls
        to user specified location instead.

    Docker and Singularity Hub Naming
    ---------------------------------
    name: a custom name to use, to override default
    ext: if no name specified, the default extension to use
        ('sif' is forced for Singularity >= 3.0).
    force: pass --force to the pull command
    capture: capture output of the underlying command
    name_by_commit / name_by_hash: let singularity name the image by
        commit or hash (takes precedence over *name*)
    stream: stream the pull command instead of running it

    :return: path to the final image, or (path, stream) when streaming
    '''
    from spython.utils import check_install
    check_install()

    cmd = self._init_command('pull')

    # If Singularity version > 3.0, we have sif format
    if 'version 3' in self.version():
        ext = 'sif'

    # No image provided, default to use the client's loaded image
    if image is None:
        image = self._get_uri()

    # If it's still None, no go!
    if image is None:
        bot.exit('You must provide an image uri, or use client.load() first.')

    # Singularity Only supports shub and Docker pull
    if not re.search('^(shub|docker)://', image):
        bot.exit("pull only valid for docker and shub. Use sregistry client.")

    # Did the user ask for a custom pull folder?
    if pull_folder:
        self.setenv('SINGULARITY_PULLFOLDER', pull_folder)

    # If we still don't have a custom name, base off of image uri.
    # Determine how to tell client to name the image, preference to hash
    if name_by_hash is True:
        cmd.append('--hash')
    elif name_by_commit is True:
        cmd.append('--commit')
    elif name is None:
        name = self._get_filename(image, ext)

    # Only add name if we aren't naming by hash or commit
    if not name_by_commit and not name_by_hash:
        # Regression Singularity 3.* onward, PULLFOLDER not honored
        # https://github.com/sylabs/singularity/issues/2788
        if pull_folder and 'version 3' in self.version():
            pull_folder_name = os.path.join(pull_folder, os.path.basename(name))
            cmd = cmd + ["--name", pull_folder_name]
        else:
            cmd = cmd + ["--name", name]

    if force is True:
        cmd = cmd + ["--force"]

    cmd.append(image)
    bot.info(' '.join(cmd))

    # If name is still None, make empty string
    if name is None:
        name = ''

    final_image = os.path.join(pull_folder, name)

    # Option 1: For hash or commit, need return value to get final_image
    if name_by_commit or name_by_hash:
        # Set pull to temporary location
        tmp_folder = tempfile.mkdtemp()
        self.setenv('SINGULARITY_PULLFOLDER', tmp_folder)
        self._run_command(cmd, capture=capture)
        try:
            tmp_image = os.path.join(tmp_folder, os.listdir(tmp_folder)[0])
            final_image = os.path.join(pull_folder, os.path.basename(tmp_image))
            shutil.move(tmp_image, final_image)
            shutil.rmtree(tmp_folder)
        except:
            # NOTE(review): bare except hides the actual failure (empty
            # tmp dir, permissions, ...); consider narrowing to
            # (IndexError, OSError) and logging the cause.
            bot.error('Issue pulling image with commit or hash, try without?')

    # Option 2: Streaming we just run to show user
    elif stream is False:
        self._run_command(cmd, capture=capture)

    # Option 3: A custom name we can predict (not commit/hash) and can also show
    else:
        return final_image, stream_command(cmd, sudo=False)

    if os.path.exists(final_image):
        bot.info(final_image)
    return final_image
def get_build_configuration_set(id=None, name=None):
    """Get a specific BuildConfigurationSet by name or ID.

    :return: the formatted JSON content, or None when nothing was found
    """
    raw = get_build_configuration_set_raw(id, name)
    if not raw:
        return None
    return utils.format_json(raw)
async def send(from_addr, to_addrs, subject="Ellis", msg="", **kwargs):
    """Sends an e-mail to the provided address(es).

    :param from_addr: E-mail address of the sender.
    :type from_addr: str
    :param to_addrs: E-mail address(es) of the receiver(s).
    :type to_addrs: list or str
    :param msg: Message to be sent.
    :type msg: str
    """
    # Assemble the message body up-front, before opening the connection.
    body = "Subject: {0}\n\n{1}".format(subject, msg)
    if kwargs:
        # To append kwargs to the given message, we first
        # transform it into a more human friendly string:
        values = "\n".join(["{0}: {1}".format(k, v) for k, v in kwargs.items()])
        body = ("{0}\n\nThe following variables have been caught:"
                "\n{1}".format(body, values))

    async with SMTP() as client:
        try:
            await client.sendmail(from_addr, to_addrs, body)
        except:
            # FIXME: print a friendly message to stdout.
            raise
def update(self, friendly_name=values.unset, unique_name=values.unset, attributes=values.unset):
    """Update the ChannelInstance.

    :param unicode friendly_name: A human-readable name for the Channel.
    :param unicode unique_name: A unique, addressable name for the Channel.
    :param unicode attributes: An optional metadata field you can use to
        store any data you wish.

    :returns: Updated ChannelInstance
    :rtype: twilio.rest.chat.v1.service.channel.ChannelInstance
    """
    payload = self._version.update(
        'POST',
        self._uri,
        data=values.of({
            'FriendlyName': friendly_name,
            'UniqueName': unique_name,
            'Attributes': attributes,
        }),
    )

    return ChannelInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        sid=self._solution['sid'],
    )
def get_inst_info(qry_string):
    """Get details for instances that match the qry_string.

    Execute a query against the AWS EC2 client object, that is based on
    the contents of qry_string.

    Args:
        qry_string (str): the query to be used against the aws ec2 client.

    Returns:
        qry_results (dict): raw information returned from AWS.
    """
    # SECURITY NOTE(review): eval() executes qry_string as arbitrary
    # Python, so any untrusted content reaching this function is remote
    # code execution. Prefer parsing the arguments and calling
    # EC2C.describe_instances(**kwargs) directly.
    qry_prefix = "EC2C.describe_instances("
    qry_real = qry_prefix + qry_string + ")"
    qry_results = eval(qry_real)  # pylint: disable=eval-used
    return qry_results
def on_lstClassifications_itemSelectionChanged(self):
    """Update classification description label and unlock the Next button.

    .. note:: This is an automatic Qt slot executed when the field
        selection changes.
    """
    self.clear_further_steps()

    classification = self.selected_classification()
    if not classification:
        # Nothing selected yet: leave the step locked.
        return

    # Show the description and allow advancing to the next step.
    self.lblDescribeClassification.setText(classification["description"])
    self.parent.pbnNext.setEnabled(True)
def CreateTask(self, session_identifier):
    """Creates a task bound to a session.

    Args:
        session_identifier (str): the identifier of the session the task
            is part of.

    Returns:
        Task: task attribute container.
    """
    task = tasks.Task(session_identifier)
    logger.debug('Created task: {0:s}.'.format(task.identifier))

    # Registration, the counter and status sampling share the manager lock.
    with self._lock:
        self._tasks_queued[task.identifier] = task
        self._total_number_of_tasks += 1
        self.SampleTaskStatus(task, 'created')

    return task
def get_kwargs(self):
    """Return the connection kwargs shared by publisher and subscriber
    classes."""
    keys = ('host', 'port', 'channel', 'password')
    return {key: getattr(self, key) for key in keys}
def sd(self):
    '''Calculate standard deviation of timeseries.

    Returns the element-wise square root of the variance series, or
    None when that series is empty.
    '''
    variance = self.var()
    if not len(variance):
        return None
    return np.sqrt(variance)
def replace_each(text, items, count=None, strip=False):
    '''Like ``replace``, applied once per ``(old, new)`` pair in
    ``items``.'''
    for old, new in items:
        text = replace(text, old, new, count=count, strip=strip)
    return text
def divide(self, other, inplace=True):
    """Divide this gaussian distribution by another.

    Parameters
    ----------
    other: GaussianDistribution
        The GaussianDistribution to divide by.
    inplace: boolean
        If True, modifies the distribution itself; otherwise returns a
        new GaussianDistribution object.

    Returns
    -------
    CanonicalDistribution or None:
        None when inplace=True (default); a new CanonicalDistribution
        instance when inplace=False.
    """
    return self._operate(other, inplace=inplace, operation='divide')
def get_pmag_dir():
    """Returns directory in which PmagPy is installed.

    Resolution order: py2app resource path, a local working copy in the
    current directory, a frozen (pyinstaller) bundle, then the
    pip-installed package located via the `locator` module. Falls back
    to '.' with a warning when pmag.py cannot be found anywhere.
    """
    # this is correct for py2exe (DEPRECATED)
    # win_frozen = is_frozen()
    # if win_frozen:
    #     path = os.path.abspath(unicode(sys.executable, sys.getfilesystemencoding()))
    #     path = os.path.split(path)[0]
    #     return path
    # this is correct for py2app
    try:
        return os.environ['RESOURCEPATH']
    # this works for everything else
    except KeyError:
        pass
    # new way:
    # if we're in the local PmagPy directory:
    if os.path.isfile(os.path.join(os.getcwd(), 'pmagpy', 'pmag.py')):
        lib_dir = os.path.join(os.getcwd(), 'pmagpy')
    # if we're anywhere else:
    elif getattr(sys, 'frozen', False):
        # pyinstaller datafile directory
        return sys._MEIPASS
    else:
        # horrible, hack-y fix
        # (prevents namespace issue between
        # local github PmagPy and pip-installed PmagPy).
        # must reload because we may have
        # changed directories since importing
        temp = os.getcwd()
        os.chdir('..')
        reload(locator)
        lib_file = resource_filename('locator', 'resource.py')
        full_dir = os.path.split(lib_file)[0]
        ind = full_dir.rfind(os.sep)
        lib_dir = full_dir[:ind + 1]
        lib_dir = os.path.realpath(os.path.join(lib_dir, 'pmagpy'))
        os.chdir(temp)
        # end fix
    # old way:
    # lib_dir = os.path.dirname(os.path.realpath(__file__))
    if not os.path.isfile(os.path.join(lib_dir, 'pmag.py')):
        lib_dir = os.getcwd()
    fname = os.path.join(lib_dir, 'pmag.py')
    if not os.path.isfile(fname):
        # Last resort: look relative to this module's own location.
        pmag_dir = os.path.split(os.path.split(__file__)[0])[0]
        if os.path.isfile(os.path.join(pmag_dir, 'pmagpy', 'pmag.py')):
            return pmag_dir
        else:
            print('-W- Can\'t find the data model! Make sure you have installed pmagpy using pip: "pip install pmagpy --upgrade"')
            return '.'
    # strip "/" or "\" and "pmagpy" to return proper PmagPy directory
    if lib_dir.endswith(os.sep):
        lib_dir = lib_dir[:-1]
    if lib_dir.endswith('pmagpy'):
        pmag_dir = os.path.split(lib_dir)[0]
    else:
        pmag_dir = lib_dir
    return pmag_dir
def onlinetime_get(self, service_staff_id, start_date, end_date, session):
    '''taobao.wangwang.eservice.onlinetime.get - daily accumulated online time.

    Fetch the "online time for the day" for the given customer-service IDs.
    Notes (per the TOP API): group admins and master accounts may query their
    sub-accounts; ``service_staff_id`` may contain up to 30 comma-separated
    IDs; the start/end window may span at most 7 days, cannot reach back more
    than 90 days, and cannot include the current day.
    '''
    request = TOPRequest('taobao.wangwang.eservice.onlinetime.get')
    params = {
        'service_staff_id': service_staff_id,
        'start_date': start_date,
        'end_date': end_date,
    }
    for key, value in params.items():
        request[key] = value
    self.create(self.execute(request, session))
    return self.online_times_list_on_days
def render_unregistered(error=None):
    """Render the index template for an unregistered user.

    Args:
        error (str, default None): Optional error message.

    Returns:
        str: Template filled with data.
    """
    context = dict(
        registered=False,
        error=error,
        seeder_data=None,
        url_id=None,
    )
    return template(read_index_template(), **context)
def set_walltime(self, walltime):
    """Set a walltime for the job.

    >>> job.set_walltime(datetime.timedelta(hours=2, minutes=30))

    :param walltime: Walltime of the job (an instance of timedelta)
    :returns: self, to allow call chaining
    :rtype: self
    """
    if isinstance(walltime, timedelta):
        self._options['walltime'] = walltime
        return self
    # Reject anything that is not a timedelta, naming the offending type.
    raise TypeError('walltime must be an instance of datetime.timedelta. %s given' % type(walltime))
def sanitizeStructTime(struct):
    """Clamp the first six fields of a struct_time tuple into valid ranges.

    Each of year/month/day/hour/minute/second is replaced by the closest
    valid value; any remaining fields are passed through untouched.
    """
    # (min, max) for year, month, day, hour, minute, second.
    bounds = ((1, 9999), (1, 12), (1, 31), (0, 23), (0, 59), (0, 59))
    clamped = tuple(
        min(max(value, lo), hi)
        for value, (lo, hi) in zip(struct[:6], bounds)
    )
    return clamped + struct[6:]
def get_authorization_url ( self , requested_scopes = None , redirect_uri = OAUTH_REDIRECT_URL , ** kwargs ) : """Initializes the oauth authorization flow , getting the authorization url that the user must approve . : param list [ str ] requested _ scopes : list of scopes to request access for : param str redirect _ uri : redirect url configured in registered app : param kwargs : allow to pass unused params in conjunction with Connection : return : authorization url : rtype : str"""
# TODO : remove this warning in future releases if redirect_uri == OAUTH_REDIRECT_URL : warnings . warn ( 'The default redirect uri was changed in version 1.1.4. to' ' "https://login.microsoftonline.com/common/oauth2/nativeclient".' ' You may have to change the registered app "redirect uri" or pass here the old "redirect_uri"' , DeprecationWarning ) client_id , client_secret = self . auth if requested_scopes : scopes = requested_scopes elif self . scopes is not None : scopes = self . scopes else : raise ValueError ( 'Must provide at least one scope' ) self . session = oauth = OAuth2Session ( client_id = client_id , redirect_uri = redirect_uri , scope = scopes ) self . session . proxies = self . proxy if self . request_retries : retry = Retry ( total = self . request_retries , read = self . request_retries , connect = self . request_retries , backoff_factor = RETRIES_BACKOFF_FACTOR , status_forcelist = RETRIES_STATUS_LIST ) adapter = HTTPAdapter ( max_retries = retry ) self . session . mount ( 'http://' , adapter ) self . session . mount ( 'https://' , adapter ) # TODO : access _ type = ' offline ' has no effect according to documentation # This is done through scope ' offline _ access ' . auth_url , state = oauth . authorization_url ( url = self . _oauth2_authorize_url , access_type = 'offline' ) return auth_url
def _py2_and_3_joiner(sep, joinable):
    """Allow ``'\\n'.join(...)``-style statements to work in Py2 and Py3.

    :param sep: separator (text) to join with
    :param joinable: iterable of byte strings to join
    :return: joined value (decoded text on Py3, native str on Py2)
    """
    if not ISPY3:
        return sep.join(joinable)
    # On Py3 the joined items are bytes, so the separator must be encoded
    # before joining and the final result decoded back to text.
    encoded_sep = bytes(sep, DEFAULT_ENCODING)
    return encoded_sep.join(joinable).decode(DEFAULT_ENCODING)
def create_fwrule(kwargs=None, call=None):
    '''Create a GCE firewall rule. The 'default' network is used if not specified.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_fwrule gce name=allow-http allow=tcp:80
    '''
    # Only callable as a salt-cloud "function" (-f/--function), not an action.
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_fwrule function must be called with -f or --function.'
        )
    if not kwargs or 'name' not in kwargs:
        log.error(
            'A name must be specified when creating a firewall rule.'
        )
        return False
    if 'allow' not in kwargs:
        log.error(
            'Must use "allow" to specify allowed protocols/ports.'
        )
        return False
    name = kwargs['name']
    network_name = kwargs.get('network', 'default')
    allow = _parse_allow(kwargs['allow'])
    # Default to allowing traffic from anywhere unless a source range is given.
    src_range = kwargs.get('src_range', '0.0.0.0/0')
    src_tags = kwargs.get('src_tags', None)
    dst_tags = kwargs.get('dst_tags', None)
    # The provider expects lists; CLI values arrive comma-separated.
    if src_range:
        src_range = src_range.split(',')
    if src_tags:
        src_tags = src_tags.split(',')
    if dst_tags:
        dst_tags = dst_tags.split(',')
    conn = get_conn()
    # Fire the "creating" event before the API call so listeners see intent.
    __utils__['cloud.fire_event'](
        'event',
        'create firewall',
        'salt/cloud/firewall/creating',
        args={
            'name': name,
            'network': network_name,
            'allow': kwargs['allow'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    fwrule = conn.ex_create_firewall(
        name, allow,
        network=network_name,
        source_ranges=src_range,
        source_tags=src_tags,
        target_tags=dst_tags
    )
    __utils__['cloud.fire_event'](
        'event',
        'created firewall',
        'salt/cloud/firewall/created',
        args={
            'name': name,
            'network': network_name,
            'allow': kwargs['allow'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Return the rule as a plain dict for display.
    return _expand_item(fwrule)
def _remove_clublog_xml_header ( self , cty_xml_filename ) : """remove the header of the Clublog XML File to make it properly parseable for the python ElementTree XML parser"""
import tempfile try : with open ( cty_xml_filename , "r" ) as f : content = f . readlines ( ) cty_dir = tempfile . gettempdir ( ) cty_name = os . path . split ( cty_xml_filename ) [ 1 ] cty_xml_filename_no_header = os . path . join ( cty_dir , "NoHeader_" + cty_name ) with open ( cty_xml_filename_no_header , "w" ) as f : f . writelines ( "<clublog>\n\r" ) f . writelines ( content [ 1 : ] ) self . _logger . debug ( "Header successfully modified for XML Parsing" ) return cty_xml_filename_no_header except Exception as e : self . _logger . error ( "Clublog CTY could not be opened / modified" ) self . _logger . error ( "Error Message: " + str ( e ) ) return
def analyzeModelWeightDistribution(modelName, base):
    """Plot a histogram of the non-zero weight values of a saved model.

    :param modelName: path to a checkpoint loadable via ``torch.load``
    :param base: forwarded to ``analyzeWeightDistribution``
    """
    net = torch.load(modelName)
    net.eval()
    weights = net.l1.weight.data
    analyzeWeightDistribution(weights, base)
def analyze(self, handle, filename):
    """Submit a file for analysis.

    :type  handle:   File handle
    :param handle:   Handle to file to upload for analysis.
    :type  filename: str
    :param filename: File name.

    :rtype:  str
    :return: File ID as a string
    """
    # Rewind so the whole file is uploaded, then POST it as multipart data.
    handle.seek(0)
    upload = {"sample_file": (filename, handle)}
    response = self._request("/sample/submit", method='POST', files=upload,
                             headers=self.headers)
    try:
        if response.status_code == 200:
            payload = response.json()
            if not payload['data']['errors']:
                # Only single-file submissions are supported; take the first.
                return payload['data']['samples'][0]['sample_id']
        raise sandboxapi.SandboxError("api error in analyze ({u}): {r}".format(u=response.url, r=response.content))
    except (ValueError, KeyError, IndexError) as e:
        raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e))
def _prep_subsampled_bams(data, work_dir):
    """Prepare a subsampled BAM file with discordants from samblaster and minimal correct pairs.

    This attempts to minimize run times by pre-extracting useful reads mixed
    with subsampled normal pairs to estimate paired end distributions:

    https://groups.google.com/d/msg/delly-users/xmia4lwOd1Q/uaajoBkahAIJ

    Subsamples correctly aligned reads to 100 million based on speedseq
    defaults and evaluations on NA12878 whole genome data:

    https://github.com/cc2qe/speedseq/blob/ca624ba9affb0bd0fb88834ca896e9122639ec94/bin/speedseq#L1102

    XXX Currently not used as new versions of delly do not get good
    sensitivity with downsampled BAMs.
    """
    # Split reads and discordant pairs previously extracted by samblaster.
    sr_bam, disc_bam = sshared.get_split_discordants(data, work_dir)
    # Downsample properly-paired primary alignments to ~1e8 reads
    # (speedseq's default target) from the full alignment BAM.
    ds_bam = bam.downsample(dd.get_align_bam(data), data, 1e8,
                            read_filter="-F 'not secondary_alignment and proper_pair'",
                            always_run=True, work_dir=work_dir)
    out_bam = "%s-final%s" % utils.splitext_plus(ds_bam)
    # Merge is skipped when the output already exists (idempotent re-runs),
    # but the index is always refreshed.
    if not utils.file_exists(out_bam):
        bam.merge([ds_bam, sr_bam, disc_bam], out_bam, data["config"])
    bam.index(out_bam, data["config"])
    return [out_bam]
def parse_config(contents):
    """Yield an Identity object for every ``<identity|curve>`` entry found in *contents*."""
    pattern = re.compile(r'\<(.*?)\|(.*?)\>')
    for match in pattern.finditer(contents):
        identity_str, curve_name = match.groups()
        yield device.interface.Identity(identity_str=identity_str,
                                        curve_name=curve_name)
def write_xml(xml_str, output_loc=None, filename=None):
    """Write the XML content (string) into a file.

    If ``output_loc`` points at a file (not a directory), the output is saved
    there and ``filename`` is ignored; otherwise ``filename`` (generated when
    not supplied) is used.

    Args:
        xml_str: string with XML document
        output_loc: file or directory for saving the file
        filename: file name used when ``output_loc`` is a directory

    Raises:
        Dump2PolarionException: when there is no data to write.
    """
    if not xml_str:
        raise Dump2PolarionException("No data to write.")
    target = _get_filename(output_loc=output_loc, filename=filename)
    unicode_payload = get_unicode_str(xml_str)
    with io.open(target, "w", encoding="utf-8") as out:
        out.write(unicode_payload)
    logger.info("Data written to '%s'", target)
def config_to_options(config):
    """Convert a ConfigParser instance to an argparse.Namespace-like object.

    Parameters
    ----------
    config : object
        A ConfigParser instance.

    Returns
    -------
    object
        An options object exposing ``host``, ``port``, ``to_addr``,
        ``from_addr``, ``subject``, ``encoding`` and ``username`` attributes,
        with ``%(host)s`` / ``%(prog)s`` placeholders in the addresses
        expanded.
    """
    class Options:
        # raw=True disables ConfigParser interpolation so the %(...)s
        # placeholders survive until we expand them below.
        host = config.get('smtp', 'host', raw=True)
        port = config.getint('smtp', 'port')
        to_addr = config.get('mail', 'to_addr', raw=True)
        from_addr = config.get('mail', 'from_addr', raw=True)
        subject = config.get('mail', 'subject', raw=True)
        encoding = config.get('mail', 'encoding', raw=True)
        username = config.get('auth', 'username')

    opts = Options()
    # Expand %(host)s / %(prog)s placeholders in the addresses.
    # Bug fix: the formatted results were previously computed and discarded;
    # they must be assigned back for the expansion to take effect.
    fmt = {'host': opts.host, 'prog': 'notify'}
    opts.to_addr = opts.to_addr % fmt
    opts.from_addr = opts.from_addr % fmt
    return opts
def prt_detail(self):
    """Print a human-readable summary of the folder statistics."""
    rows = (
        ("Detail info of %s: ", self.abspath),
        ("total size = %s", string_SizeInBytes(self.size_total)),
        ("number of sub folders = %s", self.num_folder_total),
        ("number of total files = %s", self.num_file_total),
        ("lvl 1 file size = %s", string_SizeInBytes(self.size_current)),
        ("lvl 1 folder number = %s", self.num_folder_current),
        ("lvl 1 file number = %s", self.num_file_current),
    )
    print("\n".join(fmt % value for fmt, value in rows))
def region(self, region=None, seqid=None, start=None, end=None, strand=None,
           featuretype=None, completely_within=False):
    """Return features within specified genomic coordinates.

    Specifying genomic coordinates can be done in a flexible manner.

    Parameters
    ----------
    region : string, tuple, or Feature instance
        If string, then of the form "seqid:start-end".  If tuple, then
        (seqid, start, end).  If :class:`Feature`, then use the feature's
        seqid, start, and end values.  Mutually exclusive with
        seqid/start/end.  *Note*: by design, even if a Feature is provided
        its strand is ignored; restrict by strand with the separate
        `strand` kwarg.
    strand : + | - | . | None
        If provided, only features exactly matching `strand` are returned,
        so `strand='.'` returns only unstranded features.  Default None
        does not restrict by strand.
    seqid, start, end, strand
        Mutually exclusive with `region`; these kwargs approximate slice
        notation (seqid+end only behaves like [:end], seqid+start like
        [start:], all None like all_features()).
    featuretype : None, string, or iterable
        If not None, restrict output to this feature type (string) or any
        of these feature types (iterable).
    completely_within : bool
        By default (False), return features that partially or completely
        overlap `region`.  If True, only features completely within the
        region are returned; this also enables the UCSC-style binning
        optimization, so prefer it when performance matters.

    Returns
    -------
    A generator object that yields :class:`Feature` objects.
    """
    # Argument handling.
    if region is not None:
        if (seqid is not None) or (start is not None) or (end is not None):
            raise ValueError("If region is supplied, do not supply seqid, "
                             "start, or end as separate kwargs")
        if isinstance(region, six.string_types):
            # "seqid", "seqid:start-end" or "seqid:start-end:strand"
            toks = region.split(':')
            if len(toks) == 1:
                seqid = toks[0]
                start, end = None, None
            else:
                seqid, coords = toks[:2]
                if len(toks) == 3:
                    strand = toks[2]
                start, end = coords.split('-')
        elif isinstance(region, Feature):
            seqid = region.seqid
            start = region.start
            end = region.end
            strand = region.strand
        # otherwise assume it's a tuple
        else:
            seqid, start, end = region[:3]

    # e.g.,
    # completely_within=True ..... start >= {start} AND end <= {end}
    # completely_within=False .... start < {end} AND end > {start}
    if completely_within:
        start_op = '>='
        end_op = '<='
    else:
        start_op = '<'
        end_op = '>'
        # Partial overlap compares the feature's start against the region's
        # *end* (and vice versa), hence the deliberate swap here.
        end, start = start, end

    args = []
    position_clause = []
    if seqid is not None:
        position_clause.append('seqid = ?')
        args.append(seqid)
    if start is not None:
        start = int(start)
        position_clause.append('start %s ?' % start_op)
        args.append(start)
    if end is not None:
        end = int(end)
        position_clause.append('end %s ?' % end_op)
        args.append(end)

    position_clause = ' AND '.join(position_clause)

    # Only use bins if we have defined boundaries and completely_within is
    # True. Otherwise you can't know how far away a feature stretches
    # (which means bins are not computable ahead of time)
    _bin_clause = ''
    if (start is not None) and (end is not None) and completely_within:
        if start <= bins.MAX_CHROM_SIZE and end <= bins.MAX_CHROM_SIZE:
            _bins = list(bins.bins(start, end, one=False))
            # See issue #45
            if len(_bins) < 900:
                _bin_clause = ' or '.join(['bin = ?' for _ in _bins])
                _bin_clause = 'AND ( %s )' % _bin_clause
                args += _bins

    query = ' '.join([constants._SELECT, 'WHERE ', position_clause,
                      _bin_clause])

    # Add the featuretype clause
    if featuretype is not None:
        if isinstance(featuretype, six.string_types):
            featuretype = [featuretype]
        feature_clause = ' or '.join(['featuretype = ?'
                                      for _ in featuretype])
        query += ' AND (%s) ' % feature_clause
        args.extend(featuretype)

    if strand is not None:
        strand_clause = ' and strand = ? '
        query += strand_clause
        args.append(strand)

    c = self.conn.cursor()
    # Remember what was executed for debugging/introspection.
    self._last_query = query
    self._last_args = args
    self._context = {
        'start': start,
        'end': end,
        'seqid': seqid,
        'region': region,
    }
    c.execute(query, tuple(args))

    for i in c:
        yield self._feature_returner(**i)
def processes(self):
    """The processes for this app."""
    resource_path = ('apps', self.name, 'ps')
    return self._h._get_resources(
        resource=resource_path,
        obj=Process,
        app=self,
        map=ProcessListResource,
    )
def get_relative_modpath(module_fpath):
    """Returns path to module relative to the package root

    Args:
        module_fpath (str): module filepath

    Returns:
        str: modname

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> import utool as ut
        >>> module_fpath = ut.util_path.__file__
        >>> rel_modpath = ut.get_relative_modpath(module_fpath)
        >>> rel_modpath = rel_modpath.replace('.pyc', '.py')  # allow pyc or py
        >>> result = ensure_crossplat_path(rel_modpath)
        >>> print(result)
        utool/util_path.py
    """
    subdirs = get_module_subdir_list(module_fpath)
    ext = splitext(module_fpath)[1]
    # Re-join the package-relative components, keep the extension, and
    # normalize separators for cross-platform output.
    return ensure_crossplat_path(join(*subdirs) + ext)
def curve(self):
    """Curve of the super helix."""
    pitch = self.major_pitch
    radius = self.major_radius
    return HelicalCurve.pitch_and_radius(
        pitch, radius, handedness=self.major_handedness)
def instance(self, inst):
    """Called by __parse_xml_file in source_reader.

    Stores the parsed instance and rewrites its location's file name
    through the internal file-name mapping.
    """
    self.__inst = inst
    # use inst, to reduce attribute access time
    if not isinstance(inst, declarations.declaration_t):
        return
    location = inst.location
    if location is not None and location.file_name != '':
        location.file_name = self.__files[location.file_name]
def move(self, filename, target):
    '''Move a file given its filename to another path in the storage.

    The default implementation performs a copy followed by a delete.
    Backends should override it when a more efficient way exists.
    '''
    # Copy first so the source is only removed after the data exists at
    # the target.
    self.copy(filename, target)
    self.delete(filename)
def check_data(cls, name, dims, is_unstructured):
    """A validation method for the data shape

    Parameters
    ----------
    name: list of str with length 2
        The variable names (one for the first, two for the second array)
    dims: list with length 2 of lists with length 1
        The dimension of the arrays. Only 2D-Arrays are allowed (or 1-D if
        an array is unstructured)
    is_unstructured: bool or list of bool
        True if the corresponding array is unstructured.

    Returns
    -------
    %(Plotter.check_data.returns)s
    """
    # Promote scalar arguments to one-element lists.
    if isinstance(name, six.string_types) or not is_iterable(name):
        name, dims, is_unstructured = [name], [dims], [is_unstructured]
    msg = ('Two arrays are required (one for the scalar and '
           'one for the vector field)')
    if len(name) != 2:
        # None signals "undecided" when too few entries were given,
        # False signals a definite failure for too many.
        return ([None] if len(name) < 2 else [False]), [msg]
    scalar_valid, scalar_msg = Simple2DBase.check_data(
        name[:1], dims[0:1], is_unstructured[:1])
    vector_valid, vector_msg = BaseVectorPlotter.check_data(
        name[1:], dims[1:], is_unstructured[1:])
    return scalar_valid + vector_valid, scalar_msg + vector_msg
def get_querystring(self):
    """Return the current query string minus unwanted parameters.

    Cleans the existing query string (GET parameters) by removing arguments
    that we don't want to preserve (e.g. the sort parameter and 'page').

    Returns:
        str: the re-encoded query string without the removed parameters.
    """
    to_remove = self.get_querystring_parameter_to_remove()
    query_string = urlparse(self.request.get_full_path()).query
    # Bug fix: the previous code parsed query_string.encode('utf-8'); on
    # Python 3 that makes parse_qs return *bytes* keys, so the str names in
    # `to_remove` never matched and nothing was ever stripped.  Parsing the
    # str directly works on both Python 2 and 3.
    query_dict = parse_qs(query_string)
    for arg in to_remove:
        query_dict.pop(arg, None)
    return urlencode(query_dict, doseq=True)
def record_ce_entries(self):
    # type: () -> bytes
    '''Return a string representing the Rock Ridge entries in the
    Continuation Entry.

    Parameters:
     None.
    Returns:
     A string representing the Rock Ridge entry.
    '''
    if self._initialized:
        return self._record(self.ce_entries)
    raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')
def to_url(request):
    """Serialize the request as a URL suitable for a GET."""
    # Split the original URL, fold the request's data/params into the
    # existing query parameters, and reassemble the URL.
    parts = urlsplit(to_utf8(request.url))
    scheme, netloc, path, query, fragment = parts
    merged = parse_qs(query)
    for key, value in request.data_and_params.iteritems():
        merged.setdefault(key, []).append(value)
    encoded = urllib.urlencode(merged, True)
    return urlunsplit((scheme, netloc, path, encoded, fragment))