signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def _redis_connection_settings(self):
    """Build the redis connection settings dict from ``self.settings``.

    Each key falls back to the corresponding class-level ``_REDIS_*``
    default when it is absent from the settings mapping.
    """
    settings = self.settings
    return {
        config.HOST: settings.get(config.HOST, self._REDIS_HOST),
        config.PORT: settings.get(config.PORT, self._REDIS_PORT),
        'selected_db': settings.get(config.DB, self._REDIS_DB),
    }
def save_plot(axes="gca", path=None):
    """Save the current (or given) axes in a simple ASCII ".plot" format.

    Parameters
    ----------
    axes : matplotlib axes or "gca"
        Axes to save; "gca" means use the current axes.
    path : str or None
        Output path; if None a save dialog is shown.  A ".plot"
        extension is appended when missing.  Aborts on empty path.
    """
    global line_attributes

    # choose a path to save to
    if path is None:
        path = _s.dialogs.Save("*.plot", default_directory="save_plot_default_directory")
    if path == "":
        print("aborted.")
        return
    if not path.endswith(".plot"):
        path = path + ".plot"

    # if no argument was given, get the current axes
    if axes == "gca":
        axes = _pylab.gca()

    # BUG FIX: the Python 2 builtin file() does not exist in Python 3;
    # use open() in a context manager so the file is closed even on error.
    with open(path, "w") as f:
        # header: title and axis labels, with embedded newlines escaped
        f.write("title=" + axes.title.get_text().replace('\n', '\\n') + '\n')
        f.write("xlabel=" + axes.xaxis.label.get_text().replace('\n', '\\n') + '\n')
        f.write("ylabel=" + axes.yaxis.label.get_text().replace('\n', '\\n') + '\n')

        # now loop over the available lines
        for l in axes.lines:
            # write the data header
            f.write("trace=new\n")
            f.write("legend=" + l.get_label().replace('\n', '\\n') + "\n")
            for a in line_attributes:
                f.write(a + "=" + str(_pylab.getp(l, a)).replace('\n', '') + "\n")

            # get the data and write it one "x y" pair per line
            x = l.get_xdata()
            y = l.get_ydata()
            for n in range(len(x)):
                f.write(str(float(x[n])) + " " + str(float(y[n])) + "\n")
def _write ( self , ret ) : """This function needs to correspond to this : https : / / github . com / saltstack / salt / blob / develop / salt / returners / redis _ return . py # L88"""
self . redis . set ( '{0}:{1}' . format ( ret [ 'id' ] , ret [ 'jid' ] ) , json . dumps ( ret ) ) self . redis . lpush ( '{0}:{1}' . format ( ret [ 'id' ] , ret [ 'fun' ] ) , ret [ 'jid' ] ) self . redis . sadd ( 'minions' , ret [ 'id' ] ) self . redis . sadd ( 'jids' , ret [ 'jid' ] )
def requestTimedOut(self, reply):
    """Trap the timeout signal.

    In async mode requestTimedOut is called after replyFinished, so we
    only record the timeout on the call result rather than raising.
    """
    # adapt http_call_result basing on receiving qgs timer timeout signal
    self.http_call_result.exception = RequestsExceptionTimeout("Timeout error")
    self.exception_class = RequestsExceptionTimeout
def prepare_date(data, schema):
    """Convert a ``datetime.date`` to an int day count; pass others through."""
    if not isinstance(data, datetime.date):
        return data
    # Shift the proleptic-Gregorian ordinal into the schema's epoch.
    return data.toordinal() - DAYS_SHIFT
def add_icon_widget(self, ref, x=1, y=1, name="heart"):
    """Add an IconWidget at (x, y), reusing any widget already stored under ref."""
    if ref not in self.widgets:
        self.widgets[ref] = IconWidget(screen=self, ref=ref, x=x, y=y, name=name)
    return self.widgets[ref]
def make_anchor_id(self):
    """Return string to use as URL anchor for this comment.

    Joins user and timestamp with "_" and replaces every character that
    is not alphanumeric or underscore with "_".
    """
    raw = self.user + '_' + self.timestamp
    return re.sub('[^a-zA-Z0-9_]', '_', raw)
def _is_current(self, file_path, zip_path):
    """Return True if `file_path` is an up-to-date extraction of `zip_path`.

    Checks cheap metadata first (existence, size, mtime against the
    archive entry), then verifies the actual bytes match.
    """
    timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
    if not os.path.isfile(file_path):
        return False
    stat = os.stat(file_path)
    # Metadata mismatch means stale; skip the expensive byte compare.
    if stat.st_size != size or stat.st_mtime != timestamp:
        return False
    # check that the contents match
    zip_contents = self.loader.get_data(zip_path)
    with open(file_path, 'rb') as f:
        file_contents = f.read()
    return zip_contents == file_contents
def isa(cls, protocol):
    """Does the type `cls` participate in the `protocol`?

    Both arguments must be types; raises TypeError otherwise.
    """
    if isinstance(cls, type):
        if isinstance(protocol, type):
            # Direct participation, or the protocol accepts AnyType.
            return issubclass(cls, protocol) or issubclass(AnyType, protocol)
        raise TypeError(("Second argument to isa must be a type or a Protocol. "
                         "Got an instance of %r.") % type(protocol))
    raise TypeError("First argument to isa must be a type. Got %s." % repr(cls))
def make_multisig_segwit_wallet(m, n):
    """Create a bundle of information that can be used to generate an
    m-of-n multisig witness script.

    Generates n fresh compressed private keys (WIF-encoded) and passes
    them to make_multisig_segwit_info.
    """
    # BUG FIX: xrange is Python 2 only; range is correct on both.
    pks = [BitcoinPrivateKey(compressed=True).to_wif() for _ in range(n)]
    return make_multisig_segwit_info(m, pks)
def build_help_html():
    """Build the help HTML using the shared resources.

    Strips platform-irrelevant README blocks, renders the combined
    README+CHANGES reStructuredText into the limited HTML subset that
    wx.html.HtmlWindow understands, and returns the page as a string.
    """
    # Blocks never shown in help, plus the per-OS blocks that don't
    # apply to the current platform.
    remove_from_help = ["not-in-help", "copyright"]
    if sys.platform in ["win32", "cygwin"]:
        remove_from_help.extend(["osx", "linux"])
    elif sys.platform == "darwin":
        remove_from_help.extend(["linux", "windows", "linux-windows"])
    else:
        remove_from_help.extend(["osx", "windows"])
    readme_part_gen = all_but_blocks(remove_from_help, README, newline=None)
    rst_body = "\n".join(list(readme_part_gen) + CHANGES)
    doctree = docutils.core.publish_doctree(rst_body)
    visitor = Doctree2HtmlForWx(doctree)
    doctree.walkabout(visitor)  # Fills visitor.body and visitor.toc
    return (  # wx.html.HtmlWindow only renders a HTML subset
        u'<body bgcolor="{bg}" text="{fg}" link="{link}">'
        u'<a name="0"><h1>{title}</h1></a>'
        u"{sectionless}"
        u"{toc}"
        u"<p>This help was generated using selected information from the\n"
        u"Dose <tt>README.rst</tt> and <tt>CHANGES.rst</tt> project files.</p>"
        u'<p><a href="{url}">{url}</a></p><hr/>'
        u"{body}</body>"
    ).format(bg=HELP_BG_COLOR, fg=HELP_FG_COLOR, link=HELP_LINK_COLOR,
             title=HELP_TITLE,
             sectionless=u"".join(visitor.sectionless),
             toc=u"".join(visitor.toc),
             url=__url__,
             body=u"".join(visitor.body))
def get_tasklogger(name="TaskLogger"):
    """Get a TaskLogger object.

    Parameters
    ----------
    name : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    existing = logging.getLogger(name)
    try:
        # A TaskLogger attaches itself to the stdlib logger of the same name.
        return existing.tasklogger
    except AttributeError:
        # None attached yet: create a fresh one.
        return logger.TaskLogger(name)
def get_directors(self):
    """Return all current directors for this company, with person preloaded."""
    queryset = Director.objects.filter(company=self, is_current=True)
    return queryset.select_related('person')
def from_diff(diff, options=None, cwd=None):
    """Create a Radius object from a diff rather than a repository."""
    kwargs = dict(diff=diff, options=options, cwd=cwd)
    return RadiusFromDiff(**kwargs)
def get_existing_path(path, topmost_path=None):
    """Get the longest parent path in `path` that exists.

    If `path` exists, it is returned.

    Args:
        path (str): Path to test
        topmost_path (str): Do not test this path or above

    Returns:
        str: Existing path, or None if no path was found.
    """
    if topmost_path:
        topmost_path = os.path.normpath(topmost_path)
    while True:
        if os.path.exists(path):
            return path
        parent = os.path.dirname(path)
        # dirname is a fixed point at the filesystem root: give up.
        if parent == path:
            return None
        # Stop before climbing to (or past) the topmost boundary.
        if topmost_path and os.path.normpath(parent) == topmost_path:
            return None
        path = parent
def candidate_foundation(candidate_type, candidate_transport, base_address):
    """Compute the candidate foundation.

    See RFC 5245 - 4.1.1.3. Computing Foundations.
    """
    key = '%s|%s|%s' % (candidate_type, candidate_transport, base_address)
    digest = hashlib.md5(key.encode('ascii'))
    return digest.hexdigest()
def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True, add_diff=False):
    """Stacks sequence of models.

    Parameters
    ----------
    k : int, default 5
        Number of folds.
    stratify : bool, default False
    shuffle : bool, default True
    seed : int, default 100
    full_test : bool, default True
        If True then evaluate test dataset on the full data, otherwise
        take the mean of every fold.
    add_diff : bool, default False

    Returns
    -------
    `Dataset` of stacked out-of-fold predictions.

    Examples
    --------
    >>> pipeline = ModelsPipeline(model_rf, model_lr)
    >>> stack_ds = pipeline.stack(k=10, seed=111)
    """
    result_train = []
    result_test = []
    y = None
    # Stack each model independently; generate_columns namespaces the
    # per-model prediction columns with the model name.
    for model in self.models:
        result = model.stack(k=k, stratify=stratify, shuffle=shuffle, seed=seed, full_test=full_test)
        train_df = pd.DataFrame(result.X_train, columns=generate_columns(result.X_train, model.name))
        test_df = pd.DataFrame(result.X_test, columns=generate_columns(result.X_test, model.name))
        result_train.append(train_df)
        result_test.append(test_df)
        # y_train is taken from the first model only — presumably every
        # model sees the same folds/targets; TODO confirm.
        if y is None:
            y = result.y_train
    # Per-model frames are concatenated column-wise.
    result_train = pd.concat(result_train, axis=1)
    result_test = pd.concat(result_test, axis=1)
    if add_diff:
        result_train = feature_combiner(result_train)
        result_test = feature_combiner(result_test)
    ds = Dataset(X_train=result_train, y_train=y, X_test=result_test)
    return ds
async def _on_rpc_command(self, event):
    """Received an RPC command that we should execute.

    Decodes the event payload, dispatches the RPC (awaiting coroutine
    results), maps known failures onto result codes, and sends the
    response back.  Failures while *sending* the response are logged
    but never raised — this runs as a background task.
    """
    payload = event['payload']
    rpc_id = payload['rpc_id']
    tag = payload['response_uuid']
    args = payload['payload']
    result = 'success'
    response = b''
    if self._rpc_dispatcher is None or not self._rpc_dispatcher.has_rpc(rpc_id):
        result = 'rpc_not_found'
    else:
        try:
            response = self._rpc_dispatcher.call_rpc(rpc_id, args)
            # Dispatchers may be sync or async; only await coroutines.
            if inspect.iscoroutine(response):
                response = await response
        except RPCInvalidArgumentsError:
            result = 'invalid_arguments'
        except RPCInvalidReturnValueError:
            result = 'invalid_response'
        except Exception:  # pylint:disable=broad-except; We are being called in a background task
            self._logger.exception("Exception handling RPC 0x%04X", rpc_id)
            result = 'execution_exception'
    message = dict(response_uuid=tag, result=result, response=response)
    try:
        await self.send_command(OPERATIONS.CMD_RESPOND_RPC, message, MESSAGES.RespondRPCResponse)
    except:  # pylint:disable=bare-except; We are being called in a background worker
        self._logger.exception("Error sending response to RPC 0x%04X", rpc_id)
def prettyPrintPacket(pkt):
    """Render a packet's ID, instruction, length and params as a string.

    NOTE(review): the original `len(s) > 10` guard tests the length of
    the *formatted string* (always true for this format), presumably
    the intent was the packet length — kept as-is to preserve behavior.
    """
    s = 'packet ID: {} instr: {} len: {}'.format(
        pkt[4], pkt[7], int((pkt[6] << 8) + pkt[5]))
    if len(s) > 10:
        s += ' params: {}'.format(pkt[8:-2])
    return s
def format_op_row(ipFile, totLines, totWords, uniqueWords):
    """Format one output stats row: basename, line/word/unique-word counts."""
    columns = [
        os.path.basename(ipFile).ljust(36),
        str(totLines).rjust(7),
        str(totWords).rjust(7),
        str(len(uniqueWords)).rjust(7),
    ]
    # Trailing space matches the original column separator layout.
    return ' '.join(columns) + ' '
def readline(self, size=None):
    """Read a single line from rfile buffer and return it.

    Args:
        size (int): minimum amount of data to read

    Returns:
        bytes: One line from rfile.
    """
    if size is not None:
        data = self.rfile.readline(size)
        self.bytes_read += len(data)
        # Enforce the request-body length limit as we consume bytes.
        self._check_length()
        return data

    # User didn't specify a size ...
    # We read the line in chunks to make sure it's not a 100MB line !
    res = []
    while True:
        data = self.rfile.readline(256)
        self.bytes_read += len(data)
        self._check_length()
        res.append(data)
        # See https://github.com/cherrypy/cherrypy/issues/421
        # A short chunk or a trailing LF means the line is complete.
        if len(data) < 256 or data[-1:] == LF:
            return EMPTY.join(res)
def remove_resource(self, path):
    """Helper function to remove resources.

    Walks every prefix of `path` and deletes each registered resource
    along the way, returning the last one found (or None).

    :param path: the path for the unwanted resource
    :rtype: the removed object
    """
    res = None
    actual_path = ""
    for segment in path.strip("/").split("/"):
        actual_path += "/" + segment
        try:
            res = self.root[actual_path]
        except KeyError:
            res = None
        if res is not None:
            del self.root[actual_path]
    return res
def order_of_magnitude_str ( num , base = 10.0 , prefix_list = None , exponent_list = None , suffix = '' , prefix = None ) : """TODO : Rewrite byte _ str to use this func Returns : str"""
abs_num = abs ( num ) # Find the right magnidue for prefix_ , exponent in zip ( prefix_list , exponent_list ) : # Let user request the prefix requested = False if prefix is not None : if prefix != prefix_ : continue requested = True # Otherwise find the best prefix magnitude = base ** exponent # Be less than this threshold to use this unit thresh_mag = magnitude * base if requested or abs_num <= thresh_mag : break unit_str = _magnitude_str ( abs_num , magnitude , prefix_ , suffix ) return unit_str
def _do_perspective_warp(c: FlowField, targ_pts: Points, invert=False):
    "Apply warp to `targ_pts` from `_orig_pts` to `c` `FlowField`."
    # `invert` swaps the direction of the point mapping.
    src, dst = (targ_pts, _orig_pts) if invert else (_orig_pts, targ_pts)
    return _apply_perspective(c, _find_coeffs(src, dst))
def _get_sd(file_descr):
    """Get streamdescriptor matching file_descr fileno.

    :param file_descr: file object
    :return: StreamDescriptor or None
    """
    matches = (sd for sd in NonBlockingStreamReader._streams
               if sd.stream.fileno() == file_descr)
    return next(matches, None)
def _get_matchable_segments(segments):
    """Depth-first generator over all MatchableSegment nodes in the tree.

    Stops scanning a level as soon as a Token is seen.
    """
    for subsegment in segments:
        if isinstance(subsegment, Token):
            break  # No tokens allowed next to segments
        if isinstance(subsegment, Segment):
            if isinstance(subsegment, MatchableSegment):
                yield subsegment
            # Recurse into the child segment.
            yield from _get_matchable_segments(subsegment)
def import_subview(self, idx, subview):
    """Register `subview` under `idx`, overriding any existing entry.

    Args:
        idx (str): An idx that is unique in the corpus for identifying
            the subview. An existing subview with the same id is overridden.
        subview (Subview): The subview to add; its `corpus` is set to self.
    """
    self._subviews[idx] = subview
    subview.corpus = self
def p_ExtendedAttributeNamedArgList ( p ) : """ExtendedAttributeNamedArgList : IDENTIFIER " = " IDENTIFIER " ( " ArgumentList " ) " """
p [ 0 ] = model . ExtendedAttribute ( name = p [ 1 ] , value = model . ExtendedAttributeValue ( name = p [ 3 ] , arguments = p [ 5 ] ) )
def _delete_chars(self, value):
    """Delete `value` characters at the cursor (at least one)."""
    count = max(int(value), 1)
    for _ in range(count):
        self._cursor.deleteChar()
    # Push the mutated cursor back to the widget and remember where it is.
    self._text_edit.setTextCursor(self._cursor)
    self._last_cursor_pos = self._cursor.position()
def publish_page(page, languages):
    """Publish a CMS page in all given languages; return the reloaded page."""
    for language_code, lang_name in iter_languages(languages):
        url = page.get_absolute_url()
        if not page.publisher_is_draft:
            log.info('published page "%s" already exists in %s: %s', page, lang_name, url)
        else:
            page.publish(language_code)
            log.info('page "%s" published in %s: %s', page, lang_name, url)
    return page.reload()
def create(self, data):
    """Create a new component; raises on HTTP error, returns the JSON body."""
    resp = self.http.post(str(self), json=data, auth=self.auth)
    resp.raise_for_status()
    return resp.json()
def check_nonparametric_sources(fname, smodel, investigation_time):
    """
    :param fname: full path to a source model file
    :param smodel: source model object
    :param investigation_time:
        investigation_time to compare with in the case of
        nonparametric sources
    :returns: the nonparametric sources in the model
    :raises:
        a ValueError if the investigation_time is different from the expected
    """
    # NonParametricSeismicSources are recognized by their `data` attribute.
    npsources = [src for sg in smodel.src_groups
                 for src in sg if hasattr(src, 'data')]
    if npsources and smodel.investigation_time != investigation_time:
        raise ValueError(
            'The source model %s contains an investigation_time '
            'of %s, while the job.ini has %s' % (
                fname, smodel.investigation_time, investigation_time))
    return npsources
def apply_dict_of_variables_vfunc(func, *args, signature, join='inner', fill_value=None):
    """Apply a variable level function over dicts of DataArray, DataArray,
    Variable and ndarray objects.

    Dict arguments are aligned by key according to `join`; missing keys
    are padded with `fill_value`.
    """
    args = [_as_variables_or_variable(arg) for arg in args]
    names = join_dict_keys(args, how=join)
    # One tuple of per-argument values for each joined key.
    grouped_by_name = collect_dict_values(args, names, fill_value)
    result_vars = OrderedDict()
    for name, variable_args in zip(names, grouped_by_name):
        result_vars[name] = func(*variable_args)
    # Multi-output functions return a tuple per key; split into one
    # dict per output.
    if signature.num_outputs > 1:
        return _unpack_dict_tuples(result_vars, signature.num_outputs)
    else:
        return result_vars
def get_top_live_games(self, partner='', **kwargs):
    """Returns a dictionary that includes top MMR live games.

    :param partner: (int, optional)
    :return: dictionary of prize pools, see :doc:`responses </responses>`
    """
    # Explicit `partner` argument only applies when not already given
    # via kwargs.
    if 'partner' not in kwargs:
        kwargs['partner'] = partner
    url = self.__build_url(urls.GET_TOP_LIVE_GAME, **kwargs)
    req = self.executor(url)
    if self.logger:
        self.logger.info('URL: {0}'.format(url))
    # NOTE(review): implicitly returns None when __check_http_err
    # reports an error — confirm callers handle that.
    if not self.__check_http_err(req.status_code):
        return response.build(req, url, self.raw_mode)
def clear(self):
    """Clears the dict."""
    self.__values.clear()
    # Reset access-order tracking; a fresh list drops all recorded keys.
    self.__access_keys = []
    self.__modified_times.clear()
def load_scripts ( self ) : """opens file dialog to load scripts into gui"""
# update scripts so that current settings do not get lost for index in range ( self . tree_scripts . topLevelItemCount ( ) ) : script_item = self . tree_scripts . topLevelItem ( index ) self . update_script_from_item ( script_item ) dialog = LoadDialog ( elements_type = "scripts" , elements_old = self . scripts , filename = self . gui_settings [ 'scripts_folder' ] ) if dialog . exec_ ( ) : self . gui_settings [ 'scripts_folder' ] = str ( dialog . txt_probe_log_path . text ( ) ) scripts = dialog . get_values ( ) added_scripts = set ( scripts . keys ( ) ) - set ( self . scripts . keys ( ) ) removed_scripts = set ( self . scripts . keys ( ) ) - set ( scripts . keys ( ) ) if 'data_folder' in list ( self . gui_settings . keys ( ) ) and os . path . exists ( self . gui_settings [ 'data_folder' ] ) : data_folder_name = self . gui_settings [ 'data_folder' ] else : data_folder_name = None # create instances of new instruments / scripts self . scripts , loaded_failed , self . instruments = Script . load_and_append ( script_dict = { name : scripts [ name ] for name in added_scripts } , scripts = self . scripts , instruments = self . instruments , log_function = self . log , data_path = data_folder_name ) # delete instances of new instruments / scripts that have been deselected for name in removed_scripts : del self . scripts [ name ]
def connect(self, dests=None, name=None, id='', props=None):
    '''Connect this port to other DataPorts.

    After the connection has been made, a delayed reparse of the
    connections for this and the destination port will be triggered.

    @param dests A list of the destination Port objects. Must be provided.
    @param name The name of the connection. If None, a suitable default
                will be created based on the names of the two ports.
    @param id The ID of this connection. If None, one will be generated
              by the RTC implementation.
    @param props Properties of the connection. Suitable defaults will be
                 set for required values if they are not already present.
    @raises WrongPortTypeError
    '''
    # BUG FIX: the defaults were mutable ([] and {}), which is shared
    # state across calls; use None sentinels instead.
    if dests is None:
        dests = []
    if props is None:
        props = {}
    # Data ports can only connect to opposite data ports
    with self._mutex:
        new_props = props.copy()
        ptypes = [d.porttype for d in dests]
        if self.porttype == 'DataInPort' and 'DataOutPort' not in ptypes:
            raise exceptions.WrongPortTypeError
        if self.porttype == 'DataOutPort' and 'DataInPort' not in ptypes:
            raise exceptions.WrongPortTypeError
        # Fill in required connector properties when absent.
        new_props.setdefault('dataport.dataflow_type', 'push')
        new_props.setdefault('dataport.interface_type', 'corba_cdr')
        new_props.setdefault('dataport.subscription_type', 'new')
        if 'dataport.data_type' not in new_props:
            # Only consult self.properties when the caller did not
            # supply a data type (preserves original laziness).
            new_props['dataport.data_type'] = self.properties['dataport.data_type']
        super(DataPort, self).connect(dests=dests, name=name, id=id, props=new_props)
def control_valve_choke_P_l(Psat, Pc, FL, P1=None, P2=None, disp=True):
    r'''Calculates either the upstream or downstream pressure at which choked
    flow though a liquid control valve occurs, given either a set upstream or
    downstream pressure. Implements an analytical solution of the needed
    equations from the full function :py:func:`~.size_control_valve_l`.
    For some pressures, no choked flow is possible; for choked flow to occur
    the direction of flow must be reversed. If `disp` is True, an exception
    will be raised for these conditions.

    .. math::
        P_1 = \frac{F_F F_L^2 P_{sat} - P_2}{F_L^2 - 1}

    .. math::
        P_2 = F_F F_L^2 P_{sat} - F_L^2 P_1 + P_1

    Parameters
    ----------
    Psat : float
        Saturation pressure of the liquid at inlet temperature [Pa]
    Pc : float
        Critical pressure of the liquid [Pa]
    FL : float, optional
        Liquid pressure recovery factor of a control valve without attached
        fittings [-]
    P1 : float, optional
        Absolute pressure upstream of the valve [Pa]
    P2 : float, optional
        Absolute pressure downstream of the valve [Pa]
    disp : bool, optional
        Whether or not to raise an exception on flow reversal, [-]

    Returns
    -------
    P_choke : float
        Pressure at which a choke occurs in the liquid valve [Pa]

    Notes
    -----
    Extremely cheap to compute.

    Examples
    --------
    >>> control_valve_choke_P_l(69682.89291024722, 22048320.0, 0.6, 680000.0)
    458887.5306077305
    >>> control_valve_choke_P_l(69682.89291024722, 22048320.0, 0.6, P2=458887.5306077305)
    680000.0
    '''
    FF = FF_critical_pressure_ratio_l(Psat=Psat, Pc=Pc)
    Pmin_absolute = FF*Psat
    if P2 is None:
        # Downstream choking pressure from a known upstream pressure.
        ans = P2 = FF*FL*FL*Psat - FL*FL*P1 + P1
    elif P1 is None:
        # Upstream choking pressure from a known downstream pressure.
        ans = P1 = (FF*FL*FL*Psat - P2)/(FL*FL - 1.0)
    else:
        # NOTE(review): this branch is reached when BOTH P1 and P2 are
        # given (and a TypeError occurs above when neither is); the
        # message wording is slightly misleading — confirm intent.
        raise Exception('Either P1 or P2 needs to be specified')
    if P2 > P1 and disp:
        raise Exception('Specified P1 is too low for choking to occur '
                        'at any downstream pressure; minimum '
                        'upstream pressure for choking to be possible '
                        'is %g Pa.' % Pmin_absolute)
    return ans
def quality(self, key):
    """Returns the quality of the key.

    .. versionadded:: 0.6
       In previous versions you had to use the item-lookup syntax
       (eg: ``obj[key]`` instead of ``obj.quality(key)``)
    """
    matches = (q for item, q in self if self._value_matches(key, item))
    # 0 means the key is not accepted at all.
    return next(matches, 0)
def compile(schema, pointer, context, scope=None):
    """Compiles schema with `JSON Schema`_ draft-04.

    :param schema: obj to compile
    :type schema: Mapping
    :param pointer: uri of the schema
    :type pointer: Pointer, str
    :param context: context of this schema

    .. _`JSON Schema`: http://json-schema.org
    """
    # Work on a copy so popping keywords never mutates the caller's schema.
    schm = deepcopy(schema)
    scope = urljoin(scope or str(pointer), schm.pop('id', None))
    # A $ref schema is compiled to a lazy reference, nothing else matters.
    if '$ref' in schema:
        return ReferenceValidator(urljoin(scope, schema['$ref']), context)
    attrs = {}
    if 'additionalItems' in schm:
        subpointer = pointer_join(pointer, 'additionalItems')
        attrs['additional_items'] = schm.pop('additionalItems')
        if isinstance(attrs['additional_items'], dict):
            compiled = compile(attrs['additional_items'], subpointer, context, scope)
            attrs['additional_items'] = compiled
        elif not isinstance(attrs['additional_items'], bool):
            raise CompilationError('wrong type for {}'.format('additional_items'), schema)  # noqa
    if 'additionalProperties' in schm:
        subpointer = pointer_join(pointer, 'additionalProperties')
        attrs['additional_properties'] = schm.pop('additionalProperties')
        if isinstance(attrs['additional_properties'], dict):
            compiled = compile(attrs['additional_properties'], subpointer, context, scope)
            attrs['additional_properties'] = compiled
        elif not isinstance(attrs['additional_properties'], bool):
            raise CompilationError('wrong type for {}'.format('additional_properties'), schema)  # noqa
    if 'allOf' in schm:
        subpointer = pointer_join(pointer, 'allOf')
        attrs['all_of'] = schm.pop('allOf')
        if isinstance(attrs['all_of'], (list, tuple)):
            attrs['all_of'] = [compile(element, subpointer, context, scope) for element in attrs['all_of']]  # noqa
        else:
            # should be a boolean
            raise CompilationError('wrong type for {}'.format('allOf'), schema)  # noqa
    if 'anyOf' in schm:
        subpointer = pointer_join(pointer, 'anyOf')
        attrs['any_of'] = schm.pop('anyOf')
        if isinstance(attrs['any_of'], (list, tuple)):
            attrs['any_of'] = [compile(element, subpointer, context, scope) for element in attrs['any_of']]  # noqa
        else:
            # should be a boolean
            raise CompilationError('wrong type for {}'.format('anyOf'), schema)  # noqa
    if 'default' in schm:
        attrs['default'] = schm.pop('default')
    if 'dependencies' in schm:
        attrs['dependencies'] = schm.pop('dependencies')
        if not isinstance(attrs['dependencies'], dict):
            raise CompilationError('dependencies must be an object', schema)
        for key, value in attrs['dependencies'].items():
            # Schema dependencies are compiled; property dependencies
            # must be sequences of property names.
            if isinstance(value, dict):
                subpointer = pointer_join(pointer, 'dependencies', key)
                attrs['dependencies'][key] = compile(value, subpointer, context, scope)
            elif not isinstance(value, sequence_types):
                raise CompilationError('dependencies must be an array or object', schema)  # noqa
    if 'enum' in schm:
        attrs['enum'] = schm.pop('enum')
        if not isinstance(attrs['enum'], sequence_types):
            raise CompilationError('enum must be a sequence', schema)
    if 'exclusiveMaximum' in schm:
        attrs['exclusive_maximum'] = schm.pop('exclusiveMaximum')
        if not isinstance(attrs['exclusive_maximum'], bool):
            raise CompilationError('exclusiveMaximum must be a boolean', schema)  # noqa
    if 'exclusiveMinimum' in schm:
        attrs['exclusive_minimum'] = schm.pop('exclusiveMinimum')
        if not isinstance(attrs['exclusive_minimum'], bool):
            raise CompilationError('exclusiveMinimum must be a boolean', schema)  # noqa
    if 'format' in schm:
        attrs['format'] = schm.pop('format')
        if not isinstance(attrs['format'], string_types):
            raise CompilationError('format must be a string', schema)
    if 'items' in schm:
        subpointer = pointer_join(pointer, 'items')
        attrs['items'] = schm.pop('items')
        if isinstance(attrs['items'], (list, tuple)):
            # each value must be a json schema
            attrs['items'] = [compile(element, subpointer, context, scope) for element in attrs['items']]  # noqa
        elif isinstance(attrs['items'], dict):
            # value must be a json schema
            attrs['items'] = compile(attrs['items'], subpointer, context, scope)  # noqa
        else:
            # should be a boolean
            raise CompilationError('wrong type for {}'.format('items'), schema)  # noqa
    if 'maximum' in schm:
        attrs['maximum'] = schm.pop('maximum')
        if not isinstance(attrs['maximum'], number_types):
            raise CompilationError('maximum must be a number', schema)
    if 'maxItems' in schm:
        attrs['max_items'] = schm.pop('maxItems')
        if not isinstance(attrs['max_items'], integer_types):
            raise CompilationError('maxItems must be integer', schema)
    if 'maxLength' in schm:
        attrs['max_length'] = schm.pop('maxLength')
        if not isinstance(attrs['max_length'], integer_types):
            raise CompilationError('maxLength must be integer', schema)
    if 'maxProperties' in schm:
        attrs['max_properties'] = schm.pop('maxProperties')
        if not isinstance(attrs['max_properties'], integer_types):
            raise CompilationError('maxProperties must be integer', schema)
    if 'minimum' in schm:
        attrs['minimum'] = schm.pop('minimum')
        if not isinstance(attrs['minimum'], number_types):
            raise CompilationError('minimum must be a number', schema)
    if 'minItems' in schm:
        attrs['min_items'] = schm.pop('minItems')
        if not isinstance(attrs['min_items'], integer_types):
            raise CompilationError('minItems must be integer', schema)
    if 'minLength' in schm:
        attrs['min_length'] = schm.pop('minLength')
        if not isinstance(attrs['min_length'], integer_types):
            raise CompilationError('minLength must be integer', schema)
    if 'minProperties' in schm:
        attrs['min_properties'] = schm.pop('minProperties')
        if not isinstance(attrs['min_properties'], integer_types):
            raise CompilationError('minProperties must be integer', schema)
    if 'multipleOf' in schm:
        attrs['multiple_of'] = schm.pop('multipleOf')
        if not isinstance(attrs['multiple_of'], number_types):
            raise CompilationError('multipleOf must be a number', schema)
    if 'not' in schm:
        attrs['not'] = schm.pop('not')
        if not isinstance(attrs['not'], dict):
            raise CompilationError('not must be an object', schema)
        subpointer = pointer_join(pointer, 'not')
        attrs['not'] = compile(attrs['not'], subpointer, context, scope)
    if 'oneOf' in schm:
        subpointer = pointer_join(pointer, 'oneOf')
        attrs['one_of'] = schm.pop('oneOf')
        if isinstance(attrs['one_of'], (list, tuple)):
            # each value must be a json schema
            attrs['one_of'] = [compile(element, subpointer, context, scope) for element in attrs['one_of']]  # noqa
        else:
            # should be a boolean
            raise CompilationError('wrong type for {}'.format('oneOf'), schema)
    if 'pattern' in schm:
        attrs['pattern'] = schm.pop('pattern')
        if not isinstance(attrs['pattern'], string_types):
            raise CompilationError('pattern must be a string', schema)
    if 'properties' in schm:
        attrs['properties'] = schm.pop('properties')
        if not isinstance(attrs['properties'], dict):
            raise CompilationError('properties must be an object', schema)
        for subname, subschema in attrs['properties'].items():
            subpointer = pointer_join(pointer, subname)
            compiled = compile(subschema, subpointer, context, scope)
            attrs['properties'][subname] = compiled
    if 'patternProperties' in schm:
        attrs['pattern_properties'] = schm.pop('patternProperties')
        if not isinstance(attrs['pattern_properties'], dict):
            raise CompilationError('patternProperties must be an object', schema)  # noqa
        for subname, subschema in attrs['pattern_properties'].items():
            subpointer = pointer_join(pointer, 'patternProperties', subname)
            compiled = compile(subschema, subpointer, context, scope)
            attrs['pattern_properties'][subname] = compiled
    if 'required' in schm:
        attrs['required'] = schm.pop('required')
        if not isinstance(attrs['required'], list):
            raise CompilationError('required must be a list', schema)
        if len(attrs['required']) < 1:
            raise CompilationError('required cannot be empty', schema)
    if 'type' in schm:
        attrs['type'] = schm.pop('type')
        # Normalize a single type name to a one-element list.
        if isinstance(attrs['type'], string_types):
            attrs['type'] = [attrs['type']]
        elif not isinstance(attrs['type'], sequence_types):
            raise CompilationError('type must be string or sequence', schema)
    if 'uniqueItems' in schm:
        attrs['unique_items'] = schm.pop('uniqueItems')
        if not isinstance(attrs['unique_items'], bool):
            raise CompilationError('type must be boolean', schema)
    return Draft04Validator(attrs, str(pointer), context.formats)
def reference_handler(self, iobject, fact, attr_info, add_fact_kargs):
    """Handler for facts that contain a reference to a fact.

    As shown below in the handler list, this handler is called when an
    attribute with key '@idref' on the fact's node is detected -- this
    attribute signifies that this fact does not contain a value but points
    to another object. Thus we either retrieve the object or, if an object
    with the given id does not yet exist, create a PLACEHOLDER object.
    We further create/refer to the fitting fact data type: we want the fact
    data type to express that the fact is a reference to an object.

    :param iobject: the information object currently being imported (unused here)
    :param fact: dict describing the fact ('attribute', 'term', 'value' keys are read)
    :param attr_info: dict of XML attributes on the fact's node
    :param add_fact_kargs: output dict; 'value_iobject_id' is filled in here
    :return: True (signals the caller that the fact was handled)
    """
    logger.debug("XXX Found reference with %s" % attr_info)
    # Three ways a reference can be expressed: an explicit @idref attribute,
    # or a 'phase_id' / 'kill_chain_id' attribute whose value is a qualified name.
    if 'idref' in attr_info:
        ref_key = 'idref'
        (namespace, namespace_uri, uid) = self.split_qname(attr_info[ref_key])
    elif fact['attribute'] and fact['attribute'] == 'phase_id':
        if fact['term'] == '':
            # Empty term: nothing to resolve, but report the fact as handled.
            return True
        else:
            (namespace, namespace_uri, uid) = self.split_qname(fact['value'])
    elif fact['attribute'] and fact['attribute'] == 'kill_chain_id':
        if fact['term'] == '':
            return True
        else:
            (namespace, namespace_uri, uid) = self.split_qname(fact['value'])
    # NOTE(review): if none of the branches above matches, `namespace_uri`
    # and `uid` are unbound and the create_iobject call below raises
    # NameError -- presumably callers only invoke this handler when one of
    # the three cases holds; confirm against the handler registration.
    timestamp = None
    if '@timestamp' in attr_info:
        timestamp = attr_info['@timestamp']
    # if not timestamp:
    #     timestamp = self.create_timestamp
    # Retrieve the referenced object, or create a placeholder if it does
    # not exist yet (existed tells us which case occurred).
    (target_mantis_obj, existed) = MantisImporter.create_iobject(
        uid=uid,
        identifier_ns_uri=namespace_uri,
        timestamp=timestamp,
        create_timestamp=self.default_timestamp)
    logger.debug("Creation of Placeholder for %s %s returned %s" % (namespace_uri, uid, existed))
    # Record the referenced object's identifier so the caller stores the
    # fact as an object reference rather than a literal value.
    add_fact_kargs['value_iobject_id'] = Identifier.objects.get(uid=uid, namespace__uri=namespace_uri)
    return True
def _get_attachment_data ( self , id , filename ) : """Retrieve the contents of a specific attachment ( identified by filename ) ."""
uri = '/' . join ( [ self . base_url , self . name , id , 'Attachments' , filename ] ) return uri , { } , 'get' , None , None , False
def coordination_geometry_symmetry_measures_sepplane_optim(self, coordination_geometry, points_perfect=None, nb_set=None, optimization=None):
    """Returns the symmetry measures of a given coordination_geometry for a
    set of permutations depending on the permutation setup. Depending on the
    parameters of the LocalGeometryFinder and on the coordination geometry,
    different methods are called.

    :param coordination_geometry: Coordination geometry for which the
        symmetry measures are looked for
    :return: the symmetry measures of a given coordination_geometry for a
        set of permutations
    :raise: NotImplementedError if the permutation_setup does not exists
    """
    all_csms = []
    all_permutations = []
    all_algos = []
    all_local2perfect = []
    all_perfect2local = []
    for algorithm in coordination_geometry.algorithms:
        # Only separation-plane algorithms are handled by this optimized path.
        if algorithm.algorithm_type != SEPARATION_PLANE:
            continue
        result = self.coordination_geometry_symmetry_measures_separation_plane_optim(
            coordination_geometry, algorithm,
            points_perfect=points_perfect, nb_set=nb_set,
            optimization=optimization)
        csm, perm, algo_list, local2perfect_map, perfect2local_map = result
        all_csms.extend(csm)
        all_permutations.extend(perm)
        all_algos.extend(algo_list)
        all_local2perfect.extend(local2perfect_map)
        all_perfect2local.extend(perfect2local_map)
    return all_csms, all_permutations, all_algos, all_local2perfect, all_perfect2local
def make_empty_table(row_count, column_count):
    """Make an empty table.

    Parameters
    ----------
    row_count : int
        The number of rows in the new table
    column_count : int
        The number of columns in the new table

    Returns
    -------
    table : list of lists of str
        Each cell will be an empty str ('')
    """
    # Nested comprehension replaces the manual while/append loop; each row is
    # a distinct list so mutating one cell never affects another row.
    return [['' for _ in range(column_count)] for _ in range(row_count)]
def output(self):
    """Return a 20-byte hash corresponding to this script (or None if not
    applicable).

    Yields (key, value, description) triples describing the script output.
    """
    # Only emit the raw hash160 entry when the script info carries one.
    h160 = self._script_info.get("hash160")
    if h160:
        yield ("hash160", b2h(h160), None)
    addr = self.address()
    yield ("address", addr, "%s address" % self._network.network_name)
    yield ("%s_address" % self._network.symbol, addr, "legacy")
def state_args(id_, state, high):
    '''Return a set of the arguments passed to the named state'''
    # No matching id or state in the high data: nothing was passed.
    if id_ not in high or state not in high[id_]:
        return set()
    # Each argument is represented as a single-key dict; anything else
    # (e.g. the function name string) is skipped.
    return {
        next(iter(entry))
        for entry in high[id_][state]
        if isinstance(entry, dict) and len(entry) == 1
    }
def _resolve_index ( self , mixed ) : """Find the index based on various strategies for a node , probably an input or output of chain . Supported inputs are indexes , node values or names ."""
if mixed is None : return None if type ( mixed ) is int or mixed in self . edges : return mixed if isinstance ( mixed , str ) and mixed in self . named : return self . named [ mixed ] if mixed in self . nodes : return self . nodes . index ( mixed ) raise ValueError ( "Cannot find node matching {!r}." . format ( mixed ) )
def finetune_classification_cnn(config):
    """Fine-tune a classification CNN according to ``config``.

    Reads dataset/model/optimization settings from ``config``, creates a new
    model directory, fits data generators, builds a FinetunedClassificationCNN
    on top of a frozen base CNN, trains it with Keras ``fit_generator``, and
    saves weights, split indices, preprocessors and training history to disk.

    :param config: nested mapping of training settings (must support
        ``__getitem__``, ``keys()`` and a ``save(path)`` method)
    """
    # read params
    dataset = config['dataset']
    x_names = config['x_names']
    y_name = config['y_name']
    model_dir = config['model_dir']
    debug = config['debug']
    num_classes = None
    if 'num_classes' in config.keys():
        num_classes = config['num_classes']
    batch_size = config['training']['batch_size']
    train_pct = config['training']['train_pct']
    model_save_period = config['training']['model_save_period']
    data_aug_config = config['data_augmentation']
    preproc_config = config['preprocessing']
    iterator_config = config['data_iteration']
    model_config = config['model']
    base_model_config = model_config['base']
    optimization_config = config['optimization']
    train_config = config['training']
    generator_image_shape = None
    if 'image_shape' in data_aug_config.keys():
        generator_image_shape = data_aug_config['image_shape']
    optimizer_name = optimization_config['optimizer']
    model_params = {}
    if 'params' in model_config.keys():
        model_params = model_config['params']
    base_model_params = {}
    if 'params' in base_model_config.keys():
        base_model_params = base_model_config['params']
    # Fixed seeds for reproducible debugging runs.
    if debug:
        seed = 108
        random.seed(seed)
        np.random.seed(seed)
    # generate model dir (a fresh model_<id> subdirectory per run)
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    model_id = utils.gen_experiment_id()
    model_dir = os.path.join(model_dir, 'model_%s' % (model_id))
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    logging.info('Saving model to %s' % (model_dir))
    latest_model_filename = os.path.join(model_dir, 'weights_{epoch:05d}.h5')
    best_model_filename = os.path.join(model_dir, 'weights.h5')
    # save config alongside the model for later reproduction
    training_config_filename = os.path.join(model_dir, 'training_config.yaml')
    config.save(training_config_filename)
    # open dataset
    dataset = TensorDataset.open(dataset)
    # split dataset: reuse a previous split if one was saved, else create one
    indices_filename = os.path.join(model_dir, 'splits.npz')
    if os.path.exists(indices_filename):
        # NOTE(review): this recovers a dict that was stored inside a NumPy
        # object array via np.array({...}) below -- tolist() unwraps it;
        # confirm the round-trip on the NumPy version in use.
        indices = np.load(indices_filename)['arr_0'].tolist()
        train_indices = indices['train']
        val_indices = indices['val']
    else:
        train_indices, val_indices = dataset.split(train_pct)
        indices = np.array({'train': train_indices, 'val': val_indices})
        np.savez_compressed(indices_filename, indices)
    num_train = train_indices.shape[0]
    num_val = val_indices.shape[0]
    val_steps = int(np.ceil(float(num_val) / batch_size))
    # init generator: load pickled preprocessors if present, otherwise fit
    # a new train generator and copy its statistics onto the val generator
    train_generator_filename = os.path.join(model_dir, 'train_preprocessor.pkl')
    val_generator_filename = os.path.join(model_dir, 'val_preprocessor.pkl')
    if os.path.exists(train_generator_filename):
        logging.info('Loading generators')
        train_generator = pkl.load(open(train_generator_filename, 'rb'))
        val_generator = pkl.load(open(val_generator_filename, 'rb'))
    else:
        logging.info('Fitting generator')
        train_generator = TensorDataGenerator(num_classes=num_classes, **data_aug_config)
        # Validation uses only normalization, never random augmentation.
        val_generator = TensorDataGenerator(
            featurewise_center=data_aug_config['featurewise_center'],
            featurewise_std_normalization=data_aug_config['featurewise_std_normalization'],
            image_shape=generator_image_shape,
            num_classes=num_classes)
        fit_start = time.time()
        train_generator.fit(dataset, x_names, y_name, indices=train_indices, **preproc_config)
        # Share the statistics fitted on the training split so both splits
        # are normalized identically.
        val_generator.mean = train_generator.mean
        val_generator.std = train_generator.std
        val_generator.min_output = train_generator.min_output
        val_generator.max_output = train_generator.max_output
        val_generator.num_classes = train_generator.num_classes
        fit_stop = time.time()
        logging.info('Generator fit took %.3f sec' % (fit_stop - fit_start))
        pkl.dump(train_generator, open(train_generator_filename, 'wb'))
        pkl.dump(val_generator, open(val_generator_filename, 'wb'))
    if num_classes is None:
        num_classes = int(train_generator.num_classes)
    # init iterator
    train_iterator = train_generator.flow_from_dataset(
        dataset, x_names, y_name, indices=train_indices,
        batch_size=batch_size, **iterator_config)
    val_iterator = val_generator.flow_from_dataset(
        dataset, x_names, y_name, indices=val_indices,
        batch_size=batch_size, **iterator_config)
    # setup model: new classification head on top of a pretrained base CNN
    base_cnn = ClassificationCNN.open(
        base_model_config['model'], base_model_config['type'],
        input_name=x_names[0], **base_model_params)
    cnn = FinetunedClassificationCNN(
        base_cnn=base_cnn, name='dexresnet', num_classes=num_classes,
        output_name=y_name, im_preprocessor=val_generator, **model_params)
    # setup training: only the new head is trainable
    cnn.freeze_base_cnn()
    if optimizer_name == 'sgd':
        optimizer = SGD(lr=optimization_config['lr'], momentum=optimization_config['momentum'])
    elif optimizer_name == 'adam':
        optimizer = Adam(lr=optimization_config['lr'])
    else:
        raise ValueError('Optimizer %s not supported!' % (optimizer_name))
    model = cnn.model
    model.compile(optimizer=optimizer, loss=optimization_config['loss'],
                  metrics=optimization_config['metrics'])
    # train
    steps_per_epoch = int(np.ceil(float(num_train) / batch_size))
    latest_model_ckpt = ModelCheckpoint(latest_model_filename, period=model_save_period)
    best_model_ckpt = ModelCheckpoint(best_model_filename, save_best_only=True, period=model_save_period)
    train_history_cb = TrainHistory(model_dir)
    callbacks = [latest_model_ckpt, best_model_ckpt, train_history_cb]
    history = model.fit_generator(
        train_iterator,
        steps_per_epoch=steps_per_epoch,
        epochs=train_config['epochs'],
        callbacks=callbacks,
        validation_data=val_iterator,
        validation_steps=val_steps,
        class_weight=train_config['class_weight'],
        use_multiprocessing=train_config['use_multiprocessing'])
    # save model
    cnn.save(model_dir)
    # save history
    history_filename = os.path.join(model_dir, 'history.pkl')
    pkl.dump(history.history, open(history_filename, 'wb'))
def omega(self, structure, n, u):
    """Finds directional frequency contribution to the heat capacity from
    direction and polarization

    Args:
        structure (Structure): Structure to be used in directional heat
            capacity determination
        n (3x1 array-like): direction for Cv determination
        u (3x1 array-like): polarization direction, note that no attempt
            for verification of eigenvectors is made
    """
    # Projection of the summed lattice vectors onto n, converted A -> m.
    cell_span = np.dot(np.sum(structure.lattice.matrix, axis=0), n)
    cell_span *= 1e-10
    # Mass in kg (amu -> kg) and volume in m^3 (A^3 -> m^3) give density.
    mass_kg = float(structure.composition.weight) * 1.66054e-27
    vol_m3 = structure.volume * 1e-30
    density = mass_kg / vol_m3
    # Directional sound velocity from the projected elastic tensor.
    velocity = (1e9 * self[0].einsum_sequence([n, u, n, u]) / density) ** 0.5
    return velocity / cell_span
def dump_part(part, total_segments=None):
    """'part' may be the hash_key if we are dumping just a few hash_keys -
    else it will be the segment number

    Dumps the items of one table part to ``<table>.<part>.dump`` (optionally
    gzip-compressed), reporting progress counts on ``config['queue']`` and
    finally putting the sentinel string 'complete'.
    """
    try:
        connection = Connection(host=config['host'], region=config['region'])
        filename = ".".join([config['table_name'], str(part), "dump"])
        if config['compress']:
            opener = gzip.GzipFile
            filename += ".gz"
        else:
            opener = open
        dumper = BatchDumper(connection, config['table_name'], config['capacity'], part, total_segments)
        with opener(filename, 'w') as output:
            while dumper.has_items:
                items = dumper.get_items()
                # One JSON document per line.
                for item in items:
                    output.write(json.dumps(item))
                    output.write("\n")
                output.flush()
                # Progress report: number of items written in this batch.
                config['queue'].put(len(items))
        # Sentinel telling the consumer this part finished.
        config['queue'].put('complete')
    except Exception as e:
        # NOTE(review): on failure the 'complete' sentinel is never queued,
        # so a consumer waiting for it would presumably hang -- confirm the
        # consumer's behavior before relying on this error handling. The
        # broad except/print keeps a worker process from dying silently.
        print('Unhandled exception: {0}'.format(e))
def show_human ( ) : """Curses terminal with standard outputs"""
form = 'RAW' units = 'raw' data_window = curses . newwin ( 19 , 39 , 0 , 0 ) sat_window = curses . newwin ( 14 , 39 , 0 , 40 ) device_window = curses . newwin ( 6 , 39 , 13 , 40 ) packet_window = curses . newwin ( 7 , 79 , 19 , 0 ) for new_data in gpsd_socket : if new_data : data_stream . unpack ( new_data ) screen . nodelay ( 1 ) key_press = screen . getch ( ) if key_press == ord ( 'q' ) : # quit shut_down ( ) elif key_press == ord ( 'a' ) : # NMEA gpsd_socket . watch ( enable = False , gpsd_protocol = 'json' ) gpsd_socket . watch ( gpsd_protocol = 'nmea' ) show_nmea ( ) elif key_press == ord ( '0' ) : # raw form = 'RAW' units = 'raw' data_window . clear ( ) elif key_press == ord ( '1' ) : # DDD form = 'DDD' data_window . clear ( ) elif key_press == ord ( '2' ) : # DMM form = 'DMM' data_window . clear ( ) elif key_press == ord ( '3' ) : # DMS form = 'DMS' data_window . clear ( ) elif key_press == ord ( 'm' ) : # Metric units = 'metric' data_window . clear ( ) elif key_press == ord ( 'i' ) : # Imperial units = 'imperial' data_window . clear ( ) elif key_press == ord ( 'n' ) : # Nautical units = 'nautical' data_window . clear ( ) elif key_press == ord ( 'd' ) : # Refresh device listings gpsd_socket . send ( '?DEVICES;' ) device_window . clear ( ) data_window . box ( ) data_window . addstr ( 0 , 2 , 'GPS3 Python {}.{}.{} GPSD Interface' . format ( * sys . version_info ) , curses . A_BOLD ) data_window . addstr ( 1 , 2 , 'Time: {time} ' . format ( ** data_stream . TPV ) ) data_window . addstr ( 2 , 2 , 'Latitude: {} ' . format ( sexagesimal ( data_stream . TPV [ 'lat' ] , 'lat' , form ) ) ) data_window . addstr ( 3 , 2 , 'Longitude: {} ' . format ( sexagesimal ( data_stream . TPV [ 'lon' ] , 'lon' , form ) ) ) data_window . addstr ( 4 , 2 , 'Altitude: {} {}' . format ( * unit_conversion ( data_stream . TPV [ 'alt' ] , units , length = True ) ) ) data_window . addstr ( 5 , 2 , 'Speed: {} {}' . format ( * unit_conversion ( data_stream . 
TPV [ 'speed' ] , units ) ) ) data_window . addstr ( 6 , 2 , 'Heading: {track}° True' . format ( ** data_stream . TPV ) ) data_window . addstr ( 7 , 2 , 'Climb: {} {}/s' . format ( * unit_conversion ( data_stream . TPV [ 'climb' ] , units , length = True ) ) ) data_window . addstr ( 8 , 2 , 'Status: {mode:<}D ' . format ( ** data_stream . TPV ) ) data_window . addstr ( 9 , 2 , 'Latitude Err: +/-{} {} ' . format ( * unit_conversion ( data_stream . TPV [ 'epx' ] , units , length = True ) ) ) data_window . addstr ( 10 , 2 , 'Longitude Err: +/-{} {}' . format ( * unit_conversion ( data_stream . TPV [ 'epy' ] , units , length = True ) ) ) data_window . addstr ( 11 , 2 , 'Altitude Err: +/-{} {} ' . format ( * unit_conversion ( data_stream . TPV [ 'epv' ] , units , length = True ) ) ) data_window . addstr ( 12 , 2 , 'Course Err: +/-{epc} ' . format ( ** data_stream . TPV ) , curses . A_DIM ) data_window . addstr ( 13 , 2 , 'Speed Err: +/-{} {} ' . format ( * unit_conversion ( data_stream . TPV [ 'eps' ] , units ) ) , curses . A_DIM ) data_window . addstr ( 14 , 2 , 'Time Offset: +/-{ept} ' . format ( ** data_stream . TPV ) , curses . A_DIM ) data_window . addstr ( 15 , 2 , 'gdop:{gdop} pdop:{pdop} tdop:{tdop}' . format ( ** data_stream . SKY ) ) data_window . addstr ( 16 , 2 , 'ydop:{ydop} xdop:{xdop} ' . format ( ** data_stream . SKY ) ) data_window . addstr ( 17 , 2 , 'vdop:{vdop} hdop:{hdop} ' . format ( ** data_stream . SKY ) ) sat_window . clear ( ) sat_window . box ( ) sat_window . addstr ( 0 , 2 , 'Using {0[1]}/{0[0]} satellites (truncated)' . format ( satellites_used ( data_stream . SKY [ 'satellites' ] ) ) ) sat_window . addstr ( 1 , 2 , 'PRN Elev Azimuth SNR Used' ) line = 2 if isinstance ( data_stream . SKY [ 'satellites' ] , list ) : # Nested lists of dictionaries are strings before data is present for sats in data_stream . SKY [ 'satellites' ] [ 0 : 10 ] : sat_window . addstr ( line , 2 , '{PRN:>2} {el:>6} {az:>5} {ss:>5} {used:}' . 
format ( ** sats ) ) line += 1 # device _ window . clear ( ) device_window . box ( ) if not isinstance ( data_stream . DEVICES [ 'devices' ] , list ) : # Local machines need a ' device ' kick start gpsd_socket . send ( '?DEVICES;' ) # to have valid data I don ' t know why . if isinstance ( data_stream . DEVICES [ 'devices' ] , list ) : # Nested lists of dictionaries are strings before data is present . for gizmo in data_stream . DEVICES [ 'devices' ] : start_time , _uicroseconds = gizmo [ 'activated' ] . split ( '.' ) # Remove ' . 000Z ' elapsed = elapsed_time_from ( start_time ) device_window . addstr ( 1 , 2 , 'Activated: {}' . format ( gizmo [ 'activated' ] ) ) device_window . addstr ( 2 , 2 , 'Host:{0.host}:{0.port} {1}' . format ( args , gizmo [ 'path' ] ) ) device_window . addstr ( 3 , 2 , 'Driver:{driver} BPS:{bps}' . format ( ** gizmo ) ) device_window . addstr ( 4 , 2 , 'Cycle:{0} Hz {1!s:>14} Elapsed' . format ( 1 / gizmo [ 'cycle' ] , elapsed ) ) packet_window . clear ( ) # packet _ window . border ( 0) packet_window . scrollok ( True ) packet_window . addstr ( 0 , 0 , '{}' . format ( new_data ) ) # sleep ( . 9) data_window . refresh ( ) sat_window . refresh ( ) device_window . refresh ( ) packet_window . refresh ( ) else : # Reduced CPU cycles with the non - blocking socket read , by putting ' sleep ' here , rather than hitting sleep ( .1 )
def viewinfo(self, postinfo):
    '''Render the (leaf) view page for ``postinfo``.

    Resolves the post's category chain, bumps its view count, records usage
    for the current user, and renders the kind-specific leaf_view template.
    '''
    self.redirect_kind(postinfo)
    if DB_CFG['kind'] == 's':
        # SQLite backend: no category enumeration support.
        cat_enum1 = []
    else:
        ext_catid = postinfo.extinfo['def_cat_uid'] if 'def_cat_uid' in postinfo.extinfo else ''
        ext_catid2 = postinfo.extinfo['def_cat_uid'] if 'def_cat_uid' in postinfo.extinfo else None
        # First two characters of the category uid select the top-level group.
        cat_enum1 = MCategory.get_qian2(ext_catid2[:2]) if ext_catid else []
    catinfo = None
    p_catinfo = None
    post2catinfo = MPost2Catalog.get_first_category(postinfo.uid)
    catalog_infors = None
    if post2catinfo:
        catinfo = MCategory.get_by_uid(post2catinfo.tag_id)
        if catinfo:
            p_catinfo = MCategory.get_by_uid(catinfo.pid)
            # NOTE(review): nested under `if catinfo` since it reads
            # catinfo.slug; confirm against upstream history -- the
            # extracted text left the nesting ambiguous.
            catalog_infors = MPost2Catalog.query_pager_by_slug(catinfo.slug, current_page_num=1, order=True)
    kwd = self._the_view_kwd(postinfo)
    # count=True increments the post's view counter.
    MPost.update_misc(postinfo.uid, count=True)
    if self.get_current_user():
        MUsage.add_or_update(self.userinfo.uid, postinfo.uid, postinfo.kind)
    tmpl = 'post_{0}/leaf_view.html'.format(self.kind)
    logger.info('The Info Template: {0}'.format(tmpl))
    self.render(tmpl,
                kwd=dict(kwd, **self.ext_view_kwd(postinfo)),
                postinfo=postinfo,
                userinfo=self.userinfo,
                catinfo=catinfo,
                pcatinfo=p_catinfo,
                ad_switch=random.randint(1, 18),
                tag_info=MPost2Label.get_by_uid(postinfo.uid),
                catalog_infos=catalog_infors,
                cat_enum=cat_enum1)
def _parse_trigger(self, trigger_clause):
    """Parse a named event or explicit stream trigger into a TriggerDefinition."""
    cond = trigger_clause[0]
    named_event = None
    explicit_stream = None
    explicit_trigger = None
    clause_kind = cond.getName()
    # Identifier parse tree is Group(Identifier)
    if clause_kind == 'identifier':
        named_event = cond[0]
    elif clause_kind == 'stream_trigger':
        # (trigger_type, stream, operator, reference)
        explicit_stream = cond[1]
        explicit_trigger = InputTrigger(cond[0], cond[2], cond[3])
    elif clause_kind == 'stream_always':
        explicit_stream = cond[0]
        explicit_trigger = TrueTrigger()
    else:
        raise ArgumentError("OnBlock created from an invalid ParseResults object", parse_results=trigger_clause)
    return TriggerDefinition(named_event, explicit_stream, explicit_trigger)
def mouseMoveEvent(self, event):
    """Handle the mouse move event for a drag operation.

    :param event: the Qt mouse event being processed
    """
    # Forward to the declaration first so the framework-level handler can
    # react before Qt's default processing runs.
    self.declaration.mouse_move_event(event)
    super(QtGraphicsView, self).mouseMoveEvent(event)
def inverse(self):
    """Return index array that maps unique values back to original space.

    Satisfies ``unique[inverse] == keys``.

    :return: integer ndarray of length ``self.size``
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; np.intp is
    # the platform index type and matches what argsort/indexing produce.
    inv = np.empty(self.size, np.intp)
    # Scatter the per-key group ranks back to their pre-sort positions.
    inv[self.sorter] = self.sorted_group_rank_per_key
    return inv
def gateway_by_type(self, type=None, on_network=None):  # @ReservedAssignment
    """Return gateways for the specified node. You can also specify type to
    find only gateways of a specific type. Valid types are: bgp_peering,
    netlink, ospfv2_area.

    :param RoutingNode self: the routing node to check
    :param str type: bgp_peering, netlink, ospfv2_area
    :param str on_network: if network is specified, should be CIDR and
        specifies a filter to only return gateways on that network when an
        interface has multiple
    :return: tuple of RoutingNode (interface, network, gateway)
    :rtype: list

    Note: with no ``type`` given, the bare gateway nodes are yielded rather
    than (interface, network, gateway) tuples (visible in the first branch
    below).
    """
    gateways = route_level(self, 'gateway')
    if not type:
        # Unfiltered: yield each gateway node as-is.
        for gw in gateways:
            yield gw
    else:
        for node in gateways:
            # TODO: Change to type == node.related_element_type when
            # only supporting SMC >= 6.4
            if type == node.routing_node_element.typeof:
                # If the parent is level interface, this is a tunnel interface
                # where the gateway is bound to interface versus network
                parent = node._parent
                if parent.level == 'interface':
                    interface = parent
                    network = None
                else:
                    network = parent
                    interface = network._parent
                if on_network is not None:
                    # NOTE(review): tunnel-interface gateways (network is
                    # None) are never yielded when on_network is given --
                    # presumably intentional since they have no network to
                    # match, but confirm.
                    if network and network.ip == on_network:
                        yield (interface, network, node)
                else:
                    yield (interface, network, node)
def _populate_profile_from_attributes ( self , profile ) : """Populate the given profile object from AUTH _ LDAP _ PROFILE _ ATTR _ MAP . Returns True if the profile was modified ."""
save_profile = False for field , attr in self . settings . PROFILE_ATTR_MAP . items ( ) : try : # user _ attrs is a hash of lists of attribute values setattr ( profile , field , self . attrs [ attr ] [ 0 ] ) save_profile = True except Exception : logger . warning ( "%s does not have a value for the attribute %s" , self . dn , attr ) return save_profile
def add(self, key, value):
    """Add an entry to a list preference

    Add `value` to the list of entries for the `key` preference.
    """
    # setdefault replaces the non-idiomatic `if not key in ...` check and
    # creates the list on first use and appends in a single step.
    self.prefs.setdefault(key, []).append(value)
def upload_file(self, session, output, serverdir):
    """Upload a file to koji

    :param session: koji session used for the upload
    :param output: build output whose ``file`` is uploaded; its
        ``metadata['filename']`` supplies the remote name
    :param serverdir: str, remote directory to upload into
    :return: str, pathname on server
    """
    name = output.metadata['filename']
    self.log.debug("uploading %r to %r as %r",
                   output.file.name, serverdir, name)
    kwargs = {}
    if self.blocksize is not None:
        # Only pass blocksize through when explicitly configured.
        kwargs['blocksize'] = self.blocksize
        self.log.debug("using blocksize %d", self.blocksize)
    # Progress callback logs upload progress via our logger.
    upload_logger = KojiUploadLogger(self.log)
    session.uploadWrapper(output.file.name, serverdir, name=name,
                          callback=upload_logger.callback, **kwargs)
    path = os.path.join(serverdir, name)
    self.log.debug("uploaded %r", path)
    return path
def encrypt_and_hash(self, plaintext: bytes) -> bytes:
    """Sets ciphertext = EncryptWithAd(h, plaintext), calls
    MixHash(ciphertext), and returns ciphertext. Note that if k is empty,
    the EncryptWithAd() call will set ciphertext equal to plaintext.

    :param plaintext: bytes sequence
    :return: ciphertext bytes sequence
    """
    # Encrypt with the running handshake hash as associated data, then
    # fold the resulting ciphertext back into the hash.
    sealed = self.cipher_state.encrypt_with_ad(self.h, plaintext)
    self.mix_hash(sealed)
    return sealed
def generate_statistics_subparser(subparsers):
    """Adds a sub-command parser to `subparsers` to generate statistics
    from a set of results.
    """
    stats_parser = subparsers.add_parser(
        'stats',
        description=constants.STATISTICS_DESCRIPTION,
        formatter_class=ParagraphFormatter,
        help=constants.STATISTICS_HELP)
    # Dispatch to the statistics generator when this sub-command is chosen.
    stats_parser.set_defaults(func=generate_statistics)
    utils.add_common_arguments(stats_parser)
    utils.add_corpus_arguments(stats_parser)
    stats_parser.add_argument('results',
                              help=constants.STATISTICS_RESULTS_HELP,
                              metavar='RESULTS')
def get_interpolated_value(self, energy, integrated=False):
    """Returns the COHP for a particular energy.

    Args:
        energy: Energy to return the COHP value for.
        integrated: If True, interpolate the integrated COHP (ICOHP)
            instead of the COHP itself.

    Raises:
        ValueError: if ``integrated`` is requested but no ICOHP is stored.
    """
    interpolated = {}
    for spin in self.cohp:
        # Select the curve to interpolate for this spin channel.
        if not integrated:
            curve = self.cohp[spin]
        elif self.icohp is not None:
            curve = self.icohp[spin]
        else:
            raise ValueError("ICOHP is empty.")
        interpolated[spin] = get_linear_interpolated_value(self.energies, curve, energy)
    return interpolated
def get_middleware(exclude=(), append=(), current={'middleware': MIDDLEWARE_CLASSES}):
    """Returns MIDDLEWARE_CLASSES without the middlewares listed in exclude
    and with the middlewares listed in append.

    The use of a mutable dict is intentional, in order to preserve the state
    of the MIDDLEWARE_CLASSES tuple across multiple settings files.
    """
    kept = tuple(mw for mw in current['middleware'] if mw not in exclude)
    # Persist the result back into the shared dict so later calls build on it.
    current['middleware'] = kept + tuple(append)
    return current['middleware']
def which_api_version(self, api_call):
    """Return QualysGuard API version for api_call specified.

    Returns 1 or 2 for the numbered APIs, 'am'/'was' for the asset
    management and web-app-scanning REST APIs, or False when the call
    matches no known pattern.
    """
    # Leverage patterns of calls to API methods; guard clauses in order of
    # precedence replace the original elif chain.
    if api_call.endswith('.php'):
        # API v1.
        return 1
    if api_call.startswith('api/2.0/'):
        # API v2.
        return 2
    if '/am/' in api_call:
        # Asset Management API.
        return 'am'
    if '/was/' in api_call:
        # WAS API.
        return 'was'
    return False
def is_transaction_signer_authorized(self, transactions, state_root, from_state):
    """Check the transaction signing key against the allowed transactor
    permissions. The roles being checked are the following, from first to
    last:
        "transactor.transaction_signer.<TP_Name>"
        "transactor.transaction_signer"
        "transactor"
        "default"
    The first role that is set will be the one used to enforce if the
    transaction signer is allowed.

    Args:
        transactions (List of Transactions): The transactions that are
            being verified.
        state_root (string): The state root of the previous block. If this
            is None, the current state root hash will be retrieved.
        from_state (bool): Whether the identity value should be read
            directly from state, instead of using the cached values. This
            should be used when the state_root passed is not from the
            current chain head.
    """
    # Resolve the fallback (non-family-specific) role/policy once.
    # NOTE(review): `role = None` followed immediately by `if role is None`
    # is always true -- presumably a leftover from a removed first lookup.
    role = None
    if role is None:
        role = self._cache.get_role("transactor.transaction_signer", state_root, from_state)
    if role is None:
        role = self._cache.get_role("transactor", state_root, from_state)
    if role is None:
        policy_name = "default"
    else:
        policy_name = role.policy_name
    policy = self._cache.get_policy(policy_name, state_root, from_state)
    # Per-family policies are looked up once per family name and memoized
    # here for the duration of this batch of transactions.
    family_roles = {}
    for transaction in transactions:
        header = TransactionHeader()
        header.ParseFromString(transaction.header)
        family_policy = None
        if header.family_name not in family_roles:
            role = self._cache.get_role(
                "transactor.transaction_signer." + header.family_name,
                state_root, from_state)
            if role is not None:
                family_policy = self._cache.get_policy(role.policy_name, state_root, from_state)
            # Cache even a None result so the lookup is not repeated.
            family_roles[header.family_name] = family_policy
        else:
            family_policy = family_roles[header.family_name]
        # Family-specific policy takes precedence over the fallback policy.
        if family_policy is not None:
            if not self._allowed(header.signer_public_key, family_policy):
                LOGGER.debug("Transaction Signer: %s is not permitted.", header.signer_public_key)
                return False
        else:
            if policy is not None:
                if not self._allowed(header.signer_public_key, policy):
                    LOGGER.debug("Transaction Signer: %s is not permitted.", header.signer_public_key)
                    return False
    return True
def to_datetime(t):
    """Convert 6-part time tuple into datetime object.

    Returns None for None input. Out-of-range fields are clamped into valid
    ranges rather than raising (e.g. month 13 -> 12, Feb 29 in a non-leap
    year -> Feb 28).
    """
    if t is None:
        return None
    year, mon, day, h, m, s = t
    # Fast path: the tuple is already a valid date/time.
    try:
        return datetime(year, mon, day, h, m, s)
    except ValueError:
        pass
    # Clamp each field into range. Index 0 is a placeholder so the table is
    # addressed directly by month number; February allows 29 here and the
    # leap-year case is resolved below.
    days_in_month = (0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    mon = min(max(mon, 1), 12)
    day = min(max(day, 1), days_in_month[mon])
    h = min(h, 23)
    m = min(m, 59)
    s = min(s, 59)
    if mon == 2 and day == 29:
        # Feb 29 is only valid in leap years; fall back to Feb 28 otherwise.
        try:
            return datetime(year, mon, day, h, m, s)
        except ValueError:
            day = 28
    return datetime(year, mon, day, h, m, s)
def save(self, *args, **kwargs):
    """Kicks off celery task to re-save associated special coverages to
    percolator

    :param args: inline arguments (optional)
    :param kwargs: keyword arguments
    :return: `bulbs.campaigns.Campaign`
    """
    # NOTE(review): Django's Model.save() conventionally returns None, so
    # `campaign` is presumably None here despite the docstring's stated
    # return type -- confirm whether callers use the return value.
    campaign = super(Campaign, self).save(*args, **kwargs)
    # Asynchronously refresh percolator entries for this campaign's
    # special coverages.
    save_campaign_special_coverage_percolator.delay(self.tunic_campaign_id)
    return campaign
def tenant_get(tenant_id=None, name=None, profile=None, **connection_args):
    '''Return a specific tenants (keystone tenant-get)

    CLI Examples:

    .. code-block:: bash

        salt '*' keystone.tenant_get c965f79c4f864eaaa9c3b41904e67082
        salt '*' keystone.tenant_get tenant_id=c965f79c4f864eaaa9c3b41904e67082
        salt '*' keystone.tenant_get name=nova
    '''
    kstone = auth(profile, **connection_args)
    ret = {}
    # A name, when given, is resolved to a tenant id first.
    if name:
        for tenant in getattr(kstone, _TENANTS, None).list():
            if tenant.name == name:
                tenant_id = tenant.id
                break
    if not tenant_id:
        return {'Error': 'Unable to resolve tenant id'}
    tenant = getattr(kstone, _TENANTS, None).get(tenant_id)
    # Expose only the tenant's plain public attributes (strings/dicts/bools),
    # skipping private members and client machinery.
    ret[tenant.name] = dict((value, getattr(tenant, value))
                            for value in dir(tenant)
                            if not value.startswith('_')
                            and isinstance(getattr(tenant, value), (six.string_types, dict, bool)))
    return ret
def dense_to_deeper_block(dense_layer, weighted=True):
    '''deeper dense layer.

    Builds a [ReLU, Dense] pair whose dense layer is (noisy-)identity-sized
    to match ``dense_layer``, for inserting extra depth into a network.
    '''
    n_units = dense_layer.units
    identity_dense = StubDense(n_units, n_units)
    if weighted:
        # Identity weights plus noise keep the new layer close to a no-op.
        identity_weight = np.eye(n_units)
        zero_bias = np.zeros(n_units)
        identity_dense.set_weights(
            (add_noise(identity_weight, np.array([0, 1])),
             add_noise(zero_bias, np.array([0, 1]))))
    return [StubReLU(), identity_dense]
def kindpath(self, kind):
    """Returns a path to the resources for a given input kind.

    :param `kind`: The kind of input:

        - "ad": Active Directory
        - "monitor": Files and directories
        - "registry": Windows Registry
        - "script": Scripts
        - "splunktcp": TCP, processed
        - "tcp": TCP, unprocessed
        - "udp": UDP
        - "win-event-log-collections": Windows event log
        - "win-perfmon": Performance monitoring
        - "win-wmi-collections": WMI

    :type kind: ``string``
    :return: The relative endpoint path.
    :rtype: ``string``
    """
    if kind in self.kinds:
        return UrlEncoded(kind, skip_encode=True)
    # Special cases: raw vs cooked TCP live under different endpoints.
    special_paths = {'tcp': 'tcp/raw', 'splunktcp': 'tcp/cooked'}
    if kind in special_paths:
        return UrlEncoded(special_paths[kind], skip_encode=True)
    raise ValueError("No such kind on server: %s" % kind)
def compile_with_symbol ( self , func , theano_args = None , owner = None ) : '''Compile the function with theano symbols'''
if theano_args is None : theano_args = [ ] # initialize the shared buffers upc = UpdateCollector ( ) # get the output symbols and other Theano options theano_ret = func ( * theano_args ) if owner is None else func ( owner , * theano_args ) # integrate the information of updates , givens and the other options out = copy . copy ( self . default_options ) out [ 'outputs' ] = theano_ret out [ 'updates' ] = upc . extract_updates ( ) # compile the function return theano . function ( theano_args , ** out )
def get_orthology_matrix ( self , pid_cutoff = None , bitscore_cutoff = None , evalue_cutoff = None , filter_condition = 'OR' , remove_strains_with_no_orthology = True , remove_strains_with_no_differences = False , remove_genes_not_in_base_model = True ) : """Create the orthology matrix by finding best bidirectional BLAST hits . Genes = rows , strains = columns Runs run _ makeblastdb , run _ bidirectional _ blast , and calculate _ bbh for protein sequences . Args : pid _ cutoff ( float ) : Minimum percent identity between BLAST hits to filter for in the range [ 0 , 100] bitscore _ cutoff ( float ) : Minimum bitscore allowed between BLAST hits evalue _ cutoff ( float ) : Maximum E - value allowed between BLAST hits filter _ condition ( str ) : ' OR ' or ' AND ' , how to combine cutoff filters . ' OR ' gives more results since it is less stringent , as you will be filtering for hits with ( > 80 % PID or > 30 bitscore or < 0.0001 evalue ) . remove _ strains _ with _ no _ orthology ( bool ) : Remove strains which have no orthologous genes found remove _ strains _ with _ no _ differences ( bool ) : Remove strains which have all the same genes as the base model . Default is False because since orthology is found using a PID cutoff , all genes may be present but differences may be on the sequence level . remove _ genes _ not _ in _ base _ model ( bool ) : Remove genes from the orthology matrix which are not present in our base model . This happens if we use a genome file for our model that has other genes in it . Returns : DataFrame : Orthology matrix calculated from best bidirectional BLAST hits ."""
# TODO : document and test other cutoffs # Get the path to the reference genome r_file = self . reference_gempro . genome_path bbh_files = { } log . info ( 'Running bidirectional BLAST and finding best bidirectional hits (BBH)...' ) for strain_gempro in tqdm ( self . strains ) : g_file = strain_gempro . genome_path # Run bidirectional BLAST log . debug ( '{} vs {}: Running bidirectional BLAST' . format ( self . reference_gempro . id , strain_gempro . id ) ) r_vs_g , g_vs_r = ssbio . protein . sequence . utils . blast . run_bidirectional_blast ( reference = r_file , other_genome = g_file , dbtype = 'prot' , outdir = self . sequences_by_organism_dir ) # Using the BLAST files , find the BBH log . debug ( '{} vs {}: Finding BBHs' . format ( self . reference_gempro . id , strain_gempro . id ) ) bbh = ssbio . protein . sequence . utils . blast . calculate_bbh ( blast_results_1 = r_vs_g , blast_results_2 = g_vs_r , outdir = self . sequences_by_organism_dir ) bbh_files [ strain_gempro . id ] = bbh # Make the orthologous genes matrix log . info ( 'Creating orthology matrix from BBHs...' ) ortho_matrix = ssbio . protein . sequence . utils . blast . create_orthology_matrix ( r_name = self . reference_gempro . id , genome_to_bbh_files = bbh_files , pid_cutoff = pid_cutoff , bitscore_cutoff = bitscore_cutoff , evalue_cutoff = evalue_cutoff , filter_condition = filter_condition , outname = '{}_{}_orthology.csv' . format ( self . reference_gempro . id , 'prot' ) , outdir = self . data_dir ) log . info ( 'Saved orthology matrix at {}. See the "df_orthology_matrix" attribute.' . format ( ortho_matrix ) ) self . df_orthology_matrix = pd . read_csv ( ortho_matrix , index_col = 0 ) # Filter the matrix to genes only in our analysis , and also check for strains with no differences or no orthologous genes self . 
_filter_orthology_matrix ( remove_strains_with_no_orthology = remove_strains_with_no_orthology , remove_strains_with_no_differences = remove_strains_with_no_differences , remove_genes_not_in_base_model = remove_genes_not_in_base_model )
def format_units(self, value, unit="B", optimal=5, auto=True, si=False):
    """Format *value* as a human-readable ``(number, unit)`` pair.

    Mainly meant for bytes/bits: ``2048`` becomes ``(2.0, "KiB")``.  The
    returned number stays numeric so the caller can still format it.

    Supplying a prefixed ``unit`` (e.g. ``"KiB"``, ``"Mb"``) pins the
    output to that unit; an unprefixed unit with ``auto=True`` picks an
    appropriate prefix automatically.  ``si=True`` (or an SI-looking
    unit such as ``"kB"``) switches from 1024- to 1000-based sizes.
    ``optimal`` targets the number of output characters (including the
    decimal point); rounding may produce fewer.
    """
    PREFIXES = "KMGTPEZY"
    DECIMAL_SIZE = 1000
    BINARY_SIZE = 1024
    CUTOFF = 1000

    can_round = False

    if unit:
        head = unit[0].upper()
        if head in PREFIXES:
            # A known prefix: the caller is asking for a fixed unit.
            index = PREFIXES.index(head) + 1
            post = unit[1:]
            si = len(unit) > 1 and unit[1] != "i"
            if si:
                post = post[1:]
            if unit[1] == "b":
                # bits, not bytes
                value *= 8
            auto = False
        else:
            index = 0
            post = unit

    size = DECIMAL_SIZE if si else BINARY_SIZE

    if auto:
        # pick an appropriate prefix for the magnitude of value
        if value < CUTOFF:
            unit_out = post
        else:
            value /= size
            for prefix in PREFIXES:
                if abs(value) < CUTOFF:
                    break
                value /= size
            if si:
                # SI kilo is written lowercase
                prefix = "k" if prefix == "K" else prefix
            else:
                post = "i" + post
            unit_out = prefix + post
            can_round = True
    else:
        # fixed unit requested by the caller
        unit_out = unit
        size = pow(size, index)
        if size:
            value /= size
            can_round = True

    if can_round and optimal and value:
        # shrink the output toward `optimal` characters, keeping it numeric
        places = int(log10(abs(value)))
        if places >= optimal - 2:
            value = int(value)
        else:
            value = round(value, max(optimal - places - 2, 0))

    return value, unit_out
def _init_metadata(self):
    """Build metadata dictionaries for the extra edX item fields (stub).

    Defines metadata for 'attempts', 'weight', 'showanswer' and
    'markdown'.  A 'rerandomize' STRING field used to be defined here as
    well and is intentionally left disabled.
    """
    form = self.my_osid_object_form

    def element_id(name):
        # Each entry is keyed by an Id in the form's authority/namespace.
        return Id(form._authority, form._namespace, name)

    def common_flags():
        # Fresh dict per field so nothing is aliased between entries.
        return {'required': False,
                'read_only': False,
                'linked': False,
                'array': False}

    def string_bounds():
        # Fresh dict per field: string_set must not be shared.
        return {'minimum_string_length': None,
                'maximum_string_length': None,
                'string_set': []}

    self._attempts_metadata = dict(
        common_flags(),
        element_id=element_id('attempts'),
        element_label='Attempts',
        instructions='Max number of student attempts',
        default_object_values=[0],
        syntax='INTEGER',
        object_set=[],
        minimum_integer=None,
        maximum_integer=None,
        integer_set=[])

    self._weight_metadata = dict(
        common_flags(),
        element_id=element_id('weight'),
        element_label='Weight',
        instructions='Weight of the item when calculating grades',
        default_object_values=[1.0],
        syntax='DECIMAL',
        object_set=[],
        decimal_scale=None,
        minimum_decimal=None,
        maximum_decimal=None,
        decimal_set=[])

    self._showanswer_metadata = dict(
        common_flags(),
        element_id=element_id('showanswer'),
        element_label='Show answer',
        instructions='When to show the answer to the student',
        default_object_values=['closed'],
        syntax='STRING',
        **string_bounds())

    self._markdown_metadata = dict(
        common_flags(),
        element_id=element_id('markdown'),
        element_label='Studio markdown',
        instructions='Studio markdown representation of the problem',
        default_object_values=[''],
        syntax='STRING',
        **string_bounds())
def validate_proxy_ticket(service, ticket, pgturl=None):
    """Validate a proxy ticket string.

    Returns a 4-tuple ``(pt, attributes, pgt, proxies)``: the validated
    ``ProxyTicket``, the attributes released for the user/service, an
    optional ``ProxyGrantingTicket`` (``None`` when no ``pgturl`` was
    supplied) and the list of services through which authentication
    proceeded, most recent first.  Raises a ``ValidationError`` if
    ticket validation failed.
    """
    logger.debug("Proxy validation request received for %s" % ticket)
    pt = ProxyTicket.objects.validate_ticket(ticket, service)
    attributes = get_attributes(pt.user, pt.service)

    # Walk the granting-ticket chain to collect every service that
    # proxied authentication, in reverse order of traversal.
    proxies = [pt.service]
    ancestor = pt.granted_by_pgt.granted_by_pt
    while ancestor:
        proxies.append(ancestor.service)
        ancestor = ancestor.granted_by_pgt.granted_by_pt

    pgt = None
    if pgturl is not None:
        logger.debug("Proxy-granting ticket request received for %s" % pgturl)
        pgt = ProxyGrantingTicket.objects.create_ticket(service, pgturl,
                                                        user=pt.user,
                                                        granted_by_pt=pt)
    return pt, attributes, pgt, proxies
def predict_and_score(self, eval_instances, random=False, verbosity=0):
    '''Return most likely outputs and scores for the outputs given in
    `eval_instances`, as a tuple.

    Equivalent to ``(self.predict(eval_instances), self.score(eval_instances))``,
    but subclasses can override this to combine the two calls and reduce
    duplicated work.  Either the two separate methods or this one (or all
    of them) should be overridden.

    :param eval_instances: The data to use to evaluate the model.  Instances
        should have at least the `input` and `output` fields populated;
        `output` defines which score is returned.
    :param random: If `True`, sample from the probability distribution
        defined by the classifier rather than output the most likely
        prediction.
    :param verbosity: The level of diagnostic output, relative to the
        global --verbosity option.
    :type eval_instances: list(instance.Instance)
    :returns: tuple(list(output_type), list(float))
    '''
    # Guard against mutual recursion when predict/score are themselves
    # left at their defaults (which delegate back to this method).
    if getattr(self, '_using_default_separate', False):
        raise NotImplementedError
    self._using_default_combined = True
    predictions = self.predict(eval_instances, random=random, verbosity=verbosity)
    scores = self.score(eval_instances, verbosity=verbosity)
    return (predictions, scores)
def get_num_names(include_expired=False, proxy=None, hostport=None):
    """Get the number of names, optionally counting the expired ones.

    :param include_expired: when True, query the cumulative (expired-inclusive) count
    :param proxy: an already-connected node proxy; built from hostport when None
    :param hostport: host:port of the node to connect to (one of proxy/hostport required)
    :return: the integer count on success, or {'error': ..., 'http_status': ...} on failure
    """
    assert proxy or hostport, 'Need proxy or hostport'
    if proxy is None:
        proxy = connect_hostport(hostport)

    # JSON schema the node's response payload must satisfy.
    schema = {
        'type': 'object',
        'properties': {
            'count': {
                'type': 'integer',
                'minimum': 0,
            },
        },
        'required': [
            'count',
        ],
    }

    # Wrap into the standard envelope schema used for node responses.
    count_schema = json_response_schema(schema)

    resp = {}
    try:
        if include_expired:
            resp = proxy.get_num_names_cumulative()
        else:
            resp = proxy.get_num_names()

        resp = json_validate(count_schema, resp)
        if json_is_error(resp):
            # node returned a well-formed error payload; pass it through
            return resp
    except ValidationError as e:
        # response did not match the schema — likely an out-of-date node
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        resp = {'error': 'Server response did not match expected schema.  You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp
    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp
    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp
    except Exception as ee:
        # catch-all boundary: log and convert to an error payload
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node.  Try again with `--debug`.', 'http_status': 500}
        return resp

    return resp['count']
def BinaryRoche(r, D, q, F, Omega=0.0):
    r"""Compute the asynchronous, eccentric Roche potential [Wilson1979]_.

    .. math::

        \Omega = \frac{1}{\sqrt{x^2+y^2+z^2}}
               + q\left(\frac{1}{\sqrt{(x-D)^2+y^2+z^2}} - \frac{x}{D^2}\right)
               + \frac{1}{2}F^2(1+q)(x^2+y^2)

    If ``Omega`` is given, the difference with that value is returned.

    @param r: relative radius vector (3 components)
    @param D: instantaneous separation
    @param q: mass ratio
    @param F: synchronicity parameter
    @param Omega: value of the potential to subtract
    """
    x, y, z = r[0], r[1], r[2]
    # potential of the primary
    primary = 1.0 / sqrt(x * x + y * y + z * z)
    # potential of the secondary, displaced by D along x
    dx = x - D
    secondary = q * (1.0 / sqrt(dx * dx + y * y + z * z) - x / D / D)
    # centrifugal term from (asynchronous) rotation
    rotation = 0.5 * F * F * (1 + q) * (x * x + y * y)
    return primary + secondary + rotation - Omega
def pair_tree_creator(meta_id):
    """Split *meta_id* into a pairtree filesystem path.

    The identifier is chopped into consecutive 2-character chunks (the
    final chunk is a single character when the length is odd) and the
    chunks become directory components, e.g. ``"abcde"`` ->
    ``"/ab/cd/e/"`` (using the platform separator).
    """
    # meta_id[i:i + 2] naturally yields a 1-char final chunk for odd
    # lengths, so the original's last-character special case was dead
    # code; stepping range() by 2 replaces the `% 2: continue` skip.
    chunks = [meta_id[i:i + 2] for i in range(0, len(meta_id), 2)]
    return os.sep + os.sep.join(chunks) + os.sep
def gff(args):
    """
    %prog gff btabfile

    Convert btab file generated by AAT to gff3 format.
    """
    from jcvi.utils.range import range_minmax
    from jcvi.formats.gff import valid_gff_parent_child, valid_gff_type

    p = OptionParser(gff.__doc__)
    p.add_option("--source", default=None,
                 help="Specify GFF source." +
                 " By default, it picks algorithm used to generate btab file." +
                 " [default: %default]")
    p.add_option("--type", default="protein_match", choices=valid_gff_type,
                 help="GFF feature type [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    btabfile, = args
    btabdict = {}
    btab = Btab(btabfile, aat_dialect=True)
    osource = opts.source or "aat"
    otype = opts.type
    # child feature type corresponding to the chosen parent type
    octype = valid_gff_parent_child[otype]
    # Group btab lines by key, collecting coordinates and child gff lines.
    for b in btab:
        # NOTE(review): `nargs` and `id` deliberately leak out of this loop
        # and are reused below; `nargs` is unbound if the btab file is empty,
        # and `id` shadows the builtin.
        nargs = b.nargs
        id = b.query + "-" + otype + "{0:05d}".format(b.chainNum)
        key = b.key
        if key not in btabdict:
            btabdict[key] = {'id': id,
                             'method': b.method,
                             'query': b.query,
                             'subject': b.subject,
                             'strand': b.qStrand,
                             'sDesc': b.sDesc,
                             'coords': [],
                             'children': []}
        btabdict[key]['coords'].append((b.qStart, b.qStop))
        btabdict[key]['children'].append(b.gffline(source=osource, type=octype, id=id))

    # Emit one parent line (spanning min/max coords) followed by its children.
    # NOTE(review): .itervalues() is Python 2 only; under Python 3 this must
    # become .values().
    for v in btabdict.itervalues():
        # Build an empty BtabLine of the right arity and fill in the parent fields.
        b = BtabLine("\t".join(str(x) for x in [0] * nargs), aat_dialect=True)
        id = v['id']
        b.query = v['query']
        b.method = v['method']
        b.subject = v['subject']
        b.qStrand = v['strand']
        b.sDesc = v['sDesc']
        b.qStart, b.qStop = range_minmax(v['coords'])
        print(b.gffline(source=osource, type=otype, primary_tag="ID", id=id))
        print("\n".join(v['children']))
def load_frontends(config, callback, internal_attributes):
    """Load all frontend modules specified in the config.

    :type config: satosa.satosa_config.SATOSAConfig
    :type callback: (satosa.context.Context, satosa.internal.InternalData) ->
        satosa.response.Response
    :type internal_attributes: dict[string, dict[str, str | list[str]]]
    :rtype: Sequence[satosa.frontends.base.FrontendModule]

    :param config: The configuration of the satosa proxy
    :param callback: Function that will be called by the frontend after the
        authentication request has been processed.
    :return: A list of frontend modules
    """
    loaded_frontends = _load_plugins(config.get("CUSTOM_PLUGIN_MODULE_PATHS"),
                                     config["FRONTEND_MODULES"],
                                     frontend_filter,
                                     config["BASE"],
                                     internal_attributes,
                                     callback)
    logger.info("Setup frontends: %s" % [module.name for module in loaded_frontends])
    return loaded_frontends
def shuffle_step(entries, step):
    '''Return a copy of *entries* shuffled within consecutive chunks of
    size *step*: elements only move inside their own chunk, so the
    chunk-level ordering of the sequence is preserved.'''
    result = []
    for start in range(0, len(entries), step):
        chunk = entries[start:start + step]
        shuffle(chunk)
        result.extend(chunk)
    return result
def download_file(self, remote_filename, local_filename=None):
    """Download file from github.

    Args:
        remote_filename (str): The name of the file as defined in the git
            repository.
        local_filename (str, optional): Defaults to None. The name the
            file should be written under on the local filesystem (falls
            back to ``remote_filename``).
    """
    status = 'Failed'
    if local_filename is None:
        local_filename = remote_filename

    # Unless --force was given, ask before clobbering an existing file.
    if not self.args.force and os.access(local_filename, os.F_OK):
        if not self._confirm_overwrite(local_filename):
            self._print_results(local_filename, 'Skipped')
            return

    url = '{}{}'.format(self.base_url, remote_filename)
    r = requests.get(url, allow_redirects=True)
    if r.ok:
        # BUG FIX: use a context manager so the file handle is closed
        # deterministically (the original leaked the handle from
        # open(...).write(...)).
        with open(local_filename, 'wb') as fh:
            fh.write(r.content)
        status = 'Success'
    else:
        self.handle_error('Error requesting: {}'.format(url), False)

    # print download status
    self._print_results(local_filename, status)
def force_update(self):
    """Force the module to refresh its output immediately."""
    # Nothing to do for modules that are not currently running.
    if self.disabled or self.terminated or not self.enabled:
        return
    # Expire every method's cache so the next run recomputes its output.
    for name, info in self.methods.items():
        info["cached_until"] = time()
        if self.config["debug"]:
            self._py3_wrapper.log("clearing cache for method {}".format(name))
    # schedule the module for an update
    self._py3_wrapper.timeout_queue_add(self)
def version_parser(version_string):
    """Parse a registry/hub version tag of the form ``v<N>-<commit>-<branch>``.

    :param version_string: The version tag as returned by the registry/hub API
    :return: A ``Version`` tuple ``(version_number, commit, branch)``
    :raises ValueError: when the version format is unknown/incorrect
    """
    # BUG FIX: the original used maxsplit=3, which produced four fields —
    # and therefore an unpacking ValueError — whenever the branch name
    # itself contained a dash (e.g. "v1-abc123-feature-x").  maxsplit=2
    # keeps everything after the second dash inside the branch field.
    version, commit, branch = version_string.split('-', 2)
    if not version.startswith('v'):
        raise ValueError("version must start with 'v': {}".format(version_string))
    # Remove the "v"; int() raises ValueError for a malformed number.
    version_number = int(version[1:])
    return Version(version_number, commit, branch)
def superclasses(self, inherited=False):
    """Iterate over the superclasses of the class.

    Python equivalent of the CLIPS class-superclasses command; when
    *inherited* is true, inherited superclasses are included as well.
    """
    data = clips.data.DataObject(self._env)
    # EnvClassSuperclasses fills `data` with the superclass names.
    lib.EnvClassSuperclasses(self._env, self._cls, data.byref, int(inherited))
    for superclass in classes(self._env, data.value):
        yield superclass
def section_tortuosity(neurites, neurite_type=NeuriteType.all):
    '''Return the tortuosity of every section in a collection of neurites.

    Delegates the per-section computation to
    ``sectionfunc.section_tortuosity`` via ``map_sections``.
    '''
    per_section = sectionfunc.section_tortuosity
    return map_sections(per_section, neurites, neurite_type=neurite_type)
def reset_namespace(self):
    """Reset the namespace by removing all names defined by the user.

    Delegates to the shell widget, showing a message and warning the
    user according to this client's ``reset_warning`` setting.
    """
    show_warning = self.reset_warning
    self.shellwidget.reset_namespace(message=True, warning=show_warning)
def valid_result(self, r):
    '''Return True when the result *r* should be reported.

    A result is invalid (filtered out) when:
    - every one of its source paths falls under a filtered source path, or
    - a result with the same description was reported and saved during a
      previous run.
    '''
    filtered_paths = self._paths_to_filter
    mapped_files = [element['source_mapping']['filename_absolute']
                    for element in r['elements']
                    if 'source_mapping' in element]

    def is_filtered(filename):
        return any(path in filename for path in filtered_paths)

    if r['elements'] and all(is_filtered(filename) for filename in mapped_files):
        return False
    previously_seen = [previous['description'] for previous in self._previous_results]
    return r['description'] not in previously_seen
def build_list_result(results, xml):
    """Build a paginated list result.

    :param results: items already scraped from the page
    :param xml: the parsed page (element supporting ``xpath``)
    :return: ``{'results': list, 'count': int, 'next_start': int | None}``.
        A ``count`` different from ``len(results)`` means there are more
        items; a non-None ``next_start`` is the offset of the next page.
    """
    count_texts = xml.xpath('//div[@class="paginator"]/span[@class="count"]/text()')
    next_links = xml.xpath('//div[@class="paginator"]/span[@class="next"]/a/@href')
    if count_texts:
        count = int(re.search(r'\d+', count_texts[0]).group())
    else:
        # no paginator on the page: everything we saw is everything there is
        count = len(results)
    if next_links:
        next_start = int(re.search(r'start=(\d+)', next_links[0]).groups()[0])
    else:
        next_start = None
    return {'results': results, 'count': count, 'next_start': next_start}
def _update_likelihood_model(self, inst, partition_parameters, tree):
    """Configure likelihood instance *inst* from *partition_parameters* and *tree*.

    Builds the substitution model named by ``partition_parameters['model']``
    ('LG', 'WAG' or 'GTR'), wraps it in a transition matrix, and pushes the
    tree, alpha value and matrix into *inst*.

    :raises ValueError: for an unrecognised model name
    """
    model_name = partition_parameters['model']
    freqs = partition_parameters.get('frequencies')

    # Build the substitution model from the parameter dict.
    if model_name == 'GTR':
        # GTR additionally needs exchangeability rates
        rates = partition_parameters.get('rates')
        subs_model = phylo_utils.models.GTR(rates, freqs, True)
    elif model_name == 'LG':
        subs_model = phylo_utils.models.LG(freqs)
    elif model_name == 'WAG':
        subs_model = phylo_utils.models.WAG(freqs)
    else:
        raise ValueError("Can't handle this model: {}".format(model_name))

    transition_matrix = phylo_utils.markov.TransitionMatrix(subs_model)
    inst.set_tree(tree)
    inst.update_alpha(partition_parameters['alpha'])
    inst.update_transition_matrix(transition_matrix)
def _get_value_from_config(self, section, name):
    """Load this parameter's default from the configuration.

    Returns ``_no_value`` when the section/option does not exist,
    otherwise the parsed value.
    """
    conf = configuration.get_config()
    try:
        raw_value = conf.get(section, name)
    except (NoSectionError, NoOptionError, KeyError):
        return _no_value
    return self.parse(raw_value)
def calc_inuh_v1(self):
    """Calculate the unit hydrograph input.

    Basic equation: ``InUH = RelLandArea * Q0 + Q1``

    The unit hydrograph receives base flow (Q1) from the whole subbasin,
    but direct flow (Q0) only from the land zones (field, forest and
    glacier), so Q0 is scaled by |RelLandArea|.  For example, with
    rellandarea = 0.5, q0 = 4.0 and q1 = 1.0, inuh becomes 3.0.

    Required derived parameters: |RelLandArea|
    Required flux sequences: |Q0|, |Q1|
    Calculated flux sequence: |InUH|
    """
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    fluxes.inuh = derived.rellandarea * fluxes.q0 + fluxes.q1
def flags(self, index):
    """Override Qt method: the first four columns are enabled
    (read-only); everything else carries no flags."""
    if not index.isValid():
        return Qt.ItemIsEnabled
    if index.column() in (0, 1, 2, 3):
        return Qt.ItemFlags(Qt.ItemIsEnabled)
    return Qt.ItemFlags(Qt.NoItemFlags)
def _get_build_command(self, mkdocs_site_path: Path) -> str:
    '''Assemble the ``mkdocs build`` command line.

    :param mkdocs_site_path: Path to the output directory for the site
    :returns: the full command as a single string
    '''
    mkdocs_binary = self._mkdocs_config.get('mkdocs_path', 'mkdocs')
    site_dir = self._escape_control_characters(str(mkdocs_site_path))
    command = f'{mkdocs_binary} build -d "{site_dir}"'
    self.logger.debug(f'Build command: {command}')
    return command
def clean(self):
    '''Validate cross-field consistency of the transaction party.

    Raises ``ValidationError`` when the staff member's linked user
    account disagrees with ``user``, or when a location is combined
    with a user or staff member (a location may only be specified if
    user and staffMember are not).
    '''
    staff_account = self.staffMember.userAccount if self.staffMember else None
    if staff_account and self.user and not staff_account == self.user:
        raise ValidationError(_('Transaction party user does not match staff member user.'))
    if self.location and (self.user or self.staffMember):
        raise ValidationError(_('Transaction party may not be both a Location and a User or StaffMember.'))
def pericenter(self, return_times=False, func=np.mean, interp_kwargs=None, minimize_kwargs=None, approximate=False):
    """Estimate the pericenter(s) of the orbit by identifying local minima in
    the spherical radius and interpolating between timesteps near the minima.

    By default, this returns the mean of all local minima (pericenters). To
    get, e.g., the minimum pericenter, pass in ``func=np.min``. To get all
    pericenters, pass in ``func=None``.

    Parameters
    ----------
    func : func (optional)
        A function to evaluate on all of the identified pericenter times.
    return_times : bool (optional)
        Also return the pericenter times.
    interp_kwargs : dict (optional)
        Keyword arguments to be passed to
        :class:`scipy.interpolate.InterpolatedUnivariateSpline`.
    minimize_kwargs : dict (optional)
        Keyword arguments to be passed to :class:`scipy.optimize.minimize`.
    approximate : bool (optional)
        Compute an approximate pericenter by skipping interpolation.

    Returns
    -------
    peri : float, :class:`~numpy.ndarray`
        Either a single number or an array of pericenters.
    times : :class:`~numpy.ndarray` (optional, see ``return_times``)
        If ``return_times=True``, also returns an array of the pericenter times.
    """
    if return_times and func is not None:
        raise ValueError("Cannot return times if reducing pericenters "
                         "using an input function. Pass `func=None` if "
                         "you want to return all individual pericenters "
                         "and times.")

    # func=None means "return all pericenters unreduced"; the identity
    # function keeps the code path below uniform.
    if func is None:
        reduce = False
        func = lambda x: x
    else:
        reduce = True

    # time must increase; reverse the orbit view if it does not
    if self.t[-1] < self.t[0]:
        self = self[::-1]

    vals = []
    times = []
    for orbit in self.orbit_gen():
        # Minima of r are located as maxima of -r by the shared helper;
        # the sign is flipped back when collecting the values below.
        v, t = orbit._max_helper(-orbit.physicsspherical.r,  # pericenter
                                 interp_kwargs=interp_kwargs,
                                 minimize_kwargs=minimize_kwargs,
                                 approximate=approximate)
        vals.append(func(-v))  # negative for pericenter
        times.append(t)

    return self._max_return_helper(vals, times, return_times, reduce)
def scatter_group(ax, key, imask, adata, Y, projection='2d', size=3, alpha=None):
    """Scatter the cells of one category of ``adata.obs[key]`` using the
    representation *Y*.

    Returns the boolean mask selecting the group's rows in *Y*.
    """
    category = adata.obs[key].cat.categories[imask]
    mask = category == adata.obs[key].values
    color = adata.uns[key + '_colors'][imask]
    if not isinstance(color[0], str):
        # RGB(A) tuple stored instead of a string -> convert to hex
        from matplotlib.colors import rgb2hex
        color = rgb2hex(adata.uns[key + '_colors'][imask])
    if not is_color_like(color):
        raise ValueError('"{}" is not a valid matplotlib color.'.format(color))
    n_dims = 3 if projection == '3d' else 2
    coords = [Y[mask, d] for d in range(n_dims)]
    ax.scatter(*coords,
               marker='.',
               alpha=alpha,
               c=color,
               edgecolors='none',
               s=size,
               label=category,
               rasterized=settings._vector_friendly)
    return mask
def items_differ ( jsonitems , dbitems , subfield_dict ) : """check whether or not jsonitems and dbitems differ"""
# short circuit common cases if len ( jsonitems ) == len ( dbitems ) == 0 : # both are empty return False elif len ( jsonitems ) != len ( dbitems ) : # if lengths differ , they ' re definitely different return True original_jsonitems = jsonitems jsonitems = copy . deepcopy ( jsonitems ) keys = jsonitems [ 0 ] . keys ( ) # go over dbitems looking for matches for dbitem in dbitems : order = getattr ( dbitem , 'order' , None ) match = None for i , jsonitem in enumerate ( jsonitems ) : # check if all keys ( excluding subfields ) match for k in keys : if k not in subfield_dict and getattr ( dbitem , k ) != jsonitem . get ( k , None ) : break else : # all fields match so far , possibly equal , just check subfields now for k in subfield_dict : jsonsubitems = jsonitem [ k ] dbsubitems = list ( getattr ( dbitem , k ) . all ( ) ) if items_differ ( jsonsubitems , dbsubitems , subfield_dict [ k ] [ 2 ] ) : break else : # if the dbitem sets ' order ' , then the order matters if order is not None and int ( order ) != original_jsonitems . index ( jsonitem ) : break # these items are equal , so let ' s mark it for removal match = i break if match is not None : # item exists in both , remove from jsonitems jsonitems . pop ( match ) else : # exists in db but not json return True # if we get here , jsonitems has to be empty because we asserted that the length was # the same and we found a match for each thing in dbitems , here ' s a safety check just in case if jsonitems : # pragma : no cover return True return False
def read_msbuild_xml(path, values=None):
    """Read the MSBuild rule XML file at *path* and merge its properties
    into *values*.

    Keyword arguments:
    path -- filesystem path of the MSBuild rule XML file
    values -- mapping to merge the converted rules into; a fresh dict is
              created when omitted (default None)

    Returns the mapping, keyed by rule name.
    """
    # BUG FIX: the original signature used a mutable default (values={}),
    # so every call relying on the default shared — and kept accumulating
    # results into — one and the same dict object.
    if values is None:
        values = {}

    # Attempt to read the file contents
    try:
        document = parse(path)
    except Exception:
        # best-effort: log and return what we have so far
        logging.exception('Could not read MS Build XML file at %s', path)
        return values

    # Convert the XML to JSON format
    logging.info('Processing MS Build XML file at %s', path)

    # Get the rule node
    rule = document.getElementsByTagName('Rule')[0]
    rule_name = rule.attributes['Name'].value

    logging.info('Found rules for %s', rule_name)

    # Proprocess Argument values
    __preprocess_arguments(rule)

    # Get all the values
    converted_values = []
    __convert(rule, 'EnumProperty', converted_values, __convert_enum)
    __convert(rule, 'BoolProperty', converted_values, __convert_bool)
    __convert(rule, 'StringListProperty', converted_values, __convert_string_list)
    __convert(rule, 'StringProperty', converted_values, __convert_string)
    __convert(rule, 'IntProperty', converted_values, __convert_string)

    values[rule_name] = converted_values
    return values