signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def template_for_action(self, action):
    """Build the template path for *action*: "<module>/<model>_<action>.html"."""
    module = self.module_name.lower()
    model = self.model_name.lower()
    return "%s/%s_%s.html" % (module, model, action)
def nextLunarEclipse(date):
    """Return a Datetime for the maximum phase of the next global lunar eclipse."""
    # Search forward in time from the given date.
    eclipse = swe.lunarEclipseGlobal(date.jd, backward=False)
    return Datetime.fromJD(eclipse['maximum'], date.utcoffset)
def run_bcl2fastq(run_folder, ss_csv, config):
    """Run bcl2fastq for de-multiplexing and fastq generation.

    run_folder -- directory of Illumina outputs
    ss_csv -- Samplesheet CSV file describing samples.
    Returns the fastq output directory.
    """
    bc_dir = os.path.join(run_folder, "Data", "Intensities", "BaseCalls")
    output_dir = os.path.join(run_folder, "fastq")
    # A Makefile marks a previous configuration run; only configure once.
    if not os.path.exists(os.path.join(output_dir, "Makefile")):
        subprocess.check_call(["configureBclToFastq.pl", "--no-eamss",
                               "--input-dir", bc_dir,
                               "--output-dir", output_dir,
                               "--sample-sheet", ss_csv])
    with utils.chdir(output_dir):
        cores = str(utils.get_in(config, ("algorithm", "num_cores"), 1))
        cmd = ["make", "-j", cores]
        # Batch systems run the make step through the scheduler when configured.
        if "submit_cmd" in config["process"] and "bcl2fastq_batch" in config["process"]:
            _submit_and_wait(cmd, cores, config, output_dir)
        else:
            subprocess.check_call(cmd)
    return output_dir
def prepare(self, data_batch, sparse_row_id_fn=None):
    """Prepare the main and auxiliary modules for processing a data batch.

    For modules holding ``row_sparse`` parameters in KVStore, the copy in
    KVStore is updated by ``update()`` but not broadcast back to every
    device/machine; ``prepare`` broadcasts the ``row_sparse`` parameters
    needed for the next batch.

    Parameters
    ----------
    data_batch : DataBatch
        The current batch of data for forward computation.
    sparse_row_id_fn : callable, optional
        Takes ``data_batch`` and returns a dict of str -> NDArray, mapping
        a ``row_sparse`` parameter name to the row ids to pull from the
        kvstore.
    """
    # Forward the call to the parent module, then mirror it on the aux module.
    super(SVRGModule, self).prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
    self._mod_aux.prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
def get_groups_by_name(self, name, parent=None):
    """Retrieve all groups matching *name*, optionally restricted to *parent*.

    :param name: The name of the group to look for.
    :param parent: A PBXGroup to filter by; when None, all matches are returned.
    :return: A list of matching groups.
    """
    candidates = self.objects.get_objects_in_section(u'PBXGroup')
    matches = [grp for grp in candidates if grp.get_name() == name]
    if parent:
        matches = [grp for grp in matches if parent.has_child(grp)]
    return matches
def _print_layers(targets, components, tasks):
    """Print dependency information, grouping components by their position in
    the dependency graph: components with no dependencies land in layer 0,
    components depending only on layer 0 land in layer 1, and so on. Nodes in
    a circular dependency (and their dependencies) are colored red.

    Arguments
    targets -- the targets explicitly requested
    components -- full configuration for all components in a project
    """
    layer = 0
    expected_count = len(tasks)
    counts = {}

    def _add_layer(resolved, dep_fn):
        # A component is only "really" resolved once all of its tasks are.
        nonlocal layer
        nonlocal counts
        nonlocal expected_count
        really_resolved = []
        for resolved_task in resolved:
            component_tasks = counts.get(resolved_task[0], [])
            component_tasks.append(resolved_task)
            if len(component_tasks) == expected_count:
                really_resolved.extend(component_tasks)
                del counts[resolved_task[0]]
            else:
                counts[resolved_task[0]] = component_tasks
        if really_resolved:
            indentation = " " * 4
            print("{}subgraph cluster_{} {{".format(indentation, layer))
            print('{}label="Layer {}"'.format(indentation * 2, layer))
            dep_fn(indentation * 2, really_resolved)
            print("{}}}".format(indentation))
            # NOTE(review): layer increments only when a cluster was emitted —
            # confirm against the original call sites.
            layer += 1

    _do_dot(targets, components, tasks, _add_layer)
def create(obj: PersistedObject, obj_type: Type[T], errors: Dict[Type, Exception]):
    """Helper factory for NoParserFoundForUnionType.

    Provided because putting this in the constructor triggers a bug in Nose
    tests (https://github.com/nose-devs/nose/issues/725).

    :param obj: the persisted object that could not be parsed
    :param errors: a dictionary of the errors raised for each alternate type tried
    :return: the constructed exception (not raised)
    """
    message = ('{obj} cannot be parsed as a {typ} because no parser could be found for any of '
               'the alternate types. Caught exceptions: {errs}').format(
        obj=obj, typ=get_pretty_type_str(obj_type), errs=errors)
    exc = NoParserFoundForUnionType(message)
    # Keep the per-type errors available to callers for diagnostics.
    exc.errors = errors
    return exc
def get_stdlib_path():
    """Return the standard-library path of the current Python installation.

    The version branch only exists for Python 2.6, which lacks ``sysconfig``;
    every later interpreter takes the ``sysconfig`` path.
    """
    if sys.version_info < (2, 7):
        return os.path.join(sys.prefix, 'lib')
    import sysconfig
    return sysconfig.get_paths()['stdlib']
def linkify(self, commands, notificationways):
    """Create links between objects: contacts -> notificationways.

    :param notificationways: notificationways to link
    :type notificationways: alignak.objects.notificationway.Notificationways
    :return: None
    TODO: Clean this function
    """
    self.linkify_with_notificationways(notificationways)
    # Resolve both notification command lists against the commands registry.
    for prop in ('service_notification_commands', 'host_notification_commands'):
        self.linkify_command_list_with_commands(commands, prop)
def get_portchannel_info_by_intf_output_lacp_actor_priority ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
get_portchannel_info_by_intf = ET . Element ( "get_portchannel_info_by_intf" )
config = get_portchannel_info_by_intf
output = ET . SubElement ( get_portchannel_info_by_intf , "output" )
lacp = ET . SubElement ( output , "lacp" )
actor_priority = ET . SubElement ( lacp , "actor-priority" )
actor_priority . text = kwargs . pop ( 'actor_priority' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def _parse_world_info(self, world_info_table):
    """Parse the World Information table from Tibia.com onto this object.

    Parameters
    ----------
    world_info_table : list of bs4.Tag
        Table rows; each row holds a (field, value) pair of cells.
    """
    world_info = {}
    for row in world_info_table:
        cells = [cell.text.strip() for cell in row.find_all('td')]
        field, value = cells
        # Normalize field labels: non-breaking spaces and blanks to
        # underscores, drop colons, lowercase.
        field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").lower()
        world_info[field] = value.replace("\xa0", " ")
    try:
        self.online_count = int(world_info.pop("players_online"))
    except KeyError:
        self.online_count = 0
    self.location = try_enum(WorldLocation, world_info.pop("location"))
    self.pvp_type = try_enum(PvpType, world_info.pop("pvp_type"))
    self.transfer_type = try_enum(TransferType, world_info.pop("transfer_type", None), TransferType.REGULAR)
    m = record_regexp.match(world_info.pop("online_record"))
    if m:
        self.record_count = int(m.group("count"))
        self.record_date = parse_tibia_datetime(m.group("date"))
    if "world_quest_titles" in world_info:
        self.world_quest_titles = [q.strip() for q in world_info.pop("world_quest_titles").split(",")]
    self.experimental = world_info.pop("game_world_type") != "Regular"
    self._parse_battleye_status(world_info.pop("battleye_status"))
    self.premium_only = "premium_type" in world_info
    month, year = world_info.pop("creation_date").split("/")
    month = int(month)
    year = int(year)
    # Two-digit years: 91-99 -> 19xx, 00-90 -> 20xx.
    year += 1900 if year > 90 else 2000
    self.creation_date = "%d-%02d" % (year, month)
    # Any remaining fields map straight onto attributes when possible.
    for key, val in world_info.items():
        try:
            setattr(self, key, val)
        except AttributeError:
            pass
def list_resources(self, lang):
    """Return the resources available for *lang*.

    Each resource is a dict containing the slug, name, i18n_type,
    source_language_code and the category.
    """
    url = '/api/2/project/%s/resources/' % (self.get_project_slug(lang),)
    return registry.registry.http_handler.get(url)
def customer_webhook_handler(event):
    """Handle updates to customer objects.

    Determines the crud_type and handles the event only when a customer
    exists locally. As customers are tied to local users, djstripe will not
    create customers that do not already exist locally.

    Docs and an example customer webhook response:
    https://stripe.com/docs/api#customer_object
    """
    if not event.customer:
        # No local customer to act on; djstripe never creates one here.
        return
    _handle_crud_like_event(target_cls=models.Customer, event=event,
                            crud_exact=True, crud_valid=True)
def is_excluded(root, excludes):
    """Return True if directory *root* falls under any entry in *excludes*.

    Both sides carry trailing separators, which avoids common-prefix
    accidents — an exclude "foo" must not also exclude "foobar".
    """
    sep = os.path.sep
    normalized = root if root.endswith(sep) else root + sep
    return any(normalized.startswith(exclude) for exclude in excludes)
def use_comparative_gradebook_view(self):
    """Pass through to provider GradeSystemGradebookSession.use_comparative_gradebook_view."""
    self._gradebook_view = COMPARATIVE
    # self._get_provider_session('grade_system_gradebook_session')  # To make sure the session is tracked
    # Forward the preference to every provider session; those that don't
    # implement the method are skipped.
    for session in self._get_provider_sessions():
        try:
            session.use_comparative_gradebook_view()
        except AttributeError:
            pass
def _get_range_from_filters(cls, filters, model_class):
    """Get a property range from user-provided filters.

    Validates that there is one and only one closed range on a single
    property.

    Args:
      filters: user supplied filters. Each filter is a list/tuple of
        (<property_name_as_str>, <query_operator_as_str>, <value>); the
        value's type must satisfy the property's type.
      model_class: the model class for the entity type to apply filters on.

    Returns:
      A tuple (property, start_filter, end_filter). ``property`` is the
      model's field the range is on; the filters define the range's ends.
      (None, None, None) when no range is found.

    Raises:
      BadReaderParamsError: if any filter is invalid in any way.
    """
    if not filters:
        return None, None, None
    range_property = None
    start_val = None
    end_val = None
    start_filter = None
    end_filter = None
    for f in filters:
        prop, op, val = f
        if op in [">", ">=", "<", "<="]:
            if range_property and range_property != prop:
                raise errors.BadReaderParamsError("Range on only one property is supported.")
            range_property = prop
            if val is None:
                raise errors.BadReaderParamsError("Range can't be None in filter %s", f)
            if op in [">", ">="]:
                if start_val is not None:
                    raise errors.BadReaderParamsError("Operation %s is specified more than once.", op)
                start_val = val
                start_filter = f
            else:
                if end_val is not None:
                    raise errors.BadReaderParamsError("Operation %s is specified more than once.", op)
                end_val = val
                end_filter = f
        elif op != "=":
            raise errors.BadReaderParamsError("Only < <= > >= = are supported as operation. Got %s", op)
    if not range_property:
        return None, None, None
    if start_val is None or end_val is None:
        raise errors.BadReaderParamsError("Filter should contains a complete range on property %s", range_property)
    if issubclass(model_class, db.Model):
        property_obj = model_class.properties()[range_property]
    else:
        property_obj = (model_class._properties[  # pylint: disable=protected-access
            range_property])
    # dict views cannot be concatenated with "+" on Python 3; build explicit
    # lists before combining the supported-property tables.
    supported_properties = (list(_DISCRETE_PROPERTY_SPLIT_FUNCTIONS) +
                            list(_CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS))
    if not isinstance(property_obj, tuple(supported_properties)):
        raise errors.BadReaderParamsError("Filtered property %s is not supported by sharding.", range_property)
    if not start_val < end_val:
        raise errors.BadReaderParamsError("Start value %s should be smaller than end value %s", start_val, end_val)
    return property_obj, start_filter, end_filter
def _dot1q_headers_size(layer):
    """Calculate the size of the lower dot1q layers (if present).

    :param layer: the layer to start at
    :return: (size of vlan headers, layer below the lowest vlan header)
    """
    total = 0
    current = layer
    # Walk down through consecutive Dot1Q layers, accumulating header bytes.
    while current and isinstance(current, Dot1Q):
        total += LLDPDU.DOT1Q_HEADER_LEN
        current = current.underlayer
    return total, current
def _set_local_as(self, v, load=False):
    """Setter method for local_as, mapped from YANG variable
    /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/local_as (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_local_as is considered a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_local_as()
    directly.
    """
    # Unwrap typed-container values before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=local_as.local_as, is_container='container', presence=False, yang_name="local-as", rest_name="local-as", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Assign local-as number to neighbor', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-break-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface the expected generated type so callers can diagnose.
        raise ValueError({
            'error-string': """local_as must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=local_as.local_as, is_container='container', presence=False, yang_name="local-as", rest_name="local-as", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Assign local-as number to neighbor', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-break-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
        })
    self.__local_as = t
    if hasattr(self, '_set'):
        self._set()
def get_commands(self):
    """Return the commands stored in the registry as an OrderedDict sorted by name."""
    return OrderedDict((name, self.registry[name]) for name in sorted(self.registry))
def make_converters(data_types) -> dict:
    """Map data-field names to casting callables.

    For each field, use the registered caster for its declared type when one
    exists in TYPE_CASTERS; otherwise fall back to the type/class itself.

    Parameters
    ----------
    data_types : dict-like
        data field name str -> python primitive type or class.

    Example
    -------
    make_converters({'student': str, 'score': float, 'grade': Grade}) ->
    {'student': passthrough, 'score': parse_float, 'grade': Grade}
    """
    converters = {}
    for field, kind in data_types.items():
        converters[field] = TYPE_CASTERS.get(kind, kind)
    return converters
def stop_playback(self):
    """Stop playback from the audio sink."""
    sink = self._sink
    # Drain buffered audio before stopping so nothing stays queued.
    sink.flush()
    sink.stop()
    self._playing = False
def _ParseFileEntry ( self , knowledge_base , file_entry ) :
"""Parses a file entry for a preprocessing attribute .
Args :
knowledge _ base ( KnowledgeBase ) : to fill with preprocessing information .
file _ entry ( dfvfs . FileEntry ) : file entry that contains the artifact
value data .
Raises :
PreProcessFail : if the preprocessing fails .""" | file_object = file_entry . GetFileObject ( )
try :
self . _ParseFileData ( knowledge_base , file_object )
finally :
file_object . close ( ) |
def list_courses(self):
    """List enrolled courses.

    @return: List of enrolled courses.
    @rtype: [str]
    """
    # Delegate to an on-demand API client bound to no particular course.
    api = CourseraOnDemand(session=self._session, course_id=None, course_name=None)
    return api.list_courses()
def scope_in(ctx):
    """Build a new scope on the top of the stack; the current scope waits
    for its result.

    :param ctx: story context
    :return: cloned context pointing inside the child scope
    """
    logger.debug('# scope_in')
    logger.debug(ctx)
    ctx = ctx.clone()

    compiled_story = None
    if not ctx.is_empty_stack():
        compiled_story = ctx.get_child_story()
        logger.debug('# child')
        logger.debug(compiled_story)
        # We match the child story loop once per message, which prevents
        # multiple matches against the same message.
        ctx.matched = True
        ctx.message = modify_stack_in_message(
            ctx.message,
            lambda stack: stack[:-1] + [{
                'data': matchers.serialize(callable.WaitForReturn()),
                'step': stack[-1]['step'],
                'topic': stack[-1]['topic'],
            }])

    try:
        if not compiled_story and ctx.is_scope_level_part():
            compiled_story = ctx.get_current_story_part()
    except story_context.MissedStoryPart:
        pass

    if not compiled_story:
        compiled_story = ctx.compiled_story()

    logger.debug('# [>] going deeper')
    ctx.message = modify_stack_in_message(
        ctx.message,
        lambda stack: stack + [stack_utils.build_empty_stack_item(compiled_story.topic)])
    logger.debug(ctx)
    return ctx
def get_printer(name, color=None, ansi_code=None, force_color=False):
    """Return a function which prints a message with a coloured name prefix."""
    if force_color or supports_color():
        if color is None and ansi_code is None:
            # No explicit colour requested: derive two hash-based colours
            # from the name for a stable per-name scheme.
            cpre_1, csuf_1 = hash_coloured_escapes(name)
            cpre_2, _ = hash_coloured_escapes(name + 'salt')
            name = cpre_1 + '+' + cpre_2 + '+' + csuf_1 + ' ' + name
        else:
            name = colored(name, color=color, ansi_code=ansi_code)
    prefix = name + ': '

    def printer(text):
        print(prefix + str(text))

    return printer
async def remove_participant(self, p: Participant):
    """Remove a participant from the tournament.

    |methcoro|

    Args:
        p: the participant to remove

    Raises:
        APIException
    """
    endpoint = 'tournaments/{}/participants/{}'.format(self._id, p._id)
    await self.connection('DELETE', endpoint)
    # Keep the local cache in sync with the remote removal.
    if p in self.participants:
        self.participants.remove(p)
def register(cls):
    """Register *cls* in the global REGISTRY under its definition name.

    Returns the class unchanged, so this is usable as a class decorator.
    """
    REGISTRY[make_definition_name(cls.__name__)] = cls
    return cls
def _convert_angle_limit ( angle , joint , ** kwargs ) :
"""Converts the limit angle of the PyPot JSON file to the internal format""" | angle_pypot = angle
# No need to take care of orientation
if joint [ "orientation" ] == "indirect" :
angle_pypot = 1 * angle_pypot
# angle _ pypot = angle _ pypot + offset
return angle_pypot * np . pi / 180 |
def create(self, friendly_name, api_version=values.unset, voice_url=values.unset, voice_method=values.unset, voice_fallback_url=values.unset, voice_fallback_method=values.unset, status_callback=values.unset, status_callback_method=values.unset, voice_caller_id_lookup=values.unset, sms_url=values.unset, sms_method=values.unset, sms_fallback_url=values.unset, sms_fallback_method=values.unset, sms_status_callback=values.unset, message_status_callback=values.unset):
    """Create a new ApplicationInstance.

    :param unicode friendly_name: A string to describe the new resource
    :param unicode api_version: The API version to use to start a new TwiML session
    :param unicode voice_url: The URL to call when the phone number receives a call
    :param unicode voice_method: The HTTP method to use with the voice_url
    :param unicode voice_fallback_url: The URL to call when a TwiML error occurs
    :param unicode voice_fallback_method: The HTTP method to use with voice_fallback_url
    :param unicode status_callback: The URL to send status information to your application
    :param unicode status_callback_method: The HTTP method to use to call status_callback
    :param bool voice_caller_id_lookup: Whether to lookup the caller's name
    :param unicode sms_url: The URL to call when the phone number receives an incoming SMS message
    :param unicode sms_method: The HTTP method to use with sms_url
    :param unicode sms_fallback_url: The URL to call when an error occurs while retrieving or executing the TwiML
    :param unicode sms_fallback_method: The HTTP method to use with sms_fallback_url
    :param unicode sms_status_callback: The URL to send status information to your application
    :param unicode message_status_callback: The URL to send message status information to your application

    :returns: Newly created ApplicationInstance
    :rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
    """
    # Unset values are stripped by values.of, so only provided fields are sent.
    data = values.of({
        'FriendlyName': friendly_name,
        'ApiVersion': api_version,
        'VoiceUrl': voice_url,
        'VoiceMethod': voice_method,
        'VoiceFallbackUrl': voice_fallback_url,
        'VoiceFallbackMethod': voice_fallback_method,
        'StatusCallback': status_callback,
        'StatusCallbackMethod': status_callback_method,
        'VoiceCallerIdLookup': voice_caller_id_lookup,
        'SmsUrl': sms_url,
        'SmsMethod': sms_method,
        'SmsFallbackUrl': sms_fallback_url,
        'SmsFallbackMethod': sms_fallback_method,
        'SmsStatusCallback': sms_status_callback,
        'MessageStatusCallback': message_status_callback,
    })
    payload = self._version.create('POST', self._uri, data=data, )
    return ApplicationInstance(self._version, payload, account_sid=self._solution['account_sid'], )
async def get(self, key, *, dc=None, watch=None, consistency=None):
    """Return the specified key from the KV store.

    Parameters:
        key (str): Key to fetch
        watch (Blocking): Do a blocking query
        consistency (Consistency): Force consistency

    Returns:
        ObjectMeta: where value is the queried kv value, shaped like::

            "CreateIndex": 100,
            "ModifyIndex": 200,
            "LockIndex": 200,
            "Key": "zip",
            "Flags": 0,
            "Value": b"my data",
            "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"

    **CreateIndex** is the internal index value representing when the entry
    was created. **ModifyIndex** is the last index that modified the key and
    matches the X-Consul-Index response header, usable for blocking queries
    (including over whole subtrees). **LockIndex** counts successful lock
    acquisitions; when held, **Session** identifies the owner. **Key** is the
    full path of the entry. **Flags** is an opaque unsigned integer clients
    may use as they see fit. **Value** is a Payload object whose decoding
    depends on **Flags**.
    """
    response = await self._read(key, dc=dc, watch=watch, consistency=consistency)
    result = response.body[0]
    # Stored values are encoded; Flags select the decoding.
    result["Value"] = decode_value(result["Value"], result["Flags"])
    return consul(result, meta=extract_meta(response.headers))
def soldOutForRole(event, role):
    '''Template tag: report whether *event* is sold out for *role*.

    Returns None when either argument is not of the expected type.'''
    if isinstance(event, Event) and isinstance(role, DanceRole):
        return event.soldOutForRole(role)
    return None
def _main ( ) :
"ctox : tox with conda" | from sys import argv
arguments = argv [ 1 : ]
toxinidir = os . getcwd ( )
return main ( arguments , toxinidir ) |
def _import(self, document, element, base_location=None):
    '''Take the <import> element's children, clone them, and add them to the
    main document. Relative-location support is a bit involved: the original
    document context is lost, so the base location is stored in the DOM —
    for <types> via a temporary "base-location" attribute, and for <import>
    by resolving the relative "location" and storing it back as "location".

    document -- document we are loading
    element -- DOM Element representing <import>
    base_location -- location of the document from which this <import> was
        gleaned.
    '''
    namespace = DOM.getAttr(element, 'namespace', default=None)
    location = DOM.getAttr(element, 'location', default=None)
    if namespace is None or location is None:
        raise WSDLError('Invalid import element (missing namespace or location).')
    if base_location:
        location = basejoin(base_location, location)
        element.setAttributeNS(None, 'location', location)
    obimport = self.addImport(namespace, location)
    obimport._loaded = 1
    importdoc = DOM.loadFromURL(location)
    try:
        # A fragment in the location selects a specific element by id.
        if location.find('#') > -1:
            idref = location.split('#')[-1]
            imported = DOM.getElementById(importdoc, idref)
        else:
            imported = importdoc.documentElement
        if imported is None:
            raise WSDLError('Import target element not found for: %s' % location)
        imported_tns = DOM.findTargetNS(imported)
        if imported_tns != namespace:
            return
        if imported.localName == 'definitions':
            imported_nodes = imported.childNodes
        else:
            imported_nodes = [imported]
        parent = element.parentNode
        parent.removeChild(element)
        for node in imported_nodes:
            if node.nodeType != node.ELEMENT_NODE:
                continue
            child = DOM.importNode(document, node, 1)
            parent.appendChild(child)
            child.setAttribute('targetNamespace', namespace)
            # Carry over xmlns declarations from the imported root.
            attrsNS = imported._attrsNS
            for attrkey in attrsNS.keys():
                if attrkey[0] == DOM.NS_XMLNS:
                    attr = attrsNS[attrkey].cloneNode(1)
                    child.setAttributeNode(attr)
            # XXX Quick Hack, should be in WSDL Namespace.
            if child.localName == 'import':
                rlocation = child.getAttributeNS(None, 'location')
                alocation = basejoin(location, rlocation)
                child.setAttribute('location', alocation)
            elif child.localName == 'types':
                child.setAttribute('base-location', location)
    finally:
        importdoc.unlink()
    return location
def longest_path_weighted_nodes(G, source, target, weights=None):
    """Find a maximum-weight simple path in a DAG by dynamic programming.

    The longest path problem is NP-hard on general graphs, but on a directed
    acyclic graph it is solvable in linear time over a topological order.

    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, "M"), (3, "M")])
    >>> longest_path_weighted_nodes(G, 1, "M", weights={1: 1, 2: 1, 3: 2, "M": 1})
    ([1, 3, 'M'], 4)
    """
    assert nx.is_directed_acyclic_graph(G)
    # list() keeps compatibility with networkx >= 2, where
    # topological_sort returns a generator.
    tree = list(nx.topological_sort(G))
    node_to_index = dict((t, i) for i, t in enumerate(tree))
    nnodes = len(tree)
    weights = [weights.get(x, 1) for x in tree] if weights else [1] * nnodes
    score, fromc = weights[:], [-1] * nnodes
    si = node_to_index[source]
    ti = node_to_index[target]
    for a in tree[si:ti]:
        ai = node_to_index[a]
        for b, w in G[a].items():
            bi = node_to_index[b]
            w = w.get('weight', 1)
            d = score[ai] + weights[bi] * w
            # Favor heavier edges
            if d <= score[bi]:
                continue
            score[bi] = d
            # Update longest distance so far
            fromc[bi] = ai
    # Backtrack from the target. Use a separate cursor so `ti` still indexes
    # the target afterwards — the original returned score[-1] (the last
    # topological node) because the loop had driven `ti` to -1.
    path = []
    cur = ti
    while cur != -1:
        path.append(cur)
        cur = fromc[cur]
    path = [tree[x] for x in path[::-1]]
    return path, score[ti]
def namespace(self, name=None, function=None, recursive=None):
    """Return a reference to the namespace declaration matching the criteria."""
    matcher = scopedef.scopedef_t._impl_matchers[namespace_t.namespace]
    return self._find_single(matcher, name=name, function=function,
                             recursive=recursive)
def circ_corrcc(x, y, tail='two-sided'):
    """Correlation coefficient between two circular variables.

    Parameters
    ----------
    x : np.array
        First circular variable (expressed in radians).
    y : np.array
        Second circular variable (expressed in radians).
    tail : string
        Whether to return the 'one-sided' or 'two-sided' p-value.

    Returns
    -------
    r : float
        Correlation coefficient (rounded to 3 decimals).
    pval : float
        Uncorrected p-value.

    Notes
    -----
    Adapted from the CircStats MATLAB toolbox (Berens 2009). Use
    np.deg2rad to convert angles from degrees to radians. NaNs are
    automatically removed pairwise.

    Examples
    --------
    >>> from pingouin import circ_corrcc
    >>> x = [0.785, 1.570, 3.141, 3.839, 5.934]
    >>> y = [0.593, 1.291, 2.879, 3.892, 6.108]
    >>> r, pval = circ_corrcc(x, y)
    >>> print(r, pval)
    0.942 0.06579836070349088
    """
    from scipy.stats import norm
    x = np.asarray(x)
    y = np.asarray(y)
    if x.size != y.size:
        raise ValueError('x and y must have the same length.')
    # Pairwise removal of missing values.
    x, y = remove_na(x, y, paired=True)
    n = x.size
    # Correlation on the sines of deviations from the circular means;
    # similar to np.corrcoef(x_sin, y_sin)[0][1].
    x_sin = np.sin(x - circmean(x))
    y_sin = np.sin(y - circmean(y))
    r = np.sum(x_sin * y_sin) / np.sqrt(np.sum(x_sin ** 2) * np.sum(y_sin ** 2))
    # Test statistic, approximately standard normal under the null.
    tval = np.sqrt((n * (x_sin ** 2).mean() * (y_sin ** 2).mean())
                   / np.mean(x_sin ** 2 * y_sin ** 2)) * r
    pval = 2 * norm.sf(abs(tval))
    pval = pval / 2 if tail == 'one-sided' else pval
    return np.round(r, 3), pval
def editable_loader(context):
    """Set up the required JS/CSS for the in-line editing toolbar and controls."""
    request = context["request"]
    template_vars = {
        "has_site_permission": has_site_permission(request.user),
        "request": request,
    }
    if settings.INLINE_EDITING_ENABLED and template_vars["has_site_permission"]:
        toolbar_template = get_template("includes/editable_toolbar.html")
        template_vars["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME
        # Fall back to the current page when no explicit editable object is set.
        template_vars["editable_obj"] = context.get("editable_obj", context.get("page", None))
        template_vars["accounts_logout_url"] = context.get("accounts_logout_url", None)
        template_vars["toolbar"] = toolbar_template.render(Context(template_vars))
        template_vars["richtext_media"] = RichTextField().formfield().widget.media
    return template_vars
def _get_value(self, scalar_data_blob, dtype_enum):
    """Obtain the value for a scalar event given its blob and dtype enum.

    Args:
      scalar_data_blob: The blob obtained from the database.
      dtype_enum: The enum representing the dtype.

    Returns:
      The scalar value.
    """
    tensorflow_dtype = tf.DType(dtype_enum)
    buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)
    # np.asscalar was removed in NumPy 1.23; ndarray.item() is the
    # long-standing equivalent for extracting a size-1 array's value.
    return buf.item()
def computeNumberOfMarkers(inputFileName):
    """Count the number of markers (lines) in a BIM file.

    :param inputFileName: the name of the ``bim`` file.
    :type inputFileName: str

    :returns: the number of markers in the ``bim`` file.
    """
    # Stream over the file instead of materializing every line with
    # readlines(), which needlessly loads the whole file into memory.
    with open(inputFileName, "r") as inputFile:
        nbLine = sum(1 for _ in inputFile)
    return nbLine
def _set_option_by_index(self, index):
    """Sets a single option in the Combo by its index, returning True if it was able to."""
    # Guard clause: an index past the end of the option list is rejected.
    if index >= len(self._options):
        return False
    self._selected.set(self._options[index])
    return True
def delete_ikepolicy(self, ikepolicy):
    '''Deletes the specified IKEPolicy'''
    # Resolve the policy name/identifier to its backend id first.
    policy_id = self._find_ikepolicy_id(ikepolicy)
    result = self.network_conn.delete_ikepolicy(policy_id)
    # A falsy backend response is normalized to True (deletion succeeded).
    if result:
        return result
    return True
def cmd_slow_requests(self):
    """List all requests that took a certain amount of time to be
    processed.

    .. warning::
       By now hardcoded to 1 second (1000 milliseconds), improve the
       command line interface to allow to send parameters to each command
       or globally.
    """
    threshold_ms = 1000
    return [
        entry.time_wait_response
        for entry in self._valid_lines
        if entry.time_wait_response > threshold_ms
    ]
def from_terms_dict(terms_dict):
    """For internal use: build an Expr from a {key: coefficient} mapping,
    dropping terms whose coefficient is falsy (e.g. zero)."""
    terms = [Term(key, coeff) for key, coeff in terms_dict.items() if coeff]
    return Expr(tuple(terms))
def to_instants_dataframe(self, sql_ctx):
    """Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.

    This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
    is a key from one of the rows in the TimeSeriesRDD.

    :param sql_ctx: The pyspark SQLContext used to build the resulting DataFrame.
    """
    # Use the underlying Scala SQLContext of the provided Python SQLContext.
    ssql_ctx = sql_ctx._ssql_ctx
    # NOTE(review): -1 presumably means "use default partitioning" on the
    # JVM side -- confirm against the Scala toInstantsDataFrame signature.
    jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
    return DataFrame(jdf, sql_ctx)
def rebase(self, yaml_dict):
    '''Use yaml_dict as self's new base and update with existing --
    reverse of update.'''
    # Merge our own entries on top of a copy of the new base, then
    # replace our contents with the merged result.
    merged = yaml_dict.clone()
    merged.update(self)
    self.clear()
    self.update(merged)
def save(fname, d, link_copy=True, raiseError=False):
    """Persist *d* to an npz, h5 or npy file chosen by *fname*'s extension.

    link_copy is used by hdf5 saving only; it allows creating links for
    identical arrays (saving space).
    """
    fname = pathlib.Path(fname)
    # make sure the object is dict (recursively); this allows reading it
    # without the DataStorage module
    d = toDict(d, recursive=True)
    d['filename'] = str(fname)
    extension = fname.suffix
    log.info("Saving storage file %s" % fname)
    # Dispatch on extension; lambdas defer the actual write until selected.
    writers = {
        ".npz": lambda: dictToNpz(fname, d),
        ".h5": lambda: dictToH5(fname, d, link_copy=link_copy),
        ".npy": lambda: dictToNpy(fname, d),
    }
    try:
        writer = writers.get(extension)
        if writer is None:
            raise ValueError("Extension must be h5, npy or npz, it was %s" % extension)
        return writer()
    except Exception as e:
        log.exception("Could not save %s" % fname)
        if raiseError:
            raise e
def get_networkid(vm_):
    '''Return the networkid to use, only valid for Advanced Zone'''
    networkid = config.get_cloud_config_value('networkid', vm_, __opts__)
    # Missing configuration is signalled with False rather than None.
    return networkid if networkid is not None else False
def _drop_remaining_rules(self, *rules):
    """Drops rules from the queue of the rules that still need to be
    evaluated for the currently processed field.

    If no arguments are given, the whole queue is emptied.
    """
    if not rules:
        self._remaining_rules = []
        return
    for candidate in rules:
        try:
            self._remaining_rules.remove(candidate)
        except ValueError:
            # Rule was not queued (anymore); nothing to drop.
            pass
def find(self, flags=0):
    """Looks through the text document based on the current criteria. The
    inputed flags will be merged with the generated search flags.

    :param flags | <QTextDocument.FindFlag>

    :return <bool> | success
    """
    # check against the web and text views: with neither linked, mark the
    # search box red and bail out.
    if (not (self._textEdit or self._webView)):
        fg = QColor('darkRed')
        bg = QColor('red').lighter(180)
        palette = self.palette()
        palette.setColor(palette.Text, fg)
        palette.setColor(palette.Base, bg)
        self._searchEdit.setPalette(palette)
        self._searchEdit.setToolTip('No Text Edit is linked.')
        return False
    # Merge the UI checkbox state into the caller-supplied flags.
    if (self._caseSensitiveCheckbox.isChecked()):
        flags |= QTextDocument.FindCaseSensitively
    # Whole-word search is only supported on the text edit, not the web view.
    if (self._textEdit and self._wholeWordsCheckbox.isChecked()):
        flags |= QTextDocument.FindWholeWords
    terms = self._searchEdit.text()
    # A changed search term restarts the search from a fresh (null) cursor.
    if (terms != self._lastText):
        self._lastCursor = QTextCursor()
    if (self._regexCheckbox.isChecked()):
        terms = QRegExp(terms)
    palette = self.palette()
    # search on a text edit
    if (self._textEdit):
        cursor = self._textEdit.document().find(terms, self._lastCursor, QTextDocument.FindFlags(flags))
        found = not cursor.isNull()
        self._lastCursor = cursor
        self._textEdit.setTextCursor(cursor)
    elif (QWebPage):
        # Web view search always wraps around the document.
        flags = QWebPage.FindFlags(flags)
        flags |= QWebPage.FindWrapsAroundDocument
        found = self._webView.findText(terms, flags)
    # Remember the raw text so the next call can detect a term change.
    self._lastText = self._searchEdit.text()
    # Reset the edit colors on success or empty terms; highlight red on miss.
    if (not terms or found):
        fg = palette.color(palette.Text)
        bg = palette.color(palette.Base)
    else:
        fg = QColor('darkRed')
        bg = QColor('red').lighter(180)
    palette.setColor(palette.Text, fg)
    palette.setColor(palette.Base, bg)
    self._searchEdit.setPalette(palette)
    return found
def shutdown(self):
    """Shutdown the accept loop and stop running payloads"""
    # Signal the accept loop to exit...
    self._must_shutdown = True
    # ...block until it acknowledges the shutdown...
    self._is_shutdown.wait()
    # ...then stop the runner that executes the payloads.
    self._meta_runner.stop()
def run_length_decode(in_array):
    """A function to run length decode an int array.

    :param in_array: the input array of integers, alternating
        (value, count) pairs
    :return the decoded array (numpy int32)
    """
    decoded = []
    # zip an iterator with itself to walk the array in (value, count) pairs;
    # a trailing unpaired element is ignored, as before.
    pair_source = iter(in_array.tolist())
    for value, count in zip(pair_source, pair_source):
        decoded.extend([value] * int(count))
    return numpy.asarray(decoded, dtype=numpy.int32)
async def reload_modules(self, pathlist):
    """Reload modules with a full path in the pathlist.

    Each path must be ``package.ClassName``. Currently-loaded modules are
    unloaded first, their packages are re-imported, stale object references
    are patched to the re-imported classes, and the previously loaded
    modules are started again. Raises ModuleLoadException listing any
    import failures at the end.
    """
    loadedModules = []
    failures = []
    # Collect the modules that are currently loaded so we can restart them.
    for path in pathlist:
        p, module = findModule(path, False)
        if module is not None and hasattr(module, '_instance') and module._instance.state != ModuleLoadStateChanged.UNLOADED:
            loadedModules.append(module)
    # Unload all modules
    ums = [ModuleLoadStateChanged.createMatcher(m, ModuleLoadStateChanged.UNLOADED) for m in loadedModules]
    for m in loadedModules:
        # Only unload the module itself, not its dependencies, since we will restart the module soon enough
        self.subroutine(self.unloadmodule(m, True), False)
    await self.wait_for_all(*ums)
    # Group modules by package
    grouped = {}
    for path in pathlist:
        dotpos = path.rfind('.')
        if dotpos == -1:
            raise ModuleLoadException('Must specify module with full path, including package name')
        package = path[:dotpos]
        classname = path[dotpos + 1:]
        mlist = grouped.setdefault(package, [])
        p, module = findModule(path, False)
        mlist.append((classname, module))
    for package, mlist in grouped.items():
        # Reload each package only once
        try:
            p = sys.modules[package]
            # Remove cache to ensure a clean import from source file
            removeCache(p)
            p = reload(p)
        except KeyError:
            # Package not imported yet: import it fresh instead of reloading.
            try:
                p = __import__(package, fromlist=[m[0] for m in mlist])
            except Exception:
                self._logger.warning('Failed to import a package: %r, resume others', package, exc_info=True)
                failures.append('Failed to import: ' + package)
                continue
        except Exception:
            self._logger.warning('Failed to import a package: %r, resume others', package, exc_info=True)
            failures.append('Failed to import: ' + package)
            continue
        for cn, module in mlist:
            try:
                module2 = getattr(p, cn)
            except AttributeError:
                self._logger.warning('Cannot find module %r in package %r, resume others', package, cn)
                failures.append('Failed to import: ' + package + '.' + cn)
                continue
            if module is not None and module is not module2:
                # Update the references: patch every place that still points
                # at the old class object to the re-imported one.
                try:
                    lpos = loadedModules.index(module)
                    loaded = True
                except Exception:
                    loaded = False
                for d in module.depends:
                    # The new reference is automatically added on import, only remove the old reference
                    d.referencedBy.remove(module)
                    if loaded and hasattr(d, '_instance'):
                        try:
                            d._instance.dependedBy.remove(module)
                            d._instance.dependedBy.add(module2)
                        except ValueError:
                            pass
                if hasattr(module, 'referencedBy'):
                    for d in module.referencedBy:
                        pos = d.depends.index(module)
                        d.depends[pos] = module2
                        if not hasattr(module2, 'referencedBy'):
                            module2.referencedBy = []
                        module2.referencedBy.append(d)
                if loaded:
                    # Restart the new class in place of the old one below.
                    loadedModules[lpos] = module2
    # Start the unloaded modules again
    for m in loadedModules:
        self.subroutine(self.loadmodule(m))
    if failures:
        raise ModuleLoadException('Following errors occurred during reloading, check log for more details:\n' + '\n'.join(failures))
def _extract_progress(self, text):
    '''Finds progress information in the text using the
    user-supplied regex and calculation instructions.

    :param text: raw bytes output to scan; decoded with self.encoding.
    :return: the result of self._calculate_progress applied to the regex
        match, threaded through the monadic helpers.
    '''
    # monad-ish dispatch to avoid the if/else soup
    # NOTE(review): `unit` and `bind` look like Maybe-style combinators
    # (bind short-circuits on an empty value) -- confirm their definitions.
    find = partial(re.search, string=text.strip().decode(self.encoding))
    regex = unit(self.progress_regex)
    match = bind(regex, find)
    result = bind(match, self._calculate_progress)
    return result
def MakeRequest(http, http_request, retries=7, max_retry_wait=60,
                redirections=5,
                retry_func=HandleExceptionsAndRebuildHttpConnections,
                check_response_func=CheckResponse):
    """Send http_request via the given http, performing error/retry handling.

    Args:
      http: An httplib2.Http instance, or a http multiplexer that delegates
          to an underlying http, for example, HTTPMultiplexer.
      http_request: A Request to send.
      retries: (int, default 7) Number of retries to attempt on retryable
          replies (such as 429 or 5XX).
      max_retry_wait: (int, default 60) Maximum number of seconds to wait
          when retrying.
      redirections: (int, default 5) Number of redirects to follow.
      retry_func: Function to handle retries on exceptions. Argument is an
          ExceptionRetryArgs tuple.
      check_response_func: Function to validate the HTTP response.
          Arguments are (Response, response content, url).

    Raises:
      InvalidDataFromServerError: if there is no response after retries.

    Returns:
      A Response object.
    """
    attempt = 0
    started = time.time()
    while True:
        try:
            return _MakeRequestNoRetry(
                http, http_request, redirections=redirections,
                check_response_func=check_response_func)
        # retry_func will consume the exception types it handles and raise.
        # pylint: disable=broad-except
        except Exception as e:
            attempt += 1
            if attempt >= retries:
                raise
            elapsed = time.time() - started
            retry_func(ExceptionRetryArgs(http, http_request, e, attempt,
                                          max_retry_wait, elapsed))
def fetch_zip(url, output_path, feature_type, progress_dialog=None):
    """Download zip containing shp file and write to output_path.

    .. versionadded:: 3.2

    :param url: URL of the zip bundle.
    :type url: str

    :param output_path: Path of output file,
    :type output_path: str

    :param feature_type: What kind of features should be downloaded.
        Currently 'buildings', 'building-points' or 'roads' are supported.
    :type feature_type: str

    :param progress_dialog: A progress dialog.
    :type progress_dialog: QProgressDialog

    :raises: ImportDialogError - when network error occurred
    """
    LOGGER.debug('Downloading file from URL: %s' % url)
    LOGGER.debug('Downloading to: %s' % output_path)
    if progress_dialog:
        progress_dialog.show()
        # Infinite progress bar when the server is fetching data.
        # The progress bar will be updated with the file size later.
        progress_dialog.setMaximum(0)
        progress_dialog.setMinimum(0)
        progress_dialog.setValue(0)
        # Get a pretty label from feature_type, but not translatable
        label_feature_type = feature_type.replace('-', ' ')
        label_text = tr('Fetching %s' % label_feature_type)
        progress_dialog.setLabelText(label_text)
    # Download Process
    downloader = FileDownloader(url, output_path, progress_dialog)
    try:
        result = downloader.download()
    except IOError:
        # Re-raise the original IOError unchanged: the previous
        # `raise IOError(ex)` wrapped the exception, discarding its
        # original arguments (e.g. errno/filename) and traceback.
        raise
    if result[0] is not True:
        _, error_message = result
        if result[0] == QNetworkReply.OperationCanceledError:
            raise CanceledImportDialogError(error_message)
        else:
            raise DownloadError(error_message)
def _find_spellcheckable_chunks(contents, comment_system):
    """Given some contents for a file, find chunks that can be spellchecked.

    This applies the following rules:
    1. If the comment system comments individual lines, that whole line
       can be spellchecked from the point of the comment
    2. If a comment-start marker or triple quote is found, keep going
       until a comment end marker or matching triple quote is found.
    3. In both cases, ignore anything in triple backticks.
    """
    # Parser state machine; transitions depend on the comment syntax.
    state = InTextParser()
    comment_system_transitions = CommentSystemTransitions(comment_system)
    chunks = []
    for line_index, line in enumerate(contents):
        column = 0
        line_len = len(line)
        escape_next = False
        # We hit a new line. If we were waiting until the end of the line
        # then add a new chunk in here
        (state, column_delta, chunk_info) = state.get_transition(line, line_index, 0, False, comment_system_transitions)
        # NOTE(review): on the first line this indexes contents[-1] via
        # line_index - 1 -- presumably chunk_info is None then, so the
        # append is a no-op; confirm in _maybe_append_chunk.
        _maybe_append_chunk(chunk_info, line_index - 1, len(contents[line_index - 1]), contents, chunks)
        column += column_delta
        while column < line_len:
            # Check if the next character should be considered as escaped. That
            # only happens if we are not escaped and the current character is
            # a backslash.
            is_escaped = escape_next
            escape_next = not is_escaped and line[column] == "\\"
            (state, column_delta, chunk_info) = state.get_transition(line, line_index, column, is_escaped, comment_system_transitions)
            _maybe_append_chunk(chunk_info, line_index, column, contents, chunks)
            column += column_delta
    # Flush any chunk still open at end-of-file.
    last_line_index = len(contents) - 1
    (state, column_delta, chunk_info) = state.get_transition(contents[-1], last_line_index, len(contents[-1]), False, comment_system_transitions, eof=True)
    _maybe_append_chunk(chunk_info, last_line_index, len(contents[last_line_index]), contents, chunks)
    return chunks
def output_detailed(paragraphs, fp=sys.stdout):
    """Same as output_default, but only <p> tags are used and the following
    attributes are added: class, cfclass and heading.

    :param paragraphs: iterable of paragraph objects exposing class_type,
        cf_class, heading, xpath and text attributes.
    :param fp: writable file-like object the markup is printed to.
    """
    # html.escape(..., quote=False) matches the behaviour of cgi.escape()
    # (escapes &, < and > but not quotes); cgi.escape was deprecated in
    # Python 3.2 and removed entirely in Python 3.8.
    import html
    for paragraph in paragraphs:
        output = '<p class="%s" cfclass="%s" heading="%i" xpath="%s"> %s' % (
            paragraph.class_type,
            paragraph.cf_class,
            int(paragraph.heading),
            paragraph.xpath,
            html.escape(paragraph.text, quote=False),
        )
        print(output, file=fp)
def get_nameserver_detail_output_show_nameserver_nameserver_portid(self, **kwargs):
    """Auto Generated Code"""
    # Build the RPC payload tree: get_nameserver_detail/output/
    # show-nameserver/nameserver-portid.
    root = ET.Element("get_nameserver_detail")
    output = ET.SubElement(root, "output")
    show_nameserver = ET.SubElement(output, "show-nameserver")
    portid = ET.SubElement(show_nameserver, "nameserver-portid")
    portid.text = kwargs.pop('nameserver_portid')
    # A caller-supplied callback overrides the instance default.
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
def dbmin10years(self, value=None):
    """Corresponds to IDD Field `dbmin10years`

    10-year return period values for minimum extreme dry-bulb temperature

    Args:
        value (float): value for IDD Field `dbmin10years`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None marks a missing value and is stored as-is.
        self._dbmin10years = None
        return
    try:
        self._dbmin10years = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `dbmin10years`'.format(value))
def apply_T1(word):
    '''There is a syllable boundary in front of every CV-sequence.'''
    # NOTE: dict.iteritems() below makes this function Python 2 only.
    # NOTE(review): keys appear to be 1-based segment positions (see the
    # k == 1 check) -- confirm in _split_consonants_and_vowels.
    WORD = _split_consonants_and_vowels(word)
    for k, v in WORD.iteritems():
        if k == 1 and is_consonantal_onset(v):
            # Word-initial consonantal onset: boundary before the whole segment.
            WORD[k] = '.' + v
        elif is_consonant(v[0]) and WORD.get(k + 1, 0):
            # Consonant-initial segment with a following segment: boundary
            # before the segment's final character.
            WORD[k] = v[:-1] + '.' + v[-1]
    word = _compile_dict_into_word(WORD)
    return word
def main():
    """Provide the program's entry point when directly executed."""
    # Exactly one positional argument (the username) is required.
    if len(sys.argv) != 2:
        print("Usage: {} USERNAME".format(sys.argv[0]))
        return 1
    requestor = prawcore.Requestor("prawcore_read_only_example")
    authenticator = prawcore.TrustedAuthenticator(
        requestor,
        os.environ["PRAWCORE_CLIENT_ID"],
        os.environ["PRAWCORE_CLIENT_SECRET"],
    )
    authorizer = prawcore.ReadOnlyAuthorizer(authenticator)
    authorizer.refresh()
    username = sys.argv[1]
    with prawcore.session(authorizer) as session:
        payload = session.request("GET", "/api/v1/user/{}/trophies".format(username))
        for trophy in payload["data"]["trophies"]:
            info = trophy["data"]
            suffix = " ({})".format(info["description"]) if info["description"] else ""
            print(info["name"] + suffix)
    return 0
def flush(self):
    """Send messages by e-mail.

    The sending of messages is suppressed if a trigger severity
    level has been set and none of the received messages was at
    that level or above. In that case the messages are
    discarded. Empty e-mails are discarded.
    """
    # Do not send empty e-mails, and do not send anything untriggered.
    if not self.triggered or not self.buffer:
        return
    pieces = []
    for record in self.buffer:
        # A record may carry its own terminator; None means "no terminator".
        terminator = getattr(record, 'terminator', '\n')
        formatted = self.format(record)
        if terminator is None:
            pieces.append(formatted)
        else:
            pieces.append(formatted + terminator)
    msg = MIMEText(''.join(pieces))
    msg['From'] = self.fromAddr
    msg['To'] = self.toAddr
    msg['Subject'] = self.subject
    smtp = smtplib.SMTP('localhost')
    smtp.sendmail(self.fromAddr, [self.toAddr], msg.as_string())
    smtp.quit()
    self.buffer = []
def write_to_file(self, f):
    """Write configuration to a file-like object.

    :param f: writable file-like object receiving git-config style output.
    """
    # NOTE: dict.iteritems() makes this method Python 2 only.
    for section, values in self._values.iteritems():
        # Section keys are (section,) or (section, subsection) tuples;
        # a 1-tuple raises ValueError on the 2-way unpack below.
        try:
            section_name, subsection_name = section
        except ValueError:
            (section_name,) = section
            subsection_name = None
        if subsection_name is None:
            f.write("[%s]\n" % section_name)
        else:
            f.write("[%s \"%s\"]\n" % (section_name, subsection_name))
        for key, value in values.iteritems():
            f.write("%s = %s\n" % (key, _escape_value(value)))
def _inject_format_spec(self, value, format_spec):
    """value: '{x}', format_spec: 'f' -> '{x:f}'"""
    # Build the replacement pieces in the same string type as the input.
    text_type = type(value)
    head = value[:-1]
    return head + text_type(u':') + format_spec + text_type(u'}')
def create_index(self, table_name, attr_name):
    """Create an index on a table attribute.

    :param str table_name:
        Table name that contains the attribute to be indexed.
    :param str attr_name: Attribute name to create index.
    :raises IOError: |raises_write_permission|
    :raises simplesqlite.NullDatabaseConnectionError:
        |raises_check_connection|
    :raises simplesqlite.TableNotFoundError:
        |raises_verify_table_existence|
    """
    self.verify_table_existence(table_name)
    self.validate_access_permission(["w", "a"])
    query = "CREATE INDEX IF NOT EXISTS {index:s} ON {table}({attr})".format(
        index=make_index_name(table_name, attr_name),
        table=Table(table_name),
        attr=Attr(attr_name),
    )
    logger.debug(query)
    self.execute_query(query, logging.getLogger().findCaller())
def get_to_run_checks(self, do_checks=False, do_actions=False, poller_tags=None,
                      reactionner_tags=None, worker_name='none',
                      module_types=None):
    # pylint: disable=too-many-branches
    """Get actions/checks for reactionner/poller

    Called by the poller to get checks (do_checks=True) and
    by the reactionner (do_actions=True) to get actions

    :param do_checks: do we get checks?
    :type do_checks: bool
    :param do_actions: do we get actions?
    :type do_actions: bool
    :param poller_tags: poller tags to filter
    :type poller_tags: list
    :param reactionner_tags: reactionner tags to filter
    :type reactionner_tags: list
    :param worker_name: worker name to fill check/action (to remember it)
    :type worker_name: str
    :param module_types: module type to filter
    :type module_types: list
    :return: Check/Action list with poller/reactionner tags matching and module type matching
    :rtype: list
    """
    res = []
    now = time.time()
    # Untagged items carry the literal tag 'None', hence these defaults.
    if poller_tags is None:
        poller_tags = ['None']
    if reactionner_tags is None:
        reactionner_tags = ['None']
    if module_types is None:
        module_types = ['fork']
    if not isinstance(module_types, list):
        module_types = [module_types]
    # If a poller wants its checks
    if do_checks:
        if self.checks:
            logger.debug("I have %d prepared checks", len(self.checks))
        # Iterate over a snapshot because check statuses are mutated below.
        for check in list(self.checks.values()):
            logger.debug("Check: %s (%s / %s)", check.uuid, check.poller_tag, check.module_type)
            if check.internal:
                # Do not care about Alignak internally executed checks
                continue
            # If the command is untagged, and the poller too, or if both are tagged
            # with same name, go for it
            # if do_check, call for poller, and so poller_tags by default is ['None']
            # by default poller_tag is 'None' and poller_tags is ['None']
            # and same for module_type, the default is the 'fork' type
            if check.poller_tag not in poller_tags:
                logger.debug(" -> poller tag do not match")
                continue
            if check.module_type not in module_types:
                logger.debug(" -> module type do not match")
                continue
            logger.debug(" -> : %s %s (%s)",
                         'worker' if not check.internal else 'internal',
                         check.status,
                         'now' if check.is_launchable(now) else 'not yet')
            if check._is_orphan and check.status == ACT_STATUS_SCHEDULED and os.getenv('ALIGNAK_LOG_CHECKS', None):
                logger.info("--ALC-- orphan check: %s -> : %s %s (%s)",
                            check,
                            'worker' if not check.internal else 'internal',
                            check.status,
                            'now' if check.is_launchable(now) else 'not yet')
            # must be ok to launch, and not an internal one (business rules based)
            if check.status == ACT_STATUS_SCHEDULED and check.is_launchable(now):
                logger.debug("Check to run: %s", check)
                # Hand the check to this worker and remember who took it.
                check.status = ACT_STATUS_POLLED
                check.my_worker = worker_name
                res.append(check)
                # Stats
                self.nb_checks_launched += 1
                if 'ALIGNAK_LOG_ACTIONS' in os.environ:
                    if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
                        logger.warning("Check to run: %s", check)
                    else:
                        logger.info("Check to run: %s", check)
        if res:
            logger.debug("-> %d checks to start now", len(res))
        else:
            logger.debug("-> no checks to start now")
    # If a reactionner wants its actions
    if do_actions:
        if self.actions:
            logger.debug("I have %d prepared actions", len(self.actions))
        for action in list(self.actions.values()):
            logger.debug("Action: %s (%s / %s)", action.uuid, action.reactionner_tag, action.module_type)
            if action.internal:
                # Do not care about Alignak internally executed checks
                continue
            # NOTE(review): a notification without a contact is a "master"
            # notification and is skipped here -- presumably it is expanded
            # into child notifications elsewhere; confirm in the scheduler.
            is_master = (action.is_a == 'notification' and not action.contact)
            if is_master:
                continue
            # if do_action, call the reactionner,
            # and so reactionner_tags by default is ['None']
            # by default reactionner_tag is 'None' and reactionner_tags is ['None'] too
            # So if not the good one, loop for next :)
            if action.reactionner_tag not in reactionner_tags:
                logger.debug(" -> reactionner tag do not match")
                continue
            # same for module_type
            if action.module_type not in module_types:
                logger.debug(" -> module type do not match")
                continue
            # And now look if we can launch or not :)
            logger.debug(" -> : worker %s (%s)",
                         action.status,
                         'now' if action.is_launchable(now) else 'not yet')
            if action._is_orphan and action.status == ACT_STATUS_SCHEDULED and os.getenv('ALIGNAK_LOG_CHECKS', None):
                logger.info("--ALC-- orphan action: %s", action)
            if action.status == ACT_STATUS_SCHEDULED and action.is_launchable(now):
                # This is for child notifications and eventhandlers
                action.status = ACT_STATUS_POLLED
                action.my_worker = worker_name
                res.append(action)
                # Stats
                self.nb_actions_launched += 1
                if 'ALIGNAK_LOG_ACTIONS' in os.environ:
                    if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
                        logger.warning("Action to run: %s", action)
                    else:
                        logger.info("Action to run: %s", action)
        if res:
            logger.debug("-> %d actions to start now", len(res))
        else:
            logger.debug("-> no actions to start now")
    return res
def metalarchives(song):
    """Returns the lyrics found in MetalArchives for the specified mp3 file or an
    empty string if not found."""
    artist = normalize(song.artist)
    title = normalize(song.title)
    search_url = (
        'https://www.metal-archives.com/search/ajax-advanced/searching/songs'
        f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1'
    )
    response = get_url(search_url, parser='json')
    if not response:
        return ''
    # Every result cell may embed a lyricsLink_<id> marker; collect the ids.
    song_id_re = re.compile(r'lyricsLink_([0-9]*)')
    matches = {re.search(song_id_re, cell) for row in response['aaData'] for cell in row}
    matches.discard(None)
    if not matches:
        return ''
    lyrics_url = 'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}'
    for song_id in (m.group(1) for m in matches):
        lyrics = get_url(lyrics_url.format(song_id), parser='html')
        lyrics = lyrics.get_text().strip()
        if not re.search('lyrics not available', lyrics):
            return lyrics
    return ''
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
    '''Shared code for use by old-style and new-style classes:
    write a representation to file-like object "out"'''
    out.write('<')
    out.write(name)
    # Write dictionary of instance attributes:
    if isinstance(pyop_attrdict, PyDictObjectPtr):
        out.write('(')
        first = True
        # NOTE(review): iteritems() here is presumably PyDictObjectPtr's
        # own proxy API, not the Python 2 dict method -- confirm.
        for pyop_arg, pyop_val in pyop_attrdict.iteritems():
            # Comma-separate every attribute after the first.
            if not first:
                out.write(', ')
            first = False
            out.write(pyop_arg.proxyval(visited))
            out.write('=')
            pyop_val.write_repr(out, visited)
        out.write(')')
    out.write(' at remote 0x%x>' % address)
def add_minutes(self, datetimestr, n):
    """Return the time that is n minutes after the given time.

    :param datetimestr: a datetime object or a datetime str
    :param n: number of minutes, value can be negative
    """
    moment = self.parse_datetime(datetimestr)
    # 60 seconds per minute; n may be fractional or negative.
    return moment + timedelta(seconds=60 * n)
def parse_command_line(self, args: List[str] = None, final: bool = True) -> List[str]:
    """Parses all options given on the command line (defaults to
    `sys.argv`).

    Options look like ``--option=value`` and are parsed according
    to their ``type``. For boolean options, ``--option`` is
    equivalent to ``--option=true``

    If the option has ``multiple=True``, comma-separated values
    are accepted. For multi-value integer options, the syntax
    ``x:y`` is also accepted and equivalent to ``range(x, y)``.

    Note that ``args[0]`` is ignored since it is the program name
    in `sys.argv`.

    We return a list of all arguments that are not parsed as options.

    If ``final`` is ``False``, parse callbacks will not be run.
    This is useful for applications that wish to combine configurations
    from multiple sources.
    """
    if args is None:
        args = sys.argv
    remaining = []  # type: List[str]
    for i in range(1, len(args)):
        # All things after the last option are command line arguments
        if not args[i].startswith("-"):
            remaining = args[i:]
            break
        # A bare "--" ends option parsing; everything after it is an argument.
        if args[i] == "--":
            remaining = args[i + 1:]
            break
        # Strip leading dashes, then split "name=value" (equals may be absent).
        arg = args[i].lstrip("-")
        name, equals, value = arg.partition("=")
        name = self._normalize_name(name)
        if name not in self._options:
            self.print_help()
            raise Error("Unrecognized command line option: %r" % name)
        option = self._options[name]
        if not equals:
            # "--flag" without a value is only legal for booleans.
            if option.type == bool:
                value = "true"
            else:
                raise Error("Option %r requires a value" % name)
        option.parse(value)
    if final:
        self.run_parse_callbacks()
    return remaining
def command(state, args):
    """Delete priority rule."""
    # Skip args[0] (the command name itself); the module-level parser
    # supplies the `id` argument.
    args = parser.parse_args(args[1:])
    query.files.delete_priority_rule(state.db, args.id)
    # NOTE(review): deleting the attribute presumably forces the file
    # picker to be rebuilt with the updated rules -- confirm in `state`.
    del state.file_picker
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True, as_path=False):
    r"""Return full path to the user-specific cache dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the name of the
        appauthor or distributing body for this application. Typically
        it is the owning company name. This falls back to appname. You may
        pass False to disable it.
    "version" is an optional version path element to append to the
        path. You might want to use this if you want multiple versions
        of your app to be able to run independently. If used, this
        would typically be "<major>.<minor>".
        Only applied when appname is present.
    "opinion" (boolean) can be False to disable the appending of
        "Cache" to the base app data dir for Windows.
    "as_path" (boolean, default False) can be set to True to get pathlib.Path
        objects instead of plain strings. On python 2.7 you need to
        "pip install pathlib2".

    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
    This can be disabled with the `opinion=False` option.
    """
    if system == "win32":
        # Windows: <LocalAppData>[\<author>]\<appname>[\Cache]
        author = appname if appauthor is None else appauthor
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            if author is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, author, appname)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == 'darwin' and not os.getenv('XDG_CACHE_HOME'):
        # macOS without an XDG override uses the native Caches directory.
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for Unix-likes (and macOS with XDG_CACHE_HOME set).
        path = os.getenv('XDG_CACHE_HOME') or os.path.expanduser('~/.cache')
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return Path(path) if as_path else path
def feature_info(self):
    """Return information about the features available for the CPC of this
    partition.

    Authorization requirements:

    * Object-access permission to this partition.

    Returns:
      :term:`iterable`:
        An iterable where each item represents one feature that is
        available for the CPC of this partition. Each item is a
        dictionary with the following items:

        * `name` (:term:`unicode string`): Name of the feature.
        * `description` (:term:`unicode string`): Short description of
          the feature.
        * `state` (bool): Enablement state of the feature (`True` if
          enabled, `False` if disabled).

    Raises:
      :exc:`ValueError`: Features are not supported on the HMC.
      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    features = self.prop('available-features-list', None)
    if features is None:
        raise ValueError(
            "Firmware features are not supported on CPC %s"
            % self.manager.cpc.name)
    return features
def make_confidence_report_spsa(filepath, train_start=TRAIN_START,
                                train_end=TRAIN_END, test_start=TEST_START,
                                test_end=TEST_END, batch_size=BATCH_SIZE,
                                which_set=WHICH_SET, report_path=REPORT_PATH,
                                nb_iter=NB_ITER_SPSA,
                                spsa_samples=SPSA_SAMPLES,
                                spsa_iters=SPSA.DEFAULT_SPSA_ITERS):
    """Load a saved model, gather its predictions, and save a confidence report.

    This function works by running a single MaxConfidence attack on each
    example, using SPSA as the underlying optimizer.
    This is not intended to be a strong generic attack.
    It is intended to be a test to uncover gradient masking.

    :param filepath: path to model to evaluate (must end in '.joblib')
    :param train_start: index of first training set example to use
    :param train_end: index of last training set example to use
    :param test_start: index of first test set example to use
    :param test_end: index of last test set example to use
    :param batch_size: size of evaluation batches
    :param which_set: 'train' or 'test'
    :param report_path: where to save the report; derived from `filepath`
        when None
    :param nb_iter: Number of iterations of PGD to run per class
    :param spsa_samples: Number of samples for SPSA
    :param spsa_iters: Number of optimizer iterations per SPSA sample
    """
    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)
    # Set logging level to see debug information
    set_log_level(logging.INFO)
    # Create TF session
    sess = tf.Session()
    if report_path is None:
        # Derive the report path next to the model file.
        assert filepath.endswith('.joblib')
        report_path = filepath[:-len('.joblib')] + "_spsa_report.joblib"
    with sess.as_default():
        # load() builds TF variables, so it must run inside the session.
        model = load(filepath)
    assert len(model.get_params()) > 0
    factory = model.dataset_factory
    # Narrow the dataset factory to the requested example ranges.
    factory.kwargs['train_start'] = train_start
    factory.kwargs['train_end'] = train_end
    factory.kwargs['test_start'] = test_start
    factory.kwargs['test_end'] = test_end
    dataset = factory()
    # Recover the data domain so the attack's epsilon and clip bounds can
    # be expressed in the dataset's own value scale.
    center = dataset.kwargs['center']
    center = np.float32(center)
    max_val = dataset.kwargs['max_val']
    max_val = np.float32(max_val)
    value_range = max_val * (1. + center)
    min_value = np.float32(0. - center * max_val)
    # Standard per-dataset eps, as a fraction of the [0, 1] value range.
    if 'CIFAR' in str(factory.cls):
        base_eps = 8. / 255.
    elif 'MNIST' in str(factory.cls):
        base_eps = .3
    else:
        raise NotImplementedError(str(factory.cls))
    eps = np.float32(base_eps * value_range)
    clip_min = min_value
    clip_max = max_val
    x_data, y_data = dataset.get_set(which_set)
    nb_classes = dataset.NB_CLASSES
    # Run the MaxConfidence-via-SPSA recipe; it writes the report to
    # report_path as a side effect.
    spsa_max_confidence_recipe(sess, model, x_data, y_data, nb_classes, eps,
                               clip_min, clip_max, nb_iter, report_path,
                               spsa_samples=spsa_samples,
                               spsa_iters=spsa_iters,
                               eval_batch_size=batch_size)
def add_edges_from(self, ebunch, **kwargs):
    """Add all the edges in ebunch.

    If nodes referred to in the ebunch are not already present, they
    will be automatically added. Node names can be any hashable python
    object.

    Parameters
    ----------
    ebunch : list, array-like
        List of edges to add. Each edge must be of the form of
        ((start, time_slice), (end, time_slice)).

    Examples
    --------
    >>> from pgmpy.models import DynamicBayesianNetwork as DBN
    >>> dbn = DBN()
    >>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0))])
    >>> dbn.nodes()
    ['G', 'I', 'D']
    >>> dbn.edges()
    [(('D', 1), ('G', 1)),
     (('I', 1), ('G', 1)),
     (('D', 0), ('G', 0)),
     (('I', 0), ('G', 0))]
    """
    for edge in ebunch:
        start, end = edge[0], edge[1]
        self.add_edge(start, end)
def truncate(self, branch, turn, tick):
    """Delete all data after (not on) a specific tick.

    :param branch: name of the branch whose data should be truncated
    :param turn: the turn containing the cutoff point
    :param tick: the tick within ``turn``; data strictly after it is dropped
    """
    # Caches unpacked from a precomputed tuple. NOTE(review): the names
    # `keys` and `branches` are reused as loop variables further down,
    # shadowing these unpacked caches part-way through — confirm whether
    # that shadowing is intentional.
    parents, branches, keys, settings, presettings, keycache, send = self._truncate_stuff

    def truncate_branhc(branhc):
        # Truncate one branch cache (a turn -> tick-window mapping).
        if turn in branhc:
            trn = branhc[turn]
            trn.truncate(tick)     # drop ticks strictly after `tick`
            branhc.truncate(turn)  # drop turns strictly after `turn`
            if not trn:
                # the cutoff turn became empty; remove it entirely
                del branhc[turn]
        else:
            branhc.truncate(turn)

    # parents cache: parent -> entity -> key -> branch -> windowed data
    for entities in parents.values():
        for keys in entities.values():
            for branches in keys.values():
                if branch not in branches:
                    continue
                truncate_branhc(branches[branch])
    # NOTE(review): after the loop above, `branches` (and `keys` below) are
    # the leftover loop variables, not the caches unpacked from
    # _truncate_stuff — this looks like accidental shadowing; verify against
    # the upstream implementation before relying on it.
    for branches in branches.values():
        if branch not in branches:
            continue
        truncate_branhc(branches[branch])
    for keys in keys.values():
        for branches in keys.values():
            if branch not in branches:
                continue
            truncate_branhc(branches[branch])
    truncate_branhc(settings[branch])
    truncate_branhc(presettings[branch])
    # Truncation invalidates the shallow lookup cache; reset it.
    self.shallowest = OrderedDict()
    # keycache keys are tuples whose last element is the branch name.
    for entity_branch in keycache:
        if entity_branch[-1] == branch:
            truncate_branhc(keycache[entity_branch])
    # Notify subscribers that this branch was truncated.
    send(self, branch=branch, turn=turn, tick=tick, action='truncate')
def _setup_edge(self, capabilities):
    """Set up a local Edge webdriver.

    :param capabilities: capabilities object
    :returns: a new local Edge driver
    """
    driver_path = self.config.get('Driver', 'edge_driver_path')
    self.logger.debug("Edge driver path given in properties: %s", driver_path)
    return webdriver.Edge(driver_path, capabilities=capabilities)
def set_path(self, path):
    '''Sets the listitem's path, remembering it on this wrapper too.'''
    self._path = path
    result = self._listitem.setPath(path)
    return result
def lv_load_areas(self):
    """Iterate over this object's LV load areas.

    Yields
    ------
    generator
        Load areas, yielded in a deterministic order (sorted by repr).
    """
    yield from sorted(self._lv_load_areas, key=repr)
def to_ufo_family_user_data(self, ufo):
    """Set family-wide user data as Glyphs does."""
    # When writing a designspace, family user data is handled elsewhere.
    if self.use_designspace:
        return
    ufo.lib[FONT_USER_DATA_KEY] = dict(self.font.userData)
def compare_cells(self, cell1, cell2):
    '''Return true if exactly equal or if equal but modified,
    otherwise return false.

    return type: BooleanPlus
    '''
    same_language = cell1["language"] == cell2["language"]
    same_input = cell1["input"] == cell2["input"]
    same_outputs = self.equaloutputs(cell1["outputs"], cell2["outputs"])
    if same_language and same_input and same_outputs:
        return BooleanPlus(True, False)
    if not self.check_modified:
        return BooleanPlus(False, False)
    # Not identical: measure how similar the sources are to decide
    # whether this counts as "equal but modified".
    source1 = u"".join(cell1['input'])
    source2 = u"".join(cell2['input'])
    similarity = Levenshtein.ratio(source1, source2)
    if similarity >= 0.65:
        return BooleanPlus(True, True)
    return BooleanPlus(False, False)
def validate_account_id(sts_client, account_id):
    """Exit if get_caller_identity doesn't match account_id."""
    caller = sts_client.get_caller_identity()
    if 'Account' not in caller:
        LOGGER.error('Error checking current account ID')
        sys.exit(1)
    if caller['Account'] != account_id:
        LOGGER.error('Current AWS account %s does not match '
                     'required account %s in Runway config.',
                     caller['Account'], account_id)
        sys.exit(1)
    LOGGER.info('Verified current AWS account matches required '
                'account id %s.', account_id)
def _check_input ( self , X ) :
"""Check the input for validity .
Ensures that the input data , X , is a 2 - dimensional matrix , and that
the second dimension of this matrix has the same dimensionality as
the weight matrix .""" | if np . ndim ( X ) == 1 :
X = np . reshape ( X , ( 1 , - 1 ) )
if X . ndim != 2 :
raise ValueError ( "Your data is not a 2D matrix. " "Actual size: {0}" . format ( X . shape ) )
if X . shape [ 1 ] != self . data_dimensionality :
raise ValueError ( "Your data size != weight dim: {0}, " "expected {1}" . format ( X . shape [ 1 ] , self . data_dimensionality ) )
return X |
def summary(self, indicator_data):
    """Return a summary value for any given indicator type."""
    # Fall back to joining the configured value fields when no explicit
    # summary is present.
    fallback = ' : '.join(
        indicator_data.get(field, '') for field in self._value_fields)
    return indicator_data.get('summary', fallback)
def equals(self, junc):
    """Test equality with another junction.

    :param junc: the junction to compare against
    :returns: True if both the left and right sides match, False otherwise
    """
    # Bug fix: the previous implementation returned False when the sides
    # *did* match (missing `not`), inverting the equality test.
    if not self.left.equals(junc.left):
        return False
    if not self.right.equals(junc.right):
        return False
    return True
def delete(self):
    '''Remove this version from the resource's history.'''
    # issue the DELETE against the version URI
    response = self.resource.repo.api.http_request('DELETE', self.uri)
    status = response.status_code
    if status == 204:
        # success: drop the version from the parent resource as well
        logger.debug('deleting previous version of resource, %s' % self.uri)
        delattr(self._current_resource.versions, self.label)
    elif status == 400:
        # HTTP 400: most recent version, which cannot be removed
        raise Exception('HTTP 400, likely most recent resource version which cannot be removed')
    else:
        raise Exception('HTTP %s, could not delete resource version: %s' % (status, self.uri))
def is_excluded(root, excludes):
    # type: (unicode, List[unicode]) -> bool
    """Check if the directory is in the exclude list.

    Note: by having trailing slashes, we avoid common prefix issues, like
    e.g. an exclude "foo" also accidentally excluding "foobar".
    """
    return any(fnmatch(root, pattern) for pattern in excludes)
def encrypt_file(self, path, output_path=None, overwrite=False, enable_verbose=True):
    """Encrypt a file using rsa.

    RSA for big file encryption is very slow. For big files, prefer
    symmetric encryption and use RSA only to encrypt the password.
    """
    # Resolve source/destination paths, honoring the overwrite flag.
    src, dst = files.process_dst_overwrite_args(
        src=path,
        dst=output_path,
        overwrite=overwrite,
        src_to_dst_func=files.get_encrpyted_path,
    )
    with open(src, "rb") as infile, open(dst, "wb") as outfile:
        encrypt_bigfile(infile, outfile, self.his_pubkey)
def generate_model_cls(config, schema, model_name, raml_resource, es_based=True):
    """Generate model class.

    Engine DB field types are determined using `type_fields` and only those
    types may be used.

    :param config: Configurator whose registry is consulted for database
        ACLs and for registered model attributes.
    :param schema: Model schema dict parsed from RAML.
    :param model_name: String that is used as new model's name.
    :param raml_resource: Instance of ramlfications.raml.ResourceNode.
    :param es_based: Boolean indicating if generated model should be a
        subclass of Elasticsearch-based document class or not.
        If True, ESBaseDocument is used; BaseDocument is used otherwise.
        Defaults to True.
    :returns: Tuple of (generated model class, auth_model flag).
    """
    from nefertari.authentication.models import AuthModelMethodsMixin
    base_cls = engine.ESBaseDocument if es_based else engine.BaseDocument
    model_name = str(model_name)
    # Use the base document's metaclass so the generated class is
    # registered/processed the same way as hand-written models.
    metaclass = type(base_cls)
    auth_model = schema.get('_auth_model', False)
    bases = []
    if config.registry.database_acls:
        # Imported lazily: nefertari_guards is an optional dependency.
        from nefertari_guards import engine as guards_engine
        bases.append(guards_engine.DocumentACLMixin)
    if auth_model:
        bases.append(AuthModelMethodsMixin)
    bases.append(base_cls)
    attrs = {
        '__tablename__': model_name.lower(),
        '_public_fields': schema.get('_public_fields') or [],
        '_auth_fields': schema.get('_auth_fields') or [],
        '_hidden_fields': schema.get('_hidden_fields') or [],
        '_nested_relationships': schema.get('_nested_relationships') or [],
    }
    if '_nesting_depth' in schema:
        attrs['_nesting_depth'] = schema.get('_nesting_depth')
    # Generate fields from properties
    properties = schema.get('properties', {})
    for field_name, props in properties.items():
        if field_name in attrs:
            # don't overwrite attributes already defined above
            continue
        db_settings = props.get('_db_settings')
        if db_settings is None:
            # property carries no DB definition; skip it
            continue
        field_kwargs = db_settings.copy()
        field_kwargs['required'] = bool(field_kwargs.get('required'))
        # Resolve callable tags (string references) into actual callables.
        for default_attr_key in ('default', 'onupdate'):
            value = field_kwargs.get(default_attr_key)
            if is_callable_tag(value):
                field_kwargs[default_attr_key] = resolve_to_callable(value)
        type_name = (field_kwargs.pop('type', 'string') or 'string').lower()
        if type_name not in type_fields:
            raise ValueError('Unknown type: {}'.format(type_name))
        field_cls = type_fields[type_name]
        if field_cls is engine.Relationship:
            # Make sure the referenced model exists before wiring it up.
            prepare_relationship(config, field_kwargs['document'], raml_resource)
        if field_cls is engine.ForeignKeyField:
            # Translate the referenced column's type name into a field type.
            key = 'ref_column_type'
            field_kwargs[key] = type_fields[field_kwargs[key]]
        if field_cls is engine.ListField:
            # Translate the item type name into a field type.
            key = 'item_type'
            field_kwargs[key] = type_fields[field_kwargs[key]]
        attrs[field_name] = field_cls(**field_kwargs)
    # Update model definition with methods and variables defined in registry
    attrs.update(registry.mget(model_name))
    # Generate new model class
    model_cls = metaclass(model_name, tuple(bases), attrs)
    setup_model_event_subscribers(config, model_cls, schema)
    setup_fields_processors(config, model_cls, schema)
    return model_cls, auth_model
def _linux_disks():
    '''Return list of disk devices and work out if they are SSD or HDD.'''
    # sysfs "rotational" flag: '0' -> solid state, '1' -> spinning disk
    classification = {'0': 'SSDs', '1': 'disks'}
    ret = {'disks': [], 'SSDs': []}
    for sysfs_path in glob.glob('/sys/block/*/queue/rotational'):
        try:
            with salt.utils.files.fopen(sysfs_path) as sysfs_fp:
                device = sysfs_path.split('/')[3]
                flag = sysfs_fp.read(1)
                bucket = classification.get(flag)
                if bucket == 'SSDs':
                    ret[bucket].append(device)
                    log.trace('Device %s reports itself as an SSD', device)
                elif bucket == 'disks':
                    ret[bucket].append(device)
                    log.trace('Device %s reports itself as an HDD', device)
                else:
                    log.trace('Unable to identify device %s as an SSD or HDD. It does '
                              'not report 0 or 1', device)
        except IOError:
            pass
    return ret
def _ParseProcessingOptions(self, options):
    """Parses the processing options.

    Args:
      options (argparse.Namespace): command line arguments.

    Raises:
      BadConfigOption: if the options are invalid.
    """
    self._single_process_mode = getattr(options, 'single_process', False)
    helper_names = [
        'process_resources', 'temporary_directory', 'workers', 'zeromq']
    # Delegate parsing of the remaining options to the registered helpers.
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=helper_names)
def get_help_html(message=None):
    """Create the HTML content for the help dialog or for external browser.

    :param message: An optional message object to display in the dialog.
    :type message: Message.Message

    :return: the help HTML content
    :rtype: str
    """
    header = html_help_header()
    if message is None:
        # No message supplied: show the default dock help.
        message = dock_help()
    return header + message.to_html() + html_footer()
def external_editor(self, filename, goto=-1):
    """Edit a file in an external editor.

    Recommended: SciTE (e.g. to go to the line where an error occurred).

    :param filename: path of the file to open
    :param goto: line number to jump to; ignored when <= 0 or when no
        goto-line option is configured for the editor
    """
    editor_path = CONF.get('internal_console', 'external_editor/path')
    goto_option = CONF.get('internal_console', 'external_editor/gotoline')
    try:
        args = [filename]
        if goto > 0 and goto_option:
            # Bug fix: the original used '%s%d'.format(goto_option, goto),
            # which is a no-op on a %-style template (no '{}' fields) and
            # passed the literal string '%s%d' to the editor. Use
            # %-interpolation instead.
            args.append('%s%d' % (goto_option, goto))
        programs.run_program(editor_path, args)
    except OSError:
        self.write_error("External editor was not found:"
                         " %s\n" % editor_path)
def _get_client_address ( self , req ) :
"""Get address from ` ` X - Forwarded - For ` ` header or use remote address .
Remote address is used if the ` ` X - Forwarded - For ` ` header is not
available . Note that this may not be safe to depend on both without
proper authorization backend .
Args :
req ( falcon . Request ) : falcon . Request object .
Returns :
str : client address .""" | try :
forwarded_for = req . get_header ( 'X-Forwarded-For' , True )
return forwarded_for . split ( ',' ) [ 0 ] . strip ( )
except ( KeyError , HTTPMissingHeader ) :
return ( req . env . get ( 'REMOTE_ADDR' ) if self . remote_address_fallback else None ) |
def unhold(name=None, pkgs=None, **kwargs):
    '''Remove specified package lock.

    root
        operate on a different root directory.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove_lock <package name>
        salt '*' pkg.remove_lock <package1>,<package2>,<package3>
        salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
    '''
    ret = {}
    root = kwargs.get('root')
    # exactly one of name/pkgs must be given
    if (not name and not pkgs) or (name and pkgs):
        raise CommandExecutionError('Name or packages must be specified.')
    elif name:
        pkgs = [name]

    locks = list_locks(root)
    try:
        pkgs = list(__salt__['pkg_resource.parse_targets'](pkgs)[0].keys())
    except MinionError as exc:
        raise CommandExecutionError(exc)

    removed = []
    missing = []
    for pkg in pkgs:
        if locks.get(pkg):
            removed.append(pkg)
            # Bug fix: ret[pkg] was assigned via ret[pkg]['comment'] without
            # ever creating ret[pkg], raising KeyError. Create the dict.
            ret[pkg] = {'comment': 'Package {0} is no longer held.'.format(pkg)}
        else:
            missing.append(pkg)
            ret[pkg] = {'comment': 'Package {0} unable to be unheld.'.format(pkg)}

    if removed:
        # remove all locks in a single zypper call
        __zypper__(root=root).call('rl', *removed)

    return ret
def write_dataframe(self, df, path, format='csv'):
    """Write a pandas DataFrame to the indicated file path (default: HDFS)
    in the indicated format.

    Parameters
    ----------
    df : DataFrame
    path : string
        Absolute output path
    format : {'csv'}, default 'csv'

    Raises
    ------
    ValueError
        If an unsupported format is requested. (Previously the argument
        was silently ignored and CSV was always written.)

    Returns
    -------
    None (for now)
    """
    # Validate the requested format instead of silently writing CSV.
    if format != 'csv':
        raise ValueError(
            "Unsupported format: {0!r}; only 'csv' is supported".format(format))
    from ibis.impala.pandas_interop import DataFrameWriter
    writer = DataFrameWriter(self, df)
    return writer.write_csv(path)
def sync_local_to_set(org, syncer, remote_set):
    """Syncs an org's set of local instances of a model to match the set of
    remote objects. Local objects not in the remote set are deleted.

    :param org: the org
    :param syncer: the local model syncer
    :param remote_set: the set of remote objects
    :return: tuple of number of local objects created, updated, deleted and ignored
    """
    counts = defaultdict(int)
    seen_identities = set()
    for remote in remote_set:
        counts[sync_from_remote(org, syncer, remote)] += 1
        seen_identities.add(syncer.identify_remote(remote))

    # active local objects which weren't in the remote set need to be deleted
    stale = (syncer.fetch_all(org)
             .filter(is_active=True)
             .exclude(**{syncer.local_id_attr + "__in": seen_identities}))
    for local in stale:
        with syncer.lock(org, syncer.identify_local(local)):
            syncer.delete_local(local)
            counts[SyncOutcome.deleted] += 1

    return (counts[SyncOutcome.created], counts[SyncOutcome.updated],
            counts[SyncOutcome.deleted], counts[SyncOutcome.ignored])
def get_annotation_entries_by_names(self, url: str, names: Iterable[str]) -> List[NamespaceEntry]:
    """Get annotation entries by URL and names.

    :param url: The url of the annotation source
    :param names: The names of the annotation entries from the given url's document
    """
    criteria = and_(Namespace.url == url, NamespaceEntry.name.in_(names))
    query = self.session.query(NamespaceEntry).join(Namespace).filter(criteria)
    return query.all()
def _wrap_measure(individual_group_measure_process, group_measure, loaded_processes):
    """Creates a function on a state_collection, which creates
    analysis_collections for each group in the collection.

    :param individual_group_measure_process: callable applied to each
        (analysis_collection, state_collection) pair
    :param group_measure: measure definition carrying ``grouping_selectors``
        and ``analysis_selectors`` attributes
    :param loaded_processes: mapping of process kinds to named processes;
        when None the wrapped function returns the collection unchanged
    :returns: the wrapped measure function
    """
    def wrapped_measure(state_collection, overriding_parameters=None, loggers=None):
        # Idiom fix: use `is (not) None` identity checks instead of ==/!=
        # comparisons against None (PEP 8).
        if loggers is None:
            loggers = funtool.logger.set_default_loggers()
        if loaded_processes is not None:
            if group_measure.grouping_selectors is not None:
                for grouping_selector_name in group_measure.grouping_selectors:
                    # Ensure the grouping exists on the collection.
                    state_collection = funtool.state_collection.add_grouping(
                        state_collection, grouping_selector_name, loaded_processes)
                    for group in funtool.state_collection.groups_in_grouping(
                            state_collection, grouping_selector_name):
                        analysis_collection = funtool.analysis.AnalysisCollection(None, group, {}, {})
                        if group_measure.analysis_selectors is not None:
                            # Selectors may filter the collection down to None.
                            for analysis_selector in group_measure.analysis_selectors:
                                analysis_collection = loaded_processes["analysis_selector"][analysis_selector].process_function(analysis_collection, state_collection)
                        if analysis_collection is not None:
                            individual_group_measure_process(analysis_collection, state_collection)
        return state_collection
    return wrapped_measure
def parse_stack_pointer(sp):
    """Convert multiple supported forms of stack pointer representations
    into stack offsets.

    :param sp: A stack pointer representation.
    :return: A stack pointer offset.
    :rtype: int
    """
    if isinstance(sp, int):
        return sp

    if isinstance(sp, StackBaseOffset):
        return sp.offset

    if isinstance(sp, BinaryOp):
        lhs, rhs = sp.operands
        lhs_off = parse_stack_pointer(lhs)
        rhs_off = parse_stack_pointer(rhs)
        if sp.op == "Sub":
            return lhs_off - rhs_off
        if sp.op == "Add":
            return lhs_off + rhs_off

    raise NotImplementedError("Unsupported stack pointer representation type %s." % type(sp))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.