| signature | implementation |
|---|---|
def run_filter_query(self, resource_name, filter_clause):
    """Run a GET query against the CLUE API using this client's API/user keys.

    Args:
        resource_name: str - name of the resource/collection to query,
            e.g. genes, perts, cells etc.
        filter_clause: dict - filter to pass to the API; uses the
            loopback specification.

    Returns:
        list of dictionaries containing the results of the query.
    """
    endpoint = self.base_url + "/" + resource_name
    query_params = {"filter": json.dumps(filter_clause)}
    response = requests.get(endpoint, headers=self.headers, params=query_params)
    logger.debug("requests.get result r.status_code: {}".format(response.status_code))
    # raise on API-level errors before handing back the payload
    ClueApiClient._check_request_response(response)
    return response.json()
def get(self, word, default=None):
    """Retrieve the output value associated with ``word``.

    Args:
        word: key to look up via the internal node lookup.
        default: value to return when ``word`` has no output; if omitted,
            a missing word raises ``KeyError`` instead.

    Returns:
        The stored output for ``word``, or ``default``.

    Raises:
        KeyError: if ``word`` has no output and no ``default`` was given.
    """
    # BUG FIX: the original used the undefined name ``nil`` (a Ruby-ism)
    # as the sentinel, which raises NameError the moment the function is
    # defined; Python's null sentinel is ``None``.
    node = self.__get_node(word)
    output = None
    if node:
        output = node.output
    if output is None:
        if default is None:
            raise KeyError("no key '%s'" % word)
        return default
    return output
def get(cls, name):
    """Return the elapsed time recorded for a timer.

    :param name: the name of the timer
    :type name: string
    :rtype: the elapsed time, or the string ``"undefined"`` when the
        timer was never stopped
    """
    if name not in cls.timer_end:
        return "undefined"
    # cache the computed elapsed time before returning it
    elapsed = cls.timer_end[name] - cls.timer_start[name]
    cls.timer_elapsed[name] = elapsed
    return elapsed
def get_rows(self, indexes, column, as_list=False):
    """Return the values of the given indexes in a single column.

    :param indexes: either a list of index values or a list of booleans
        with the same length as the full index
    :param column: single column name
    :param as_list: if True return a list, if False return DataFrame
    :return: DataFrame if as_list is False, a list if as_list is True
    """
    col_pos = self._columns.index(column)
    is_bool_mask = all(isinstance(i, bool) for i in indexes)
    if is_bool_mask:
        # boolean mask: must line up 1:1 with the existing index
        if len(indexes) != len(self._index):
            raise ValueError('boolean index list must be same size of existing index')
        if all(indexes):
            # the entire column is requested
            data = self._data[col_pos]
            index = self._index
        else:
            data = list(compress(self._data[col_pos], indexes))
            index = list(compress(self._index, indexes))
    else:
        # list of index values: translate each value to a position
        if self._sort:
            locations = [sorted_index(self._index, value) for value in indexes]
        else:
            locations = [self._index.index(value) for value in indexes]
        data = [self._data[col_pos][loc] for loc in locations]
        index = [self._index[loc] for loc in locations]
    if as_list:
        return data
    return DataFrame(data={column: data}, index=index,
                     index_name=self._index_name, sort=self._sort)
def report_factory(app, report_name, **kwargs):
    """Build a Report instance pre-populated with boilerplate raw data.

    Args:
        app (App): Swimlane App instance
        report_name (str): Generated Report name

    Keyword Args:
        **kwargs: Kwargs to pass to the Report class
    """
    # pylint: disable=protected-access
    created = pendulum.now().to_rfc3339_string()
    user_model = app._swimlane.user.as_usergroup_selection()
    raw = {
        "$type": Report._type,
        "groupBys": [],
        "aggregates": [],
        "applicationIds": [app.id],
        "columns": [],
        "sorts": {
            "$type": "System.Collections.Generic.Dictionary`2"
                     "[[System.String, mscorlib],"
                     "[Core.Models.Search.SortTypes, Core]], mscorlib",
        },
        "filters": [],
        "defaultSearchReport": False,
        "allowed": [],
        "permissions": {"$type": "Core.Models.Security.PermissionMatrix, Core"},
        "createdDate": created,
        "modifiedDate": created,
        "createdByUser": user_model,
        "modifiedByUser": user_model,
        "id": None,
        "name": report_name,
        "disabled": False,
        "keywords": "",
    }
    return Report(app, raw, **kwargs)
def add_port_fwd(zone, src, dest, proto='tcp', dstaddr='', permanent=True):
    '''Add port forwarding.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' firewalld.add_port_fwd public 80 443 tcp
    '''
    forward_rule = '--zone={0} --add-forward-port=port={1}:proto={2}:toport={3}:toaddr={4}'.format(
        zone, src, proto, dest, dstaddr)
    if permanent:
        forward_rule = forward_rule + ' --permanent'
    return __firewall_cmd(forward_rule)
def _get_desired_deployment_id(self):
    '''Return the deployment id whose description matches this Swagger
    object's deployment label (based on the given api_name / swagger_file),
    or the empty string when no deployment matches.
    '''
    response = __salt__['boto_apigateway.describe_api_deployments'](
        restApiId=self.restApiId, **self._common_aws_args)
    for deployment in response.get('deployments') or []:
        if deployment.get('description') == self.deployment_label_json:
            return deployment.get('id')
    return ''
def bulk_update_resourcedata(scenario_ids, resource_scenarios, **kwargs):
    """Update the data associated with a list of scenarios.

    Args:
        scenario_ids: ids of the scenarios to update; all must belong to
            the same network.
        resource_scenarios: resource-scenario objects; entries carrying a
            dataset are updated, entries without one are deleted.

    Keyword Args:
        user_id: id of the user performing the update (permission check).
        app_name: recorded as the source of each update.

    Returns:
        dict mapping scenario_id -> list of updated resource scenarios.

    Raises:
        HydraError: if the scenarios span more than one network.
    """
    user_id = kwargs.get('user_id')
    # CLEANUP: the original assigned ``res = None`` and immediately
    # overwrote it with ``{}``; the dead assignment is removed.
    res = {}
    net_ids = db.DBSession.query(Scenario.network_id).filter(
        Scenario.id.in_(scenario_ids)).all()
    if len(set(net_ids)) != 1:
        raise HydraError("Scenario IDS are not in the same network")
    for scenario_id in scenario_ids:
        # use the already-extracted user_id (the original re-read
        # kwargs['user_id'] here, raising KeyError instead of passing
        # None through the permission check when the key is absent)
        _check_can_edit_scenario(scenario_id, user_id)
        scen_i = _get_scenario(scenario_id, user_id)
        res[scenario_id] = []
        for rs in resource_scenarios:
            if rs.dataset is not None:
                updated_rs = _update_resourcescenario(
                    scen_i, rs, user_id=user_id, source=kwargs.get('app_name'))
                res[scenario_id].append(updated_rs)
            else:
                _delete_resourcescenario(scenario_id, rs.resource_attr_id)
    db.DBSession.flush()
    return res
def do_local_server_auth_flow(session_params=None, force_new_client=False):
    """Run the native-app auth flow against a local redirect server.

    Starts a local http server, opens a browser to have the user
    authenticate, and gets the code redirected to the server (no copy
    and pasting required).
    """
    session_params = session_params or {}
    # start local server and create matching redirect_uri
    with start_local_server(listen=("127.0.0.1", 0)) as server:
        _, port = server.socket.getsockname()
        redirect_uri = "http://localhost:{}".format(port)

        # get the ConfidentialApp client object and start a flow
        auth_client = internal_auth_client(
            requires_instance=True, force_new_client=force_new_client)
        auth_client.oauth2_start_flow(
            refresh_tokens=True,
            redirect_uri=redirect_uri,
            requested_scopes=SCOPES)
        # session_params may override the default "prompt" value
        additional_params = {"prompt": "login"}
        additional_params.update(session_params)
        url = auth_client.oauth2_get_authorize_url(additional_params=additional_params)

        # open web-browser for user to log in, get auth code
        webbrowser.open(url, new=1)
        auth_code = server.wait_for_code()

        if isinstance(auth_code, LocalServerError):
            safeprint("Authorization failed: {}".format(auth_code), write_to_stderr=True)
            click.get_current_context().exit(1)
        elif isinstance(auth_code, Exception):
            safeprint(
                "Authorization failed with unexpected error:\n{}".format(auth_code),
                write_to_stderr=True,
            )
            click.get_current_context().exit(1)

    # finish auth flow and return true
    exchange_code_and_store_config(auth_client, auth_code)
    return True
def add_file(self, src, dest=None):
    """Add the file at ``src`` to the archive.

    If ``dest`` is ``None`` then it is added under just the original
    filename. So ``add_file('foo/bar.txt')`` ends up at ``bar.txt`` in
    the archive, while ``add_file('bar.txt', 'foo/bar.txt')`` ends up at
    ``foo/bar.txt``.
    """
    dest = dest or os.path.basename(src)
    with open(src, 'rb') as source_file:
        self.add_contents(dest, source_file.read())
def add_node(self, payload):
    """Append a new node carrying ``payload``.

    Returns
    -------
    int
        Identifier (index) for the inserted node.
    """
    node_id = len(self.nodes)
    self.nodes.append(Node(node_id, payload))
    return node_id
def get_dos(self, partial_dos=False, npts_mu=10000, T=None):
    """Return a Dos object interpolating bands.

    Args:
        partial_dos: if True, projections will be interpolated as well
            and partial doses will be returned. Projections must be
            available in the loader.
        npts_mu: number of energy points of the Dos
        T: parameter used to smooth the Dos
    """
    # fall back to spin 1 when the loader does not store an int spin
    spin = self.data.spin if isinstance(self.data.spin, int) else 1
    energies, densities, vvdos, cdos = BL.BTPDOS(
        self.eband, self.vvband, npts=npts_mu)
    if T is not None:
        densities = BL.smoothen_DOS(energies, densities, T)
    tdos = Dos(self.efermi / units.eV,
               energies / units.eV,
               {Spin(spin): densities})
    if partial_dos:
        tdos = self.get_partial_doses(tdos=tdos, npts_mu=npts_mu, T=T)
    return tdos
def ToJson(self):
    """Convert object members to a dictionary that can be parsed as JSON.

    Returns:
        dict: version, txHash, txHeight and the list of items.
    """
    return {
        'version': self.StateVersion,
        'txHash': self.TransactionHash.ToString(),
        'txHeight': self.TransactionHeight,
        'items': [{'index': entry.index, 'height': entry.height}
                  for entry in self.Items],
    }
def get_header(self):
    """Return the header line for screen logging of the minimization.

    Only the columns whose tracking attribute is set (not None) are
    included in the header.
    """
    parts = [" "]
    if self.step_rms is not None:
        parts.append(" Step RMS")
    if self.step_max is not None:
        parts.append(" Step MAX")
    if self.grad_rms is not None:
        parts.append(" Grad RMS")
    if self.grad_max is not None:
        parts.append(" Grad MAX")
    if self.rel_grad_rms is not None:
        parts.append(" Grad/F RMS")
    if self.rel_grad_max is not None:
        parts.append(" Grad/F MAX")
    return "".join(parts)
def _pfp__parse(self, stream, save_offset=False):
    """Parse the IO stream for this numeric field.

    :stream: An IO stream that can be read from
    :save_offset: if True, record the stream offset of this field
    :returns: The number of bytes parsed (the field's ``width``)
    :raises errors.PrematureEOF: if fewer than ``width`` bytes are available
    """
    if save_offset:
        self._pfp__offset = stream.tell()

    if self.bitsize is None:
        # plain (non-bitfield) field: read exactly ``width`` raw bytes
        raw_data = stream.read(self.width)
        data = utils.binary(raw_data)
    else:
        # bitfield: pull ``bitsize`` bits through the shared bitfield
        # reader, honouring the padding/direction/endianness settings
        bits = self.bitfield_rw.read_bits(
            stream, self.bitsize, self.bitfield_padded,
            self.bitfield_left_right, self.endian)
        # left-pad the bit list with zeros up to ``width`` whole bytes so
        # the bytes below line up with the struct format
        width_diff = self.width - (len(bits) // 8) - 1
        bits_diff = 8 - (len(bits) % 8)
        padding = [0] * (width_diff * 8 + bits_diff)
        bits = padding + bits
        data = bitwrap.bits_to_bytes(bits)
        if self.endian == LITTLE_ENDIAN:
            # reverse the data
            data = data[::-1]

    if len(data) < self.width:
        raise errors.PrematureEOF()

    # keep both the raw bytes and the unpacked numeric value
    self._pfp__data = data
    self._pfp__value = struct.unpack(
        "{}{}".format(self.endian, self.format), data)[0]

    return self.width
def _parse_image(self):
    """Return an instance of the image.Image class for the RSS feed."""
    def _text(path):
        return self._channel.find(path).text

    image = {
        'title': _text('./image/title'),
        'width': int(_text('./image/width')),
        'height': int(_text('./image/height')),
        'link': _text('./image/link'),
        'url': _text('./image/url'),
    }
    return Image(image)
def get_required_status_checks(self):
    """:calls: `GET /repos/:owner/:repo/branches/:branch/protection/required_status_checks <https://developer.github.com/v3/repos/branches>`_
    :rtype: :class:`github.RequiredStatusChecks.RequiredStatusChecks`
    """
    headers, data = self._requester.requestJsonAndCheck(
        "GET",
        self.protection_url + "/required_status_checks",
    )
    return github.RequiredStatusChecks.RequiredStatusChecks(
        self._requester, headers, data, completed=True)
def spec_check(self, auth_list, fun, args, form):
    '''Check special API permissions.

    :param auth_list: list of permission entries (strings or dicts) the
        caller is authorized for; an empty list denies everything
    :param fun: the function being invoked, e.g. ``jobs.list_jobs``
    :param args: dict with optional ``arg`` / ``kwarg`` entries for the call
    :param form: the kind of command, e.g. ``runner``, ``wheel`` or ``cloud``
    :return: True if permitted, False if denied, or an error dict when
        the function name is malformed
    '''
    if not auth_list:
        return False
    if form != 'cloud':
        comps = fun.split('.')
        if len(comps) != 2:
            # Hint at a syntax error when command is passed improperly,
            # rather than returning an authentication error of some kind.
            # See Issue #21969 for more information.
            return {'error': {'name': 'SaltInvocationError',
                              'message': 'A command invocation error occurred: Check syntax.'}}
        mod_name = comps[0]
        fun_name = comps[1]
    else:
        # cloud commands are not module-qualified
        fun_name = mod_name = fun
    for ind in auth_list:
        if isinstance(ind, six.string_types):
            # bare '@module' / '@form' entries grant blanket access
            if ind[0] == '@':
                if ind[1:] == mod_name or ind[1:] == form or ind == '@{0}s'.format(form):
                    return True
        elif isinstance(ind, dict):
            if len(ind) != 1:
                # malformed entry: exactly one key expected; skip it
                continue
            valid = next(six.iterkeys(ind))
            if valid[0] == '@':
                # dict entries scope access to particular functions/args
                if valid[1:] == mod_name:
                    if self.__fun_check(ind[valid], fun_name, args.get('arg'), args.get('kwarg')):
                        return True
                if valid[1:] == form or valid == '@{0}s'.format(form):
                    if self.__fun_check(ind[valid], fun, args.get('arg'), args.get('kwarg')):
                        return True
    return False
def brain(self):
    """Catalog brain of the wrapped object (fetched lazily and cached)."""
    cached = self._brain
    if cached is None:
        logger.debug("SuperModel::brain: *Fetch catalog brain*")
        cached = self.get_brain_by_uid(self.uid)
        self._brain = cached
    return cached
def play_env_problem_randomly(env_problem, num_steps):
    """Play the env problem by randomly sampling actions for `num_steps`."""
    # Reset all environments.
    env_problem.reset()
    # Play all environments, sampling random actions each time.
    for _ in range(num_steps):
        # Sample batch_size actions from the action space and stack them.
        sampled = [env_problem.action_space.sample()
                   for _ in range(env_problem.batch_size)]
        actions = np.stack(sampled)
        # Execute actions; observations are stored inside `env_problem`.
        _, _, dones, _ = env_problem.step(actions)
        # Reset only the environments that are done.
        env_problem.reset(indices=done_indices(dones))
def send_error(self, status_code=500, **kwargs):
    """Report all exceptions to Sentry, even after self.flush() or
    self.finish() is called, for pre-v3.1 Tornado; otherwise defer to
    the stock implementation.
    """
    parent = super(SentryMixin, self)
    if hasattr(parent, 'log_exception'):
        # Tornado >= 3.1 exposes log_exception; nothing extra to do here
        return parent.send_error(status_code, **kwargs)
    rv = parent.send_error(status_code, **kwargs)
    if 500 <= status_code <= 599:
        # only server errors are worth a Sentry event
        self.captureException(exc_info=kwargs.get('exc_info'))
    return rv
def fix_timezone_separator(cls, timestr):
    """Replace an invalid timezone separator so that
    ``dateutil.parser.parse`` does not raise.

    :param timestr: timestamp string to repair
    :return: the fixed string when an invalid separator was found,
        otherwise ``timestr`` unchanged. (The previous docstring claimed
        ``None`` was returned in that case, but the code has always
        returned the input string.)
    """
    tz_sep = cls.TIMEZONE_SEPARATOR.match(timestr)
    if tz_sep is not None:
        # re-assemble with ':' between the hour and minute of the offset
        return tz_sep.group(1) + tz_sep.group(2) + ':' + tz_sep.group(3)
    return timestr
def execute(self, eopatch):
    """Transform a [n, w, h, d] eopatch into a [n, w, h, 1] eopatch by
    appending the classification mask produced by ``self.classifier``.

    :param eopatch: An input EOPatch
    :type eopatch: EOPatch
    :return: Output EOPatch with n classification masks appended to
        out_feature_type with out_feature_name key
    :rtype: EOPatch
    """
    in_type, in_name = next(self.input_feature(eopatch))
    # NOTE(review): the output feature is obtained by calling
    # input_feature() with no arguments — presumably that yields the
    # configured output feature, but it reads as if an output_feature
    # accessor may have been intended; confirm against the task setup.
    out_type, out_name = next(self.input_feature())
    eopatch[out_type][out_name] = self.classifier.image_predict(eopatch[in_type][in_name])
    return eopatch
async def create_cred(self, cred_offer_json, cred_req_json: str, cred_attrs: dict, rr_size: int = None) -> (str, str, int):
    """Create credential as Issuer out of credential request and dict of
    key:value (raw, unencoded) entries for attributes.

    Return credential json, and if cred def supports revocation, credential
    revocation identifier and revocation registry delta ledger timestamp
    (epoch seconds).

    If the credential definition supports revocation, and the current
    revocation registry is full, the processing creates a new revocation
    registry en passant. Depending on the revocation registry size (by
    default starting at 256 and doubling iteratively through 4096), this
    operation may delay credential creation by several seconds.

    :param cred_offer_json: credential offer json as created by Issuer
    :param cred_req_json: credential request json as created by HolderProver
    :param cred_attrs: dict mapping each attribute to its raw value (the
        operation encodes it); e.g.,
        {'favourite_drink': 'martini', 'height': 180,
         'last_visit_date': '2017-12-31', 'weaknesses': None}
    :param rr_size: size of new revocation registry (default as per
        _create_rev_reg()) if necessary
    :return: newly issued credential json; credential revocation identifier
        (if cred def supports revocation, None otherwise), and ledger
        timestamp (if cred def supports revocation, None otherwise)
    """
    LOGGER.debug(
        'Issuer.create_cred >>> cred_offer_json: %s, cred_req_json: %s, cred_attrs: %s, rr_size: %s',
        cred_offer_json, cred_req_json, cred_attrs, rr_size)

    cd_id = json.loads(cred_offer_json)['cred_def_id']
    cred_def = json.loads(await self.get_cred_def(cd_id))  # ensure cred def is in cache

    if 'revocation' in cred_def['value']:
        with REVO_CACHE.lock:
            rr_id = Tails.current_rev_reg_id(self._dir_tails, cd_id)
            tails = REVO_CACHE[rr_id].tails
            # at (re)start, at cred def, Issuer sync_revoc() sets this
            # index in the revocation cache
            assert tails

            try:
                (cred_json, cred_revoc_id, rr_delta_json) = await anoncreds.issuer_create_credential(
                    self.wallet.handle,
                    cred_offer_json,
                    cred_req_json,
                    json.dumps({k: cred_attr_value(cred_attrs[k]) for k in cred_attrs}),
                    tails.rr_id,
                    tails.reader_handle)
                # do not create rr delta frame and append to cached delta
                # frames list: timestamp could lag or skew
                rre_req_json = await ledger.build_revoc_reg_entry_request(
                    self.did, tails.rr_id, 'CL_ACCUM', rr_delta_json)
                # BUG FIX: the original awaited _sign_submit(rre_req_json)
                # twice back to back, submitting the same revocation
                # registry entry to the ledger twice; submit exactly once.
                resp_json = await self._sign_submit(rre_req_json)
                resp = json.loads(resp_json)
                rv = (cred_json, cred_revoc_id, resp['result']['txnMetadata']['txnTime'])
            except IndyError as x_indy:
                if x_indy.error_code == ErrorCode.AnoncredsRevocationRegistryFullError:
                    # current registry is full: spin up the next one, then retry
                    (tag, rr_size_suggested) = Tails.next_tag(self._dir_tails, cd_id)
                    rr_id = rev_reg_id(cd_id, tag)
                    await self._create_rev_reg(rr_id, rr_size or rr_size_suggested)
                    REVO_CACHE[rr_id].tails = await Tails(self._dir_tails, cd_id).open()
                    return await self.create_cred(cred_offer_json, cred_req_json, cred_attrs)  # should be ok now
                LOGGER.debug('Issuer.create_cred: <!< cannot create cred, indy error code %s', x_indy.error_code)
                raise
    else:
        try:
            (cred_json, _, _) = await anoncreds.issuer_create_credential(
                self.wallet.handle,
                cred_offer_json,
                cred_req_json,
                json.dumps({k: cred_attr_value(cred_attrs[k]) for k in cred_attrs}),
                None,
                None)
            # no revocation support: explicit Nones per the documented contract
            # (the original returned whatever the discarded unpack vars held)
            rv = (cred_json, None, None)
        except IndyError as x_indy:
            LOGGER.debug('Issuer.create_cred: <!< cannot create cred, indy error code %s', x_indy.error_code)
            raise

    LOGGER.debug('Issuer.create_cred <<< %s', rv)
    return rv
def get_attribute_classes() -> Dict[str, Attribute]:
    """Lookup all builtin Attribute subclasses, load them, and return a
    dict mapping attribute name -> class.
    """
    modules = pkgutil.iter_modules(
        importlib.import_module('jawa.attributes').__path__,
        prefix='jawa.attributes.')

    def _is_concrete_attribute(obj):
        return (inspect.isclass(obj)
                and issubclass(obj, Attribute)
                and obj is not Attribute)

    result = {}
    for _, module_name, _ in modules:
        module = importlib.import_module(module_name)
        for class_name, class_ in inspect.getmembers(module, _is_concrete_attribute):
            # prefer an explicit ATTRIBUTE_NAME; otherwise strip the
            # trailing 'Attribute' (9 chars) from the class name
            key = getattr(class_, 'ATTRIBUTE_NAME', class_name[:-9])
            result[key] = class_
    return result
def get_sentry_data_from_request(self):
    """Extract the data required for 'sentry.interfaces.Http' from the
    current request being handled by the request handler.

    :param return: A dictionary.
    """
    request = self.request
    http_data = {
        'url': request.full_url(),
        'method': request.method,
        'data': request.body,
        'query_string': request.query,
        'cookies': request.headers.get('Cookie', None),
        'headers': dict(request.headers),
    }
    return {'request': http_data}
async def serviceQueues(self, limit=None) -> int:
    """Service at most `limit` messages from the inBox.

    :param limit: the maximum number of messages to service
    :return: the number of messages successfully processed
    """
    processed = await self._inbox_router.handleAll(self._inbox, limit)
    return processed
def get_port_profile_status_input_port_profile_name(self, **kwargs):
    """Build the NETCONF config element for a get_port_profile_status
    request carrying input/port-profile-name, and pass it to the callback.

    Keyword Args:
        port_profile_name: text placed in the port-profile-name leaf (required).
        callback: override for self._callback; receives the config element.
    """
    # CLEANUP: the original created ET.Element("config") and immediately
    # overwrote the variable with the real root; the dead element is removed.
    get_port_profile_status = ET.Element("get_port_profile_status")
    config = get_port_profile_status
    input_el = ET.SubElement(get_port_profile_status, "input")
    port_profile_name = ET.SubElement(input_el, "port-profile-name")
    port_profile_name.text = kwargs.pop('port_profile_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_subnet_association(subnets, region=None, key=None, keyid=None, profile=None):
    '''Given a subnet (aka: a vpc zone identifier) or list of subnets, returns
    vpc association.

    Returns a VPC ID if the given subnets are associated with the same VPC ID.
    Returns False on an error or if the given subnets are associated with
    different VPC IDs.

    CLI Examples:

    .. code-block:: bash

        salt myminion boto_vpc.get_subnet_association subnet-61b47516

    .. code-block:: bash

        salt myminion boto_vpc.get_subnet_association ['subnet-61b47516','subnet-2cb9785b']
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        # subnet_ids=subnets can accept either a string or a list
        subnets = conn.get_all_subnets(subnet_ids=subnets)
    except BotoServerError as e:
        return {'error': __utils__['boto.get_error'](e)}

    # using a set to store vpc_ids - the use of set prevents duplicate
    # vpc_id values
    vpc_ids = set()
    for subnet in subnets:
        # BUG FIX: the original guarded this body with ``if subnet in
        # subnets``, which is always true while iterating ``subnets`` and
        # only added an O(n) membership scan per element.
        log.debug('examining subnet id: %s for vpc_id', subnet.id)
        log.debug('subnet id: %s is associated with vpc id: %s', subnet.id, subnet.vpc_id)
        vpc_ids.add(subnet.vpc_id)
    if not vpc_ids:
        return {'vpc_id': None}
    elif len(vpc_ids) == 1:
        return {'vpc_id': vpc_ids.pop()}
    else:
        return {'vpc_ids': list(vpc_ids)}
def __get_types(for_type=None, for_types=None):
    """Parse the arguments and return a tuple of types to implement for.

    Exactly one of ``for_type`` / ``for_types`` must be provided.

    Raises:
        ValueError or TypeError as appropriate.
    """
    if for_type and for_types:
        raise ValueError("Cannot pass both for_type and for_types.")
    if for_type:
        return (for_type,)
    if for_types:
        if not isinstance(for_types, tuple):
            raise TypeError("for_types must be passed as a tuple of "
                            "types (classes).")
        return for_types
    raise ValueError("Must pass either for_type or for_types.")
def add(self, email):
    """Add a collaborator.

    Args:
        email (str): Collaborator email address.
    """
    if email in self._collaborators:
        # already present: nothing to record, nothing becomes dirty
        return
    self._collaborators[email] = ShareRequestValue.Add
    self._dirty = True
def parse(self, name, description):
    """Parse an option name (and a default value from its description).

    :param name: option's name
    :param description: option's description

    Acceptable names:

        * ``-f``: shortname
        * ``--force``: longname
        * ``-f, --force``: shortname and longname
        * ``-o <output>``: shortname and a required value
        * ``-o, --output [output]``: shortname, longname and optional value

    Default values recognized in the description:

        * source directory, default: src
        * source directory, default: [src]
        * source directory, default: <src>

    :return: self, with shortname/longname/key/boolean/required/default set.
    :raises ValueError: if the name cannot be parsed as an option.
    """
    name = name.strip()

    # value marker: <...> means a required value, [...] an optional one,
    # no marker means a boolean flag
    if '<' in name:
        self.required = True
        self.boolean = False
        name = name[:name.index('<')].strip()
    elif '[' in name:
        self.required = False
        self.boolean = False
        name = name[:name.index('[')].strip()
    else:
        self.required = False
        self.boolean = True

    regex = re.compile(r'(-\w)?(?:\,\s*)?(--[\w\-]+)?')
    m = regex.findall(name)
    if not m:
        # BUG FIX: the original passed ``name`` as a second positional
        # argument to ValueError instead of interpolating it into the
        # message, so the rendered error never contained the bad name.
        raise ValueError('Invalid Option: %s' % name)
    shortname, longname = m[0]
    if not shortname and not longname:
        raise ValueError('Invalid Option: %s' % name)
    self.shortname = shortname
    self.longname = longname

    # parse store key: strip '--no-' / '--' from the longname, else use
    # the shortname verbatim
    if longname and longname.startswith('--no-'):
        self.key = longname[5:]
    elif longname:
        self.key = longname[2:]
    else:
        self.key = shortname

    if self.boolean:
        # booleans don't parse a default from the description;
        # --no-xxx flags default to True, plain flags to False
        self.default = bool(longname and longname.startswith('--no-'))
        return self

    if not description:
        self.default = None
        return self

    # parse default value from description
    regex = re.compile(r'\sdefault:(.*)$')
    m = regex.findall(description)
    if not m:
        self.default = None
        return self

    # if it has a default value, it is not required
    self.required = False
    value = m[0].strip()
    if value.startswith('<') and value.endswith('>'):
        value = value[1:-1]
    elif value.startswith('[') and value.endswith(']'):
        value = value[1:-1]
    self.default = value.strip()
    return self
def _histogram_fixed_binsize(a, start, width, n):
    """histogram_even(a, start, width, n) -> histogram

    Return a histogram where the first bin counts the number of lower
    outliers and the last bin the number of upper outliers. Works only
    with fixed width bins.

    :Stochastics:
      a : array
        Array of samples.
      start : float
        Left-most bin edge.
      width : float
        Width of the bins. All bins are considered to have the same width.
      n : int
        Number of bins.

    :Return:
      H : array
        Array containing the number of elements in each bin. H[0] is the
        number of samples smaller than start and H[-1] the number of
        samples greater than start + n * width.
    """
    # thin wrapper: the counting itself happens in the compiled flib routine
    return flib.fixed_binsize(a, start, width, n)
def _safe_copy(dat):
    '''Return a copy of ``dat`` whose dict keys are safe for mongodb.

    mongodb doesn't allow '.' in keys (and '$' is special), so the
    offending characters are percent-encoded — which means '%' itself
    must be escaped first:

        %   -->  %25
        \\  -->  %5c
        $   -->  %24
        .   -->  %2e

    Tuples and lists are copied as lists; scalars are returned unchanged.
    '''
    if isinstance(dat, dict):
        safe = {}
        for key, value in dat.items():
            # '%' must be replaced first so escapes introduced below are
            # not themselves re-escaped
            new_key = (key.replace('%', '%25')
                          .replace('\\', '%5c')
                          .replace('$', '%24')
                          .replace('.', '%2e'))
            if new_key != key:
                log.debug('converting dict key from %s to %s for mongodb', key, new_key)
            safe[new_key] = _safe_copy(value)
        return safe
    if isinstance(dat, (list, tuple)):
        return [_safe_copy(item) for item in dat]
    return dat
def generic_adjust(colors, light):
    """Generic color adjustment for themers.

    Args:
        colors: list of 16 hex color strings; adjusted in place.
        light: True to produce a light theme, False for a dark theme.

    Returns:
        The adjusted color list.
    """
    if light:
        # BUG FIX: the original wrote ``for color in colors: color = ...``,
        # which only rebinds the loop variable and never stores the
        # saturated/darkened colors back into the list; assign by index.
        for i, color in enumerate(colors):
            color = util.saturate_color(color, 0.60)
            colors[i] = util.darken_color(color, 0.5)

        colors[0] = util.lighten_color(colors[0], 0.95)
        colors[7] = util.darken_color(colors[0], 0.75)
        colors[8] = util.darken_color(colors[0], 0.25)
        colors[15] = colors[7]
    else:
        colors[0] = util.darken_color(colors[0], 0.80)
        colors[7] = util.lighten_color(colors[0], 0.75)
        colors[8] = util.lighten_color(colors[0], 0.25)
        colors[15] = colors[7]
    return colors
def addRectAnnot(self, rect):
    """Add a 'Rectangle' annotation on this page and return it."""
    CheckParent(self)
    annot = _fitz.Page_addRectAnnot(self, rect)
    if not annot:
        return
    # take ownership and register the annotation with this page
    annot.thisown = True
    annot.parent = weakref.proxy(self)
    self._annot_refs[id(annot)] = annot
    return annot
def loadString(self, profilestr, merge=False, loadProfile=True):
    """Load the information for this toolbar from the inputed string.

    :param profilestr: | <str> XML document describing the profiles
    :param merge: when True, merge the parsed profiles into the existing
        ones (replacing same-named profiles) instead of clearing first
    :param loadProfile: unused here; kept for interface compatibility
    """
    # BUG FIX: ``except ExpatError, e`` is Python-2-only syntax (and the
    # bound exception was never used); use the version-agnostic form.
    try:
        xtoolbar = ElementTree.fromstring(nativestring(profilestr))
    except ExpatError:
        # invalid XML: leave the toolbar untouched
        return

    if not merge:
        self.clear()

    self.blockSignals(True)
    for xprofile in xtoolbar:
        prof = XViewProfile.fromXml(xprofile)
        if merge:
            self.removeProfile(prof.name(), silent=True)
        self.addProfile(prof)
    self.setCurrentProfile(xtoolbar.get('current'))
    self.blockSignals(False)

    self.profilesChanged.emit()
def is_uniform(elements: list) -> bool:
    """Verify whether all elements in a list are identical.

    Parameters:
        elements (list): a list that could hold any (hashable) data types

    Returns:
        bool: True if all elements are the same, False otherwise
        (including for an empty list).

    Examples:
        >>> is_uniform(['same', 'same', 'same'])
        True
        >>> is_uniform(['one', 'two', 'three'])
        False
        >>> is_uniform(['bigdata', 'python', 'Django'])
        False
    """
    distinct = set(elements)
    return len(distinct) == 1
def calculate_field_widths(self, width=None, min_label_width=10, min_progress_width=10):
    """Calculate how wide each field should be so we can align them.

    We always find room for the summaries since these are short and
    packed with information. If possible, we will also find room for
    labels, but if this would make the progress bar width shorter than
    the specified minimum then we will shorten the labels, though never
    below the minimum there. If this means we have bars that are too wide
    for the terminal, then your terminal needs to be wider.
    """
    if width is None:  # pragma: no cover
        width = shutil.get_terminal_size()[0]

    summary_width = self.summary_width()
    label_width = self.label_width()

    # give the progress bar whatever is left after summary + full label
    progress_width = width - summary_width - label_width - 2
    if progress_width < min_progress_width:
        # not enough room: pin the bar at its minimum and squeeze the
        # label instead, but never below its own minimum
        progress_width = min_progress_width
        label_width = max(width - summary_width - progress_width - 2,
                          min_label_width)
    return (label_width, progress_width, summary_width)
def _get_bin_width(stdev, count):
    """Return the histogram's optimal bin width based on Sturges.

    http://www.jstor.org/pss/2965501
    """
    width = int(round((3.5 * stdev) / (count ** (1.0 / 3))))
    # never return a zero-width bin
    return width if width else 1
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True, allow_extra=False):
    """Assigns parameter and aux state values.

    Parameters
    ----------
    arg_params : dict
        Dictionary of name to `NDArray`.
    aux_params : dict
        Dictionary of name to `NDArray`.
    allow_missing : bool
        If ``True``, params could contain missing values, and the
        initializer will be called to fill those missing params.
    force_init : bool
        If ``True``, will force re-initialize even if already initialized.
    allow_extra : boolean, optional
        Whether to allow extra parameters that are not needed by the
        symbol. If this is True, no error will be thrown when arg_params
        or aux_params contain extra parameters that are not needed by
        the executor.

    Examples
    --------
    >>> # An example of setting module parameters.
    >>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
    >>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
    """
    if not allow_missing:
        # delegate to init_params, which validates that no param is missing
        self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
                         allow_missing=allow_missing, force_init=force_init,
                         allow_extra=allow_extra)
        return

    if self.params_initialized and not force_init:
        warnings.warn("Parameters already initialized and force_init=False. "
                      "set_params call ignored.", stacklevel=2)
        return

    self._exec_group.set_params(arg_params, aux_params, allow_extra=allow_extra)

    # because we didn't update self._arg_params, they are dirty now.
    self._params_dirty = True
    self.params_initialized = True
def seqs_from_file(filename, exit_on_err=False, return_qual=False):
    """Iterate over the sequences stored in a fasta/fastq file.

    Name:        seqs_from_file
    Author(s):   Martin C F Thomsen
    Date:        18 Jul 2013

    Args:
        filename:    path to the input file (fasta or fastq)
        exit_on_err: if True, write the error to stderr and exit instead of
                     raising
        return_qual: if True, yield (seq, qual, name, desc) for fastq records
                     instead of (seq, name, desc)

    Yields:
        (seq, name, desc) tuples — or (seq, qual, name, desc) when
        ``return_qual`` is set and the record is fastq.
    """
    def _fail(msg):
        # Either abort the program or raise, depending on exit_on_err.
        if exit_on_err:
            sys.stderr.write('Error: %s\n' % msg)
            sys.exit(1)
        raise IOError(msg)

    # VALIDATE INPUT
    if not isinstance(filename, str):
        _fail('Filename has to be a string.')
    if not os.path.exists(filename):
        _fail('File "%s" does not exist.' % filename)

    # EXTRACT DATA
    with open_(filename, "rt") as handle:
        segments = []
        seq, name, desc, qual = '', '', '', ''
        for line in handle:
            if len(line.strip()) == 0:
                continue
            fields = line.strip().split()
            if line.startswith(">"):
                # FASTA header: flush any sequence collected so far.
                if segments:
                    yield (''.join(segments), name, desc)
                    seq, name, desc = '', '', ''
                    del segments[:]
                name = fields[0][1:]
                desc = ' '.join(fields[1:])
            elif line.startswith("@"):
                # FASTQ header: the record spans the next three lines.
                name = fields[0][1:]
                desc = ' '.join(fields[1:])
                try:
                    seq = next(handle).strip().split()[0]
                    next(handle)                  # skip the '+' separator line
                    qual = next(handle).strip()   # quality scores
                except:
                    # Truncated record: stop iterating (best-effort parsing).
                    break
                else:
                    if return_qual:
                        yield (seq, qual, name, desc)
                    else:
                        yield (seq, name, desc)
                    seq, name, desc, qual = '', '', '', ''
            elif len(fields[0]) > 0:
                # FASTA sequence continuation line.
                segments.append(fields[0])
        # CHECK FOR LAST FASTA SEQUENCE
        if segments:
            yield (''.join(segments), name, desc)
def get_productivity_stats(self, api_token, **kwargs):
    """Return a user's productivity stats.

    :param api_token: The user's login api_token.
    :type api_token: str
    :return: The HTTP response to the request.
    :rtype: :class:`requests.Response`
    """
    # The endpoint only needs the caller's token as a query parameter.
    return self._get('get_productivity_stats', {'token': api_token}, **kwargs)
def SA_ellipsoidal_head(D, a):
    r'''Calculates the surface area of an ellipsoidal head according to [1]_.

    The spheroid formula below gives the full shape, so the result is halved.
    The formula does not support the radius exceeding `a`; geometrically the
    result is identical either way, so the two values are simply swapped.

    .. math::
        SA = 2\pi a^2 + \frac{\pi c^2}{e_1}\ln\left(\frac{1+e_1}{1-e_1}\right)

    .. math::
        e_1 = \sqrt{1 - \frac{c^2}{a^2}}

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    a : float
        Distance the ellipsoidal head extends, [m]

    Returns
    -------
    SA : float
        Surface area [m^2]

    Examples
    --------
    Spherical case

    >>> SA_ellipsoidal_head(2, 1)
    6.283185307179586

    References
    ----------
    .. [1] Weisstein, Eric W. "Spheroid." Text. Accessed March 14, 2016.
       http://mathworld.wolfram.com/Spheroid.html.
    '''
    if D == a * 2:
        # Hemispherical head; the general formula would divide by zero here.
        return pi * D ** 2 / 2.
    radius = D / 2.
    # The formula needs c <= a; swapping is geometrically equivalent.
    c, semi = min((radius, a)), max((radius, a))
    e1 = (1 - c ** 2 / semi ** 2) ** 0.5
    full_area = 2 * pi * semi ** 2 + pi * c ** 2 / e1 * log((1 + e1) / (1 - e1))
    return full_area / 2.
def get_valid_app_auth(self, app_uri):
    """Return auth credentials built from an enabled key of the application.

    Args:
        - app_uri: full identifier of the application

    Returns:
        A QiniuMacAuth built from the first key whose state is 'enabled',
        or None when the key list cannot be fetched or no key is enabled.
    """
    keys, _info = self.get_app_keys(app_uri)
    if keys is None:
        return None
    for key in keys:
        # Only keys explicitly marked enabled may be used.
        if key.get('state') == 'enabled':
            return QiniuMacAuth(key.get('ak'), key.get('sk'))
    return None
def check_mac(original_mac):
    '''Checks the format of a MAC address and returns it without double-colons
    and in capital letters, if it is correct. Otherwise it returns None.

    * it accepts the format of the double colons and a single hex string

    :param original_mac: candidate MAC, e.g. "aa:bb:cc:dd:ee:ff" or "AABBCCDDEEFF"
    :return: the 12-character uppercase hex string, or None when invalid
    '''
    mac = (original_mac.upper()).strip()
    parts = mac.split(':')
    if len(parts) == 6:
        # colon-separated form: each group must be exactly two hex digits
        for p in parts:
            if len(p) != 2:
                return None
        mac = ''.join(parts)
    elif len(parts) > 1:
        # some colons, but not exactly six groups
        return None
    # BUG FIX: previously a hex string of any length (e.g. "AB") was accepted;
    # a MAC address must be exactly 12 hex digits.
    if len(mac) != 12:
        return None
    for c in mac:
        if c not in '0123456789ABCDEF':
            return None
    return mac
def main():
    """Run the CLI."""
    parser = argparse.ArgumentParser(description='Search artists, lyrics, and songs!')
    parser.add_argument('artist', nargs='?', default='Taylor Swift',
                        help='Specify an artist name (Default: Taylor Swift)')
    parser.add_argument('-s', '--song', required=False,
                        help='Given artist name, specify a song name')
    parser.add_argument('-l', '--lyrics', required=False,
                        help='Search for song by lyrics')
    args = parser.parse_args()

    if args.lyrics:
        # A lyrics search takes precedence over artist/song lookup.
        song = Song.find_song(args.lyrics)
    elif args.song:
        song = Song(title=args.song, artist=args.artist)
    else:
        # No song given: pick a random one from the artist's catalogue.
        artist = Artist(args.artist)
        if not artist.songs:
            print('Couldn\'t find any songs by artist {}!'.format(args.artist))
            sys.exit(1)
        song = random.choice(artist.songs)
    print(song.format())
def find_any_field(browser, field_types, field_name):
    """Find a field of any of the specified types."""
    # Concatenate the per-type search results into a single collection.
    matches = (find_field(browser, field_type, field_name) for field_type in field_types)
    return reduce(operator.add, matches)
def is_state_machine_stopped_to_proceed(selected_sm_id=None, root_window=None):
    """Check if state machine is stopped and in case request user by dialog how to proceed

    The function checks if a specific state machine or by default all state machines have
    stopped or finished execution. If a state machine is still running the user is asked
    by dialog window whether it should be stopped or not.

    :param selected_sm_id: Specific state machine to check for
    :param root_window: Root window for dialog window
    :return: True if execution is (now) stopped/finished, False otherwise
    """
    # check if the/a state machine is still running
    if not state_machine_execution_engine.finished_or_stopped():
        if selected_sm_id is None or selected_sm_id == state_machine_manager.active_state_machine_id:
            # BUG FIX: the two message fragments were previously concatenated
            # without a separating space ("...refreshedwhen not longer running.").
            message_string = "A state machine is still running. This state machine can only be " \
                             "refreshed when no longer running."
            dialog = RAFCONButtonDialog(message_string,
                                        ["Stop execution and refresh", "Keep running and do not refresh"],
                                        message_type=Gtk.MessageType.QUESTION,
                                        parent=root_window)
            response_id = dialog.run()
            state_machine_stopped = False
            if response_id == 1:
                state_machine_execution_engine.stop()
                state_machine_stopped = True
            elif response_id == 2:
                logger.debug("State machine will stay running and no refresh will be performed!")
            dialog.destroy()
            return state_machine_stopped
    return True
def sink_get(self, project, sink_name):
    """API call: retrieve a sink resource.

    :type project: str
    :param project: ID of the project containing the sink.

    :type sink_name: str
    :param sink_name: the name of the sink

    :rtype: dict
    :returns: The sink object returned from the API (converted from a
        protobuf to a dictionary).
    """
    sink_path = "projects/%s/sinks/%s" % (project, sink_name)
    sink_pb = self._gapic_api.get_sink(sink_path)
    # NOTE: LogSink has no ``Any`` field, so ``MessageToDict`` is safe here.
    return MessageToDict(sink_pb)
def company_category(self, symbol=''):
    '''Query the catalogue of company information.

    :param symbol: stock code
    :return: pd.DataFrame or None
    '''
    # Derive the market code from the stock symbol.
    market_code = get_stock_market(symbol)
    with self.client.connect(*self.bestip):
        raw = self.client.get_company_info_category(int(market_code), symbol)
        return self.client.to_df(raw)
def parallel_iterate(source_iterators: Sequence[Iterator[Optional[Any]]],
                     target_iterator: Iterator[Optional[Any]],
                     skip_blanks: bool = True):
    """Yields parallel source(s), target sequences from iterables.

    Checks for token parallelism in source sequences, skips pairs where an
    element of any iterable is None (when ``skip_blanks``), and finally checks
    that all iterables held the same number of elements.

    :param source_iterators: A list of source iterators.
    :param target_iterator: A target iterator.
    :param skip_blanks: Whether to skip empty target lines.
    :return: Iterators over sources and target.
    """
    skipped = 0
    while True:
        try:
            # Advance all sources first, then the target.
            sources = [next(it) for it in source_iterators]
            target = next(target_iterator)
        except StopIteration:
            break
        if skip_blanks and (any(s is None for s in sources) or target is None):
            skipped += 1
            continue
        check_condition(are_none(sources) or are_token_parallel(sources),
                        "Source sequences are not token-parallel: %s" % (str(sources)))
        yield sources, target
    if skipped > 0:
        logger.warning("Parallel reading of sequences skipped %d elements", skipped)
    # All iterators must be exhausted simultaneously.
    check_condition(all(next(cast(Iterator, it), None) is None for it in source_iterators)
                    and next(cast(Iterator, target_iterator), None) is None,
                    "Different number of lines in source(s) and target iterables.")
def plot_cumulative_density(self, **kwargs):
    """Plots a pretty figure of the estimated cumulative density.

    Matplotlib plot arguments can be passed in via ``kwargs``, plus:

    show_censors: bool
        place markers at censorship events. Default: False
    censor_styles: bool
        if show_censors, this dictionary is passed into the plot call.
    ci_alpha: bool
        transparency level of the confidence interval. Default: 0.3
    ci_force_lines: bool
        force the confidence intervals to be line plots (versus default
        shaded areas). Default: False
    ci_show: bool
        show confidence intervals. Default: True
    ci_legend: bool
        if ci_force_lines is True, add the lines' labels to the legend.
        Default: False
    at_risk_counts: bool
        show group sizes at time points. See ``add_at_risk_counts`` for
        details. Default: False
    loc: slice
        time-based subsection of the curves to plot, e.g.
        ``model.plot(loc=slice(0., 10.))``
    iloc: slice
        location-based subsection of the curves to plot, e.g.
        ``model.plot(iloc=slice(0, 10))``
    invert_y_axis: bool
        invert the y-axis (deprecated; use ``plot_cumulative_density()``).

    Returns
    -------
    ax:
        a pyplot axis object
    """
    # Delegate to the shared estimator-plotting helper, selecting the
    # cumulative-density estimate and its confidence intervals.
    return _plot_estimate(self,
                          estimate=self.cumulative_density_,
                          confidence_intervals=self.confidence_interval_cumulative_density_,
                          **kwargs)
def create_object(self, data, view_kwargs):
    """Create an object through sqlalchemy

    :param dict data: the data validated by marshmallow
    :param dict view_kwargs: kwargs from the resource view
    :return DeclarativeMeta: an object from sqlalchemy
    """
    self.before_create_object(data, view_kwargs)
    relationship_fields = get_relationships(self.resource.schema, model_field=True)
    nested_fields = get_nested_fields(self.resource.schema, model_field=True)
    join_fields = relationship_fields + nested_fields
    # Plain columns go straight to the model constructor; relationship and
    # nested fields are applied separately afterwards.
    plain_columns = {key: value for key, value in data.items() if key not in join_fields}
    obj = self.model(**plain_columns)
    self.apply_relationships(data, obj)
    self.apply_nested_fields(data, obj)
    self.session.add(obj)
    try:
        self.session.commit()
    except JsonApiException as e:
        self.session.rollback()
        raise e
    except Exception as e:
        # Wrap unexpected DB errors in a JSON:API error pointing at /data.
        self.session.rollback()
        raise JsonApiException("Object creation error: " + str(e), source={'pointer': '/data'})
    self.after_create_object(obj, data, view_kwargs)
    return obj
def options(self, context, module_options):
    '''USER      Search for the specified username in available tokens (default: None)
    USERFILE  File containing usernames to search for in available tokens (default: None)
    '''
    self.user = None
    self.userfile = None
    if 'USER' in module_options and 'USERFILE' in module_options:
        # The two options cannot be combined.
        context.log.error('USER and USERFILE options are mutually exclusive!')
        sys.exit(1)
    if 'USER' in module_options:
        self.user = module_options['USER']
    elif 'USERFILE' in module_options:
        userfile_path = os.path.expanduser(module_options['USERFILE'])
        if not os.path.exists(userfile_path):
            context.log.error('Path to USERFILE invalid!')
            sys.exit(1)
        self.userfile = userfile_path
    self.ps_script = obfs_ps_script('powersploit/Exfiltration/Invoke-TokenManipulation.ps1')
def get_mesh_name(mesh_id, offline=False):
    """Get the MESH label for the given MESH ID.

    Uses the mappings table in `indra/resources`; if the MESH ID is not
    listed there, falls back on the NLM REST API.

    Parameters
    ----------
    mesh_id : str
        MESH Identifier, e.g. 'D003094'.
    offline : bool
        Whether to disallow queries to the NLM REST API when the given MESH
        ID is not contained in INDRA's internal MESH mappings file. Default
        is False (REST API queries allowed).

    Returns
    -------
    str
        Label for the MESH ID, or None if the query failed or no label was
        found.
    """
    local_name = mesh_id_to_name.get(mesh_id)
    if local_name is not None or offline:
        # Either we found it locally, or we are not allowed to go online.
        return local_name
    # Not in the local mapping: fall back to the NLM REST API.
    return get_mesh_name_from_web(mesh_id)
def SetActiveBreakpoints(self, breakpoints_data):
    """Adds new breakpoints and removes missing ones.

    Args:
        breakpoints_data: updated list of active breakpoints.
    """
    with self._lock:
        incoming_ids = set(entry['id'] for entry in breakpoints_data)
        # Clear breakpoints that no longer show up in the active list.
        for stale_id in six.viewkeys(self._active) - incoming_ids:
            self._active.pop(stale_id).Clear()
        # Create breakpoints that are new and not already completed.
        new_ids = incoming_ids - six.viewkeys(self._active) - self._completed
        self._active.update([
            (entry['id'],
             python_breakpoint.PythonBreakpoint(entry, self._hub_client, self,
                                                self.data_visibility_policy))
            for entry in breakpoints_data if entry['id'] in new_ids])
        # Entries absent from breakpoints_data are confirmed removed by the
        # hub and will never reappear: the backend never reuses breakpoint
        # IDs, so forget them.
        self._completed &= incoming_ids
        # Expiration deadline is unknown while any breakpoint is active.
        self._next_expiration = datetime.min if self._active else datetime.max
def sink_delete(self, project, sink_name):
    """API call: delete a sink resource.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete

    :type project: str
    :param project: ID of the project containing the sink.

    :type sink_name: str
    :param sink_name: the name of the sink
    """
    # Issue the DELETE against the fully-qualified sink path.
    sink_path = "/projects/%s/sinks/%s" % (project, sink_name)
    self.api_request(method="DELETE", path=sink_path)
def can_create_family_with_record_types(self, family_record_types):
    """Tests if this user can create a single ``Family`` using the desired record types.

    While ``RelationshipManager.getFamilyRecordTypes()`` can be used to
    examine which records are supported, this method tests which record(s)
    are required for creating a specific ``Family``. Providing an empty
    array tests if a ``Family`` can be created with no records.

    arg:    family_record_types (osid.type.Type[]): array of family record
            types
    return: (boolean) - ``true`` if ``Family`` creation using the specified
            record ``Types`` is supported, ``false`` otherwise
    raise:  NullArgument - ``family_record_types is null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinAdminSession.can_create_bin_with_record_types
    # NOTE: real authentication hints are expected to be handled in a
    # service adapter above the pay grade of this impl.
    if self._catalog_session is None:
        return True
    return self._catalog_session.can_create_catalog_with_record_types(
        catalog_record_types=family_record_types)
def parse(name, content, releases, get_head_fn):
    """Parses the given content for a valid changelog

    :param name: str, package name
    :param content: str, content
    :param releases: list, releases
    :param get_head_fn: function
    :return: dict, changelog
    """
    try:
        # Keep only entries that actually carry changelog text.
        return {entry["version"]: entry["changelog"]
                for entry in content["entries"] if entry["changelog"]}
    except KeyError:
        # Content does not follow the expected schema.
        return {}
def client(self):
    """Get an elasticsearch client"""
    # Lazily create and cache the connection on first access.
    try:
        return self._client
    except AttributeError:
        self._client = connections.get_connection("default")
        return self._client
def task_table(self, task_id=None):
    """Fetch and parse the task table information for one or more task IDs.

    Args:
        task_id: A hex string of the task ID to fetch information about. If
            this is None, then the task object table is fetched.

    Returns:
        Information from the task table.
    """
    self._check_connected()
    if task_id is not None:
        return self._task_table(ray.TaskID(hex_to_binary(task_id)))
    # No ID given: scan every task key and build a hex-keyed mapping.
    prefix = ray.gcs_utils.TablePrefix_RAYLET_TASK_string
    results = {}
    for key in self._keys(prefix + "*"):
        binary_id = key[len(prefix):]
        results[binary_to_hex(binary_id)] = self._task_table(ray.TaskID(binary_id))
    return results
def connect(self):
    '''Connect to the serial port'''
    if not self._port:
        # Sanity check: should never happen, because connect should never
        # be called without a port on Module.
        raise MissingDevicePortError(
            "MagDeck couldnt connect to port {}".format(self._port))
    self._driver = MagDeckDriver()
    self._driver.connect(self._port)
    self._device_info = self._driver.get_device_info()
def file_modify(filename, settings):
    """Modifies file access

    Args:
        filename (str): Filename.
        settings (dict): Can be "mode" (numeric mode passed to chmod) or
            "owners" (a ``(uid, gid)`` pair passed to chown).
    """
    for key, value in settings.items():
        if key == "mode":
            os.chmod(filename, value)
        if key == "owners":
            # BUG FIX: os.chown requires separate uid and gid arguments;
            # passing the pair as a single argument raised TypeError.
            uid, gid = value
            os.chown(filename, uid, gid)
def async_or_eager(self, **options):
    """Attempt to call self.apply_async, or if that fails because of a problem
    with the broker, run the task eagerly and return an EagerResult."""
    task_args = options.pop("args", None)
    task_kwargs = options.pop("kwargs", None)
    broker_errors = self._get_possible_broker_errors_tuple()
    try:
        return self.apply_async(task_args, task_kwargs, **options)
    except broker_errors:
        # Broker unreachable: degrade gracefully to eager execution.
        return self.apply(task_args, task_kwargs, **options)
def keyserverprefs(self):
    """A ``list`` of :py:obj:`~constants.KeyServerPreferences` in this signature, if any. Otherwise, an empty ``list``."""
    subpackets = self._signature.subpackets
    if 'KeyServerPreferences' not in subpackets:
        return []
    # NOTE(review): membership is tested on 'KeyServerPreferences' but the
    # value is read from 'h_KeyServerPreferences' — preserved as-is.
    return next(iter(subpackets['h_KeyServerPreferences'])).flags
def masked_array(data, data_units=None, **kwargs):
    """Create a :class:`numpy.ma.MaskedArray` with units attached.

    This is a thin wrapper around :func:`numpy.ma.masked_array` that ensures
    that units are properly attached to the result (otherwise units are
    silently lost). Units are taken from the ``data_units`` argument, or if
    this is ``None``, the units on ``data`` are used.

    Parameters
    ----------
    data : array_like
        The source data. If ``data_units`` is `None`, this should be a
        `pint.Quantity` with the desired units.
    data_units : str or `pint.Unit`
        The units for the resulting `pint.Quantity`
    **kwargs : Arbitrary keyword arguments passed to `numpy.ma.masked_array`

    Returns
    -------
    `pint.Quantity`
    """
    # Fall back to the units already attached to ``data``.
    resolved_units = data.units if data_units is None else data_units
    return units.Quantity(np.ma.masked_array(data, **kwargs), resolved_units)
def _init_metadata(self):
    """stub"""
    # Chain through each mixin's metadata initializer before our own setup.
    DecimalValuesFormRecord._init_metadata(self)
    IntegerValuesFormRecord._init_metadata(self)
    TextAnswerFormRecord._init_metadata(self)
    super(MultiLanguageCalculationInteractionFeedbackAndFilesAnswerFormRecord, self)._init_metadata()
    # Metadata describing the required, single-valued 'tolerance_mode' string
    # field (default language/script/format types, max length 1024).
    self._tolerance_mode_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'tolerance_mode'),
        'element_label': 'tolerance_mode',
        'instructions': 'enter the tolerance mode',
        'required': True,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_string_values': [{
            'text': '',
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }],
        'syntax': 'STRING',
        'minimum_string_length': 0,
        'maximum_string_length': 1024,
        'string_set': []
    }
def set_baseline(self, version):
    """Set the baseline into the creation information table

    version: str
        The version of the current database to set in the information
        table. The baseline must be in the format x.x.x where x are numbers.
    """
    # Validate the version string before it is interpolated into SQL.
    if not re.match(re.compile(r"^\d+\.\d+\.\d+$"), version):
        raise ValueError('Wrong version format')
    query = """
        INSERT INTO {} (
            version,
            description,
            type,
            script,
            checksum,
            installed_by,
            execution_time,
            success
        ) VALUES(
            '{}',
            '{}',
            {},
            '{}',
            '{}',
            '{}',
            1,
            TRUE
        ) """.format(self.upgrades_table, version, 'baseline', 0, '', '',
                     self.__get_dbuser())
    self.cursor.execute(query)
    self.connection.commit()
def translate(self, address):
    """Translates the given address to another address specific to network or service.

    :param address: (:class:`~hazelcast.core.Address`), private address to be translated
    :return: (:class:`~hazelcast.core.Address`), new address if given address is known, otherwise returns null
    """
    if address is None:
        return None
    # Fast path: the mapping is already known.
    translated = self._private_to_public.get(address)
    if translated:
        return translated
    # Unknown address: refresh the mapping once and retry the lookup.
    self.refresh()
    return self._private_to_public.get(address)
def init_ui(state):
    """Post initialization for UI application."""
    ui_app = state.app
    init_common(ui_app)
    # Expose the profile templates under the configured URL prefix.
    ui_app.register_blueprint(blueprint,
                              url_prefix=ui_app.config['USERPROFILES_PROFILE_URL'])
def parse_line(text):
    """Parse a single input line.

    :param text: the raw line
    :type text: str
    :return: a ``(indent, parsed)`` tuple
    """
    indent, stripped = calculate_indent(text)
    parsed = line_parser.parseString(stripped, parseAll=True).asList()
    return indent, parsed[0]
def getWidget(self, **kwargs):
    """Wrapper function that returns a new widget attached to this simulation.

    Widgets provide real-time 3D visualizations from within an Jupyter notebook.
    See the Widget class for more details on the possible arguments.

    Arguments
    ---------
    All arguments passed to this wrapper function will be passed to /Widget class.

    Returns
    -------
    A rebound.Widget object.

    Examples
    --------
    >>> sim = rebound.Simulation()
    >>> sim.add(m=1.)
    >>> sim.add(m=1.e-3, x=1., vy=1.)
    >>> sim.getWidget()
    """
    from .widget import Widget  # ondemand
    from ipywidgets import DOMWidget
    from IPython.display import display, HTML
    if not hasattr(self, '_widgets'):
        # First widget for this simulation: set up the heartbeat callback
        # that refreshes every attached widget on each simulation step.
        self._widgets = []
        def display_heartbeat(simp):
            for w in self._widgets:
                w.refresh(simp, isauto=1)
        self.visualization = VISUALIZATIONS["webgl"]
        clibrebound.reb_display_init_data(byref(self));
        # Keep a reference to the ctypes callback so it is not collected.
        self._dhbf = AFF(display_heartbeat)
        self._display_heartbeat = self._dhbf
        display(HTML(Widget.getClientCode()))  # HACK! Javascript should go into custom.js
    newWidget = Widget(self, **kwargs)
    self._widgets.append(newWidget)
    newWidget.refresh(isauto=0)
    return newWidget
def calc_pc_v1(self):
    """Apply the precipitation correction factors and adjust precipitation
    to the altitude of the individual zones.

    Required control parameters: |NmbZones|, |PCorr|, |PCAlt|, |ZoneZ|, |ZRelP|
    Required input sequence: |P|
    Required flux sequences: |RfC|, |SfC|
    Calculated flux sequences: |PC|

    Basic equation:
        :math:`PC = P \\cdot PCorr
        \\cdot (1 + PCAlt \\cdot (ZoneZ - ZRelP))
        \\cdot (RfC + SfC)`

    Zero or positive values are usual for |PCAlt|, but negative values are
    also allowed to reflect negative precipitation/altitude relationships;
    any negative altitude-adjusted precipitation is truncated to zero.
    """
    con = self.parameters.control.fastaccess
    inp = self.sequences.inputs.fastaccess
    flu = self.sequences.fluxes.fastaccess
    for zone in range(con.nmbzones):
        # Altitude adjustment relative to the gauge elevation.
        adjusted = inp.p * (1. + con.pcalt[zone] * (con.zonez[zone] - con.zrelp))
        if adjusted <= 0.:
            # Truncate negative precipitation caused by negative PCAlt.
            flu.pc[zone] = 0.
        else:
            # Apply the general and the rain/snow-related corrections.
            flu.pc[zone] = adjusted * con.pcorr[zone] * (flu.rfc[zone] + flu.sfc[zone])
def _delay ( self ) -> int :
"""Extract departure delay .""" | try :
return int ( self . journey . MainStop . BasicStop . Dep . Delay . text )
except AttributeError :
return 0 |
def sign_request(self, signature_method, consumer, token):
    """Set the signature parameter to the result of sign."""
    if not self.is_form_encoded:
        # Per the OAuth body-hash extension, section 4.1.1
        # (http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html),
        # "OAuth Consumers MUST NOT include an oauth_body_hash parameter on
        # requests with form-encoded request bodies."
        if not self.body:
            self.body = ''
        self['oauth_body_hash'] = base64.b64encode(sha1(to_utf8(self.body)).digest())
    # Fill in consumer/token identifiers only when not already present.
    if 'oauth_consumer_key' not in self:
        self['oauth_consumer_key'] = consumer.key
    if token and 'oauth_token' not in self:
        self['oauth_token'] = token.key
    self['oauth_signature_method'] = signature_method.name
    self['oauth_signature'] = signature_method.sign(self, consumer, token)
def selenium_retry(target=None, retry=True):
    """Decorator to turn on automatic retries of flaky selenium failures.

    Decorate a robotframework library class to turn on retries for all
    selenium calls from that library:

        @selenium_retry
        class MyLibrary(object):

            # Decorate a method to turn it back off for that method
            @selenium_retry(False)
            def some_keyword(self):
                self.selenium.click_button('foo')

    Or turn it off by default but turn it on for some methods
    (the class-level decorator is still required):

        @selenium_retry(False)
        class MyLibrary(object):

            @selenium_retry(True)
            def some_keyword(self):
                self.selenium.click_button('foo')
    """
    if isinstance(target, bool):
        # Decorator was called with a single boolean argument
        retry = target
        target = None

    def decorate(target):
        if isinstance(target, type):
            cls = target
            # Metaclass time.
            # We're going to generate a new subclass that:
            # a) mixes in RetryingSeleniumLibraryMixin
            # b) sets the initial value of `retry_selenium`
            return type(
                cls.__name__,
                (cls, RetryingSeleniumLibraryMixin),
                {"retry_selenium": retry, "__doc__": cls.__doc__},
            )

        func = target

        @functools.wraps(func)
        def run_with_retry(self, *args, **kwargs):
            # Set the retry setting and run the original function.
            old_retry = self.retry_selenium
            # BUG FIX: previously assigned to `self.retry`, so the wrapped
            # call never actually saw the overridden retry setting.
            self.retry_selenium = retry
            try:
                return func(self, *args, **kwargs)
            finally:
                # Restore the previous value
                self.retry_selenium = old_retry
        # (Removed leftover debugging call to set_pdb_trace().)

        run_with_retry.is_selenium_retry_decorator = True
        return run_with_retry

    if target is None:
        # Decorator is being used with arguments
        return decorate
    else:
        # Decorator was used without arguments
        return decorate(target)
def websocket_handler(request):
    """Handle a new socket connection."""
    logger.debug('New websocket connection.')
    websocket = web.WebSocketResponse()
    # Complete the HTTP upgrade handshake (old-style coroutine syntax).
    yield from websocket.prepare(request)
    # Register this client with the dispatcher under a fresh id.
    uuid = uuid4()
    request.app['dispatcher'].subscribe(uuid, websocket)
    while True:
        # Consume input buffer
        try:
            msg = yield from websocket.receive()
        except RuntimeError as e:
            logger.debug('Websocket exception: %s', str(e))
            break
        except CancelledError:
            logger.debug('Websocket closed')
            break
        if msg.type == aiohttp.WSMsgType.CLOSED:
            logger.debug('Websocket closed')
            break
        elif msg.type == aiohttp.WSMsgType.ERROR:
            logger.debug('Websocket exception: %s', websocket.exception())
            break
    # Drop the subscription once the connection is gone.
    request.app['dispatcher'].unsubscribe(uuid)
    return websocket
def get_data_source_bulk_request(self, rids, limit=5):
    """This grabs each datasource and its multiple datapoints for a particular device."""
    request_headers = {'User-Agent': self.user_agent(),
                       'Content-Type': self.content_type()}
    request_headers.update(self.headers())
    url = (self.portals_url() + '/data-sources/[' + ",".join(rids)
           + ']/data?limit=' + str(limit))
    r = requests.get(url, headers=request_headers, auth=self.auth())
    if HTTP_STATUS.OK == r.status_code:
        return r.json()
    # Non-OK responses are reported and mapped to an empty result.
    print("Something went wrong: <{0}>: {1}".format(r.status_code, r.reason))
    return {}
def tj_email(self):
    """Get (or guess) a user's TJ email.

    If a fcps.edu or tjhsst.edu email is specified in their email
    list, use that. Otherwise, append the user's username to the
    proper email suffix, depending on whether they are a student or
    teacher.
    """
    for known in self.emails.all():
        if known.address.endswith(("@fcps.edu", "@tjhsst.edu")):
            return known
    # No TJ address on file: guess from the username and role.
    domain = "fcps.edu" if self.is_teacher else "tjhsst.edu"
    return "{}@{}".format(self.username, domain)
def get_block(self, x, y, z):
    """Get a block from chunk-relative x, y, z coordinates.

    The world-space y is split into a 16-block-tall section index and
    an offset within that section.

    :return: the block, or None if the section does not exist.
    """
    section_y, block_y = divmod(y, 16)
    section = self.get_section(section_y)
    # Identity check instead of `== None`: avoids surprises from a
    # section type that overrides __eq__.
    if section is None:
        return None
    return section.get_block(x, block_y, z)
def xinfo_consumers(self, stream, group_name):
    """Retrieve consumers of a consumer group."""
    future = self.execute(b'XINFO', b'CONSUMERS', stream, group_name)
    return wait_convert(future, parse_lists_to_dicts)
def update_agent_requirements(req_file, check, newline):
    """Replace the requirements line for the given check.

    Rewrites ``req_file`` with ``newline`` substituted for the existing
    entry of the check's package, keeping the file sorted.
    """
    package_name = get_package_name(check)
    lines = read_file_lines(req_file)
    for index, line in enumerate(lines):
        # Entries are pinned as "<package>==<version>".
        if line.split('==')[0] == package_name:
            lines[index] = '{}\n'.format(newline)
            break
    write_file_lines(req_file, sorted(lines))
def copyto(self, other):
    """Copy the value of this array to another array or context.

    If ``other`` is an ``NDArray``, ``other.shape`` and ``self.shape``
    must match and the values of ``self`` are written into ``other``.
    If ``other`` is a ``Context``, a new ``NDArray`` is first allocated
    on that context and the value of ``self`` is copied into it.

    Parameters
    ----------
    other : NDArray or Context
        The destination array or context.

    Returns
    -------
    NDArray, CSRNDArray or RowSparseNDArray
        The copied array; when ``other`` is an ``NDArray`` the return
        value and ``other`` refer to the same array.  Returns ``False``
        (after a warning) when copying an array onto itself.
    """
    if isinstance(other, NDArray):
        if other.handle is self.handle:
            # Self-copy would be a redundant no-op; warn and bail out.
            warnings.warn('You are attempting to copy an array to itself',
                          RuntimeWarning)
            return False
        return _internal._copyto(self, out=other)
    if isinstance(other, Context):
        target = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype))
        return _internal._copyto(self, out=target)
    raise TypeError('copyto does not support type ' + str(type(other)))
def checksum_status(self, area_uuid, filename):
    """Retrieve checksum status and values for a file.

    :param str area_uuid: A RFC4122-compliant ID for the upload area
    :param str filename: The name of the file within the Upload Area
    :return: a dict with checksum information
    :rtype: dict
    :raises UploadApiException: if information could not be obtained
    """
    url_safe_filename = urlparse.quote(filename)
    # Bug fix: the path template was missing the {filename} placeholder,
    # so the quoted filename never made it into the request URL even
    # though it was passed to format().
    path = "/area/{uuid}/{filename}/checksum".format(uuid=area_uuid,
                                                     filename=url_safe_filename)
    response = self._make_request('get', path)
    return response.json()
def json_friendly(obj):
    """Convert an object into something that's more becoming of JSON.

    Returns a ``(converted_obj, was_converted)`` pair; ``was_converted``
    is False when the object was left untouched.
    """
    was_converted = True
    typename = get_full_typename(obj)
    if is_tf_tensor_typename(typename):
        # TensorFlow tensors must be evaluated to obtain concrete values.
        obj = obj.eval()
    elif is_pytorch_tensor_typename(typename):
        try:
            if obj.requires_grad:
                obj = obj.detach()
        except AttributeError:
            # before 0.4 requires_grad is only present on variables
            pass
        try:
            obj = obj.data
        except RuntimeError:
            # happens for Tensors before 0.4
            pass
        if obj.size():
            obj = obj.numpy()
        else:
            # zero-dim tensor: unwrap straight to a Python scalar
            return obj.item(), True
    if np and isinstance(obj, np.ndarray):
        if obj.size == 1:
            obj = obj.flatten()[0]
        elif obj.size <= 32:
            obj = obj.tolist()
    elif np and isinstance(obj, np.generic):
        obj = obj.item()
    elif isinstance(obj, bytes):
        obj = obj.decode('utf-8')
    elif isinstance(obj, (datetime, date)):
        obj = obj.isoformat()
    else:
        was_converted = False
    if getsizeof(obj) > VALUE_BYTES_LIMIT:
        logger.warning("Object %s is %i bytes", obj, getsizeof(obj))
    return obj, was_converted
def morphemes(args):
    """Segment words according to their morphemes."""
    model = load_morfessor_model(lang=args.lang)
    for line in args.input:
        words = line.strip().split()
        # Pair each word with its morphemes joined by underscores.
        segmented = [(word, u"_".join(model.viterbi_segment(word)[0]))
                     for word in words]
        annotations = [u"{:<16}{:<5}".format(word, seg) for word, seg in segmented]
        _print(u"\n".join(annotations))
        _print(u"")
def write(self, output, mode="w", keep_rc=False):
    """Execute the pipeline and write the results to ``output``.

    When ``output`` is a filename that did not exist beforehand, the
    file is removed again if an exception is raised while writing.

    Args:
        output (str or file like object): name of a file to create or
            overwrite, or an already-open file like object to use.
        mode (str): mode used when opening ``output`` if it is a
            string; ignored for file like objects.
        keep_rc (bool): if True, return the pipeline's exit status
            instead of raising on a nonzero code.

    Returns:
        The pipeline's return code when ``keep_rc`` is set, else None.

    Raises:
        CalledProcessError: if any return code in the pipeline is
            nonzero and ``keep_rc`` is False.
    """
    if isinstance(output, six.string_types):
        existed_before = os.path.exists(output)
        try:
            with open(output, mode) as handle:
                pipeline = self._build_pipes(handle)
                rc = pipeline.wait()
                if keep_rc:
                    return rc
                if rc:
                    raise CalledProcessError(rc, self.cmds[0], "")
        except BaseException as be:
            # Don't leave behind a partial file that we created ourselves.
            if not existed_before and os.path.exists(output):
                os.remove(output)
            six.reraise(be.__class__, be, sys.exc_info()[2])
    else:
        pipeline = self._build_pipes(output)
        rc = pipeline.wait()
        if keep_rc:
            return rc
        if rc:
            raise CalledProcessError(rc, self.cmds[0], "")
def split_top_down(lower, upper, __fval=None, **fval):
    """This call un-links an association that was made using bind_top_down.

    Have a look at help(bind_top_down).
    """
    if __fval is not None:
        fval.update(__fval)
    if lower not in upper._overload_fields:
        return
    registered = upper._overload_fields[lower]
    # Only unbind when every provided field/value pair matches the
    # currently registered overload values.
    if any(key not in registered or registered[key] != value
           for key, value in six.iteritems(fval)):  # noqa: E501
        return
    upper._overload_fields = upper._overload_fields.copy()
    del upper._overload_fields[lower]
def transform(self, sequences):
    """Apply the dimensionality reduction on X.

    Parameters
    ----------
    sequences : list of array-like, each of shape (n_samples_i, n_features)
        Training data, where n_samples_i is the number of samples in
        sequence i and n_features is the number of features.

    Returns
    -------
    sequence_new : list of array-like, each of shape (n_samples_i, n_components)
    """
    # we might be lazy-loading
    check_iter_of_sequences(sequences, max_iter=3)
    transformed = []
    for seq in sequences:
        seq = array2d(seq)
        if self.means_ is not None:
            seq = seq - self.means_
        projected = np.dot(seq, self.components_.T)
        if self.kinetic_mapping:
            projected *= self.eigenvalues_
        if self.commute_mapping:
            # thanks to @maxentile and @jchodera for providing/directing to a
            # reference implementation in pyemma (markovmodel/PyEMMA#963)
            # dampening smaller timescales based recommendtion of [7]
            # some timescales are NaNs and regularized timescales will
            # be negative when they are less than the lag time; all these
            # are set to zero using nan_to_num before returning
            regularized_timescales = 0.5 * self.timescales_ * np.tanh(
                np.pi * ((self.timescales_ - self.lag_time) / self.lag_time) + 1)
            projected *= np.sqrt(regularized_timescales / 2)
            projected = np.nan_to_num(projected)
        transformed.append(projected)
    return transformed
def get_dev_at_mountpoint(mntpoint):
    """Retrieve the device mounted at mntpoint.

    :raises MountError: if nothing is mounted there.
    """
    results = util.subp(['findmnt', '-o', 'SOURCE', mntpoint])
    if results.return_code != 0:
        raise MountError('No device mounted at %s' % mntpoint)
    stdout = results.stdout.decode(sys.getdefaultencoding())
    # Drop the SOURCE header line, then keep the last line (the device).
    return stdout.replace('SOURCE\n', '').strip().split('\n')[-1]
def add_force_flaky_options(add_option):
    """Add options to the test runner that force all tests to be flaky.

    :param add_option:
        A function that can add an option to the test runner.
        Its argspec should equal that of argparse.add_option.
    :type add_option:
        `callable`
    """
    option_specs = [
        (('--force-flaky',),
         dict(action="store_true", dest="force_flaky", default=False,
              help="If this option is specified, we will treat all tests as "
                   "flaky.")),
        (('--max-runs',),
         dict(action="store", dest="max_runs", type=int, default=2,
              help="If --force-flaky is specified, we will run each test at "
                   "most this many times (unless the test has its own flaky "
                   "decorator).")),
        (('--min-passes',),
         dict(action="store", dest="min_passes", type=int, default=1,
              help="If --force-flaky is specified, we will run each test at "
                   "least this many times (unless the test has its own flaky "
                   "decorator).")),
    ]
    for names, params in option_specs:
        add_option(*names, **params)
def clear_end_timestamp(self):
    """Reset the end timestamp to its metadata-defined default.

    :raises NoAccess: if the end timestamp is read-only or required.
    """
    # Fetch the metadata once instead of making three separate lookups.
    metadata = self.get_end_timestamp_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise NoAccess()
    self.my_osid_object_form._my_map['endTimestamp'] = \
        metadata.get_default_integer_values()
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
    """Returns a django.forms.Field instance for this database Field."""
    defaults = {
        'required': not self.blank,
        'label': capfirst(self.verbose_name),
        'help_text': self.help_text,
    }
    if self.has_default():
        if callable(self.default):
            defaults['initial'] = self.default
            defaults['show_hidden_initial'] = True
        else:
            defaults['initial'] = self.get_default()
    # Offer a blank choice unless a default/initial value makes it redundant.
    include_blank = self.blank or not (self.has_default() or 'initial' in kwargs)
    choices = [BLANK_CHOICE_DASH] if include_blank else []
    choices.extend(
        (constant.name,
         getattr(constant, 'verbose_name', constant.name) or constant.name,
         getattr(constant, 'help_text', None) or None)
        for constant in self.choices_class.constants()
    )
    defaults['choices'] = choices
    defaults['coerce'] = self.to_python
    if self.null:
        defaults['empty_value'] = None
    # Many of the subclass-specific formfield arguments (min_value,
    # max_value) don't apply for choice fields, so be sure to only pass
    # the values that TypedChoiceField will understand.
    allowed = ('coerce', 'empty_value', 'choices', 'required', 'widget',
               'label', 'initial', 'help_text', 'error_messages',
               'show_hidden_initial')
    for key in list(kwargs):
        if key not in allowed:
            del kwargs[key]
    defaults.update(kwargs)
    return (choices_form_class or ChoicesFormField)(**defaults)
def deploy_config_from_estimator(estimator, task_id, task_type, initial_instance_count,
                                 instance_type, model_name=None, endpoint_name=None,
                                 tags=None, **kwargs):
    """Export Airflow deploy config from a SageMaker estimator.

    Args:
        estimator (sagemaker.model.EstimatorBase): The SageMaker estimator to
            export Airflow config from. It has to be an estimator associated
            with a training job.
        task_id (str): The task id of any airflow.contrib.operators
            SageMakerTrainingOperator or SageMakerTuningOperator that
            generates training jobs in the DAG. The endpoint config is built
            based on the training job generated in this operator.
        task_type (str): Whether the task is from SageMakerTrainingOperator or
            SageMakerTuningOperator. Values can be 'training', 'tuning' or
            None (which means the training job is not from any task).
        initial_instance_count (int): Minimum number of EC2 instances to
            deploy to an endpoint for prediction.
        instance_type (str): Type of EC2 instance to deploy to an endpoint for
            prediction, for example, 'ml.c4.xlarge'.
        model_name (str): Name to use for creating an Amazon SageMaker model.
            If not specified, one will be generated.
        endpoint_name (str): Name to use for creating an Amazon SageMaker
            endpoint. If not specified, the name of the SageMaker model is used.
        tags (list[dict]): List of tags for labeling a training job. For more,
            see https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
        **kwargs: Passed to invocation of ``create_model()``. Implementations
            may customize ``create_model()`` to accept ``**kwargs`` to
            customize model creation during deploy. For more, see the
            implementation docs.

    Returns:
        dict: Deploy config that can be directly used by
        SageMakerEndpointOperator in Airflow.
    """
    update_estimator_from_task(estimator, task_id, task_type)
    model = estimator.create_model(**kwargs)
    model.name = model_name
    return deploy_config(model, initial_instance_count, instance_type,
                         endpoint_name, tags)
def link_blob_into_repository(self, session, digest, source_repo, target_repo):
    """Links ("mounts" in Docker Registry terminology) a blob from one
    repository in a registry into another repository in the same registry.
    """
    self.log.debug("%s: Linking blob %s from %s to %s",
                   session.registry, digest, source_repo, target_repo)
    # Check that it exists in the source repository
    head_result = session.head("/v2/{}/blobs/{}".format(source_repo, digest))
    if head_result.status_code == requests.codes.NOT_FOUND:
        self.log.debug("%s: blob %s, not present in %s, skipping",
                       session.registry, digest, source_repo)
        # Assume we don't need to copy it - maybe it's a foreign layer
        return
    head_result.raise_for_status()
    mount_url = "/v2/{}/blobs/uploads/?mount={}&from={}".format(
        target_repo, digest, source_repo)
    mount_result = session.post(mount_url, data='')
    mount_result.raise_for_status()
    if mount_result.status_code != requests.codes.CREATED:
        # A 202 - Accepted would mean that the source blob didn't exist and
        # we're starting an upload - but we've checked that above
        raise RuntimeError("Blob mount had unexpected status {}".format(
            mount_result.status_code))
def get_conf(self, test=False):
    """Send a HTTP request to the satellite (GET /managed_configurations)
    and update the cfg_managed attribute with the new information.

    Set to {} on failure.

    The managed configurations are a dictionary whose keys are the
    scheduler link instance ids and whose values are the push_flavor.

    If test is True, returns the unit test internally stored configuration.

    Returns False if a timeout is raised.

    :return: see @communicate, or the managed configuration
    """
    logger.debug("Get managed configuration for %s, %s %s",
                 self.name, self.alive, self.reachable)
    if test:
        self.cfg_managed = {}
        self.have_conf = True
        logger.debug("Get managed configuration test ...")
        pushed = getattr(self, 'unit_test_pushed_configuration', None)
        if pushed is not None:
            # Note this is a dict not a SatelliteLink object!
            for scheduler_link in pushed['schedulers'].values():
                self.cfg_managed[scheduler_link['instance_id']] = {
                    'hash': scheduler_link['hash'],
                    'push_flavor': scheduler_link['push_flavor'],
                    'managed_conf_id': scheduler_link['managed_conf_id'],
                }
        # print("*** unit tests - get managed configuration %s: %s"
        #       % (self.name, self.cfg_managed))
    else:
        self.cfg_managed = self.con.get('managed_configurations')
        logger.debug("My (%s) fresh managed configuration: %s",
                     self.name, self.cfg_managed)
        # NOTE(review): have_conf is computed here only in the non-test
        # branch; structure reconstructed from a flattened source — confirm
        # against upstream.
        self.have_conf = (self.cfg_managed != {})
    return self.cfg_managed
def memoizemethod(method):
    """Decorator to cache a method's results on ``self`` for each
    combination of inputs, returning the cached result on subsequent calls.

    Does not support named arguments or arg values that are not hashable.
    Raises TypeError when the method returns a generator or an unhashable
    object, since such results cannot be memoized safely.
    """
    @wraps(method)
    def _wrapper(self, *args, **kwargs):
        # NOTE: check __dict__ directly rather than hasattr: hasattr falls
        # back to the class, so a memoized classmethod invoked earlier would
        # make instances share the class-level cache, storing results
        # globally instead of per instance.
        if '_memoized_results' not in self.__dict__:
            self._memoized_results = {}
        cache = self._memoized_results
        key = (method.__name__, args, tuple(sorted(kwargs.items())))
        if key in cache:
            return cache[key]
        try:
            result = method(self, *args, **kwargs)
        except KeyError as e:
            if '__wrapped__' not in str(e):
                raise
            # is this the right thing to do? happened during py3 conversion
            result = None
        if isinstance(result, GeneratorType) or not isinstance(result, Hashable):
            raise TypeError("Can't memoize a generator or non-hashable object!")
        return cache.setdefault(key, result)
    return _wrapper
def clean(self):
    """Global cleanup."""
    super(LineFormSet, self).clean()
    if any(self.errors):
        # Already seen errors, let's skip.
        return
    self.clean_unique_fields()
def paginate_results(self, results, options):
    """Return a django.core.paginator.Page of results."""
    limit = options.get('limit', settings.SELECTABLE_MAX_LIMIT)
    paginator = Paginator(results, limit)
    requested_page = options.get('page', 1)
    try:
        page = paginator.page(requested_page)
    except (EmptyPage, InvalidPage):
        # Out-of-range or malformed page numbers fall back to the last page.
        page = paginator.page(paginator.num_pages)
    return page
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.