signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def emit(self, value):
    """Emit a value to the configured output writer.

    Args:
        value: a value of the type expected by the output writer.
    """
    writer = self._tstate.output_writer
    if not writer:
        # No writer configured: log and drop the value instead of raising.
        logging.error("emit is called, but no output writer is set.")
        return
    writer.write(value)
def get_prefixes(self, ns_uri):
    """Return a copy of the prefix set registered for *ns_uri*.

    The copy keeps callers from mutating the internal prefix set.
    """
    namespace_info = self.__lookup_uri(ns_uri)
    return namespace_info.prefixes.copy()
def get_user(self, user_id):
    """Details for a specific user.

    Will pull from cached users first, or get and add to cached users.

    :param user_id: the username or userId of the user
    :return: VoicebaseUser, or None when the user is not cached yet
    """
    cached = self._users.get(user_id)
    if cached is not None:
        return cached
    # Load user
    # Save user in cache
    return
def write(self, basename="/tmp/resynclist.xml"):
    """Write a single sitemap or sitemapindex XML document.

    Must be overridden to support multi-file lists.

    :param basename: path of the file to write the XML document to.
    """
    self.default_capability()
    # Context manager guarantees the file handle is closed even if
    # serialization raises (the original leaked the handle on error).
    with open(basename, 'w') as fh:
        sitemap = self.new_sitemap()
        sitemap.resources_as_xml(self, fh=fh, sitemapindex=self.sitemapindex)
def _create_p ( s , h ) :
"""Parabolic derivative""" | p = np . zeros_like ( s )
p [ 1 : ] = ( s [ : - 1 ] * h [ 1 : ] + s [ 1 : ] * h [ : - 1 ] ) / ( h [ 1 : ] + h [ : - 1 ] )
return p |
def list(self, sort=False):
    """Return the registered processors.

    :param sort: Result will be sorted if it's True
    :return: A list of :class:`Processor` or its children classes
    """
    processors = self._processors.values()
    if sort:
        # methodcaller sorts by the value returned from each processor's cid().
        return sorted(processors, key=operator.methodcaller("cid"))
    # Materialize the dict view so callers always get a real list,
    # as the docstring promises (previously a dict_values view escaped).
    return [p for p in processors]
def set_vertex_colors(self, colors, indexed=None):
    """Set the vertex color array.

    Parameters
    ----------
    colors : array
        Array of colors. Must have shape (Nv, 4) (indexing by vertex)
        or shape (Nf, 3, 4) (vertices indexed by face).
    indexed : str | None
        Should be 'faces' if colors are indexed by faces.
    """
    colors = _fix_colors(np.asarray(colors))
    if indexed is None:
        # One RGBA row per vertex.
        if colors.ndim != 2:
            raise ValueError('colors must be 2D if indexed is None')
        if colors.shape[0] != self.n_vertices:
            raise ValueError('incorrect number of colors %s, expected %s' % (colors.shape[0], self.n_vertices))
        self._vertex_colors = colors
        self._vertex_colors_indexed_by_faces = None
    elif indexed == 'faces':
        # One (3, 4) block of colors per face.
        if colors.ndim != 3:
            raise ValueError('colors must be 3D if indexed is "faces"')
        if colors.shape[0] != self.n_faces:
            raise ValueError('incorrect number of faces')
        self._vertex_colors = None
        self._vertex_colors_indexed_by_faces = colors
    else:
        raise ValueError('indexed must be None or "faces"')
def filter_counter(self, counter, min=2, max=100000000):
    """Filter the counted records.

    Keeps only records whose count lies in the half-open interval
    [min, max); also records totals in ``self.stat``.

    Returns: dict mapping record numbers to their counts.
    """
    kept = {}
    total_records = 0
    for record in counter:
        total_records += 1
        count = counter[record]
        if min <= count < max:
            kept[record] = count
    self.stat['user_record_events'] = total_records
    self.stat['records_filtered'] = len(kept)
    return kept
def _solve_scipy(self, intern_x0, tol=1e-8, method=None, **kwargs):
    """Uses ``scipy.optimize.root``.

    See: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html

    Parameters
    ----------
    intern_x0 : array_like
        initial guess
    tol : float
        Tolerance
    method : str
        What method to use. Defaults to ``'lm'`` if ``self.nf > self.nx``
        otherwise ``'hybr'``.
    """
    from scipy.optimize import root
    if method is None:
        # Over-determined systems (more residuals than unknowns) need a
        # least-squares-capable solver; square systems use 'hybr'.
        if self.nf > self.nx:
            method = 'lm'
        elif self.nf == self.nx:
            method = 'hybr'
        else:
            raise ValueError('Underdetermined problem')
    # 'band' and 'args' are owned by this wrapper; reject caller overrides.
    if 'band' in kwargs:
        raise ValueError("Set 'band' at initialization instead.")
    if 'args' in kwargs:
        raise ValueError("Set 'args' as params in initialization instead.")
    new_kwargs = kwargs.copy()
    if self.band is not None:
        # NOTE(review): the warning text says the band argument is ignored,
        # yet it is still forwarded to root() below -- confirm intent
        # against the SciPy docs for the chosen method.
        warnings.warn("Band argument ignored (see SciPy docs)")
        new_kwargs['band'] = self.band
    new_kwargs['args'] = self.internal_params
    return root(self.f_cb, intern_x0, jac=self.j_cb, method=method, tol=tol, **new_kwargs)
def describe_reserved_instances_offerings(DryRun=None, ReservedInstancesOfferingIds=None, InstanceType=None, AvailabilityZone=None, ProductDescription=None, Filters=None, InstanceTenancy=None, OfferingType=None, NextToken=None, MaxResults=None, IncludeMarketplace=None, MinDuration=None, MaxDuration=None, MaxInstanceCount=None, OfferingClass=None):
    """Describes Reserved Instance offerings that are available for purchase.

    With Reserved Instances, you purchase the right to launch instances for a
    period of time. During that time period, you do not receive insufficient
    capacity errors, and you pay a lower usage rate than the rate charged for
    On-Demand instances for the actual time used. Reserved Instances you have
    listed for sale in the Reserved Instance Marketplace are excluded from
    these results, to ensure that you do not purchase your own Reserved
    Instances.

    See also: AWS API Documentation.

    :type DryRun: boolean
    :param DryRun: Checks whether you have the required permissions for the
        action, without actually making the request, and provides an error
        response.
    :type ReservedInstancesOfferingIds: list
    :param ReservedInstancesOfferingIds: One or more Reserved Instances
        offering IDs (strings).
    :type InstanceType: string
    :param InstanceType: The instance type that the reservation will cover
        (for example, m1.small).
    :type AvailabilityZone: string
    :param AvailabilityZone: The Availability Zone in which the Reserved
        Instance can be used.
    :type ProductDescription: string
    :param ProductDescription: The Reserved Instance product platform
        description. Instances that include (Amazon VPC) in the description
        are for use with Amazon VPC.
    :type Filters: list
    :param Filters: One or more filters; each is a dict with 'Name' (string)
        and 'Values' (list of strings). Supported names include
        availability-zone, duration, fixed-price, instance-type, marketplace,
        product-description, reserved-instances-offering-id, scope and
        usage-price.
    :type InstanceTenancy: string
    :param InstanceTenancy: The tenancy of the instances covered by the
        reservation. Use 'default' or 'dedicated' only; 'host' cannot be
        used with this parameter. Default: 'default'.
    :type OfferingType: string
    :param OfferingType: The Reserved Instance offering type.
    :type NextToken: string
    :param NextToken: The token to retrieve the next page of results.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results per page. The maximum
        is 100. Default: 100.
    :type IncludeMarketplace: boolean
    :param IncludeMarketplace: Include Reserved Instance Marketplace
        offerings in the response.
    :type MinDuration: integer
    :param MinDuration: The minimum duration (in seconds) to filter when
        searching for offerings. Default: 2592000 (1 month).
    :type MaxDuration: integer
    :param MaxDuration: The maximum duration (in seconds) to filter when
        searching for offerings. Default: 94608000 (3 years).
    :type MaxInstanceCount: integer
    :param MaxInstanceCount: The maximum number of instances to filter when
        searching for offerings. Default: 20.
    :type OfferingClass: string
    :param OfferingClass: The offering class of the Reserved Instance. Can
        be 'standard' or 'convertible'.
    :rtype: dict
    :return: A dict with 'ReservedInstancesOfferings' (a list of offering
        dicts) and 'NextToken' (string).
    """
    # NOTE(review): auto-generated documentation stub -- the real
    # implementation is supplied by the botocore client at runtime.
    pass
def wv45(msg):
    """Wake vortex.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        int: Wake vortex level. 0=NIL, 1=Light, 2=Moderate, 3=Severe;
        None when the status bit marks the field as invalid.
    """
    bits = hex2bin(data(msg))
    # Status bit: '0' means the wake-vortex field is not populated.
    if bits[12] == '0':
        return None
    return bin2int(bits[13:15])
def _get_magnitude_term ( self , C , mag ) :
"""Returns the magnitude scaling term provided in Equation ( 5)""" | dmag = mag - 8.0
return C [ "c0" ] + C [ "c3" ] * dmag + C [ "c4" ] * ( dmag ** 2. ) |
def find_videos_by_playlist(self, playlist_id, page=1, count=20):
    """doc: http://open.youku.com/docs/doc?id=71"""
    params = {
        'client_id': self.client_id,
        'playlist_id': playlist_id,
        'page': page,
        'count': count,
    }
    response = requests.get('https://openapi.youku.com/v2/playlists/videos.json', params=params)
    # Raises on API-level errors before attempting to decode the body.
    check_error(response)
    return response.json()
def auto_migrate_storage_system(*, persistent_storage_system=None, new_persistent_storage_system=None, data_item_uuids=None, deletions: typing.List[uuid.UUID] = None, utilized_deletions: typing.Set[uuid.UUID] = None, ignore_older_files: bool = True):
    """Migrate items from the storage system to the object context.

    Files in data_item_uuids have already been loaded and are ignored (not migrated).

    Files in deletions have been deleted in object context and are ignored (not migrated) and then added
    to the utilized deletions list.

    Data items will have persistent_object_context set upon return, but caller will need to call finish_reading
    on each of the data items.

    Returns a tuple of (good_reader_info_list, library_updates).
    """
    storage_handlers = persistent_storage_system.find_data_items()
    ReaderInfo = collections.namedtuple("ReaderInfo", ["properties", "changed_ref", "large_format", "storage_handler", "identifier"])
    reader_info_list = list()
    for storage_handler in storage_handlers:
        try:
            # HDF5-backed items are flagged as "large format".
            large_format = isinstance(storage_handler, HDF5Handler.HDF5Handler)
            properties = Migration.transform_to_latest(storage_handler.read_properties())
            reader_info = ReaderInfo(properties, [False], large_format, storage_handler, storage_handler.reference)
            reader_info_list.append(reader_info)
        except Exception as e:
            # A single unreadable item should not abort the whole migration.
            logging.debug("Error reading %s", storage_handler.reference)
            import traceback
            traceback.print_exc()
            traceback.print_stack()
    library_storage_properties = persistent_storage_system.library_storage_properties
    # Merge this library's recorded deletions into the caller's list
    # (deep copy so the stored property list is not aliased).
    for deletion in copy.deepcopy(library_storage_properties.get("data_item_deletions", list())):
        if not deletion in deletions:
            deletions.append(deletion)
    preliminary_library_updates = dict()
    library_updates = dict()
    if not ignore_older_files:
        # Upgrade older-versioned items in place, collecting any library
        # updates they imply.
        Migration.migrate_to_latest(reader_info_list, preliminary_library_updates)
    good_reader_info_list = list()
    count = len(reader_info_list)
    for index, reader_info in enumerate(reader_info_list):
        storage_handler = reader_info.storage_handler
        properties = reader_info.properties
        try:
            version = properties.get("version", 0)
            # Only items at the current writer version are migrated.
            if version == DataItem.DataItem.writer_version:
                data_item_uuid = uuid.UUID(properties["uuid"])
                if not data_item_uuid in data_item_uuids:
                    if str(data_item_uuid) in deletions:
                        # Deleted in the object context: record as utilized, skip.
                        utilized_deletions.add(data_item_uuid)
                    else:
                        auto_migrate_data_item(reader_info, persistent_storage_system, new_persistent_storage_system, index, count)
                        good_reader_info_list.append(reader_info)
                        data_item_uuids.add(data_item_uuid)
                        library_update = preliminary_library_updates.get(data_item_uuid)
                        if library_update:
                            library_updates[data_item_uuid] = library_update
        except Exception as e:
            logging.debug("Error reading %s", storage_handler.reference)
            import traceback
            traceback.print_exc()
            traceback.print_stack()
    return good_reader_info_list, library_updates
def find_contiguous_packing_segments(polypeptide, residues, max_dist=10.0):
    """Assembly containing segments of polypeptide, divided according to separation of contiguous residues.

    Parameters
    ----------
    polypeptide : Polypeptide
    residues : iterable containing Residues
    max_dist : float
        Separation beyond which splitting of Polymer occurs.

    Returns
    -------
    segments : Assembly
        Each segment contains a subset of residues, each not separated by
        more than max_dist from the previous Residue.
    """
    segments = Assembly(assembly_id=polypeptide.ampal_parent.id)
    # Restrict to residues actually in this polypeptide, ordered by residue id.
    residues_in_polypeptide = list(sorted(residues.intersection(set(polypeptide.get_monomers())), key=lambda x: int(x.id)))
    if not residues_in_polypeptide:
        return segments
    # residue_pots contains separate pots of residues divided according to their separation distance.
    residue_pots = []
    pot = [residues_in_polypeptide[0]]
    for r1, r2 in zip(residues_in_polypeptide, residues_in_polypeptide[1:]):
        # CA-CA distance decides whether r2 continues the current segment.
        d = distance(r1['CA'], r2['CA'])
        if d <= max_dist:
            pot.append(r2)
            # When every residue is accounted for, close out the final pot
            # (there is no trailing iteration to flush it otherwise).
            if sum([len(x) for x in residue_pots] + [len(pot)]) == len(residues_in_polypeptide):
                residue_pots.append(pot)
        else:
            # Gap too large: finish the current pot and start a new one at r2.
            residue_pots.append(pot)
            pot = [r2]
    for pot in residue_pots:
        segment = polypeptide.get_slice_from_res_id(pot[0].id, pot[-1].id)
        segment.ampal_parent = polypeptide.ampal_parent
        segments.append(segment)
    return segments
def read_memory_block32(self, addr, size):
    """Read a block of aligned words in memory.

    Returns an array of word values.
    """
    raw_words = self.ap.read_memory_block32(addr, size)
    # Post-filter the raw words through the breakpoint manager before
    # returning them to the caller.
    return self.bp_manager.filter_memory_aligned_32(addr, size, raw_words)
def add_records(self, domain, records):
    """Adds the records to this domain. Each record should be a dict with the
    following keys:

    - type (required)
    - name (required)
    - data (required)
    - ttl (optional)
    - comment (optional)
    - priority (required for MX and SRV records; forbidden otherwise)
    """
    if isinstance(records, dict):
        # Single record passed; normalize to a list.
        records = [records]
    dom_id = utils.get_id(domain)
    uri = "/domains/%s/records" % dom_id
    resp, resp_body = self._async_call(uri, method="POST", body={"records": records}, error_class=exc.DomainRecordAdditionFailed, has_response=False)
    returned = resp_body.get("response", {}).get("records", [])
    results = []
    for rec in returned:
        # Tag each returned record with its owning domain before wrapping.
        rec["domain_id"] = dom_id
        if rec:
            results.append(CloudDNSRecord(self, rec, loaded=False))
    return results
def create_argparser(self):
    """Factory for arg parser. Can be overridden as long as it returns
    an ArgParser compatible instance.
    """
    # Combine title and description when both are present; otherwise use
    # whichever one is set (falling back to the title).
    if self.desc and self.title:
        fulldesc = '%s\n\n%s' % (self.title, self.desc)
    elif self.desc:
        fulldesc = self.desc
    else:
        fulldesc = self.title
    return self.ArgumentParser(command=self, prog=self.name, description=fulldesc)
def add_classification_events(obj, events, labels, signal_label=None, weights=None, test=False):
    """Add classification events to a TMVA::Factory or TMVA::DataLoader from NumPy arrays.

    Parameters
    ----------
    obj : TMVA::Factory or TMVA::DataLoader
        A TMVA::Factory or TMVA::DataLoader (TMVA's interface as of ROOT
        6.07/04) instance with variables already booked in exactly the same
        order as the columns in ``events``.
    events : numpy array of shape [n_events, n_variables]
        A two-dimensional NumPy array containing the rows of events and
        columns of variables. The order of the columns must match the order
        in which you called ``AddVariable()`` for each variable.
    labels : numpy array of shape [n_events]
        The class labels (signal or background) corresponding to each event
        in ``events``.
    signal_label : float or int, optional (default=None)
        The value in ``labels`` for signal events, if ``labels`` contains
        only two classes. If None, the highest value in ``labels`` is used.
    weights : numpy array of shape [n_events], optional
        Event weights.
    test : bool, optional (default=False)
        If True, then the events will be added as test events, otherwise
        they are added as training events by default.

    Notes
    -----
    * A TMVA::Factory or TMVA::DataLoader requires you to add both training
      and test events even if you don't intend to call ``TestAllMethods()``.
    * When using MethodCuts, the first event added must be a signal event,
      otherwise TMVA will fail with ``<FATAL> Interval: maximum lower than
      minimum``.
    """
    if NEW_TMVA_API:  # pragma: no cover
        if not isinstance(obj, TMVA.DataLoader):
            raise TypeError("obj must be a TMVA.DataLoader " "instance for ROOT >= 6.07/04")
    else:  # pragma: no cover
        if not isinstance(obj, TMVA.Factory):
            raise TypeError("obj must be a TMVA.Factory instance")
    # The C extension expects contiguous float64 data.
    events = np.ascontiguousarray(events, dtype=np.float64)
    if events.ndim == 1:
        # A single variable: convert to 2D with one column.
        events = events[:, np.newaxis]
    elif events.ndim != 2:
        raise ValueError("events must be a two-dimensional array " "with one event per row")
    # class_labels holds the sorted unique labels; class_idx maps each
    # event's label onto its index in class_labels.
    class_labels, class_idx = np.unique(labels, return_inverse=True)
    if class_idx.shape[0] != events.shape[0]:
        raise ValueError("numbers of events and labels do not match")
    if weights is not None:
        weights = np.asarray(weights, dtype=np.float64)
        if weights.shape[0] != events.shape[0]:
            raise ValueError("numbers of events and weights do not match")
        if weights.ndim != 1:
            raise ValueError("weights must be one-dimensional")
    n_classes = class_labels.shape[0]
    if n_classes > 2:
        # multiclass classification
        _libtmvanumpy.add_events_multiclass(ROOT.AsCObject(obj), events, class_idx, weights, test)
    elif n_classes == 2:
        # binary classification
        if signal_label is None:
            # Default: the highest label value marks signal.
            signal_label = class_labels[1]
        # Translate the user-facing label value into its class index.
        signal_label = np.where(class_labels == signal_label)[0][0]
        _libtmvanumpy.add_events_twoclass(ROOT.AsCObject(obj), events, class_idx, signal_label, weights, test)
    else:
        raise ValueError("labels must contain at least two classes")
def default_security_rule_get(name, security_group, resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Get details about a default security rule within a security group.

    :param name: The name of the security rule to query.

    :param security_group: The network security group containing the
        security rule.

    :param resource_group: The resource group name assigned to the
        network security group.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.default_security_rule_get DenyAllOutBound testnsg testgroup
    '''
    default_rules = default_security_rules_list(security_group=security_group, resource_group=resource_group, **kwargs)
    # Propagate a listing error straight back to the caller.
    if isinstance(default_rules, dict) and 'error' in default_rules:
        return default_rules
    result = {}
    try:
        for rule in default_rules:
            if rule['name'] == name:
                result = rule
        if not result:
            result = {'error': 'Unable to find {0} in {1}!'.format(name, security_group)}
    except KeyError as exc:
        log.error('Unable to find %s in %s!', name, security_group)
        result = {'error': str(exc)}
    return result
def parse_names_and_default(self):
    """parse for `parse_content`

    Builds, per section title, the option text split into
    ``{title: [('-a, --all=STH', 'default'), ...]}``.
    """
    result = {}
    for title, text in self.formal_content.items():
        if not text:
            # Empty section: no options to record.
            result[title] = []
            continue
        logger.debug('\n' + text)
        collect = []
        to_list = text.splitlines()
        # parse first line. Should NEVER failed.
        # this will ensure in `[default: xxx]`,
        # the `xxx` (e.g: `\t`, `,`) will not be changed by _format_line
        previous_line = to_list.pop(0)
        collect.append(self.parse_line_option_indent(previous_line))
        for line in to_list:
            indent_match = self.indent_re.match(line)
            this_indent = len(indent_match.groupdict()['indent'])
            if this_indent >= collect[-1]['indent']:
                # A multi line description; keep scanning forward.
                previous_line = line
                continue
            # new option line
            # deal the default for previous option
            collect[-1]['default'] = self.parse_default(previous_line)
            # deal this option
            collect.append(self.parse_line_option_indent(line))
            logger.debug(collect[-1])
            previous_line = line
        else:
            # Loop ran to completion (no break): close out the final
            # option's default from the last line seen.
            collect[-1]['default'] = self.parse_default(previous_line)
        result[title] = [(each['option'], each['default']) for each in collect]
    return result
def remove(key, val, delimiter=DEFAULT_TARGET_DELIM):
    '''
    .. versionadded:: 0.17.0

    Remove a value from a list in the grains config file

    key
        The grain key to remove.

    val
        The value to remove.

    delimiter
        The key can be a nested dict key. Use this parameter to
        specify the delimiter you use, instead of the default ``:``.
        You can now append values to a list in nested dictionary grains. If the
        list doesn't exist at this level, it will be created.

        .. versionadded:: 2015.8.2

    CLI Example:

    .. code-block:: bash

        salt '*' grains.remove key val
    '''
    grains = get(key, [], delimiter)
    if not isinstance(grains, list):
        return 'The key {0} is not a valid list'.format(key)
    if val not in grains:
        return 'The val {0} was not in the list {1}'.format(val, key)
    grains.remove(val)
    # Walk back up the nested-key path, merging the updated value into each
    # enclosing dict, until only the top-level key remains.
    while delimiter in key:
        key, leaf = key.rsplit(delimiter, 1)
        parent = get(key, None, delimiter)
        if isinstance(parent, dict):
            parent.update({leaf: grains})
            grains = parent
    return setval(key, grains)
def tags(self):
    """The image's tags."""
    repo_tags = self.attrs.get('RepoTags')
    if repo_tags is None:
        repo_tags = []
    # Drop the placeholder tag Docker uses for untagged images.
    return [t for t in repo_tags if t != '<none>:<none>']
def c_filler(self, frequency):
    '''Capacitance of an electrode covered in filler media (e.g., air or oil),
    normalized per unit area (i.e., units are F/mm^2).

    When ``self._c_filler`` is a lookup table with 'frequency' and
    'capacitance' entries, interpolate at *frequency*; otherwise return
    ``self._c_filler`` unchanged (presumably a constant capacitance --
    TODO confirm against callers).
    '''
    try:
        return np.interp(frequency, self._c_filler['frequency'], self._c_filler['capacitance'])
    except (TypeError, KeyError, IndexError, ValueError):
        # _c_filler is not an interpolation table; fall through and return
        # it as-is. (Previously a bare ``except`` that also swallowed
        # SystemExit and KeyboardInterrupt.)
        pass
    return self._c_filler
def period(self, value: float):
    """Set the period.

    Args:
        value (float): seconds

    Raises:
        ValueError: if *value* is negative.
    """
    if value < 0:
        raise ValueError("Period must be greater or equal than zero.")
    # Store as a timedelta rather than a raw float.
    self._period = timedelta(seconds=value)
def hasDefault(self, param):
    """Checks whether a param has a default value."""
    resolved = self._resolveParam(param)
    return resolved in self._defaultParamMap
def _parse_json ( self , response , exactly_one = True ) :
"""Parse responses as JSON objects .""" | if not len ( response ) :
return None
if exactly_one :
return self . _format_structured_address ( response [ 0 ] )
else :
return [ self . _format_structured_address ( c ) for c in response ] |
def _handle_load_unknown ( self , data , original ) :
"""Preserve unknown keys during deserialization .""" | for key , val in original . items ( ) :
if key not in self . fields :
data [ key ] = val
return data |
def transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog():
    """Set of hyperparameters.

    Starts from the vq1_16_nb1_packed_nda_b01_scales base and overrides
    batch size, maximum length and filter size for the dialog setting.
    """
    hparams = transformer_base_vq1_16_nb1_packed_nda_b01_scales()
    hparams.max_length = 1024
    hparams.batch_size = 2048
    hparams.filter_size = 3072
    return hparams
def find_first_available_template(self, template_name_list):
    """Given a list of template names, find the first one that actually exists
    and is available.
    """
    if isinstance(template_name_list, six.string_types):
        # Already a single template name; nothing to choose between.
        return template_name_list
    # Take advantage of fluent_pages' internal implementation.
    return _select_template_name(template_name_list)
def epsilon_crit(self):
    """returns the critical projected mass density in units of M_sun/Mpc^2 (physical units)"""
    # c^2 / (4 pi G) in SI units [kg/m].
    const_SI = const.c ** 2 / (4 * np.pi * const.G)
    # Convert [kg/m] to [M_sun/Mpc].
    pre_const = const_SI * (const.Mpc / const.M_sun)
    # Distance ratio D_s / (D_d * D_ds) yields [M_sun/Mpc^2].
    return self.D_s / (self.D_d * self.D_ds) * pre_const
def new_from_url(cls, url, verify=True):
    """Constructs a new WebPage object for the URL,
    using the `requests` module to fetch the HTML.

    Parameters
    ----------
    url : str
    verify : bool
    """
    # 2.5 s timeout keeps an unresponsive host from hanging the constructor.
    response = requests.get(url, verify=verify, timeout=2.5)
    return cls.new_from_response(response)
def add_default_options ( self , optprs ) :
"""Adds the default reference viewer startup options to an
OptionParser instance ` optprs ` .""" | optprs . add_option ( "--bufsize" , dest = "bufsize" , metavar = "NUM" , type = "int" , default = 10 , help = "Buffer length to NUM" )
# Channel selection and debugging helpers.
optprs . add_option ( '-c' , "--channels" , dest = "channels" , help = "Specify list of channels to create" )
optprs . add_option ( "--debug" , dest = "debug" , default = False , action = "store_true" , help = "Enter the pdb debugger on main()" )
optprs . add_option ( "--disable-plugins" , dest = "disable_plugins" , metavar = "NAMES" , help = "Specify plugins that should be disabled" )
optprs . add_option ( "--display" , dest = "display" , metavar = "HOST:N" , help = "Use X display on HOST:N" )
# I/O and GUI backend preferences.
optprs . add_option ( "--fitspkg" , dest = "fitspkg" , metavar = "NAME" , default = None , help = "Prefer FITS I/O module NAME" )
optprs . add_option ( "-g" , "--geometry" , dest = "geometry" , default = None , metavar = "GEOM" , help = "X geometry for initial size and placement" )
optprs . add_option ( "--modules" , dest = "modules" , metavar = "NAMES" , help = "Specify additional modules to load" )
optprs . add_option ( "--norestore" , dest = "norestore" , default = False , action = "store_true" , help = "Don't restore the GUI from a saved layout" )
optprs . add_option ( "--nosplash" , dest = "nosplash" , default = False , action = "store_true" , help = "Don't display the splash screen" )
optprs . add_option ( "--numthreads" , dest = "numthreads" , type = "int" , default = 30 , metavar = "NUM" , help = "Start NUM threads in thread pool" )
# Optional acceleration libraries.
optprs . add_option ( "--opencv" , dest = "opencv" , default = False , action = "store_true" , help = "Use OpenCv acceleration" )
optprs . add_option ( "--opencl" , dest = "opencl" , default = False , action = "store_true" , help = "Use OpenCL acceleration" )
optprs . add_option ( "--plugins" , dest = "plugins" , metavar = "NAMES" , help = "Specify additional plugins to load" )
optprs . add_option ( "--profile" , dest = "profile" , action = "store_true" , default = False , help = "Run the profiler on main()" )
optprs . add_option ( "--sep" , dest = "separate_channels" , default = False , action = "store_true" , help = "Load files in separate channels" )
optprs . add_option ( "-t" , "--toolkit" , dest = "toolkit" , metavar = "NAME" , default = None , help = "Prefer GUI toolkit (gtk|qt)" )
optprs . add_option ( "--wcspkg" , dest = "wcspkg" , metavar = "NAME" , default = None , help = "Prefer WCS module NAME" )
# Let the logging module register its own options on the same parser.
log . addlogopts ( optprs ) |
def getchar(echo=False):
    """Fetch a single character from the terminal and return it.

    Always returns a unicode character; in rare circumstances more than one
    character may be returned (multiple characters buffered in the terminal,
    or standard input is not actually a terminal). Always reads from the
    terminal, even if something is piped into standard input.

    Note for Windows: when typing non-ASCII characters this may wait for a
    second character and return both at once, because certain Unicode
    characters look like special-key markers.

    .. versionadded:: 2.0

    :param echo: if set to `True`, the character read will also show up on
                 the terminal. The default is to not show it.
    """
    reader = _getchar
    if reader is None:
        # Lazily load the platform-specific implementation.
        from ._termui_impl import getchar as reader
    return reader(echo)
def create_header_from_parent(self, parent_header: BlockHeader, **header_params: HeaderParams) -> BlockHeader:
    """Passthrough helper to the VM class of the block descending from the
    given header."""
    child_vm_class = self.get_vm_class_for_block_number(
        block_number=parent_header.block_number + 1,
    )
    return child_vm_class.create_header_from_parent(parent_header, **header_params)
def set_debug_mode(debug=True):
    """Set the global debug mode.

    Toggling debug mode also toggles the two debug-only settings that keep
    wirevector call stacks and produce more descriptive temporary names.
    """
    global debug_mode
    global _setting_keep_wirevector_call_stack
    global _setting_slower_but_more_descriptive_tmps
    _setting_slower_but_more_descriptive_tmps = debug
    _setting_keep_wirevector_call_stack = debug
    debug_mode = debug
def get_plc_datetime(self):
    """Get date and time from the PLC.

    :return: date and time as datetime
    """
    # The snap7 call fills a 9-slot array of C int32 (tm-style fields).
    buffer = (c_int32 * 9)()
    result = self.library.Cli_GetPlcDateTime(self.pointer, byref(buffer))
    check_error(result, context="client")
    # tm convention: year counts from 1900 and month is zero-based.
    return datetime(
        year=buffer[5] + 1900,
        month=buffer[4] + 1,
        day=buffer[3],
        hour=buffer[2],
        minute=buffer[1],
        second=buffer[0],
    )
def get_storage_conn(storage_account=None, storage_key=None, conn_kwargs=None):
    '''
    .. versionadded:: 2015.8.0

    Return a storage_conn object for the storage account
    '''
    if conn_kwargs is None:
        conn_kwargs = {}
    # Fall back to cloud-config values (then conn_kwargs) for anything
    # the caller did not supply explicitly.
    if not storage_account:
        storage_account = config.get_cloud_config_value(
            'storage_account', get_configured_provider(), __opts__,
            search_global=False,
            default=conn_kwargs.get('storage_account', None))
    if not storage_key:
        storage_key = config.get_cloud_config_value(
            'storage_key', get_configured_provider(), __opts__,
            search_global=False,
            default=conn_kwargs.get('storage_key', None))
    return azure.storage.BlobService(storage_account, storage_key)
def feed(self, weights, data):
    """Evaluate the network with alternative weights on the input data and
    return the output activation."""
    input_layer = self.layers[0]
    assert len(data) == input_layer.size
    input_layer.apply(data)
    # Propagate through each consecutive pair of layers using the
    # corresponding weight matrix.
    for source, weight, target in zip(self.layers[:-1], weights, self.layers[1:]):
        target.apply(self.forward(weight, source.outgoing))
    # The output layer's activation is the network's result.
    return self.layers[-1].outgoing
def batch(self, reqs):
    """Send a batch request using JSON-RPC 2.0.

    Each entry of ``reqs`` is a (method, params) pair; the request id is
    the entry's position in the list.
    """
    payload = [
        {"method": req[0], "params": req[1], "jsonrpc": "2.0", "id": req_id}
        for req_id, req in enumerate(reqs)
    ]
    return self.session.post(self.url, data=json.dumps(payload)).json()
def getGolangPackages(self):
    """Get a list of all golang packages for all available branches.

    Pages through the package index for names matching ``golang-*``, then
    fetches branch information in batches to keep request sizes bounded.

    :return: dict mapping package name to its processed data (with a
        "branches" key added); empty dict if the first page request fails.
    """
    packages = {}
    # get all packages (first page)
    url = "%s/packages" % self.base_url
    params = {"pattern": "golang-*", "limit": 200}
    response = requests.get(url, params=params)
    if response.status_code != requests.codes.ok:
        return {}
    data = response.json()
    for package in data["packages"]:
        packages[package["name"]] = self._processPackageData(package)
    # accumulate packages from all remaining pages; skip failed pages
    for page in range(2, data["page_total"] + 1):
        params = {"pattern": "golang-*", "limit": 200, "page": page}
        response = requests.get(url, params=params)
        if response.status_code != requests.codes.ok:
            continue
        data = response.json()
        for package in data["packages"]:
            packages[package["name"]] = self._processPackageData(package)
    # get branches of all packages, at most MAX_LEN per request
    MAX_LEN = 30
    # BUG FIX: in Python 3 dict.keys() returns a view, which cannot be
    # sliced; materialize it as a list before chunking.
    package_names = list(packages.keys())
    packages_total = len(package_names)
    packages_counter = 0
    logger.info("%s packages to process", packages_total)
    for i in range(0, packages_total, MAX_LEN):
        sublist = package_names[i:i + MAX_LEN]
        branches = self._getPackageBranches(sublist)
        for package in sublist:
            packages[package]["branches"] = branches[package]
        packages_counter = packages_counter + len(branches)
        logger.info("%s/%s packages processed", packages_counter, packages_total)
    return packages
def ready ( self ) :
"""Called once per Django process instance .
If the filesystem setup fails or if an error is found in settings . py ,
django . core . exceptions . ImproperlyConfigured is raised , causing Django not to
launch the main GMN app .""" | # Stop the startup code from running automatically from pytest unit tests .
# When running tests in parallel with xdist , an instance of GMN is launched
# before thread specific settings have been applied .
# if hasattr ( sys , ' _ launched _ by _ pytest ' ) :
# return
# Validate optional client-certificate paths and required cache dirs.
self . _assert_readable_file_if_set ( 'CLIENT_CERT_PATH' )
self . _assert_readable_file_if_set ( 'CLIENT_CERT_PRIVATE_KEY_PATH' )
self . _assert_dirs_exist ( 'OBJECT_FORMAT_CACHE_PATH' )
# Type / value checks on the science-metadata validation settings.
self . _assert_is_type ( 'SCIMETA_VALIDATION_ENABLED' , bool )
self . _assert_is_type ( 'SCIMETA_VALIDATION_MAX_SIZE' , int )
self . _assert_is_in ( 'SCIMETA_VALIDATION_OVER_SIZE_ACTION' , ( 'reject' , 'accept' ) )
self . _warn_unsafe_for_prod ( )
self . _check_resource_map_create ( )
# Create the science-object store root on first launch.
if not d1_gmn . app . sciobj_store . is_existing_store ( ) :
self . _create_sciobj_store_root ( )
self . _add_xslt_mimetype ( ) |
def _get_library_metadata ( self , date_range ) :
"""Retrieve the libraries for the given date range , the assumption is that the date ranges do not overlap and
they are CLOSED _ CLOSED .
At the moment the date range is mandatory""" | if date_range is None :
raise Exception ( "A date range must be provided" )
if not ( date_range . start and date_range . end ) :
raise Exception ( "The date range {0} must contain a start and end date" . format ( date_range ) )
# Normalise naive query datetimes to the local timezone before comparing.
start = date_range . start if date_range . start . tzinfo is not None else date_range . start . replace ( tzinfo = mktz ( ) )
end = date_range . end if date_range . end . tzinfo is not None else date_range . end . replace ( tzinfo = mktz ( ) )
# Match any stored [start, end] interval that overlaps the query range:
# it covers the range start, lies entirely inside, or covers the range end.
query = { '$or' : [ { 'start' : { '$lte' : start } , 'end' : { '$gte' : start } } , { 'start' : { '$gte' : start } , 'end' : { '$lte' : end } } , { 'start' : { '$lte' : end } , 'end' : { '$gte' : end } } ] }
cursor = self . _collection . find ( query , projection = { 'library_name' : 1 , 'start' : 1 , 'end' : 1 } , sort = [ ( 'start' , pymongo . ASCENDING ) ] )
results = [ ]
for res in cursor :
start = res [ 'start' ]
# Naive datetimes from Mongo are treated as UTC and converted to the
# query's timezone (presumably stored in UTC - TODO confirm).
if date_range . start . tzinfo is not None and start . tzinfo is None :
start = start . replace ( tzinfo = mktz ( "UTC" ) ) . astimezone ( tz = date_range . start . tzinfo )
end = res [ 'end' ]
if date_range . end . tzinfo is not None and end . tzinfo is None :
end = end . replace ( tzinfo = mktz ( "UTC" ) ) . astimezone ( tz = date_range . end . tzinfo )
results . append ( TickStoreLibrary ( res [ 'library_name' ] , DateRange ( start , end , CLOSED_CLOSED ) ) )
return results |
def interface_direct_csvpath(csvpath):
    """Help to direct to the correct interface interacting with DB by csvfile path."""
    with open(csvpath) as csvfile:
        # Only the first data row is needed: the 'amaasclass' column
        # determines the interface for the whole file.
        for record in csv.DictReader(csvfile):
            return interface_direct_class(record.pop('amaasclass', ''))
def __on_presence ( self , data ) :
"""Handles a MUC presence stanza : resolves pending room creations ,
configures rooms we turn out to own , and notifies the room creator
through the registered callback / errback .""" | room_jid = data [ 'from' ] . bare
muc_presence = data [ 'muc' ]
room = muc_presence [ 'room' ]
nick = muc_presence [ 'nick' ]
with self . __lock :
try : # Get room state machine
room_data = self . __rooms [ room ]
if room_data . nick != nick : # Not about the room creator
return
except KeyError : # Unknown room ( or not a room )
return
else : # Clean up , as we got what we wanted
del self . __rooms [ room ]
if not self . __rooms : # No more rooms : no need to listen to presence anymore
self . __xmpp . del_event_handler ( "presence" , self . __on_presence )
if data [ 'type' ] == 'error' : # Got an error : update the state machine and clean up
self . __safe_errback ( room_data , data [ 'error' ] [ 'condition' ] , data [ 'error' ] [ 'text' ] )
elif muc_presence [ 'affiliation' ] != 'owner' : # We are not the owner the room : consider it an error
self . __safe_errback ( room_data , 'not-owner' , 'We are not the owner of the room' )
else : # Success : we own the room
# Setup room configuration
try :
config = self . __muc . getRoomConfig ( room_jid )
except ValueError : # Can ' t differentiate IQ errors from a " no configuration "
# result : consider it OK
self . __logger . warning ( "Can't get the configuration form for " "XMPP room %s" , room_jid )
self . __safe_callback ( room_data )
else : # Prepare our configuration
custom_values = room_data . configuration or { }
# Filter options that are not known from the server
known_fields = config [ 'fields' ]
to_remove = [ key for key in custom_values if key not in known_fields ]
for key in to_remove :
del custom_values [ key ]
# Send configuration ( use a new form to avoid OpenFire to have
# an internal error )
form = self . __xmpp [ 'xep_0004' ] . make_form ( "submit" )
form [ 'values' ] = custom_values
self . __muc . setRoomConfig ( room_jid , form )
# Call back the creator
self . __safe_callback ( room_data ) |
def set ( self , value ) :
"""Sets the value of the object
: param value :
A set of unicode strings ( requires a _ map from bit index to name )
or a tuple of integers 0 and 1
: raises :
ValueError - when an invalid value is passed""" | if isinstance ( value , set ) :
if self . _map is None :
raise ValueError ( unwrap ( '''
%s._map has not been defined
''' , type_name ( self ) ) )
# Translate the set of names into a 0/1 bit list via the index map.
bits = [ 0 ] * self . _size
self . _native = value
for index in range ( 0 , self . _size ) :
key = self . _map . get ( index )
if key is None :
continue
if key in value :
bits [ index ] = 1
value = '' . join ( map ( str_cls , bits ) )
elif value . __class__ == tuple :
if self . _map is None :
self . _native = value
else :
# With a map, the native form is the set of names of the set bits.
self . _native = set ( )
for index , bit in enumerate ( value ) :
if bit :
name = self . _map . get ( index , index )
self . _native . add ( name )
value = '' . join ( map ( str_cls , value ) )
else :
raise TypeError ( unwrap ( '''
%s value must be a tuple of ones and zeros or a set of unicode
strings, not %s
''' , type_name ( self ) , type_name ( value ) ) )
self . _chunk = None
if self . _map is not None :
if len ( value ) > self . _size :
raise ValueError ( unwrap ( '''
%s value must be at most %s bits long, specified was %s long
''' , type_name ( self ) , self . _size , len ( value ) ) )
# A NamedBitList must have trailing zero bit truncated . See
# https : / / www . itu . int / ITU - T / studygroups / com17 / languages / X . 690-0207 . pdf
# section 11.2,
# https : / / tools . ietf . org / html / rfc5280 # page - 134 and
# https : / / www . ietf . org / mail - archive / web / pkix / current / msg10443 . html
value = value . rstrip ( '0' )
size = len ( value )
# BIT STRING contents: one leading byte giving the number of unused
# ( padding ) bits in the last byte, followed by the packed bits.
size_mod = size % 8
extra_bits = 0
if size_mod != 0 :
extra_bits = 8 - size_mod
value += '0' * extra_bits
size_in_bytes = int ( math . ceil ( size / 8 ) )
if extra_bits :
extra_bits_byte = int_to_bytes ( extra_bits )
else :
extra_bits_byte = b'\x00'
if value == '' :
value_bytes = b''
else :
value_bytes = int_to_bytes ( int ( value , 2 ) )
if len ( value_bytes ) != size_in_bytes :
value_bytes = ( b'\x00' * ( size_in_bytes - len ( value_bytes ) ) ) + value_bytes
self . contents = extra_bits_byte + value_bytes
self . _header = None
# After mutation, force definite-length primitive encoding.
if self . _indefinite :
self . _indefinite = False
self . method = 0
if self . _trailer != b'' :
self . _trailer = b'' |
def check_dependee_order(depender, dependee, dependee_id):
    """Checks whether run orders are in the appropriate order.

    Returns an error message when the dependee is configured to run after
    the depender, and the empty string when the ordering is fine.
    """
    # A module must run after everything it depends on, i.e. the dependee's
    # run order must be at or before the depender's.
    shutit_global.shutit_global_object.yield_to_draw()
    if dependee.run_order <= depender.run_order:
        return ''
    return ('depender module id:\n\n' + depender.module_id +
            '\n\n(run order: ' + str(depender.run_order) + ') ' +
            'depends on dependee module_id:\n\n' + dependee_id +
            '\n\n(run order: ' + str(dependee.run_order) + ') ' +
            'but the latter is configured to run after the former')
def grants(self):
    """Retrieves the grants for this user. If the user is unrestricted, this
    will result in an ApiError. This is smart, and will only fetch from the
    api once unless the object is invalidated.

    :returns: The grants for this user.
    :rtype: linode.objects.account.UserGrants
    """
    from linode_api4.objects.account import UserGrants
    if not hasattr(self, '_grants'):
        # First access: fetch from the API and cache via _set.
        endpoint = UserGrants.api_endpoint.format(username=self.username)
        payload = self._client.get(endpoint)
        self._set('_grants', UserGrants(self._client, self.username, payload))
    return self._grants
def _compare(self, other, method):
    """see https://regebro.wordpress.com/2010/12/13/python-implementing-rich-comparison-the-correct-way/"""
    # This needs to be updated to take uncertainty into account:
    if not isinstance(other, abc_mapping_primitives.Coordinate):
        return NotImplemented
    if self.get_dimensions() != other.get_dimensions():
        return False
    theirs = other.get_values()
    # Every component must satisfy the comparison pairwise.
    return all(method(self._values[i], theirs[i])
               for i in range(self._dimensions))
def parse_params(self, nb_candidate=10, overshoot=0.02, max_iter=50,
                 clip_min=0., clip_max=1., **kwargs):
    """Store the DeepFool attack parameters on the instance.

    :param nb_candidate: The number of classes to test against, i.e.,
                         deepfool only considers nb_candidate classes when
                         attacking (thus accelerating speed). The classes
                         are chosen by prediction confidence.
    :param overshoot: A termination criterion to prevent vanishing updates
    :param max_iter: Maximum number of iterations for deepfool
    :param clip_min: Minimum component value for clipping
    :param clip_max: Maximum component value for clipping
    :return: True (parameters accepted)
    """
    self.nb_candidate = nb_candidate
    self.overshoot = overshoot
    self.max_iter = max_iter
    self.clip_min = clip_min
    self.clip_max = clip_max
    if kwargs:
        warnings.warn("kwargs is unused and will be removed on or after "
                      "2019-04-26.")
    return True
def do_add_signature(input_file, output_file, signature_file):
    """Add a signature to the MAR file.

    The hash algorithm is inferred from the signature length:
    256 bytes -> sha1, 512 bytes -> sha384.

    :param input_file: path of the MAR file to sign
    :param output_file: path where the signed MAR is written
    :param signature_file: path of the raw signature bytes
    :raises ValueError: if the signature has an unexpected length
    """
    # Fix: close the signature file deterministically instead of leaking
    # the handle from a bare open().read().
    with open(signature_file, 'rb') as sig_file:
        signature = sig_file.read()
    if len(signature) == 256:
        hash_algo = 'sha1'
    elif len(signature) == 512:
        hash_algo = 'sha384'
    else:
        # Fix: give the error a diagnostic message instead of a bare raise.
        raise ValueError('Unsupported signature length: %d' % len(signature))
    with open(output_file, 'w+b') as dst:
        with open(input_file, 'rb') as src:
            add_signature_block(src, dst, hash_algo, signature)
def read(self, uri):
    """Method takes uri and creates a RDF graph from Fedora Repository

    Args:
        uri(str): URI of Fedora URI

    Returns:
        rdflib.Graph
    """
    response = self.connect(uri)
    graph = rdflib.Graph()
    # Fedora serves resource metadata as Turtle.
    graph.parse(data=response.read(), format='turtle')
    return graph
def add_query(self, sql, auto_begin=True, bindings=None, abridge_sql_log=False):
    """Add a query to the current transaction. A thin wrapper around
    ConnectionManager.add_query.

    :param str sql: The SQL query to add
    :param bool auto_begin: If set and there is no transaction in progress,
        begin a new one.
    :param Optional[List[object]] bindings: An optional list of bindings for
        the query.
    :param bool abridge_sql_log: If set, limit the raw sql logged to 512
        characters
    """
    manager = self.connections
    return manager.add_query(sql, auto_begin, bindings, abridge_sql_log)
def load ( self ) :
"""Load the definition of the rule , searching in the specified rule dirs first , then in the built - in definitions .
On success sets ` ` self . file _ path ` ` and ` ` self . string _ definition ` ` and
parses the definition ; failures are reported via printError / printException .
: return : None""" | file_name_valid = False
rule_type_valid = False
# Look for a locally - defined rule
for rule_dir in self . rule_dirs :
file_path = os . path . join ( rule_dir , self . file_name ) if rule_dir else self . file_name
if os . path . isfile ( file_path ) :
self . file_path = file_path
file_name_valid = True
break
# Look for a built - in rule
if not file_name_valid :
for rule_type in self . rule_types :
if self . file_name . startswith ( rule_type ) :
self . file_path = os . path . join ( self . rules_data_path , self . file_name )
rule_type_valid = True
file_name_valid = True
break
if not rule_type_valid :
# File name does not embed a rule type : try each type subdirectory .
for rule_type in self . rule_types :
self . file_path = os . path . join ( self . rules_data_path , rule_type , self . file_name )
if os . path . isfile ( self . file_path ) :
file_name_valid = True
break
else :
if os . path . isfile ( self . file_path ) :
file_name_valid = True
if not file_name_valid :
printError ( 'Error: could not find %s' % self . file_name )
else :
try :
with open ( self . file_path , 'rt' ) as f :
self . string_definition = f . read ( )
self . load_from_string_definition ( )
except Exception as e :
printException ( e )
# NOTE(review): 'file_path' is a local that is only bound when a rule_dir
# matched above ; on the built - in lookup path this line can raise
# NameError . It most likely should reference self . file_path - TODO confirm .
printError ( 'Failed to load rule defined in %s' % file_path ) |
def write_index(fn, index):
    """Writes the index to file.

    Args:
        fn (str): the name of the file that will contain the index.
        index (pandas.DataFrame): the index.
    """
    csv_payload = index.to_csv(None, index=False, encoding="utf-8")
    with open(fn, "wb") as o_file:
        # Magic marker first, then the zlib-compressed UTF-8 CSV payload.
        o_file.write(_CHECK_STRING)
        o_file.write(zlib.compress(bytes(csv_payload, encoding="utf-8")))
def set_mode(filename, flags):
    """Set mode flags for given filename if not already set."""
    try:
        current = os.lstat(filename).st_mode
    except OSError:
        # Missing or unreadable file: nothing to do.
        return
    if current & flags:
        # Requested flags already present.
        return
    try:
        os.chmod(filename, flags | current)
    except OSError as msg:
        log_error("could not set mode flags for `%s': %s" % (filename, msg))
def _buildFromPerPartition(self, item, partition):
    """Fill the container and preservation columns of ``item`` from the
    given partition's data.

    :param item: a dict which contains the AR template data columns
    :param partition: a dict with some partition info
    :returns: the item dict with the partition's data
    """
    uid_catalog = getToolByName(self, 'uid_catalog')
    container_brains = uid_catalog(UID=partition.get('container_uid', ''))
    preservation_brains = uid_catalog(UID=partition.get('preservation_uid', ''))
    if container_brains:
        container = container_brains[0].getObject()
        item['ContainerTitle'] = container.title
        # Render the title as a link to the container object.
        item['replace']['ContainerTitle'] = "<a href='%s'>%s</a>" % (
            container.absolute_url(), item['ContainerTitle'])
        item['ContainerVolume'] = container.getCapacity()
    else:
        item['ContainerTitle'] = ''
        item['ContainerVolume'] = ''
    if preservation_brains:
        preservation = preservation_brains[0].getObject()
        item['Preservation'] = preservation.title
        item['replace']['Preservation'] = "<a href='%s'>%s</a>" % (
            preservation.absolute_url(), item['Preservation'])
    else:
        item['Preservation'] = ''
    item['PreparationMethod'] = ''
    return item
def _get_file_size(self):
    """Fetches file size by reading the Content-Length header for the
    resource.

    :return: File size as an int. A zero-length resource also causes the
        local target file to be created (touched).
    """
    raw_size = retry(self._retry_count)(_get_content_length)(
        self._session, self.url, self._timeout)
    size = int(raw_size)
    if size == 0:
        # Touch the destination so a zero-byte download still yields a file.
        with io.open(self._file_path, 'a', encoding='utf-8'):
            pass
    return size
def _PrintExtractionStatusUpdateWindow ( self , processing_status ) :
"""Prints an extraction status update in window mode .
Args :
processing _ status ( ProcessingStatus ) : processing status .""" | if self . _stdout_output_writer :
self . _ClearScreen ( )
output_text = 'plaso - {0:s} version {1:s}\n\n' . format ( self . _tool_name , plaso . __version__ )
self . _output_writer . Write ( output_text )
self . PrintExtractionStatusHeader ( processing_status )
# One table row for the foreman process followed by one per worker .
table_view = views . CLITabularTableView ( column_names = [ 'Identifier' , 'PID' , 'Status' , 'Memory' , 'Sources' , 'Events' , 'File' ] , column_sizes = [ 15 , 7 , 15 , 15 , 15 , 15 , 0 ] )
self . _AddExtractionProcessStatusTableRow ( processing_status . foreman_status , table_view )
for worker_status in processing_status . workers_status :
self . _AddExtractionProcessStatusTableRow ( worker_status , table_view )
table_view . Write ( self . _output_writer )
self . _output_writer . Write ( '\n' )
if processing_status . aborted :
self . _output_writer . Write ( 'Processing aborted - waiting for clean up.\n\n' )
# TODO : remove update flicker . For win32console we could set the cursor
# top left , write the table , clean the remainder of the screen buffer
# and set the cursor at the end of the table .
if self . _stdout_output_writer : # We need to explicitly flush stdout to prevent partial status updates .
sys . stdout . flush ( ) |
def openOrder ( self , orderId , contract , order , orderState ) :
"""This wrapper is called to :
* feed in open orders at startup ;
* feed in open orders or order updates from other clients and TWS
if clientId = master id ;
* feed in manual orders and order updates from TWS if clientId = 0;
* handle openOrders and allOpenOrders responses .""" | if order . whatIf : # response to whatIfOrder
self . _endReq ( order . orderId , orderState )
else :
key = self . orderKey ( order . clientId , order . orderId , order . permId )
trade = self . trades . get ( key )
# ignore ' ? ' values in the order
d = { k : v for k , v in order . dict ( ) . items ( ) if v != '?' }
if trade :
trade . order . update ( ** d )
else :
# First sighting of this order : build fresh Contract / Order / Trade
# objects and register the trade under its key .
contract = Contract . create ( ** contract . dict ( ) )
order = Order ( ** d )
orderStatus = OrderStatus ( status = orderState . status )
trade = Trade ( contract , order , orderStatus , [ ] , [ ] )
self . trades [ key ] = trade
self . _logger . info ( f'openOrder: {trade}' )
results = self . _results . get ( 'openOrders' )
if results is None :
# Unsolicited update : notify subscribers through the event .
self . ib . openOrderEvent . emit ( trade )
else : # response to reqOpenOrders or reqAllOpenOrders
results . append ( order ) |
def calculate_subscription_lifecycle(subscription_id):
    """Calculates the expected lifecycle position of the given subscription
    and creates a BehindSubscription entry for it when it lags.

    Args:
        subscription_id (str): ID of subscription to calculate lifecycle for
    """
    subscription = (
        Subscription.objects
        .select_related("messageset", "schedule")
        .get(id=subscription_id))
    behind = subscription.messages_behind()
    if behind == 0:
        return
    # Fast-forward a copy of the lifecycle (without saving) to find where
    # the subscription should currently be.
    expected = Subscription.fast_forward_lifecycle(subscription, save=False)[-1]
    BehindSubscription.objects.create(
        subscription=subscription,
        messages_behind=behind,
        current_messageset=subscription.messageset,
        current_sequence_number=subscription.next_sequence_number,
        expected_messageset=expected.messageset,
        expected_sequence_number=expected.next_sequence_number,
    )
def put_observation_field_values(observation_id: int, observation_field_id: int,
                                 value: Any, access_token: str) -> Dict[str, Any]:
    # TODO: Also implement a put_or_update_observation_field_values() that
    #       deletes then recreates the field_value?
    # TODO: Write example use in docstring.
    # TODO: Return some meaningful exception if it fails because the field is already set.
    # TODO: Also show in example how to obtain the observation_field_id?
    # TODO: What happens when parameters are invalid
    # TODO: It appears pushing the same value/pair twice in a row (but deleting
    #       it meanwhile via the UI) triggers an error 404 the second time
    #       (report to iNaturalist?)
    """Sets an observation field (value) on an observation.

    :param observation_id:
    :param observation_field_id:
    :param value:
    :param access_token: the access token, as returned by :func:`get_access_token()`
    :returns: iNaturalist's response as a dict, for example:
        {'id': 31,
         'observation_id': 18166477,
         'observation_field_id': 31,
         'value': 'fouraging',
         ...}

    Will fail if this observation_field is already set for this observation.
    """
    body = {
        'observation_field_value': {
            'observation_id': observation_id,
            'observation_field_id': observation_field_id,
            'value': value,
        }
    }
    # NOTE(review): the URL interpolates observation_field_id into {id};
    # confirm the endpoint expects the field id rather than the id of the
    # field value being updated.
    url = "{base_url}/observation_field_values/{id}".format(
        base_url=INAT_BASE_URL, id=observation_field_id)
    response = requests.put(url, headers=_build_auth_header(access_token), json=body)
    response.raise_for_status()
    return response.json()
def cleanup_subprocesses():
    """On python exit: find possibly running subprocesses and kill them."""
    # pylint: disable=redefined-outer-name,reimported
    # atexit handlers can run after module globals have been torn down,
    # so re-import everything that is needed locally.
    import os
    import errno
    from mirakuru.base_env import processes_with_env
    from mirakuru.compat import SIGKILL
    for leaked_pid in processes_with_env(ENV_UUID, str(os.getpid())):
        try:
            os.kill(leaked_pid, SIGKILL)
        except OSError as err:
            # ESRCH means the process already exited; anything else is
            # reported but does not stop the sweep.
            if err.errno != errno.ESRCH:
                print("Can not kill the", leaked_pid, "leaked process", err)
def fit(self, P):
    """Fit the diagonal matrices in Sinkhorn-Knopp's algorithm.

    Parameters
    ----------
    P : 2d array-like
        Must be a square non-negative 2d array-like object, convertible to
        a numpy array. The matrix must not be equal to 0 and must have
        total support for the algorithm to converge.

    Returns
    -------
    np.ndarray
        The balanced (approximately doubly stochastic) matrix D1 @ P @ D2.
    """
    P = np.asarray(P)
    assert np.all(P >= 0)
    assert P.ndim == 2
    assert P.shape[0] == P.shape[1]
    n = P.shape[0]
    upper = 1 + self._epsilon
    lower = 1 - self._epsilon

    support_warning = ("Matrix P must have total support. "
                       "See documentation")
    # Initialize r and c, the diagonals of D1 and D2, warning when the
    # matrix lacks support (a zero column or row sum shows up).
    r = np.ones((n, 1))
    col_sums = P.T.dot(r)
    if not np.all(col_sums != 0):
        warnings.warn(support_warning, UserWarning)
    c = 1 / col_sums
    row_sums = P.dot(c)
    if not np.all(row_sums != 0):
        warnings.warn(support_warning, UserWarning)
    r = 1 / row_sums
    del col_sums, row_sums

    def _unbalanced(M):
        # True while any row or column sum falls outside [lower, upper].
        return (np.any(np.sum(M, axis=1) < lower)
                or np.any(np.sum(M, axis=1) > upper)
                or np.any(np.sum(M, axis=0) < lower)
                or np.any(np.sum(M, axis=0) > upper))

    P_eps = np.copy(P)
    while _unbalanced(P_eps):
        # Alternate row/column normalizations until balanced.
        c = 1 / P.T.dot(r)
        r = 1 / P.dot(c)
        self._D1 = np.diag(np.squeeze(r))
        self._D2 = np.diag(np.squeeze(c))
        P_eps = self._D1.dot(P).dot(self._D2)
        self._iterations += 1
        if self._iterations >= self._max_iter:
            self._stopping_condition = "max_iter"
            break
    if not self._stopping_condition:
        self._stopping_condition = "epsilon"
        self._D1 = np.diag(np.squeeze(r))
        self._D2 = np.diag(np.squeeze(c))
        P_eps = self._D1.dot(P).dot(self._D2)
    return P_eps
def _to_dict(self):
    """Return a json dictionary representing this model."""
    result = {}
    field_name = getattr(self, 'field_name', None)
    if field_name is not None:
        result['field'] = field_name
    field_type = getattr(self, 'field_type', None)
    if field_type is not None:
        result['type'] = field_type
    return result
def create_event_object(self, event_type, code, value, timeval=None):
    """Create an evdev-style packed event structure."""
    if not timeval:
        timeval = self.__get_timeval()
    try:
        type_code = self.manager.codes['type_codes'][event_type]
    except KeyError:
        raise UnknownEventType(
            "We don't know what kind of event a %s is." % event_type)
    # Pack (sec, usec, type, code, value) into the evdev wire format.
    return struct.pack(EVENT_FORMAT, timeval[0], timeval[1],
                       type_code, code, value)
def export(self):
    """Returns a dictionary with all song information.

    Use the :meth:`from_export` method to recreate the :class:`Song` object.
    """
    keys = ('artists', 'radio', 'recent_artists', 'songs_already_seen')
    # Each exported key maps to the matching underscore-prefixed attribute.
    return {key: getattr(self, '_' + key) for key in keys}
def is_group(value):
    """Check whether the groupname or gid given as *value* exists.

    A ``str`` is resolved to its gid via :func:`grp.getgrnam`; an ``int``
    is validated via :func:`grp.getgrgid`.

    :param value: group name (str) or gid (int)
    :returns: the gid of the group
    :raises validate.VdtValueError: if no such group exists
    :raises validate.VdtTypeError: if *value* is neither str nor int
    """
    # FIX: use isinstance() instead of the non-idiomatic type(x) == T checks.
    if isinstance(value, str):
        try:
            value = grp.getgrnam(value).gr_gid
        except KeyError:
            err_message = ('{0}: No such group.'.format(value))
            raise validate.VdtValueError(err_message)
        return value
    elif isinstance(value, int):
        try:
            grp.getgrgid(value)
        except KeyError:
            err_message = ('{0}: No such group.'.format(value))
            raise validate.VdtValueError(err_message)
        return value
    else:
        # FIX: the original message said "user"; this validator checks groups.
        err_message = ('Please, use str or int to "group" parameter.')
        raise validate.VdtTypeError(err_message)
def mbar_W_nk(u_kn, N_k, f_k):
    """Calculate the MBAR weight matrix.

    Parameters
    ----------
    u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
        The reduced potential energies, i.e. -log unnormalized probabilities
    N_k : np.ndarray, shape=(n_states), dtype='int'
        The number of samples in each state
    f_k : np.ndarray, shape=(n_states), dtype='float'
        The reduced free energies of each state

    Returns
    -------
    W_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
        The normalized weights.

    Notes
    -----
    Equation (9) in JCP MBAR paper.
    """
    # Exponentiate the log-weights computed by the numerically stable helper.
    log_weights = mbar_log_W_nk(u_kn, N_k, f_k)
    return np.exp(log_weights)
def Append(self, **kw):
    """Append values to existing construction variables in an Environment.

    Each keyword argument names a construction variable; its value is
    appended to the variable's current value using type-appropriate
    fallbacks: dict update, ``+`` concatenation, list append, or plain
    assignment when the variable does not exist yet.  CPPDEFINES gets
    special handling because strings and dicts must be normalized to the
    list-of-tuples form.
    """
    # Drop reserved keywords (e.g. TARGETS/SOURCES) before processing.
    kw = copy_non_reserved_keywords(kw)
    for key, val in kw.items():
        # It would be easier on the eyes to write this using
        # "continue" statements whenever we finish processing an item,
        # but Python 1.5.2 apparently doesn't let you use "continue"
        # within try:-except: blocks, so we have to nest our code.
        try:
            # CPPDEFINES stored as a bare string is normalized to a
            # one-element list so appending works uniformly.
            if key == 'CPPDEFINES' and SCons.Util.is_String(self._dict[key]):
                self._dict[key] = [self._dict[key]]
            orig = self._dict[key]
        except KeyError:
            # No existing variable in the environment, so just set
            # it to the new value.
            if key == 'CPPDEFINES' and SCons.Util.is_String(val):
                self._dict[key] = [val]
            else:
                self._dict[key] = val
        else:
            try:
                # Check if the original looks like a dictionary.
                # If it is, we can't just try adding the value because
                # dictionaries don't have __add__() methods, and
                # things like UserList will incorrectly coerce the
                # original dict to a list (which we don't want).
                update_dict = orig.update
            except AttributeError:
                try:
                    # Most straightforward: just try to add them
                    # together.  This will work in most cases, when the
                    # original and new values are of compatible types.
                    self._dict[key] = orig + val
                except (KeyError, TypeError):
                    try:
                        # Check if the original is a list.
                        add_to_orig = orig.append
                    except AttributeError:
                        # The original isn't a list, but the new
                        # value is (by process of elimination),
                        # so insert the original in the new value
                        # (if there's one to insert) and replace
                        # the variable with it.
                        if orig:
                            val.insert(0, orig)
                        self._dict[key] = val
                    else:
                        # The original is a list, so append the new
                        # value to it (if there's a value to append).
                        if val:
                            add_to_orig(val)
            else:
                # The original looks like a dictionary, so update it
                # based on what we think the value looks like.
                if SCons.Util.is_List(val):
                    if key == 'CPPDEFINES':
                        # Convert the dict form to a list of tuples
                        # before concatenating the new list onto it.
                        tmp = []
                        for (k, v) in orig.items():
                            if v is not None:
                                tmp.append((k, v))
                            else:
                                tmp.append((k,))
                        orig = tmp
                        orig += val
                        self._dict[key] = orig
                    else:
                        # Non-CPPDEFINES dict + list: treat list items
                        # as keys with no value.
                        for v in val:
                            orig[v] = None
                else:
                    try:
                        update_dict(val)
                    except (AttributeError, TypeError, ValueError):
                        if SCons.Util.is_Dict(val):
                            for k, v in val.items():
                                orig[k] = v
                        else:
                            orig[val] = None
    # Changed variables may invalidate cached scanner lookups.
    self.scanner_map_delete(kw)
def ignore_reports(self):
    """Ignore future reports on this object.

    This prevents future reports from causing notifications or appearing
    in the various moderation listings.  The report count will still
    increment.
    """
    endpoint = self.reddit_session.config['ignore_reports']
    return self.reddit_session.request_json(endpoint,
                                            data={'id': self.fullname})
def associate_dhcp_options_to_vpc(dhcp_options_id, vpc_id=None, vpc_name=None,
                                  region=None, key=None, keyid=None,
                                  profile=None):
    '''
    Given valid DHCP options id and a valid VPC id, associate the DHCP
    options record with the VPC.

    Returns True if the DHCP options record were associated and returns
    False if the DHCP options record was not associated.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_vpc.associate_dhcp_options_to_vpc 'dhcp-a0bl34pp' 'vpc-6b1fe402'
    '''
    try:
        # Resolve the VPC by id or name; bail out early when it is unknown.
        vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
        if not vpc_id:
            return {'associated': False,
                    'error': {'message': 'VPC {0} does not exist.'.format(vpc_name or vpc_id)}}
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if not conn.associate_dhcp_options(dhcp_options_id, vpc_id):
            log.warning('DHCP options with id %s were not associated with VPC %s',
                        dhcp_options_id, vpc_id)
            return {'associated': False,
                    'error': {'message': 'DHCP options could not be associated.'}}
        log.info('DHCP options with id %s were associated with VPC %s',
                 dhcp_options_id, vpc_id)
        return {'associated': True}
    except BotoServerError as e:
        return {'associated': False, 'error': __utils__['boto.get_error'](e)}
def organize_objects(self):
    """Organize objects and namespaces.

    Resolves child-name strings into object references, attaches every
    object to its top-level namespace (creating placeholder namespace
    objects on demand), then prunes namespaces left with no children.
    """

    def _render_children(obj):
        # Resolve child name strings to actual objects and bucket them
        # by their plural category for rendering.
        for child in obj.children_strings:
            child_object = self.objects.get(child)
            if child_object:
                obj.item_map[child_object.plural].append(child_object)
                obj.children.append(child_object)
        for key in obj.item_map:
            obj.item_map[key].sort()

    def _recurse_ns(obj):
        if not obj:
            return
        namespace = obj.top_namespace
        if namespace is not None:
            ns_obj = self.top_namespaces.get(namespace)
            if ns_obj is None or not isinstance(ns_obj, DotNetNamespace):
                # Create a placeholder namespace object on demand.
                for ns_obj in self.create_class({"uid": namespace,
                                                 "type": "namespace"}):
                    self.top_namespaces[ns_obj.id] = ns_obj
            if obj not in ns_obj.children and namespace != obj.id:
                ns_obj.children.append(obj)

    for obj in self.objects.values():
        _render_children(obj)
        _recurse_ns(obj)

    # Clean out dead namespaces.  Iterate over snapshots: deleting from a
    # dict while iterating it raises RuntimeError in Python 3.
    for key, ns in self.top_namespaces.copy().items():
        if not ns.children:
            del self.top_namespaces[key]
    # BUG FIX: this loop previously iterated the live dict while deleting
    # from it, which raises RuntimeError as soon as an entry is removed.
    for key, ns in self.namespaces.copy().items():
        if not ns.children:
            del self.namespaces[key]
def add_arguments(self, parser):
    """Add arguments to the command parser.

    Uses argparse syntax.  See documentation at
    https://docs.python.org/3/library/argparse.html.
    """
    parser.add_argument(
        '--start', '-s', type=int, default=0,
        help=u"The Submission.id at which to begin updating rows. 0 by default.")
    parser.add_argument(
        '--chunk', '-c', type=int, default=1000,
        help=u"Batch size, how many rows to update in a given transaction. Default 1000.")
    parser.add_argument(
        '--wait', '-w', type=int, default=2,
        help=u"Wait time between transactions, in seconds. Default 2.")
def image_predict(self, X):
    """Predicts class label for the entire image.

    Parameters:
    -----------
    X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
        Array of training images
    y: array, shape = [n_samples] or [n_samples, n_pixels_y, n_pixels_x]
        Target labels or masks.
    """
    self._check_image(X)
    # Split the images into patches and predict one label per patch.
    patches, patches_shape = self._to_patches(X)
    predictions = self.classifier.predict(self._transform_input(patches))
    # Reshape the flat prediction vector back to (n_samples, rows, cols)
    # of patches.
    image_predictions = predictions.reshape(patches_shape[0:3])
    image_results = np.zeros((self._samples,) + self._image_size)
    # Receptive field gives the pixel extent (nx, ny) of a single patch.
    nx, ny = self.receptive_field
    row_steps = self._image_size[0] // nx
    col_steps = self._image_size[1] // ny
    # how can this be optimised?
    # Broadcast each patch-level prediction onto its nx-by-ny pixel block.
    for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):
        image_results[k, nx * i:nx * (i + 1), ny * j:ny * (j + 1)] = image_predictions[k, i, j]
    return image_results
def roots(g):
    "Get nodes from graph G with indegree 0"
    # A root is any node no edge points into.
    return {node for node, degree in iteritems(g.in_degree()) if degree == 0}
def _connect(self):
    """Connect to our SimpleDB domain, lazily, caching the handle.

    The domain name falls back to the boto config values
    ``DB/sequence_db``, then ``DB/db_name``, then ``"default"``.
    Creates the domain on first use if it does not exist yet.
    """
    if not self._db:
        import boto
        sdb = boto.connect_sdb()
        if not self.domain_name:
            self.domain_name = boto.config.get(
                "DB", "sequence_db",
                boto.config.get("DB", "db_name", "default"))
        try:
            self._db = sdb.get_domain(self.domain_name)
        # FIX: the original used the Python-2-only "except E, e" syntax;
        # "except E as e" works on both Python 2.6+ and Python 3.
        except SDBResponseError as e:
            if e.status == 400:
                # 400 here means the domain doesn't exist yet -- create it.
                self._db = sdb.create_domain(self.domain_name)
            else:
                raise
    return self._db
def add(self, element):
    """Add an element to this set.

    The first element stored under a given transformed key wins;
    later duplicates are ignored.
    """
    self._elements.setdefault(self._transform(element), element)
def promptyn(msg, default=None):
    """Display a blocking yes/no prompt until the user confirms.

    The default choice (if any) is shown capitalized in the [y/n] hint;
    an empty answer selects the default when one was given.
    """
    # The hint text only depends on `default`, so compute it once.
    yes = "Y" if default else "y"
    no = "n" if (default or default is None) else "N"
    while True:
        answer = prompt("%s [%s/%s]" % (msg, yes, no), "").lower()
        if answer in ("y", "yes"):
            return True
        if answer in ("n", "no"):
            return False
        if not answer and default is not None:
            return default
def _get_balance(self):
    """Fetch the balance page and report the all-time total and today's amount."""
    page = self.session.get(self.balance_url, verify=False)
    markup = BeautifulSoup(page.text, 'html.parser')
    # The second row of the data table holds the figures we need.
    row = markup.select("table.data tr:nth-of-type(2)")[0]
    cells = row.text.strip().split('\n')
    total, today = cells[-2], cells[-1]
    logging.info('%-26sTotal:%-8s', today, total)
    lines = [u"Today: {0}".format(today), "Total: {0}".format(total)]
    return '\n'.join(lines)
def _render_log():
    """Totally tap into Towncrier internals to get an in-memory result."""
    config = load_config(ROOT)
    definitions = config['types']
    fragment_dir = pathlib.Path(config['directory']).absolute()
    fragments, _filenames = find_fragments(
        fragment_dir, config['sections'], None, definitions,
    )
    template = pathlib.Path(config['template']).read_text(encoding='utf-8')
    return render_fragments(
        template,
        config['issue_format'],
        split_fragments(fragments, definitions),
        definitions,
        config['underlines'][1:],
    )
def set_state(self, state):
    """Switch state of the TDS session, validating the transition.

    :param state: New state, one of
        TDS_PENDING/TDS_READING/TDS_IDLE/TDS_DEAD/TDS_QUERYING
    :returns: the session state after the transition
    :raises tds_base.InterfaceError: if the transition is invalid
    """
    prior_state = self.state
    if state == prior_state:
        # No-op transition.
        return state

    def _invalid_transition():
        # Single message builder for every invalid transition.
        # FIX: the PENDING branch of the original had a typo ("chage").
        return tds_base.InterfaceError(
            'logic error: cannot change query state from {0} to {1}'.format(
                tds_base.state_names[prior_state],
                tds_base.state_names[state]))

    if state == tds_base.TDS_PENDING:
        if prior_state in (tds_base.TDS_READING, tds_base.TDS_QUERYING):
            self.state = tds_base.TDS_PENDING
        else:
            raise _invalid_transition()
    elif state == tds_base.TDS_READING:
        # transition to READING is valid only from PENDING
        if self.state != tds_base.TDS_PENDING:
            raise _invalid_transition()
        else:
            self.state = state
    elif state == tds_base.TDS_IDLE:
        if prior_state == tds_base.TDS_DEAD:
            raise _invalid_transition()
        self.state = state
    elif state == tds_base.TDS_DEAD:
        # Anything may transition to DEAD.
        self.state = state
    elif state == tds_base.TDS_QUERYING:
        if self.state == tds_base.TDS_DEAD:
            raise _invalid_transition()
        elif self.state != tds_base.TDS_IDLE:
            raise _invalid_transition()
        else:
            # Reset per-query bookkeeping when a new query starts.
            self.rows_affected = tds_base.TDS_NO_COUNT
            self.internal_sp_called = 0
            self.state = state
    else:
        assert False
    return self.state
def show_message(self, text_string, scroll_speed=.1, text_colour=[255, 255, 255], back_colour=[0, 0, 0]):
    """Scrolls a string of text across the LED matrix using the specified
    speed and colours

    NOTE(review): the mutable list defaults are never mutated here, but
    consider None/tuple defaults -- TODO confirm no caller relies on them.
    """
    # We must rotate the pixel map left through 90 degrees when drawing
    # text, see _load_text_assets
    previous_rotation = self._rotation
    self._rotation -= 90
    if self._rotation < 0:
        self._rotation = 270
    # Dummy (unset) pixels pad the message so it scrolls fully in and out;
    # 64 pixels is one full frame, 8 presumably one column -- TODO confirm.
    dummy_colour = [None, None, None]
    string_padding = [dummy_colour] * 64
    letter_padding = [dummy_colour] * 8
    # Build pixels from dictionary
    scroll_pixels = []
    scroll_pixels.extend(string_padding)
    for s in text_string:
        scroll_pixels.extend(self._trim_whitespace(self._get_char_pixels(s)))
        scroll_pixels.extend(letter_padding)
    scroll_pixels.extend(string_padding)
    # Recolour pixels as necessary: white glyph pixels take the foreground
    # colour, everything else the background colour.
    coloured_pixels = [text_colour if pixel == [255, 255, 255] else back_colour for pixel in scroll_pixels]
    # Shift right by 8 pixels per frame to scroll
    scroll_length = len(coloured_pixels) // 8
    for i in range(scroll_length - 8):
        start = i * 8
        end = start + 64
        self.set_pixels(coloured_pixels[start:end])
        time.sleep(scroll_speed)
    # Restore the caller's rotation once the scroll finishes.
    self._rotation = previous_rotation
def find_enclosing_bracket_right ( self , left_ch , right_ch , end_pos = None ) :
"""Find the right bracket enclosing current position . Return the relative
position to the cursor position .
When ` end _ pos ` is given , don ' t look past the position .""" | if self . current_char == right_ch :
return 0
if end_pos is None :
end_pos = len ( self . text )
else :
end_pos = min ( len ( self . text ) , end_pos )
stack = 1
# Look forward .
for i in range ( self . cursor_position + 1 , end_pos ) :
c = self . text [ i ]
if c == left_ch :
stack += 1
elif c == right_ch :
stack -= 1
if stack == 0 :
return i - self . cursor_position |
def get_recent_mtime(t):
    """Return a compact human-readable rendering of time *t*.

    Same-day times render as HH:MM:SS, same-year dates as "Mon DD", and
    anything older as "Mon DD YYYY".  An integer *t* is treated as a Unix
    timestamp; 13-digit millisecond timestamps are truncated to seconds.
    """
    if isinstance(t, int):
        # ignore micro seconds
        if len(str(t)) == 13:
            t = t // 1000
        t = datetime.datetime.fromtimestamp(t)
    now = datetime.datetime.now()
    age = now - t
    if age.days == 0:
        fmt = '%H:%M:%S'
    elif t.year == now.year:
        fmt = '%b %d'
    else:
        fmt = '%b %d %Y'
    return t.strftime(fmt)
def _metaconfigure(self, argv=None):
    """Initialize metaconfig for provisioning self.

    Builds the metaconfig's parser, attaches it as a parent of our own
    parser, loads its option values from *argv*, and lets it provision
    this instance.  No-op when no metaconfig class is declared or when
    self *is* the metaconfig class.
    """
    metaconfig = self._get_metaconfig_class()
    if not metaconfig:
        return
    if self.__class__ is metaconfig:
        # don't get too meta
        return
    # Shared kwargs so the meta parser can merge into ours without
    # clashing on options or the implicit --help.
    override = {'conflict_handler': 'resolve', 'add_help': False, 'prog': self._parser_kwargs.get('prog'), }
    self._metaconf = metaconfig(**override)
    metaparser = self._metaconf.build_parser(options=self._metaconf._options, permissive=False, **override)
    # Register the meta parser as a parent of the parser built later.
    self._parser_kwargs.setdefault('parents', [])
    self._parser_kwargs['parents'].append(metaparser)
    self._metaconf._values = self._metaconf.load_options(argv=argv)
    self._metaconf.provision(self)
def _init(self, *args, **kwargs):
    """_deps is used for common dependencies.

    When a expr depend on other exprs, and the expr is not calculated
    from the others, the _deps are specified to identify the
    dependencies.
    """
    # Seed all bookkeeping attributes with their defaults.
    defaults = (
        ('_deps', None),
        ('_ban_optimize', False),
        ('_engine', None),
        ('_Expr__execution', None),
        ('_need_cache', False),
        ('_mem_cache', False),
    )
    for attr, default in defaults:
        self._init_attr(attr, default)
    # Allocate a fresh id only when the caller didn't supply one.
    if '_id' not in kwargs:
        kwargs['_id'] = new_id()
    super(Expr, self)._init(*args, **kwargs)
def columns(self):
    """Returns the list of selected column names."""
    selected = [
        field.label
        for field in self.form_fields
        if self.cleaned_data["field_%s_export" % field.id]
    ]
    # Field 0 is the synthetic entry-time column, appended last.
    if self.cleaned_data["field_0_export"]:
        selected.append(self.entry_time_name)
    return selected
def media_new(self, mrl, *options):
    """Create a new Media instance.

    If mrl contains a colon (:) preceded by more than 1 letter, it
    will be treated as a URL.  Else, it will be considered as a
    local path.  If you need more control, directly use the
    media_new_location/media_new_path methods.

    Options can be specified as supplementary string parameters,
    but note that many options cannot be set at the media level,
    and rather at the Instance level.  For instance, the marquee
    filter must be specified when creating the vlc.Instance or
    vlc.MediaPlayer.

    Alternatively, options can be added to the media using the
    Media.add_options method (with the same limitation).

    @param options: optional media option=value strings
    """
    looks_like_url = ':' in mrl and mrl.index(':') > 1
    if looks_like_url:
        media = libvlc_media_new_location(self, str_to_bytes(mrl))
    else:
        # Treat it as a local path.
        media = libvlc_media_new_path(self, str_to_bytes(os.path.normpath(mrl)))
    for option in options:
        libvlc_media_add_option(media, str_to_bytes(option))
    media._instance = self
    return media
def bna_config_cmd_output_status_string(self, **kwargs):
    """Auto Generated Code

    Builds the <bna_config_cmd><output><status-string> XML tree and
    passes it to the callback (kwarg 'callback', defaulting to
    self._callback).
    """
    root = ET.Element("bna_config_cmd")
    output = ET.SubElement(root, "output")
    status_string = ET.SubElement(output, "status-string")
    status_string.text = kwargs.pop('status_string')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
def contains_index(self):
    """Returns the *line number* that has the CONTAINS keyword separating the
    member and type definitions from the subroutines and functions.

    The result is computed lazily and cached in self._contains_index.
    """
    if self._contains_index is None:
        # Find the character position where the last non-embedded type
        # definition ends; CONTAINS must come after that.
        max_t = 0
        for tkey in self.types:
            if self.types[tkey].end > max_t and not self.types[tkey].embedded:
                max_t = self.types[tkey].end
        # Now we have a good first guess. Continue to iterate the next few lines
        # of the the refstring until we find a solid "CONTAINS" keyword. If there
        # are no types in the module, then max_t will be zero and we don't have
        # the danger of running into a contains keyword as part of a type. In that
        # case we can just keep going until we find it.
        i = 0
        start = self.linenum(max_t)[0]
        max_i = 10 if max_t > 0 else len(self._lines)
        while (self._contains_index is None and i < max_i and start + i < len(self._lines)):
            iline = self._lines[start + i].lower()
            if "contains" in iline:
                # Accept the keyword only when it is not commented out
                # (i.e. no '!' before it on the line).
                if '!' not in iline or (iline.index('!') > iline.index("contains")):
                    self._contains_index = start + i
            i += 1
        if self._contains_index is None:
            # There must not be a CONTAINS keyword in the module
            self._contains_index = len(self._lines) - 1
    return self._contains_index
def build_request_include(include, params):
    """Augment request parameters with includes.

    When one or all resources are requested an additional set of
    resources can be requested as part of the request.  This function
    extends the given parameters for a request with a list of resource
    types passed in as a list of :class:`Resource` subclasses.

    Args:
        include ([Resource class]): A list of resource classes to include
        params (dict): The (optional) dictionary of request parameters to extend

    Returns:
        An updated or new dictionary of parameters extended with an
        include query parameter.
    """
    merged = params if params else OrderedDict()
    if include is not None:
        resource_types = [cls._resource_type() for cls in include]
        merged['include'] = ','.join(resource_types)
    return merged
def start_discovery(self, service_uuids=None):
    """Starts a discovery for BLE devices with given service UUIDs.

    :param service_uuids: Filters the search to only return devices with
        given UUIDs.  Defaults to no filtering.
    :raises errors.NotReady: if the Bluetooth adapter is not powered on.
    """
    # FIX: replaced the mutable default argument `service_uuids=[]` with a
    # None sentinel; only the truthiness of the value is used, so behavior
    # for existing callers is unchanged.
    discovery_filter = {'Transport': 'le'}
    if service_uuids:
        # D-Bus doesn't like empty lists, it needs to guess the type
        discovery_filter['UUIDs'] = service_uuids
    try:
        self._adapter.SetDiscoveryFilter(discovery_filter)
        self._adapter.StartDiscovery()
    except dbus.exceptions.DBusException as e:
        if e.get_dbus_name() == 'org.bluez.Error.NotReady':
            raise errors.NotReady(
                "Bluetooth adapter not ready. "
                "Set `is_adapter_powered` to `True` or run 'echo \"power on\" | sudo bluetoothctl'.")
        if e.get_dbus_name() == 'org.bluez.Error.InProgress':
            # Discovery was already started - ignore exception
            pass
        else:
            raise _error_from_dbus_error(e)
def _clone(self):
    '''Must clone additional fields to those cloned by elasticsearch-dsl-py.'''
    instance = super(Bungiesearch, self)._clone()
    # Carry over our extra flag that the base class's _clone doesn't know about.
    instance._raw_results_only = self._raw_results_only
    return instance
def queryset(self, request):
    """Returns a Queryset of all model instances that can be edited by the
    admin site (including soft-deleted ones). This is used by
    changelist_view.
    """
    qs = self.model._default_manager.all_with_deleted()
    # Apply the admin's configured ordering, when any is set.
    order_fields = self.ordering or ()
    if order_fields:
        qs = qs.order_by(*order_fields)
    return qs
def _sanitize_dates ( start , end ) :
"""Return ( datetime _ start , datetime _ end ) tuple
if start is None - default is 2015/01/01
if end is None - default is today""" | if isinstance ( start , int ) : # regard int as year
start = datetime ( start , 1 , 1 )
start = to_datetime ( start )
if isinstance ( end , int ) :
end = datetime ( end , 1 , 1 )
end = to_datetime ( end )
if start is None :
start = datetime ( 2015 , 1 , 1 )
if end is None :
end = datetime . today ( )
if start > end :
raise ValueError ( 'start must be an earlier date than end' )
return start , end |
def get_rollback_status_reason(self, stack_name):
    """Process stack events and return the latest rollback reason.

    Looks first for an UPDATE_ROLLBACK_IN_PROGRESS event, then falls back
    to a ROLLBACK_IN_PROGRESS event.  Returns None when no rollback event
    is found.
    """
    event = next((item for item in self.get_events(stack_name, False)
                  if item["ResourceStatus"] == "UPDATE_ROLLBACK_IN_PROGRESS"),
                 None)
    if event:
        return event["ResourceStatusReason"]
    event = next((item for item in self.get_events(stack_name)
                  if item["ResourceStatus"] == "ROLLBACK_IN_PROGRESS"),
                 None)
    # FIX: the original subscripted `event` unconditionally here and raised
    # TypeError when no ROLLBACK_IN_PROGRESS event existed.
    if event:
        return event["ResourceStatusReason"]
    return None
def user_parse(data):
    """Parse user information from provider.

    Yields (key, value) pairs extracted from the provider's response dict.
    """
    uid = data.get('id')
    yield 'id', uid
    for field in ('email', 'first_name', 'last_name'):
        yield field, data.get(field)
    yield 'username', data.get('name')
    yield 'picture', 'http://graph.facebook.com/{0}/picture?type=large'.format(uid)
    for field in ('link', 'locale', 'gender'):
        yield field, data.get(field)
    # Location comes as "City, Country"; split it into its parts.
    location = data.get('location', {}).get('name')
    if location:
        parts = location.split(', ')
        yield 'city', parts[0].strip()
        if len(parts) > 1:
            yield 'country', parts[1].strip()
def version(app, appbuilder):
    """Flask-AppBuilder package version"""
    _appbuilder = import_application(app, appbuilder)
    # Echo the version highlighted (white on blue) so it stands out in CLI output.
    click.echo(click.style("F.A.B Version: {0}.".format(_appbuilder.version), bg="blue", fg="white"))
def reorder(self, fields_in_new_order):
    """Pass in field names in the order you wish them to be swapped.

    :param fields_in_new_order: the current field names, in the desired
        new order (must be exactly the same set of names).
    :raises ValueError: if the given names differ in count or content from
        the current fields.  (FIX: was a bare ``Exception``; ValueError is
        more precise and still caught by existing ``except Exception``
        handlers.)
    """
    if len(fields_in_new_order) != len(self.fields):
        raise ValueError(
            "Fields to reorder with are not the same length "
            "(%s) as the original fields (%s)"
            % (len(fields_in_new_order), len(self.fields)))
    if set(fields_in_new_order) != set(self.fields):
        raise ValueError(
            "Fields to reorder with should be the same "
            "as the original fields")
    reordered = OrderedDict()
    for field in fields_in_new_order:
        reordered[field] = self.__data[field]
    self.__data = reordered
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.