signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def commit(self, raise_on_error=True):
    """Send the queued commands to redis as a MULTI/EXEC transaction.

    The pending command stack is wrapped between MULTI and EXEC, the
    pipeline is reset, and the whole batch is handed to the store for
    execution.

    :param raise_on_error: forwarded to the store's pipeline executor.
    :return: whatever ``store.execute_pipeline`` returns.
    """
    multi = (('multi',), {})
    exec_ = (('exec',), {})
    batch = [multi, *self.command_stack, exec_]
    self.reset()
    return self.store.execute_pipeline(batch, raise_on_error)
def calc_mean_and_variance_of_variances(self, NumberOfOscillations):
    """Calculate the mean and variance of a set of variances.

    The timetrace is split into equally sized chunks, each spanning
    ``NumberOfOscillations`` oscillations of the trap frequency; the
    variance of each chunk is computed and the mean and variance of
    that set of variances is returned.

    Parameters
    ----------
    NumberOfOscillations : int
        The number of oscillations each chunk of the timetrace used to
        calculate the variance should contain.

    Returns
    -------
    Mean : float
    Variance : float
    """
    # Samples per oscillation times the requested oscillation count.
    chunk_len = int(self.SampleFreq / self.FTrap.n) * NumberOfOscillations
    n_samples = len(self.voltage)
    # Drop the trailing remainder so the trace reshapes cleanly.
    usable = n_samples - _np.mod(n_samples, chunk_len)
    chunks = self.voltage[:usable].reshape(-1, chunk_len)
    chunk_variances = _np.var(chunks, axis=1)
    return _np.mean(chunk_variances), _np.var(chunk_variances)
def keys(self):
    """Return a list of ConfigMap keys across all four typed maps.

    The order is IntMap, StringMap, FloatMap, then BoolMap.
    """
    pb = self._pb
    all_keys = []
    for typed_map in (pb.IntMap, pb.StringMap, pb.FloatMap, pb.BoolMap):
        all_keys.extend(typed_map.keys())
    return all_keys
def options(self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any) -> '_RequestContextManager':
    """Perform an HTTP OPTIONS request.

    :param url: request URL.
    :param allow_redirects: whether redirects are followed (default True).
    :param kwargs: forwarded to the underlying ``_request`` call.
    :return: a request context manager wrapping the request coroutine.
    """
    request_coro = self._request(
        hdrs.METH_OPTIONS, url, allow_redirects=allow_redirects, **kwargs
    )
    return _RequestContextManager(request_coro)
def _set_level_1(self, v, load=False):
    """Setter method for level_1, mapped from the YANG variable
    /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/isis/level_1 (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_level_1 is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_level_1() directly.
    """
    # NOTE(review): generated pyangbind setter boilerplate — the literal
    # YANG metadata strings below must stay byte-identical to the model.
    # Coerce through the value's declared YANG type wrapper when present.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the incoming value in the generated dynamic container class.
        t = YANGDynClass(v, base=level_1.level_1, is_container='container', presence=False, yang_name="level-1", rest_name="level-1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Level-1 routes'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated type description so callers can report
        # exactly which YANG type the value failed to satisfy.
        raise ValueError({'error-string': """level_1 must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=level_1.level_1, is_container='container', presence=False, yang_name="level-1", rest_name="level-1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Level-1 routes'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""", })
    self.__level_1 = t
    # Notify the parent object, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
def associate_azure_publisher(self, publisher_name, azure_publisher_id):
    """AssociateAzurePublisher.

    [Preview API] Associate an Azure publisher with a gallery publisher.

    :param str publisher_name: name of the gallery publisher.
    :param str azure_publisher_id: identifier of the Azure publisher.
    :rtype: :class:`<AzurePublisher> <azure.devops.v5_0.gallery.models.AzurePublisher>`
    """
    route_values = {}
    query_parameters = {}
    if publisher_name is not None:
        route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
    if azure_publisher_id is not None:
        query_parameters['azurePublisherId'] = self._serialize.query('azure_publisher_id', azure_publisher_id, 'str')
    response = self._send(
        http_method='PUT',
        location_id='efd202a6-9d87-4ebc-9229-d2b8ae2fdb6d',
        version='5.0-preview.1',
        route_values=route_values,
        query_parameters=query_parameters,
    )
    return self._deserialize('AzurePublisher', response)
def downsample(time_series: DataFrame, freq: str) -> DataFrame:
    """Downsample the given route, stop, or feed time series to the given
    Pandas frequency string (e.g. '15Min').

    The input is an output of :func:`.routes.compute_route_time_series`,
    :func:`.stops.compute_stop_time_series`, or
    :func:`.miscellany.compute_feed_time_series`, respectively.

    Return the given time series unchanged if the given frequency is
    shorter than the original frequency.
    """
    f = time_series.copy()
    # Can't downsample to a *shorter* frequency; hand back the copy untouched.
    # NOTE(review): assumes f.index.freq is set — confirm upstream always
    # produces a frequency-aware index.
    if f.empty or pd.tseries.frequencies.to_offset(freq) < f.index.freq:
        return f
    result = None
    if "stop_id" in time_series.columns.names:
        # It's a stops time series: every indicator is summable.
        result = f.resample(freq).sum()
    else:
        # It's a route or feed time series.
        inds = [
            "num_trips",
            "num_trip_starts",
            "num_trip_ends",
            "service_distance",
            "service_duration",
        ]
        frames = []

        # Resample num_trips in a custom way that depends on
        # num_trips and num_trip_ends: trips active at the end of the window
        # plus trips that ended within it.
        def agg_num_trips(group):
            return (group["num_trips"].iloc[-1] + group["num_trip_ends"].iloc[:-1].sum())

        num_trips = f.groupby(pd.Grouper(freq=freq)).apply(agg_num_trips)
        frames.append(num_trips)
        # Resample the rest of the indicators via summing.
        frames.extend([f[ind].resample(freq).agg("sum") for ind in inds[1:]])
        g = pd.concat(frames, axis=1, keys=inds)
        # Calculate speed and add it to f. Can't resample it (a ratio does
        # not sum), so recompute it from the resampled distance/duration.
        speed = g["service_distance"] / g["service_duration"]
        speed = pd.concat({"service_speed": speed}, axis=1)
        result = pd.concat([g, speed], axis=1)
    # Reset column names and sort the hierarchical columns to allow slicing;
    # see http://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex
    result.columns.names = f.columns.names
    result = result.sort_index(axis=1, sort_remaining=True)
    return result
def query_snl(self, criteria):
    """Query for submitted SNLs.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        criteria (dict): Query criteria.

    Returns:
        A dict, with a list of submitted SNLs in the "response" key.

    Raises:
        MPRestError
    """
    try:
        payload = {"criteria": json.dumps(criteria)}
        response = self.session.post("{}/snl/query".format(self.preamble), data=payload)
        # 400 responses still carry a JSON body describing the error,
        # so both are parsed the same way.
        if response.status_code in [200, 400]:
            resp = json.loads(response.text)
            if resp["valid_response"]:
                if resp.get("warning"):
                    warnings.warn(resp["warning"])
                return resp["response"]
            else:
                raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
    except Exception as ex:
        # NOTE(review): this blanket handler also re-wraps the MPRestErrors
        # raised above (discarding the original traceback) — confirm that is
        # intentional before narrowing it.
        raise MPRestError(str(ex))
def get_cmd_output_from_stdin(stdint_content_binary: bytes, *args, encoding: str = SYS_ENCODING) -> str:
    """Return the text output of a command, passing binary data in via stdin.

    :param stdint_content_binary: bytes fed to the child process's stdin.
    :param args: the command and its arguments (argv list).
    :param encoding: codec used to decode the captured stdout; decode
        errors are ignored.
    :return: the command's stdout, decoded to ``str``.
    """
    proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # Fix: stderr is not redirected, so communicate() always returns None for
    # it — the old code unpacked it into a misleading `stderr` local.
    stdout, _ = proc.communicate(input=stdint_content_binary)
    return stdout.decode(encoding, errors='ignore')
def attachable(name, path=None):
    '''
    Return True if the named container can be attached to via the lxc-attach
    command.

    path
        path to the container parent
        default: /var/lib/lxc (system default)

        .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt 'minion' lxc.attachable ubuntu
    '''
    cachekey = 'lxc.attachable{0}{1}'.format(name, path)
    try:
        # Serve the cached probe result for this (name, path) pair.
        return __context__[cachekey]
    except KeyError:
        _ensure_exists(name, path=path)
        # Can't use run() here because it uses attachable() and would
        # endlessly recurse, resulting in a traceback
        log.debug('Checking if LXC container %s is attachable', name)
        cmd = 'lxc-attach'
        if path:
            cmd += ' -P {0}'.format(pipes.quote(path))
        # Probe with a harmless command; a zero exit code means attachable.
        cmd += ' --clear-env -n {0} -- /usr/bin/env'.format(name)
        result = __salt__['cmd.retcode'](cmd, python_shell=False, output_loglevel='quiet', ignore_retcode=True) == 0
        __context__[cachekey] = result
        return __context__[cachekey]
def run(self, *args):
    """Export data from the registry.

    By default, the data is written to standard output. If a positional
    argument is given, the data is written to that file instead.
    """
    params = self.parser.parse_args(args)
    with params.outfile as outfile:
        if params.identities:
            return self.export_identities(outfile, params.source)
        if params.orgs:
            return self.export_organizations(outfile)
        # The running process should never reach this point.
        raise RuntimeError("Unexpected export option")
def _branch(self, live_defs, node, path=""):
    """Recursive function; branches into every possible path in the VFG.

    @live_defs: a dict {addr: stmt} of live definitions at the start point
    @node: the starting vfg node
    @path: human-readable trail of block addresses, for debug logging only
    Returns: the address of the block where the execution stops, or None
    when this call only recursed into successors.
    """
    irsb = self._irsb(node.state)
    path = path + " -> " + hex(irsb.addr)
    if isinstance(irsb, SimProcedure):
        # Remember which addresses resolved to simprocedures.
        self._simproc_map[irsb.addr] = repr(irsb)
    l.debug("--> Branch: running block 0x%x" % irsb.addr)
    block = self._make_block(irsb, live_defs)
    self._imarks.update(block._imarks)
    if block.stop == True:
        # l.debug("### Stopping at block 0x%x" % (irsb.addr))
        l.debug(" ### End of path %s" % path)
        return irsb.addr
    succ = self._vfg._graph.successors(node)
    defer = []
    for s in succ:
        # Consider fake returns last
        if self._vfg._graph.edge[node][s]['jumpkind'] == 'Ijk_FakeRet':
            defer.append(s)
            continue
        # We need to make a copy of the dict!
        self._branch(dict(block.live_defs), s, path)
    # We explore every other paths before taking fake rets.
    # Ideally, we want to take fake rets only when functions don't
    # return.
    for s in defer:
        self._branch(dict(block.live_defs), s, path)
def Run(self, request):
    """Runs the Find action: lists a directory and replies with matches."""
    self.request = request
    filters = self.BuildChecks(request)
    files_checked = 0
    for f in self.ListDirectory(request.pathspec):
        self.Progress()
        # Ignore this file if any of the checks fail.
        # NOTE(review): the comment and the condition look inconsistent —
        # as written, a reply is sent only when *no* filter matches the
        # file. Confirm against BuildChecks' semantics (whether a check
        # returning True means "exclude" or "match") before changing.
        if not any((check(f) for check in filters)):
            self.SendReply(f)
        files_checked += 1
        # Hard cap to bound the work done per request.
        if files_checked >= self.MAX_FILES_TO_CHECK:
            return
def copy(self, deep=True, data=None):
    """Returns a copy of this object.

    If `deep=True`, the data array is loaded into memory and copied onto
    the new object. Dimensions, attributes and encodings are always copied.

    Use `data` to create a new object with the same structure as
    original but entirely new data.

    Parameters
    ----------
    deep : bool, optional
        Whether the data array is loaded into memory and copied onto
        the new object. Default is True.
    data : array_like, optional
        Data to use in the new object. Must have same shape as original.
        When `data` is used, `deep` is ignored.

    Returns
    -------
    object : Variable
        New object with dimensions, attributes, encodings, and optionally
        data copied from original.

    Examples
    --------
    Shallow copy versus deep copy

    >>> var = xr.Variable(data=[1, 2, 3], dims='x')
    >>> var.copy()
    <xarray.Variable (x: 3)>
    array([1, 2, 3])
    >>> var_0 = var.copy(deep=False)
    >>> var_0[0] = 7
    >>> var_0
    <xarray.Variable (x: 3)>
    array([7, 2, 3])
    >>> var
    <xarray.Variable (x: 3)>
    array([7, 2, 3])

    Changing the data using the ``data`` argument maintains the
    structure of the original object, but with the new data. Original
    object is unaffected.

    >>> var.copy(data=[0.1, 0.2, 0.3])
    <xarray.Variable (x: 3)>
    array([0.1, 0.2, 0.3])
    >>> var
    <xarray.Variable (x: 3)>
    array([7, 2, 3])

    See Also
    --------
    pandas.DataFrame.copy
    """
    if data is None:
        data = self._data
        if isinstance(data, indexing.MemoryCachedArray):
            # don't share caching between copies
            data = indexing.MemoryCachedArray(data.array)
        if deep:
            if isinstance(data, dask_array_type):
                data = data.copy()
            elif not isinstance(data, PandasIndexAdapter):
                # pandas.Index is immutable; everything else gets a real copy.
                data = np.array(data)
    else:
        # Caller supplied replacement data: validate it instead of copying.
        data = as_compatible_data(data)
        if self.shape != data.shape:
            raise ValueError("Data shape {} must match shape of object {}".format(data.shape, self.shape))
    # note:
    # dims is already an immutable tuple
    # attributes and encoding will be copied when the new Array is created
    return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def stats(path, hash_type=None, follow_symlinks=True):
    '''
    Return a dict containing the stats for a given file.

    path
        path to the file; ``~`` is expanded
    hash_type
        optional hash algorithm name; when given, a ``sum`` key is added
    follow_symlinks
        stat the link target (True) or the link itself (False)

    CLI Example:

    .. code-block:: bash

        salt '*' file.stats /etc/passwd
    '''
    path = os.path.expanduser(path)
    ret = {}
    if not os.path.exists(path):
        try:
            # Broken symlinks will return False for os.path.exists(), but still
            # have a uid and gid
            pstat = os.lstat(path)
        except OSError:
            # Not a broken symlink, just a nonexistent path
            # NOTE: The file.directory state checks the content of the error
            # message in this exception. Any changes made to the message for this
            # exception will reflect the file.directory state as well, and will
            # likely require changes there.
            raise CommandExecutionError('Path not found: {0}'.format(path))
    else:
        if follow_symlinks:
            pstat = os.stat(path)
        else:
            pstat = os.lstat(path)
    ret['inode'] = pstat.st_ino
    ret['uid'] = pstat.st_uid
    ret['gid'] = pstat.st_gid
    ret['group'] = gid_to_group(pstat.st_gid)
    ret['user'] = uid_to_user(pstat.st_uid)
    ret['atime'] = pstat.st_atime
    ret['mtime'] = pstat.st_mtime
    ret['ctime'] = pstat.st_ctime
    ret['size'] = pstat.st_size
    # Octal permission bits as text, e.g. '0o644'.
    ret['mode'] = six.text_type(oct(stat.S_IMODE(pstat.st_mode)))
    if hash_type:
        ret['sum'] = get_hash(path, hash_type)
    # Default to 'file'; the S_IS* checks below are mutually exclusive,
    # so at most one of them overrides the default.
    ret['type'] = 'file'
    if stat.S_ISDIR(pstat.st_mode):
        ret['type'] = 'dir'
    if stat.S_ISCHR(pstat.st_mode):
        ret['type'] = 'char'
    if stat.S_ISBLK(pstat.st_mode):
        ret['type'] = 'block'
    if stat.S_ISREG(pstat.st_mode):
        ret['type'] = 'file'
    if stat.S_ISLNK(pstat.st_mode):
        ret['type'] = 'link'
    if stat.S_ISFIFO(pstat.st_mode):
        ret['type'] = 'pipe'
    if stat.S_ISSOCK(pstat.st_mode):
        ret['type'] = 'socket'
    ret['target'] = os.path.realpath(path)
    return ret
def reset(self):
    """Restore the calibration weights to their initial state.

    Overwrites the weight holder's array with a fresh copy of the
    initial weights, cast to the holder variable's dtype.
    """
    holder = self.survey_scenario.simulation.get_holder(self.weight_name)
    initial = numpy.array(self.initial_weight, dtype=holder.variable.dtype)
    holder.array = initial
def enter_linking_mode(self, group=0x01):
    """Tell a device to enter All-Linking Mode.

    Equivalent to holding down the Set button for 10 seconds.
    Default group is 0x01. Not supported by i1 devices.
    """
    linking_msg = ExtendedSend(
        self._address,
        COMMAND_ENTER_LINKING_MODE_0X09_NONE,
        cmd2=group,
        userdata=Userdata(),
    )
    # Extended messages require a checksum before they can be sent.
    linking_msg.set_checksum()
    self._send_msg(linking_msg)
def filterstr_to_filterfunc(filter_str: str, item_type: type):
    """Takes an --post-filter=... or --storyitem-filter=... filter
    specification and makes a filter_func Callable out of it."""
    # The filter_str is parsed, then all names occurring in its AST are
    # replaced by loads to item.<name>. A function item -> bool is returned
    # which evaluates the filter with the item bound as 'item' in its
    # namespace.
    class TransformFilterAst(ast.NodeTransformer):
        def visit_Name(self, node: ast.Name):  # pylint:disable=no-self-use
            # Only plain reads are allowed in a filter expression.
            if not isinstance(node.ctx, ast.Load):
                raise InvalidArgumentException("Invalid filter: Modifying variables ({}) not allowed.".format(node.id))
            if node.id == "datetime":
                # 'datetime' stays a bare name; it is injected into the
                # eval namespace below.
                return node
            # Reject names that are not attributes of the item type.
            if not hasattr(item_type, node.id):
                raise InvalidArgumentException("Invalid filter: {} not a {} attribute.".format(node.id, item_type.__name__))
            # Rewrite the bare name into an attribute access on 'item'.
            new_node = ast.Attribute(ast.copy_location(ast.Name('item', ast.Load()), node), node.id, ast.copy_location(ast.Load(), node))
            return ast.copy_location(new_node, node)
    input_filename = '<command line filter parameter>'
    compiled_filter = compile(TransformFilterAst().visit(ast.parse(filter_str, filename=input_filename, mode='eval')), filename=input_filename, mode='eval')
    def filterfunc(item) -> bool:
        # pylint:disable=eval-used
        # eval of user input: the transformer above restricted names to
        # item attributes and 'datetime' only.
        return bool(eval(compiled_filter, {'item': item, 'datetime': datetime.datetime}))
    return filterfunc
def build_specfile_filesection(spec, files):
    """Build the %files section of the specfile.

    :param spec: packaging spec dict; 'X_RPM_DEFATTR' is defaulted in place
        to '(-,root,root)' when absent.
    :param files: iterable of file nodes exposing GetTag() for PACKAGING_*
        tags and a PACKAGING_INSTALL_LOCATION.
    :return: the %files section as a single string.
    """
    # Fix: the accumulator was previously named `str` (and the loop variable
    # `file`), shadowing the builtins.
    section = '%files\n'
    if 'X_RPM_DEFATTR' not in spec:
        spec['X_RPM_DEFATTR'] = '(-,root,root)'
    section = section + '%%defattr %s\n' % spec['X_RPM_DEFATTR']
    # Map of supported packaging tags to their rpm %files directive formats.
    supported_tags = {
        'PACKAGING_CONFIG': '%%config %s',
        'PACKAGING_CONFIG_NOREPLACE': '%%config(noreplace) %s',
        'PACKAGING_DOC': '%%doc %s',
        'PACKAGING_UNIX_ATTR': '%%attr %s',
        'PACKAGING_LANG_': '%%lang(%s) %s',
        'PACKAGING_X_RPM_VERIFY': '%%verify %s',
        'PACKAGING_X_RPM_DIR': '%%dir %s',
        'PACKAGING_X_RPM_DOCDIR': '%%docdir %s',
        'PACKAGING_X_RPM_GHOST': '%%ghost %s',
    }
    for node in files:
        # Build the tagset for this file entry; nodes without GetTag support
        # for a tag simply skip it.
        tags = {}
        for key in list(supported_tags.keys()):
            try:
                value = node.GetTag(key)
                if value:
                    tags[key] = value
            except AttributeError:
                pass
        # Compile the tagset into directives, then append the install path.
        section = section + SimpleTagCompiler(supported_tags, mandatory=0).compile(tags)
        section = section + ' '
        section = section + node.GetTag('PACKAGING_INSTALL_LOCATION')
        section = section + '\n\n'
    return section
def query_configuration(parser_args):
    """Query process state for the named process, or for every registered one."""
    from synergy.system import process_helper
    from synergy.conf import context

    if parser_args.process_name:
        process_names = [parser_args.process_name]
    else:
        process_names = list(context.process_context)
    for process_name in process_names:
        process_helper.poll_process(process_name)
    sys.stdout.write('\n')
def create_all(graph):
    """Create all database tables, but only on a fresh (unstamped) database."""
    if get_current_head(graph) is None:
        Model.metadata.create_all(graph.postgres)
        stamp_head(graph)
def max_pulse_sp(self):
    """Pulse size in milliseconds for the signal that tells the servo to
    drive to the maximum (clockwise) position_sp.

    Default value is 2400. Valid values are 2300 to 2700. You must write to
    the position_sp attribute for changes to this attribute to take effect.
    """
    # get_attr_int returns the (possibly re-opened) attribute handle and the
    # current value; cache the handle for the next read.
    attr_handle, value = self.get_attr_int(self._max_pulse_sp, 'max_pulse_sp')
    self._max_pulse_sp = attr_handle
    return value
def _remove_unexpected_query_parameters ( schema , req ) :
"""Remove unexpected properties from the req . GET .""" | additional_properties = schema . get ( 'addtionalProperties' , True )
if additional_properties :
pattern_regexes = [ ]
patterns = schema . get ( 'patternProperties' , None )
if patterns :
for regex in patterns :
pattern_regexes . append ( re . compile ( regex ) )
for param in set ( req . GET . keys ( ) ) :
if param not in schema [ 'properties' ] . keys ( ) :
if not ( list ( regex for regex in pattern_regexes if regex . match ( param ) ) ) :
del req . GET [ param ] |
def _get_section_start_index ( self , section ) :
'''Get start of a section ' s content .
: param section : string name of section
: return : integer index of section ' s beginning
: raises : NonextantSectionException''' | sec_start_re = r'%s\s*\{' % section
found = re . search ( sec_start_re , self . template_str )
if found :
return found . end ( ) - 1
raise NonextantSectionException ( 'Section %s not found in template' % section ) |
def prefix(self, sign: Optional[PrefixSign] = None, symbol: bool = False) -> str:
    """Get a random prefix for the International System of Units.

    :param sign: Sign of number.
    :param symbol: Return symbol of prefix.
    :return: Prefix for SI.
    :raises NonEnumerableError: if sign is not supported.

    :Example:
        mega
    """
    key = self._validate_enum(item=sign, enum=PrefixSign)
    pool = SI_PREFIXES_SYM if symbol else SI_PREFIXES
    return self.random.choice(pool[key])
def LoadElement(href, only_etag=False):
    """Return an instance of an element as an ElementCache dict used as a cache.

    :param href: URL of the element to fetch.
    :param only_etag: when True, return just the ETag instead of the cache.
    :rtype: ElementCache
    """
    fetch_request = SMCRequest(href=href)
    # Raise FetchElementFailed (rather than a generic error) on failure.
    fetch_request.exception = FetchElementFailed
    result = fetch_request.read()
    if only_etag:
        return result.etag
    return ElementCache(result.json, etag=result.etag)
def mesh_surface_area(mesh=None, verts=None, faces=None):
    r"""Calculate the surface area of a meshed region.

    Parameters
    ----------
    mesh : tuple
        The tuple returned from the ``mesh_region`` function; when given,
        its ``verts`` and ``faces`` attributes take precedence over the
        explicit arguments.
    verts : array
        An N-by-ND array containing the coordinates of each mesh vertex.
    faces : array
        An N-by-ND array indicating which elements in ``verts`` form a mesh
        element.

    Returns
    -------
    surface_area : float
        The surface area of the mesh, calculated by
        ``skimage.measure.mesh_surface_area``.

    Notes
    -----
    This function simply calls ``scikit-image.measure.mesh_surface_area``,
    but it allows for the passing of the ``mesh`` tuple returned by the
    ``mesh_region`` function, entirely for convenience.
    """
    if mesh:
        verts = mesh.verts
        faces = mesh.faces
    elif (verts is None) or (faces is None):
        # Fix: raise ValueError instead of a bare Exception — more precise,
        # and still backward compatible for callers catching Exception.
        raise ValueError('Either mesh or verts and faces must be given')
    return measure.mesh_surface_area(verts, faces)
def find_cell_content(self, lines):
    """Parse cell till its end and set content, lines_to_next_cell.

    Return the position of next cell start.
    """
    cell_end_marker, next_cell_start, self.explicit_eoc = self.find_cell_end(lines)
    # Metadata to dict: when no metadata was parsed yet, the cell content
    # starts on the first line; otherwise the first line held the metadata.
    if self.metadata is None:
        cell_start = 0
        self.metadata = {}
    else:
        cell_start = 1
    # Cell content
    source = lines[cell_start:cell_end_marker]
    # Keep an untouched copy of the original lines.
    self.org_content = [line for line in source]
    # Exactly two empty lines at the end of cell (caused by PEP8)?
    if self.ext == '.py' and self.explicit_eoc:
        if last_two_lines_blank(source):
            source = source[:-2]
            lines_to_end_of_cell_marker = 2
        else:
            lines_to_end_of_cell_marker = 0
        # Only record the marker distance when it differs from what PEP8
        # spacing between cells would imply.
        pep8_lines = pep8_lines_between_cells(source, lines[cell_end_marker:], self.ext)
        if lines_to_end_of_cell_marker != (0 if pep8_lines == 1 else 2):
            self.metadata['lines_to_end_of_cell_marker'] = lines_to_end_of_cell_marker
    # Inactive cells (or foreign-language cells) are stored commented out;
    # active ones may carry magics that need uncommenting.
    if not is_active(self.ext, self.metadata) or ('active' not in self.metadata and self.language and self.language != self.default_language):
        self.content = uncomment(source, self.comment if self.ext not in ['.r', '.R'] else '#')
    else:
        self.content = self.uncomment_code_and_magics(source)
    # Is this a raw cell?
    if ('active' in self.metadata and not is_active('ipynb', self.metadata)) or (self.ext == '.md' and self.cell_type == 'code' and self.language is None):
        if self.metadata.get('active') == '':
            del self.metadata['active']
        self.cell_type = 'raw'
    # Explicit end of cell marker? Absorb the blank line(s) that separate
    # this cell from the next into this cell's span.
    if (next_cell_start + 1 < len(lines) and _BLANK_LINE.match(lines[next_cell_start]) and not _BLANK_LINE.match(lines[next_cell_start + 1])):
        next_cell_start += 1
    elif (self.explicit_eoc and next_cell_start + 2 < len(lines) and _BLANK_LINE.match(lines[next_cell_start]) and _BLANK_LINE.match(lines[next_cell_start + 1]) and not _BLANK_LINE.match(lines[next_cell_start + 2])):
        next_cell_start += 2
    self.lines_to_next_cell = count_lines_to_next_cell(cell_end_marker, next_cell_start, len(lines), self.explicit_eoc)
    return next_cell_start
def create_from_format_name(self, format_name):
    """Create a file loader from a format name.

    Supported file formats are as follows:

        ================  ======================================
        Format name       Loader
        ================  ======================================
        ``"csv"``         :py:class:`~.CsvTableFileLoader`
        ``"excel"``       :py:class:`~.ExcelTableFileLoader`
        ``"html"``        :py:class:`~.HtmlTableFileLoader`
        ``"json"``        :py:class:`~.JsonTableFileLoader`
        ``"json_lines"``  :py:class:`~.JsonTableFileLoader`
        ``"jsonl"``       :py:class:`~.JsonLinesTableFileLoader`
        ``"ltsv"``        :py:class:`~.LtsvTableFileLoader`
        ``"markdown"``    :py:class:`~.MarkdownTableFileLoader`
        ``"mediawiki"``   :py:class:`~.MediaWikiTableFileLoader`
        ``"ndjson"``      :py:class:`~.JsonLinesTableFileLoader`
        ``"sqlite"``      :py:class:`~.SqliteFileLoader`
        ``"ssv"``         :py:class:`~.CsvTableFileLoader`
        ``"tsv"``         :py:class:`~.TsvTableFileLoader`
        ================  ======================================

    :param str format_name: Format name string (case insensitive).
    :return: Loader that coincides with the ``format_name``.
    :raises pytablereader.LoaderNotFoundError:
        |LoaderNotFoundError_desc| the format.
    """
    loader = self._create_from_format_name(format_name)
    logger.debug(
        "TableFileLoaderFactory.create_from_format_name: name={}, loader={}".format(
            format_name, loader.format_name
        )
    )
    return loader
def run_evaluate(self, block: TimeAggregate) -> bool:
    """Evaluate the anchor condition against the specified block.

    :param block: Block to run the anchor condition against.
    :return: True if the anchor condition is met, otherwise False.
    """
    if not self._anchor.evaluate_anchor(block, self._evaluation_context):
        return False
    try:
        self.run_reset()
        self._evaluation_context.global_add('anchor', block)
        self._evaluate()
        self._anchor.add_condition_met()
        return True
    finally:
        # Always drop the transient 'anchor' binding, even on error.
        self._evaluation_context.global_remove('anchor')
def stratify_by_scores(scores, goal_n_strata='auto', method='cum_sqrt_F', n_bins='auto'):
    """Stratify by binning the items based on their scores.

    Parameters
    ----------
    scores : array-like, shape=(n_items,)
        ordered array of scores which quantify the classifier confidence for
        the items in the pool. High scores indicate a high confidence that
        the true label is a "1" (and vice versa for label "0").
    goal_n_strata : int or 'auto', optional, default 'auto'
        desired number of strata. If set to 'auto', the number is selected
        using the Freedman-Diaconis rule. Note that for the 'cum_sqrt_F'
        method this number is a goal -- the actual number of strata created
        may be less than the goal.
    method : {'cum_sqrt_F' or 'equal_size'}, optional, default 'cum_sqrt_F'
        stratification method to use.

    Other Parameters
    ----------------
    n_bins : int or 'auto', optional, default 'auto'
        number of bins to use when estimating the distribution of the score
        function. Used when ``goal_n_strata='auto'`` and/or when
        ``method='cum_sqrt_F'``. If 'auto', selected via Freedman-Diaconis.

    Returns
    -------
    Strata instance
    """
    available_methods = ['equal_size', 'cum_sqrt_F']
    if method not in available_methods:
        raise ValueError("method argument is invalid")
    if (method == 'cum_sqrt_F') or (goal_n_strata == 'auto'):
        # computation below is needed for cum_sqrt_F method OR if we need to
        # determine the number of strata for equal_size method automatically
        if n_bins == 'auto':
            # choose n_bins heuristically
            # NOTE(review): sp.ptp is the scipy alias of np.ptp, removed in
            # recent scipy versions — confirm the pinned scipy supports it.
            width_score = _heuristic_bin_width(scores)
            n_bins = np.ceil(sp.ptp(scores) / width_score).astype(int)
            print("Automatically setting n_bins = {}.".format(n_bins))
        # approx distribution of scores -- called F
        counts, score_bins = np.histogram(scores, bins=n_bins)
        # generate cumulative dist of sqrt(F)
        sqrt_counts = np.sqrt(counts)
        csf = np.cumsum(sqrt_counts)
        if goal_n_strata == 'auto':
            # choose heuristically
            width_csf = _heuristic_bin_width(csf)
            goal_n_strata = np.ceil(sp.ptp(csf) / width_csf).astype(int)
            print("Automatically setting goal_n_strata = {}.".format(goal_n_strata))
        elif method == 'cum_sqrt_F':
            width_csf = csf[-1] / goal_n_strata
    # goal_n_strata is now guaranteed to have a valid integer value
    if method == 'equal_size':
        # Sort items by score and deal them into goal_n_strata groups of
        # (nearly) equal size; the first `remainder` strata get one extra.
        sorted_ids = scores.argsort()
        n_items = len(sorted_ids)
        quotient = n_items // goal_n_strata
        remainder = n_items % goal_n_strata
        allocations = np.empty(n_items, dtype='int')
        st_pops = [quotient for i in range(goal_n_strata)]
        for i in range(remainder):
            st_pops[i] += 1
        j = 0
        for k, nk in enumerate(st_pops):
            start = j
            end = j + nk
            allocations[sorted_ids[start:end]] = k
            j = end
    if method == 'cum_sqrt_F':
        if goal_n_strata > n_bins:
            warnings.warn("goal_n_strata > n_bins. " "Consider increasing n_bins.")
        # calculate roughly equal bins on cum sqrt(F) scale
        csf_bins = [x * width_csf for x in np.arange(goal_n_strata + 1)]
        # map cum sqrt(F) bins to score bins
        j = 0
        new_bins = []
        for (idx, value) in enumerate(csf):
            if j == (len(csf_bins) - 1) or idx == (len(csf) - 1):
                new_bins.append(score_bins[-1])
                break
            if value >= csf_bins[j]:
                new_bins.append(score_bins[idx])
                j += 1
        # Widen the outer edges slightly so boundary scores fall inside.
        new_bins[0] -= 0.01
        new_bins[-1] += 0.01
        # bin scores based on new_bins
        allocations = np.digitize(scores, bins=new_bins, right=True) - 1
    # remove empty strata: re-number allocations densely over the non-empty
    # stratum ids.
    nonempty_ids = np.unique(allocations)
    n_strata = len(nonempty_ids)
    # NOTE(review): `indices` appears unused below — confirm before removal.
    indices = np.arange(n_strata)
    allocations = np.digitize(allocations, nonempty_ids, right=True)
    if n_strata < goal_n_strata:
        warnings.warn("Failed to create {} strata".format(goal_n_strata))
    return Strata(allocations)
def get_steamids_from_ips(self, server_ips, timeout=30):
    """Resolve SteamIDs from IPs.

    :param server_ips: a list of ips (e.g. ``['1.2.3.4:27015', ...]``)
    :type server_ips: list
    :param timeout: (optional) timeout for request in seconds
    :type timeout: int
    :return: map of ips to steamids
    :rtype: dict
    :raises: :class:`.UnifiedMessageError`

    Sample response:

    .. code:: python

        {'1.2.3.4:27060': SteamID(id=123456, type='AnonGameServer', universe='Public', instance=1234)}
    """
    resp, error = self._um.send_and_wait(
        "GameServers.GetServerSteamIDsByIP#1",
        {"server_ips": server_ips},
        timeout=timeout,
    )
    if error:
        raise error
    if resp is None:
        return None
    result = {}
    for server in resp.servers:
        result[server.addr] = SteamID(server.steamid)
    return result
def __regions_russian(self, word):
    """Return the regions RV and R2 which are used by the Russian stemmer.

    In any word, RV is the region after the first vowel,
    or the end of the word if it contains no vowel.

    R1 is the region after the first non-vowel following a vowel,
    or the end of the word if there is no such non-vowel.

    R2 is the region after the first non-vowel following
    a vowel in R1, or the end of the word if there is no such non-vowel.

    :param word: The Russian word whose regions RV and R2 are determined.
    :type word: str or unicode
    :return: the regions RV and R2 for the respective Russian word.
    :rtype: tuple

    :note: This helper method is invoked by the stem method of the subclass
           RussianStemmer. It is not to be invoked directly!
    """
    vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y")
    # Collapse the transliterated digraphs into single placeholder
    # characters so they behave as single vowels during scanning.
    word = word.replace("i^a", "A").replace("i^u", "U").replace("e`", "E")

    r1 = r2 = rv = ""
    # R1: after the first non-vowel that follows a vowel.
    for pos in range(1, len(word)):
        if word[pos - 1] in vowels and word[pos] not in vowels:
            r1 = word[pos + 1:]
            break
    # R2: same rule applied within R1.
    for pos in range(1, len(r1)):
        if r1[pos - 1] in vowels and r1[pos] not in vowels:
            r2 = r1[pos + 1:]
            break
    # RV: after the first vowel of the word.
    for pos, char in enumerate(word):
        if char in vowels:
            rv = word[pos + 1:]
            break

    def _restore(text):
        # Undo the digraph collapsing performed above.
        return text.replace("A", "i^a").replace("U", "i^u").replace("E", "e`")

    return _restore(rv), _restore(r2)
def controlParameters(self, module, status):
    """Returns control parameters as XML.

    :type module: str
    :type status: str
    :param module: The module number/ID
    :param status: The state to set (i.e. true (on) or false (off))
    :return XML string to join with payload
    """
    # Legacy firmware expects an additional <Controller> element after the
    # operational status; newer firmware rejects it.
    if self.use_legacy_protocol:
        return '''{}<NickName>Socket 1</NickName><Description>Socket 1</Description>
<OPStatus>{}</OPStatus><Controller>1</Controller>'''.format(self.moduleParameters(module), status)
    else:
        return '''{}<NickName>Socket 1</NickName><Description>Socket 1</Description>
<OPStatus>{}</OPStatus>'''.format(self.moduleParameters(module), status)
def apply_mask(data: bytes, mask: bytes) -> bytes:
    """Apply masking to the data of a WebSocket message.

    ``data`` and ``mask`` are bytes-like objects. XOR masking is an
    involution, so the same function both masks and unmasks.

    :param data: payload to (un)mask.
    :param mask: the 4-byte masking key from the frame header.
    :return: :class:`bytes` of the same length as ``data``.
    :raises ValueError: if ``mask`` is not exactly 4 bytes long.
    """
    if len(mask) != 4:
        raise ValueError("mask must contain 4 bytes")
    n = len(data)
    if n == 0:
        return b""
    # XOR the whole payload in one C-level big-integer operation instead of
    # a per-byte Python loop: repeat the mask out to the payload length,
    # convert both sides to ints, XOR, and convert back.
    mask = bytes(mask)
    repeated_mask = mask * (n // 4) + mask[: n % 4]
    masked = int.from_bytes(data, "big") ^ int.from_bytes(repeated_mask, "big")
    return masked.to_bytes(n, "big")
def floatify_latlng(input_value):
    """Work around a JSON dict with string, not float, lat/lngs.

    Given anything (list/dict/etc) it will return that thing again, *but* any
    dict (at any level) that has only 2 elements lat & lng, will be replaced
    with the lat & lng turned into floats.

    If the API returns the lat/lng as strings, and not numbers, then this
    function will 'clean them up' to be floats.
    """
    # The bare ``collections.Mapping`` / ``collections.MutableSequence``
    # aliases were deprecated and removed in Python 3.10; use the
    # ``collections.abc`` module instead.
    import collections.abc
    if isinstance(input_value, collections.abc.Mapping):
        if len(input_value) == 2 and sorted(input_value.keys()) == ['lat', 'lng']:
            # This dict has only the 2 keys 'lat' & 'lng' -- coerce them.
            return {'lat': float_if_float(input_value["lat"]),
                    'lng': float_if_float(input_value["lng"])}
        else:
            # General mapping: recurse into every value.
            return dict((key, floatify_latlng(value))
                        for key, value in input_value.items())
    elif isinstance(input_value, collections.abc.MutableSequence):
        return [floatify_latlng(x) for x in input_value]
    else:
        return input_value
def match(self, filename):
    """Searches for a pattern that matches the given filename.

    :param filename: path to test against the compiled ignore patterns.
    :return: A matching pattern or None if there is no matching pattern.
    """
    try:
        for regex, patterns in self._regex_patterns:
            match = regex.match(filename)
            debug_template = "%s against %s: %%s" % (filename, regex._real_regex.pattern)
            if match:
                if self.debug:
                    logger.info(debug_template % "hit")
                # Each alternative group in the combined regex corresponds
                # 1:1 with an entry of ``patterns``; ``lastindex`` tells us
                # which alternative matched.
                return patterns[match.lastindex - 1]
            if self.debug:
                logger.info(debug_template % "miss")
    except Exception as e:
        # We can't show the default e.msg to the user as thats for
        # the combined pattern we sent to regex. Instead we indicate to
        # the user that an ignore file needs fixing.
        # NOTE(review): this assumes the raised exception carries a ``msg``
        # attribute (true for ``re.error``) -- confirm for other failures.
        logger.error('Invalid pattern found in regex: %s.', e.msg)
        e.msg = "File ~/.bazaar/ignore or .bzrignore contains error(s)."
        # Collect every individually-invalid pattern so the user can see
        # exactly which lines of the ignore file need fixing.
        bad_patterns = ''
        for _, patterns in self._regex_patterns:
            for p in patterns:
                if not Globster.is_pattern_valid(p):
                    bad_patterns += ('\n %s' % p)
        e.msg += bad_patterns
        raise e
    return None
def check_image_positions(self, kwargs_ps, kwargs_lens, tolerance=0.001):
    """Check whether the point sources in ``kwargs_ps`` satisfy the lens
    equation within a tolerance (computed by ray-tracing all image
    positions back to the source plane and comparing their scatter).

    :param kwargs_ps: point source keyword arguments
    :param kwargs_lens: lens model keyword arguments
    :param tolerance: maximum allowed source-plane scatter
    :return: bool: True if the tolerance requirement is fulfilled,
        False if not.
    """
    image_x_list, image_y_list = self.image_position(kwargs_ps, kwargs_lens)
    for index, ps_model in enumerate(self._point_source_list):
        if ps_model not in ('LENSED_POSITION', 'SOURCE_POSITION'):
            continue
        # Ray-shoot every image of this point source to the source plane.
        source_x, source_y = self._lensModel.ray_shooting(
            image_x_list[index], image_y_list[index], kwargs_lens)
        # All images must map back to (nearly) the same source position;
        # measure the scatter relative to the first image.
        scatter = np.sqrt((source_x - source_x[0]) ** 2
                          + (source_y - source_y[0]) ** 2)
        if np.max(scatter) > tolerance:
            return False
    return True
def has_changed(self, initial, data):
    "Detects if the data was changed. This is added in 1.6."
    # Both unset: nothing to compare, nothing changed.
    if initial is None and data is None:
        return False
    # A non-iterable (scalar) submitted value must be decompressed into
    # its component list before normalization and comparison.
    if data and not hasattr(data, '__iter__'):
        data = self.widget.decompress(data)
    # Normalize both sides through the field's conversion pipeline so the
    # comparison is value-based rather than representation-based.
    initial = self.to_python(initial)
    data = self.to_python(data)
    if hasattr(self, '_coerce'):
        data = self._coerce(data)
    # Compare model instances by their field dicts -- presumably because
    # Model.__eq__ compares by primary key only (confirm), which would
    # miss value-level changes.
    if isinstance(data, Model) and isinstance(initial, Model):
        return model_vars(data) != model_vars(initial)
    else:
        return data != initial
def set_inputhook(self, callback):
    """Set PyOS_InputHook to callback and return the previous one.

    :param callback: Python callable to install as the interpreter's
        input hook (wrapped into a ctypes function pointer below).
    :return: the previously installed hook, as a ctypes function object.
    """
    # On platforms with 'readline' support, it's all too likely to
    # have a KeyboardInterrupt signal delivered *even before* an
    # initial ``try:`` clause in the callback can be executed, so
    # we need to disable CTRL+C in this situation.
    ignore_CTRL_C()
    self._callback = callback
    # Keep a reference to the ctypes wrapper on ``self`` -- presumably to
    # prevent it from being garbage-collected while still installed as
    # the hook (confirm against ctypes callback lifetime rules).
    self._callback_pyfunctype = self.PYFUNC(callback)
    pyos_inputhook_ptr = self.get_pyos_inputhook()
    # Capture the currently-installed hook before overwriting it so the
    # caller can restore it later.
    original = self.get_pyos_inputhook_as_func()
    pyos_inputhook_ptr.value = ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
    self._installed = True
    return original
def inject(self, *args):
    """Decorator to mark a class, method, or function as needing dependencies injected.

    Example usage::

        from flask_unchained import unchained, injectable

        # automatically figure out which params to inject
        @unchained.inject()
        def my_function(not_injected, some_service: SomeService = injectable):
            # do stuff

        # or declare injectables explicitly
        @unchained.inject('some_service')
        def my_function(not_injected, some_service: SomeService):
            # do stuff

        # use it on a class to set up injection on everything
        @unchained.inject()
        class MyClass:
            some_service: SomeService = injectable

            def __init__(self, another_service: AnotherService = injectable):
                self.another_service = another_service

            def a_method(self, yet_another_service=injectable):
                yet_another_service.do_stuff()
    """
    # ``@inject`` (no parens) passes the decorated object directly as args[0].
    used_without_parenthesis = len(args) and callable(args[0])
    # ``@inject('service_a', 'service_b')`` names the injectables explicitly.
    has_explicit_args = len(args) and all(isinstance(x, str) for x in args)

    def wrapper(fn):
        cls = None
        if isinstance(fn, type):
            # Decorating a class: wire up injection on its constructor.
            cls = fn
            fn = cls.__init__
        # check if the fn has already been wrapped with inject
        if hasattr(fn, '__signature__'):
            if cls and not hasattr(cls, '__signature__'):
                # this happens when both the class and its __init__ method
                # where decorated with @inject. which would be silly, but,
                # it should still work regardless
                cls.__signature__ = fn.__signature__
            if not cls:
                return fn
        sig = inspect.signature(fn)

        # create a new function wrapping the original to inject params
        @functools.wraps(fn)
        def new_fn(*fn_args, **fn_kwargs):
            # figure out which params we need to inject (we don't want to
            # interfere with any params the user has passed manually)
            bound_args = sig.bind_partial(*fn_args, **fn_kwargs)
            required = set(sig.parameters.keys())
            have = set(bound_args.arguments.keys())
            need = required.difference(have)
            # Either the explicitly-named injectables, or every parameter
            # whose default is the ``injectable`` sentinel.
            to_inject = (args if has_explicit_args
                         else set([k for k, v in sig.parameters.items()
                                   if v.default == injectable]))
            # try to inject needed params from extensions or services
            for param_name in to_inject:
                if param_name not in need:
                    continue
                if param_name in self.extensions:
                    fn_kwargs[param_name] = self.extensions[param_name]
                elif param_name in self.services:
                    fn_kwargs[param_name] = self.services[param_name]
            # check to make sure we're not missing anything required:
            # any argument still equal to the sentinel was neither passed
            # manually nor resolvable from extensions/services.
            bound_args = sig.bind_partial(*fn_args, **fn_kwargs)
            bound_args.apply_defaults()
            for k, v in bound_args.arguments.items():
                if isinstance(v, str) and v == injectable:
                    di_name = new_fn.__di_name__
                    # Heuristic: a dotted or all-lowercase name is a plain
                    # function/method, otherwise a class constructor.
                    is_constructor = ('.' not in di_name
                                      and di_name != di_name.lower())
                    action = 'initialized' if is_constructor else 'called'
                    msg = f'{di_name} was {action} without the ' f'{k} parameter. Please supply it manually, or ' 'make sure it gets injected.'
                    raise ServiceUsageError(msg)
            # Lazily set up class-attribute injection the first time the
            # constructor actually runs (unless handled automatically).
            if cls and not getattr(cls, _DI_AUTOMATICALLY_HANDLED, False):
                cls_attrs_to_inject = getattr(cls, _INJECT_CLS_ATTRS, [])
                for attr, value in vars(cls).items():
                    if value == injectable:
                        cls_attrs_to_inject.append(attr)
                if cls_attrs_to_inject:
                    setattr(cls, _INJECT_CLS_ATTRS, cls_attrs_to_inject)
                    _inject_cls_attrs()(cls)
            return fn(*bound_args.args, **bound_args.kwargs)

        new_fn.__signature__ = sig
        new_fn.__di_name__ = getattr(fn, '__di_name__', fn.__name__)
        if cls:
            cls.__init__ = new_fn
            cls.__signature__ = sig
            # Recursively wrap all public, not-yet-wrapped methods so their
            # ``injectable`` defaults are resolved too.
            for attr, meth in vars(cls).items():
                if (attr.startswith('__') or not callable(meth)
                        or hasattr(meth, '__signature__')):
                    continue
                setattr(cls, attr, self.inject()(meth))
            return cls
        return new_fn

    if used_without_parenthesis:
        return wrapper(args[0])
    return wrapper
def to_toml(value, pretty=False):  # noqa: unused-argument
    """Serializes the given value to TOML.

    :param value: the value to serialize
    :param pretty:
        this argument is ignored, as no TOML libraries support this type of
        operation
    :type pretty: bool
    :rtype: str
    """
    # ``toml`` is an optional dependency; it is falsy when unavailable.
    if toml:
        return toml.dumps(make_toml_friendly(value)).rstrip()
    raise NotImplementedError('No supported TOML library available')
def MessageToString(message, as_utf8=False, as_one_line=False, pointy_brackets=False, use_index_order=False, float_format=None, use_field_number=False, descriptor_pool=None, indent=0):
    """Convert protobuf message to text format.

    Floating point values can be formatted compactly with 15 digits of
    precision (which is the most that IEEE 754 "double" can guarantee)
    using float_format='.15g'. To ensure that converting to text and back to
    a proto will result in an identical value, float_format='.17g' should be
    used.

    Args:
      message: The protocol buffers message.
      as_utf8: Produce text output in UTF8 format.
      as_one_line: Don't introduce newlines between fields.
      pointy_brackets: If True, use angle brackets instead of curly braces
        for nesting.
      use_index_order: If True, print fields of a proto message using the
        order defined in source code instead of the field number. By
        default, use the field number order.
      float_format: If set, use this to specify floating point number
        formatting (per the "Format Specification Mini-Language");
        otherwise, str() is used.
      use_field_number: If True, print field numbers instead of names.
      descriptor_pool: A DescriptorPool used to resolve Any types.
      indent: The indent level, in terms of spaces, for pretty print.

    Returns:
      A string of the text formatted protocol buffer message.
    """
    out = TextWriter(as_utf8)
    try:
        printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets,
                           use_index_order, float_format, use_field_number,
                           descriptor_pool)
        printer.PrintMessage(message)
        result = out.getvalue()
    finally:
        # Always release the writer's buffer, even when printing raises
        # (the original leaked ``out`` on the exception path).
        out.close()
    if as_one_line:
        return result.rstrip()
    return result
def admin_state(self, **kwargs):
    """Set interface administrative state.

    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc).
        name (str): Name of interface. (1/0/5, 1/0/10, etc).
        enabled (bool): Is the interface enabled? (True, False)
        rbridge_id (str): rbridge-id for device. Only required when type is
            `ve`.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type`, `name`, or `enabled` is not passed and
            `get` is not ``True``.
        ValueError: if `int_type`, `name`, or `enabled` are invalid.

    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         dev.interface.admin_state(
        ...             int_type='tengigabitethernet', name='225/0/38',
        ...             enabled=False)
        ...         dev.interface.admin_state(
        ...             int_type='tengigabitethernet', name='225/0/38',
        ...             enabled=True)
        ...         output = dev.interface.add_vlan_int('87')
        ...         output = dev.interface.ip_address(int_type='ve',
        ...             name='87', ip_addr='10.0.0.1/24', rbridge_id='225')
        ...         output = dev.interface.admin_state(int_type='ve',
        ...             name='87', enabled=True, rbridge_id='225')
        ...         output = dev.interface.admin_state(int_type='ve',
        ...             name='87', enabled=False, rbridge_id='225')
        ...         output = dev.interface.ip_address(int_type='loopback',
        ...             name='225', ip_addr='10.225.225.225/32',
        ...             rbridge_id='225')
        ...         output = dev.interface.admin_state(int_type='loopback',
        ...             name='225', enabled=True, rbridge_id='225')
        ...         output = dev.interface.admin_state(int_type='loopback',
        ...             name='225', enabled=False, rbridge_id='225')
        ...         output = dev.interface.ip_address(int_type='loopback',
        ...             name='225', ip_addr='10.225.225.225/32',
        ...             rbridge_id='225', delete=True)
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    get = kwargs.pop('get', False)
    # ``enabled`` is only mandatory when editing; for a pure get it stays
    # None so the shutdown element below is left untouched.
    if get:
        enabled = None
    else:
        enabled = kwargs.pop('enabled')
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    valid_int_types = ['gigabitethernet', 'tengigabitethernet', 'fortygigabitethernet', 'hundredgigabitethernet', 'port_channel', 've', 'loopback']
    if int_type not in valid_int_types:
        raise ValueError('`int_type` must be one of: %s' % repr(valid_int_types))
    if not isinstance(enabled, bool) and not get:
        raise ValueError('`enabled` must be `True` or `False`.')
    # Resolve the generated-binding method that builds the shutdown config
    # for this interface type; ve/loopback live under the rbridge subtree.
    state_args = dict(name=name)
    method_name = 'interface_%s_shutdown' % int_type
    method_class = self._interface
    if int_type == 've':
        method_name = "rbridge_id_%s" % method_name
        method_class = self._rbridge
        state_args['rbridge_id'] = rbridge_id
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    elif int_type == 'loopback':
        method_name = 'rbridge_id_interface_{0}_intf_' '{0}_shutdown'.format(int_type)
        method_class = self._rbridge
        state_args['rbridge_id'] = rbridge_id
        state_args['id'] = name
    elif not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for ' 'physical interfaces or x for port channel.')
    admin_state = getattr(method_class, method_name)
    config = admin_state(**state_args)
    # Enabling an interface is expressed as *deleting* the shutdown node.
    if enabled:
        config.find('.//*shutdown').set('operation', 'delete')
    try:
        if get:
            return callback(config, handler='get_config')
        else:
            return callback(config)
    # TODO: Catch existing 'no shut'
    # This is in place because if the interface is already admin up,
    # `ncclient` will raise an error if you try to admin up the interface
    # again.
    except AttributeError:
        return None
def convert_clip(node, **kwargs):
    """Map MXNet's Clip operator attributes to onnx's Clip operator
    and return the created node.

    :param node: MXNet symbol node describing the Clip op.
    :param kwargs: conversion context consumed by ``get_inputs``.
    :return: single-element list containing the created ONNX Clip node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    # ``np.float`` was a deprecated alias for the builtin ``float`` and was
    # removed in NumPy 1.24 -- use ``float()`` directly. Missing bounds
    # default to an unbounded clip via +/- infinity.
    a_min = float(attrs.get('a_min', -np.inf))
    a_max = float(attrs.get('a_max', np.inf))
    clip_node = onnx.helper.make_node("Clip", input_nodes, [name], name=name, min=a_min, max=a_max)
    return [clip_node]
def init_logging():
    """Initialize logging (set up forwarding to Go backend and sane defaults)"""
    # Register the custom TRACE level and route all records through the
    # handler that forwards to the Go backend.
    logging.addLevelName(TRACE_LEVEL, 'TRACE')
    logging.setLoggerClass(AgentLogger)
    root_logger = logging.getLogger()
    root_logger.addHandler(AgentLogHandler())
    root_logger.setLevel(_get_py_loglevel(datadog_agent.get_config('log_level')))

    # `requests` (used in a lot of checks) imports `urllib3`, which logs a
    # bunch of stuff at the info level; pre-emptively raise that logger's
    # default level to WARN.
    urllib3_logger = logging.getLogger("requests.packages.urllib3")
    urllib3_logger.setLevel(logging.WARN)
    urllib3_logger.propagate = True
def send_keysequence_window_up(self, window, keysequence, delay=12000):
    """Send key release (up) events for the given key sequence.

    :param window: X window to receive the events.
    :param keysequence: key sequence string passed straight to libxdo.
    :param delay: delay between keystrokes -- presumably microseconds, as
        in libxdo's API (confirm).
    """
    _libxdo.xdo_send_keysequence_window_up(
        self._xdo, window, keysequence, ctypes.c_ulong(delay))
def find_contours_level(density, x, y, level, closed=False):
    """Find iso-valued density contours for a given level value

    Parameters
    ----------
    density: 2d ndarray of shape (M, N)
        Kernel density estimate for which to compute the contours
    x: 2d ndarray of shape (M, N) or 1d ndarray of size M
        X-values corresponding to `density`
    y: 2d ndarray of shape (M, N) or 1d ndarray of size N
        Y-values corresponding to `density`
    level: float between 0 and 1
        Value along which to find contours in `density` relative
        to its maximum
    closed: bool
        If True, zero-pad `density` by one cell on every side before
        contour-finding so that contours which would otherwise end at the
        array boundary come back closed; if False, contours are left open
        at the boundary.

    Returns
    -------
    contours: list of ndarrays of shape (P, 2)
        Contours found for the given level value

    Raises
    ------
    ValueError
        If `level` is not strictly between 0 and 1.

    See Also
    --------
    skimage.measure.find_contours: Contour finding algorithm used
    """
    if level >= 1 or level <= 0:
        raise ValueError("`level` must be in (0,1), got '{}'!".format(level))
    # level relative to maximum
    level = level * density.max()
    # xy coordinates: reduce meshgrid-style 2d inputs to their 1d axes
    # (asserting that the grid really is separable along that axis)
    if len(x.shape) == 2:
        assert np.all(x[:, 0] == x[:, 1])
        x = x[:, 0]
    if len(y.shape) == 2:
        assert np.all(y[0, :] == y[1, :])
        y = y[0, :]
    if closed:
        # find closed contours; the 1-cell zero padding shifts every index
        # by one, which is compensated by `offset` below
        density = np.pad(density, ((1, 1), (1, 1)), mode="constant")
        offset = 1
    else:
        # leave contours open at kde boundary
        offset = 0
    conts_idx = find_contours(density, level)
    conts_xy = []
    for cc in conts_idx:
        # map (row, col) index coordinates of each contour back onto the
        # physical x/y axes by linear interpolation
        cx = np.interp(x=cc[:, 0] - offset, xp=range(x.size), fp=x)
        cy = np.interp(x=cc[:, 1] - offset, xp=range(y.size), fp=y)
        conts_xy.append(np.stack((cx, cy), axis=1))
    return conts_xy
def lvm_info(self, name=None):
    """Run the LVM command and return its parsed colon-separated output.

    :param name: if specified -- information is returned for that lvm
        entity only; otherwise all available entries are returned
    :return: tuple of field lists (one list of str per entry)
    :raises RuntimeError: if ``name`` was given but the output did not
        contain exactly one matching entry
    """
    # Prefix with sudo unless it is explicitly disabled.
    command_line = ['sudo'] if self.sudo() is not False else []
    command_line += [self.command(), '-c']
    if name is not None:
        command_line.append(name)
    raw_output = subprocess.check_output(command_line, timeout=self.cmd_timeout())
    decoded = raw_output.decode()
    # Keep only lines that split into exactly the expected field count;
    # everything else (headers, blanks) is discarded.
    expected_fields = self.fields_count()
    entries = []
    for raw_line in decoded.split('\n'):
        fields = raw_line.strip().split(':')
        if len(fields) == expected_fields:
            entries.append(fields)
    if name is not None and len(entries) != 1:
        raise RuntimeError('Unable to parse command result')
    return tuple(entries)
def _default_value_cell_data_func(self, tree_view_column, cell, model, iter, data=None):
    """Set renderer properties for every single cell independently.

    Controls the editable flag and the color scheme of each cell in the
    default value column according to the use_runtime_value flag; only
    library states are affected.

    :param tree_view_column: the Gtk.TreeViewColumn to be rendered
    :param cell: the current CellRenderer
    :param model: the Gtk.ListStore or TreeStore that is the model for TreeView
    :param iter: an iterator over the rows of the TreeStore/Gtk.ListStore Model
    :param data: optional data to be passed:
        see http://dumbmatter.com/2012/02/some-notes-on-porting-from-pygtk-to-pygobject/
    """
    if not isinstance(self.model.state, LibraryState):
        return
    # Runtime values are user-editable (white); default values are
    # read-only (dark grey).
    if model.get_value(iter, self.USE_RUNTIME_VALUE_STORAGE_ID):
        editable, text_column, color = True, self.RUNTIME_VALUE_STORAGE_ID, "white"
    else:
        editable, text_column, color = False, self.DEFAULT_VALUE_STORAGE_ID, "dark grey"
    cell.set_property("editable", editable)
    cell.set_property('text', model.get_value(iter, text_column))
    cell.set_property('foreground', color)
def _write_frames(self, handle):
    '''Write our frame data to the given file handle.

    Parameters
    ----------
    handle : file
        Write metadata and C3D motion frames to the given file handle. The
        writer does not close the handle.
    '''
    assert handle.tell() == 512 * (self.header.data_block - 1)
    scale = abs(self.point_scale)
    # A negative point_scale marks floating-point storage.
    is_float = self.point_scale < 0
    point_dtype = [np.int16, np.float32][is_float]
    point_format = 'if'[is_float]
    raw = np.empty((self.point_used, 4), point_dtype)
    for points, analog in self._frames:
        valid = points[:, 3] > -1
        raw[~valid, 3] = -1
        # NOTE(review): the original computed an unused
        # ``point_scale = [scale, 1][is_float]`` local; the division below
        # uses ``self._point_scale`` -- confirm that is intended for
        # float-mode files.
        raw[valid, :3] = points[valid, :3] / self._point_scale
        # Pack camera byte (high) and residual (low) into the 4th word.
        raw[valid, 3] = (((points[valid, 4]).astype(np.uint8) << 8) |
                         (points[valid, 3] / scale).astype(np.uint16))
        point = array.array(point_format)
        point.extend(raw.flatten())
        point.tofile(handle)
        # BUG FIX: the original rebound the loop variable ``analog`` to a
        # fresh empty array and then extended it with itself, which wrote
        # zero analog samples to the file. Serialize into a separate array.
        analog_out = array.array(point_format)
        analog_out.extend(analog)
        analog_out.tofile(handle)
    self._pad_block(handle)
def Dirname(self):
    """Get a new copied object with only the directory path."""
    result = self.Copy()
    while True:
        parent = posixpath.dirname(result.last.path)
        # Stop once the last component's parent is meaningful (not the
        # root) or there is nothing left to pop.
        if parent != "/" or len(result) <= 1:
            result.last.path = parent
            # Make sure to clear the inode information.
            result.last.inode = None
            break
        result.Pop(-1)
    return result
def has_permissions(self, object, group, operations):
    '''Check if this :class:`Subject` has permissions for ``operations``
    on an ``object``. It returns the number of valid permissions.'''
    # Superusers implicitly hold every permission.
    if self.is_superuser:
        return 1
    models = self.session.router
    # Count role-permission links matching both this subject's role and
    # the permissions valid for the given object/operations.
    permission_query = models.permission.for_object(object, operation=operations)
    role_permissions = models[models.role.permissions.model]
    return role_permissions.filter(role=self.role.query(),
                                   permission=permission_query).count()
def cli(env, volume_id, schedule_type):
    """Disables snapshots on the specified schedule for a given volume"""
    allowed_types = ('INTERVAL', 'HOURLY', 'DAILY', 'WEEKLY')
    if schedule_type not in allowed_types:
        raise exceptions.CLIAbort('--schedule-type must be INTERVAL, HOURLY, DAILY, or WEEKLY')
    block_manager = SoftLayer.BlockStorageManager(env.client)
    # Only report success; silence on failure matches the original CLI.
    if block_manager.disable_snapshots(volume_id, schedule_type):
        click.echo('%s snapshots have been disabled for volume %s'
                   % (schedule_type, volume_id))
def get(self, pk, **kwargs):
    """Get item from Model
    get:
      parameters:
      - in: path
        schema:
          type: integer
        name: pk
      - $ref: '#/components/parameters/get_item_schema'
      responses:
        200:
          description: Item from Model
          content:
            application/json:
              schema:
                type: object
                properties:
                  label_columns:
                    type: object
                  show_columns:
                    type: array
                    items:
                      type: string
                  description_columns:
                    type: object
                  show_title:
                    type: string
                  id:
                    type: string
                  result:
                    $ref: '#/components/schemas/{{self.__class__.__name__}}.get'
        400:
          $ref: '#/components/responses/400'
        401:
          $ref: '#/components/responses/401'
        404:
          $ref: '#/components/responses/404'
        422:
          $ref: '#/components/responses/422'
        500:
          $ref: '#/components/responses/500'
    """
    item = self.datamodel.get(pk, self._base_filters)
    if not item:
        return self.response_404()
    _response = dict()
    # Rison-encoded query arguments (column selection, etc).
    _args = kwargs.get("rison", {})
    # Restrict the requested columns to those actually exposed for show.
    select_cols = _args.get(API_SELECT_COLUMNS_RIS_KEY, [])
    _pruned_select_cols = [col for col in select_cols if col in self.show_columns]
    self.set_response_key_mappings(
        _response, self.get, _args,
        **{API_SELECT_COLUMNS_RIS_KEY: _pruned_select_cols})
    # Build a narrowed schema when a column subset was requested,
    # otherwise fall back to the full show schema.
    if _pruned_select_cols:
        _show_model_schema = self.model2schemaconverter.convert(_pruned_select_cols)
    else:
        _show_model_schema = self.show_model_schema
    _response["id"] = pk
    _response[API_RESULT_RES_KEY] = _show_model_schema.dump(item, many=False).data
    # Hook for subclasses to mutate the response before it is returned.
    self.pre_get(_response)
    return self.response(200, **_response)
def metadata(self, exportFormat="default", output=None, saveFolder=None, fileName=None):
    """Exports metadata to the various supported formats.

    Inputs:
        exportFormat - export metadata to the following formats: fgdc,
            inspire, iso19139, iso19139-3.2, iso19115, arcgis, and default.
            default means the value will be ISO 19139 Metadata
            Implementation Specification GML3.2 or the default format set
            for your ArcGIS online organizational account.
        output - html or none. Html returns values as html text.
        saveFolder - Default is None. If provided the metadata file will
            be saved to that location.
        fileName - Default is None. If provided, this will be the name of
            the file on your local drive.
    Output:
        path to file or string
    """
    url = "%s/info/metadata/metadata.xml" % self.root
    fmt = exportFormat.lower()
    allowed_formats = ("fgdc", "inspire", "iso19139", "iso19139-3.2",
                       "iso19115", "arcgis", "default")
    if fmt not in allowed_formats:
        raise Exception("Invalid exportFormat")
    if fmt == "arcgis":
        params = {}
    elif fmt == "default":
        # 'default' is requested from the server as an empty format string.
        exportFormat = ""
        params = {"format": exportFormat}
    else:
        params = {"format": exportFormat}
    if output is not None:
        params['output'] = output
    if saveFolder is None:
        saveFolder = tempfile.gettempdir()
    if fileName is None:
        fileName = "metadata.xml"
    if output is None:
        result = self._get(url=url, param_dict=params,
                           securityHandler=self._securityHandler,
                           proxy_url=self._proxy_url,
                           proxy_port=self._proxy_port,
                           out_folder=saveFolder, file_name=fileName)
        target = os.path.join(saveFolder, fileName)
        if not os.path.isfile(result):
            # _get returned raw content rather than a file path; persist it.
            with open(target, 'wb') as writer:
                writer.write(result)
                writer.flush()
            return target
        return result
    return self._post(url=url, param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
def reflection_matrix_pow(reflection_matrix: np.ndarray, exponent: float):
    """Raises a matrix with two opposing eigenvalues to a power.

    Args:
        reflection_matrix: The matrix to raise to a power.
        exponent: The power to raise the matrix to.

    Returns:
        The given matrix raised to the given power.
    """
    # The two eigenvalues are +x and -x for some complex unit x; recover x
    # from the matrix entries.
    squared = np.dot(reflection_matrix[:, 0], reflection_matrix[0, :])
    eig_phase = complex(np.sqrt(squared))

    # Split the matrix into its +x and -x eigencomponents (projectors
    # scaled by the eigenvalue magnitude).
    scaled_identity = np.eye(reflection_matrix.shape[0]) * eig_phase
    plus_component = (scaled_identity + reflection_matrix) * 0.5
    minus_component = (scaled_identity - reflection_matrix) * 0.5

    # Raise the matrix to the power by raising each eigencomponent's
    # scalar factor to that power.
    plus_factor = eig_phase ** (exponent - 1)
    minus_factor = plus_factor * complex(-1) ** exponent
    return plus_factor * plus_component + minus_factor * minus_component
def traverse_ruffus_exception(exceptions, options, log):
    """Traverse a RethrownJobError and output the exceptions.

    Ruffus presents exceptions as 5-element tuples; the RethrownJobError
    carries a list of them (``e.job_exceptions = [(5-tuple), ...]``).

    ruffus < 2.7.0 had a bug with exception marshalling that would give
    different output whether the main or child process raised the
    exception. We no longer support this.

    Attempting to log the exception itself would re-marshall it to the
    logger (normally running in another process); it is better to avoid
    re-marshalling.

    The exit code is based on the *first* exception, even if multiple
    exceptions occurred at the same time.
    """
    # Every exception is reported (do_ruffus_exception logs it); only the
    # first resulting exit code is propagated.
    exit_codes = [do_ruffus_exception(job_exc, options, log)
                  for job_exc in exceptions]
    return exit_codes[0]
def _parsed_items(items, sep=_SEP, **options):
    """Yield (key, value) pairs, optionally parsing each value.

    :param items: List of pairs, [(key, value)], or generator yielding pairs
    :param sep: Separator string
    :param options: when ``ac_parse_value`` is truthy, values are parsed
        with :func:`_parse`; otherwise they pass through unchanged
    :return: Generator yielding (key, value) pairs
    """
    # Select the value transform once, outside the loop.
    transform = _parse if options.get("ac_parse_value") else anyconfig.utils.noop
    for key, val in items:
        yield key, transform(val, sep)
def send_message_tracked(self, msg):
    """Send a message to the MUC with delivery tracking.

    :param msg: The message to send.
    :type msg: :class:`aioxmpp.Message`
    :return: a ``(token, tracker)`` pair from the tracking service.

    The address attributes and the type of ``msg`` are overwritten so the
    stanza conforms to a groupchat message for this MUC; other attributes
    are left untouched (aside from :meth:`~.StanzaBase.autoset_id`).

    The message is considered
    :attr:`~.MessageState.DELIVERED_TO_RECIPIENT` once the MUC service
    reflects it back to us; the reflected message then appears in the
    :attr:`~.MessageTracker.response` attribute. Reflections caught by
    tracking are not emitted through :meth:`on_message`, and the service
    may have altered the message contents.

    .. warning::

        Please read :ref:`api-tracking-memory`. Tracking is best-effort:
        reflections are matched first by stanza ID and, as a fallback for
        services which rewrite IDs, by sender and body -- so it can fail
        for services which rewrite both (see :xep:`45`).

        Calling this before :meth:`on_join` has fired yields a
        :data:`None` ``member`` in the resulting :meth:`on_message` event;
        calling it before history replay is over causes the event to be
        emitted during the replay. :meth:`send_message` is not affected
        by these quirks.
    """
    # Force the stanza into MUC groupchat shape.
    msg.type_ = aioxmpp.MessageType.GROUPCHAT
    msg.to = self._mucjid
    # Attach a muc#user element so receiving entities can discover that
    # this message is MUC-related (see the standards@ discussion from
    # January 2017 referenced by the original implementation).
    msg.xep0045_muc_user = muc_xso.UserExt()
    msg.autoset_id()

    tracking_service = self.service.dependencies[
        aioxmpp.tracking.BasicTrackingService
    ]
    tracker = aioxmpp.tracking.MessageTracker()

    # Index the tracker both by stanza ID and by (one) body pair so the
    # reflection can be matched even when the service rewrites the ID.
    stanza_id = msg.id_
    body_pair = _extract_one_pair(msg.body)
    self._tracking_by_id[stanza_id] = tracker
    self._tracking_metadata[tracker] = (stanza_id, body_pair)
    self._tracking_by_body.setdefault(body_pair, []).append(tracker)
    tracker.on_closed.connect(
        functools.partial(self._tracker_closed, tracker)
    )

    token = tracking_service.send_tracked(msg, tracker)
    self.on_message(
        msg,
        self._this_occupant,
        aioxmpp.im.dispatcher.MessageSource.STREAM,
        tracker=tracker,
    )
    return token, tracker
def hpsample(self, data: ['SASdata', str] = None, cls: [str, list] = None,
             performance: str = None, target: [str, list, dict] = None,
             var: str = None, procopts: [str, list, dict] = None,
             stmtpassthrough: [str, list, dict] = None,
             **kwargs: dict) -> 'SASresults':
    """Python method to call the HPSAMPLE procedure.

    Documentation link:
    https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=prochp&docsetTarget=prochp_hpsample_toc.htm&locale=en

    :param data: SASdata object or string. This parameter is required.
    :param cls: The cls variable can be a string or list type. It refers to the categorical, or nominal variables.
    :param performance: The performance variable can only be a string type.
    :param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
    :param var: The var variable can only be a string type.
    :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
    :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """
async def on_isupport_chanlimit(self, value):
    """Record per-channel-type join limits from the ISUPPORT CHANLIMIT token.

    ``value`` looks like ``"#&:25,+:10"``: groups of channel prefixes with
    the maximum number of simultaneous channels of those types.
    """
    self._channel_limits = {}
    for spec in value.split(','):
        prefixes, raw_limit = spec.split(':')
        group = frozenset(prefixes)
        self._channel_limits[group] = int(raw_limit)
        # Each individual prefix maps back to its limit group for lookups.
        for prefix in prefixes:
            self._channel_limit_groups[prefix] = group
def get_image_url(self, selector, by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT):
    """Return the 'src' attribute (the URL) of an image element on the page."""
    # Stretch the default timeout when a multiplier is configured.
    if timeout == settings.SMALL_TIMEOUT and self.timeout_multiplier:
        timeout = self.__get_new_timeout(timeout)
    return self.get_attribute(selector, attribute='src', by=by, timeout=timeout)
def transform_data(self, data, request=None, response=None, context=None):
    """Run this endpoint's transform on ``data`` and return the result.

    If the transform is a class and ``data`` is already an instance of it,
    the data is returned unchanged.  When the transform declared extra
    parameters, they are resolved from ``request``/``response`` and passed
    along with the data.

    NOTE(fix): the original placed its docstring as a bare string in the
    middle of the body (so it was never the function's ``__doc__``); it now
    sits at the top.  ``self.transform`` is also looked up once instead of
    twice.
    """
    transform = self.transform
    if hasattr(transform, 'context'):
        transform.context = context
    # No-op when there is no transform, or when a class-based transform
    # already matches the data's type.
    if transform and not (isinstance(transform, type) and isinstance(data, transform)):
        if self._params_for_transform:
            return transform(data,
                             **self._arguments(self._params_for_transform,
                                               request, response))
        return transform(data)
    return data
def nvmlSystemGetProcessName(pid):
    r"""Return the name of the process with the given process id.

    For all products.  The returned name is cropped by the driver to the
    buffer length (1024 bytes here) and is encoded in ANSI.

    :param pid: The identifier of the process
    :raises NVMLError: on NVML_ERROR_UNINITIALIZED, NVML_ERROR_INVALID_ARGUMENT,
        NVML_ERROR_NOT_FOUND, NVML_ERROR_NO_PERMISSION or NVML_ERROR_UNKNOWN
        (mirrors ``nvmlReturn_t`` of ``nvmlSystemGetProcessName``).
    """
    buf_size = 1024
    name_buffer = create_string_buffer(buf_size)
    fn = _nvmlGetFunctionPointer("nvmlSystemGetProcessName")
    result = fn(c_uint(pid), name_buffer, c_uint(buf_size))
    _nvmlCheckReturn(result)
    return bytes_to_str(name_buffer.value)
def find_omega_min(omega_levelu, Neu, Nl, xiu):
    r"""Find the smallest transition frequency for each field.

    For each field ``l`` every coupled pair (``iu > ju`` with
    ``xiu[l, iu, ju] == 1``) is examined and the pair with the smallest
    frequency difference is kept.

    Returns a tuple of three lists: the minimal frequencies, the upper-state
    indices, and the lower-state indices (one entry per field).
    """
    omega_min = []
    iu0 = []
    ju0 = []
    for l in range(Nl):
        # All coupled transitions for this field as (frequency, iu, ju).
        candidates = [(omega_levelu[iu] - omega_levelu[ju], iu, ju)
                      for iu in range(Neu)
                      for ju in range(iu)
                      if xiu[l, iu, ju] == 1]
        best = min(candidates)
        omega_min.append(best[0])
        iu0.append(best[1])
        ju0.append(best[2])
    return omega_min, iu0, ju0
def _attempt_resumable_download(self, key, fp, headers, cb, num_cb, torrent,
                                version_id):
    """Attempts a resumable download.

    Raises ResumableDownloadException if any problems occur.
    """
    # NOTE: Python 2 code (print statements).
    cur_file_size = get_cur_file_size(fp, position_to_eof=True)

    # A resume is only attempted when some bytes already exist locally AND
    # the tracked ETag matches the key's current ETag (i.e. the remote
    # object has not changed since the partial download started).
    if (cur_file_size and
        self.etag_value_for_current_download and
        self.etag_value_for_current_download == key.etag.strip('"\'')):
        # Try to resume existing transfer.
        if cur_file_size > key.size:
            # Local file is larger than the remote object: unrecoverable.
            raise ResumableDownloadException(
                '%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
                'if you re-try this download it will start from scratch' %
                (fp.name, cur_file_size, str(storage_uri_for_key(key)),
                 key.size), ResumableTransferDisposition.ABORT)
        elif cur_file_size == key.size:
            # Nothing left to fetch.
            if key.bucket.connection.debug >= 1:
                print 'Download complete.'
            return
        if key.bucket.connection.debug >= 1:
            print 'Resuming download.'
        # Copy headers before mutating them so the caller's dict is intact,
        # then request only the remaining byte range.
        headers = headers.copy()
        headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
        # Shift progress-callback byte counts by what we already have.
        cb = ByteTranslatingCallbackHandler(cb, cur_file_size).call
        self.download_start_point = cur_file_size
    else:
        if key.bucket.connection.debug >= 1:
            print 'Starting new resumable download.'
        self._save_tracker_info(key)
        self.download_start_point = 0
        # Truncate the file, in case a new resumable download is being
        # started atop an existing file.
        fp.truncate(0)

    # Disable AWSAuthConnection-level retry behavior, since that would
    # cause downloads to restart from scratch.
    key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                 override_num_retries=0)
    fp.flush()
def config_dir(self):
    """Get the path to the user configuration directory.

    The directory is guaranteed to exist as a postcondition (one may be
    created if none exist).

    If the application's ``...DIR`` environment variable is set, it is used
    as the configuration directory.  Otherwise, platform-specific standard
    configuration locations are searched for a ``config.yaml`` file.  If no
    configuration file is found, a fallback path is used.
    """
    if self._env_var in os.environ:
        # Explicit override via the environment variable.
        appdir = os.path.abspath(os.path.expanduser(os.environ[self._env_var]))
        if os.path.isfile(appdir):
            raise ConfigError(u'{0} must be a directory'.format(self._env_var))
    else:
        # Search platform-specific locations.  If no config file is found,
        # fall back to the first directory in the list.
        configdirs = config_dirs()
        for confdir in configdirs:
            appdir = os.path.join(confdir, self.appname)
            if os.path.isfile(os.path.join(appdir, CONFIG_FILENAME)):
                break
        else:
            appdir = os.path.join(configdirs[0], self.appname)
    # Ensure that the directory exists.
    if not os.path.isdir(appdir):
        os.makedirs(appdir)
    return appdir
def completed_stage(self, stage):
    """Determine whether the pipeline's completed the stage indicated.

    Completion is signalled by the existence of the stage's checkpoint file.

    :param pypiper.Stage stage: Stage to check for completion status.
    :return bool: Whether this pipeline's completed the indicated stage.
    :raises UnknownStageException: If the stage name given is undefined
        for the pipeline.
    """
    return os.path.exists(checkpoint_filepath(stage, self.manager))
def pivot_wavelength(self):
    """Get the bandpass' pivot wavelength.

    Unlike calc_pivot_wavelength(), this function will use a cached value
    when one is available, and caches the computed value otherwise.
    """
    cache_key = (self.telescope, self.band)
    cached = self.registry._pivot_wavelengths.get(cache_key)
    if cached is not None:
        return cached
    # Cache miss: compute and remember for next time.
    wl = self.calc_pivot_wavelength()
    self.registry.register_pivot_wavelength(self.telescope, self.band, wl)
    return wl
def sc_imap(self, viewer, event, msg=True):
    """Interactively change the intensity map by scrolling."""
    self._cycle_imap(viewer, msg,
                     direction=self.get_direction(event.direction))
    return True
def _legacy_init(self, name, arr):
    """Legacy initialization method (deprecated).

    Dispatches to the appropriate ``_init_*`` helper based on the
    parameter name's prefix/suffix.

    Parameters
    ----------
    name : str
        Name of corresponding NDArray.
    arr : NDArray
        NDArray to be initialized.
    """
    warnings.warn(
        "\033[91mCalling initializer with init(str, NDArray) has been deprecated."
        "please use init(mx.init.InitDesc(...), NDArray) instead.\033[0m",
        DeprecationWarning, stacklevel=3)
    if not isinstance(name, string_types):
        raise TypeError('name must be string')
    if not isinstance(arr, NDArray):
        raise TypeError('arr must be NDArray')
    # Ordered dispatch table: the FIRST matching rule wins, mirroring the
    # original if/elif chain (prefix rules must run before suffix rules,
    # e.g. 'stn_loc...bias' before the generic 'bias' rule).
    rules = [
        (lambda n: n.startswith('upsampling'), self._init_bilinear),
        (lambda n: n.startswith('stn_loc') and n.endswith('weight'), self._init_zero),
        (lambda n: n.startswith('stn_loc') and n.endswith('bias'), self._init_loc_bias),
        (lambda n: n.endswith('bias'), self._init_bias),
        (lambda n: n.endswith('gamma'), self._init_gamma),
        (lambda n: n.endswith('beta'), self._init_beta),
        (lambda n: n.endswith('weight'), self._init_weight),
        (lambda n: n.endswith('moving_mean'), self._init_zero),
        (lambda n: n.endswith('moving_var'), self._init_one),
        (lambda n: n.endswith('moving_inv_var'), self._init_zero),
        (lambda n: n.endswith('moving_avg'), self._init_zero),
        (lambda n: n.endswith('min'), self._init_zero),
        (lambda n: n.endswith('max'), self._init_one),
    ]
    for matches, init_fn in rules:
        if matches(name):
            init_fn(name, arr)
            return
    self._init_default(name, arr)
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params=None):
    """Returns a tree from alignment

    Will check MolType of aln object
    """
    # NOTE: Python 2 code ('raise Exc, msg' syntax).
    if params is None:
        params = {}
    # FastTree's '-nt' flag selects nucleotide mode; protein mode otherwise.
    if moltype == DNA or moltype == RNA:
        params['-nt'] = True
    elif moltype == PROTEIN:
        params['-nt'] = False
    else:
        raise ValueError, "FastTree does not support moltype: %s" % moltype.label
    app = FastTree(params=params)
    if best_tree:
        raise NotImplementedError, "best_tree not implemented yet"
    result = app(aln.toFasta())
    # Parse FastTree's Newick output back into a PhyloNode tree.
    tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
    return tree
def count_words(text, to_lower=True, delimiters=DEFAULT_DELIMITERS):
    """Compute a bag-of-words representation for each row of ``text``.

    If ``text`` is an SArray of strings (or lists of strings), word
    occurrences are counted per row.  If it is an SArray of dictionaries,
    the keys are tokenized and the values are the counts; counts for the
    same word in the same row are added together.

    Parameters
    ----------
    text : SArray[str | dict | list]
        SArray of type: string, dict or list.
    to_lower : bool, optional
        If True, all strings are converted to lower case before counting.
    delimiters : list[str], None, optional
        Input strings are tokenized using the characters in this list; each
        entry must be a single character.  If set to ``None``, a Penn
        treebank-style tokenization is used instead, with smart handling of
        punctuation.

    Returns
    -------
    out : SArray[dict]
        An SArray with the same length as ``text``; each row maps words to
        their corresponding counts.

    See Also
    --------
    count_ngrams, tf_idf, tokenize

    References
    ----------
    - `Bag of words model <http://en.wikipedia.org/wiki/Bag-of-words_model>`_
    """
    _raise_error_if_not_sarray(text, "text")
    # Delegate the actual counting to the WordCounter feature transformer.
    frame = _turicreate.SFrame({'docs': text})
    counter = _feature_engineering.WordCounter(
        features='docs', to_lower=to_lower, delimiters=delimiters,
        output_column_prefix=None)
    return counter.fit_transform(frame)['docs']
def _new_java_obj(sc, java_class, *args):
    """Construct a new Java object by walking the JVM gateway to
    ``java_class`` and calling it with py4j-converted arguments."""
    obj = _jvm()
    for part in java_class.split('.'):
        obj = getattr(obj, part)
    converted_args = [_py2java(sc, arg) for arg in args]
    return obj(*converted_args)
def send(self, to, message):
    """Send a message to another process.

    Same as ``Process.send`` except that ``message`` is a protocol buffer;
    it is serialized and tagged with its full descriptor name.  Returns
    immediately.

    :param to: The pid of the process to send a message.
    :type to: :class:`PID`
    :param message: The message to send (a protocol buffer instance).
    :raises: ``Process.UnboundProcess`` if the process is not bound to a
        context.
    :return: Nothing
    """
    payload = message.SerializeToString()
    super(ProtobufProcess, self).send(to, message.DESCRIPTOR.full_name, payload)
def tags():  # type: () -> List[str]
    """Returns all tags in the repo.

    Returns:
        list[str]: List of all tags in the repo, sorted as versions
        (using ``v:refname`` sorting).
    """
    result = shell.run('git tag --sort=v:refname',
                       capture=True, never_pretend=True)
    return result.stdout.strip().splitlines()
def rmon_alarm_entry_alarm_rising_threshold ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
rmon = ET . SubElement ( config , "rmon" , xmlns = "urn:brocade.com:mgmt:brocade-rmon" )
alarm_entry = ET . SubElement ( rmon , "alarm-entry" )
alarm_index_key = ET . SubElement ( alarm_entry , "alarm-index" )
alarm_index_key . text = kwargs . pop ( 'alarm_index' )
alarm_rising_threshold = ET . SubElement ( alarm_entry , "alarm-rising-threshold" )
alarm_rising_threshold . text = kwargs . pop ( 'alarm_rising_threshold' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def onPlanDeleted(self, mid=None, plan=None, author_id=None, thread_id=None,
                  thread_type=None, ts=None, metadata=None, msg=None):
    """Called when the client is listening, and somebody deletes a plan.

    :param mid: The action ID
    :param plan: Deleted plan
    :param author_id: The ID of the person who deleted the plan
    :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
    :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
    :param ts: A timestamp of the action
    :param metadata: Extra metadata about the action
    :param msg: A full set of the data received
    :type plan: models.Plan
    :type thread_type: models.ThreadType
    """
    summary = "{} deleted plan {} in {} ({})".format(
        author_id, plan, thread_id, thread_type.name)
    log.info(summary)
def clean_text(self, country, guess=False, **kwargs):
    """Determine a two-letter country code based on an input.

    The input may be a country code, a country name, etc.  Returns None
    when no code can be determined.
    """
    normalized = country.lower().strip()
    # Fast path: the input is already a known code.
    if normalized in self.names:
        return normalized
    resolved = countrynames.to_code(country, fuzzy=guess)
    if resolved is not None:
        return resolved.lower()
def default_value(self):
    """Get the default value of the field.

    Coded attributes have fixed defaults; otherwise integers default to -1
    and everything else to the empty string.
    """
    try:
        return tsdb_coded_attributes[self.name]
    except KeyError:
        return -1 if self.datatype == ':integer' else ''
def run():
    '''Dump on stdout the config flags required to compile pythran-generated code.'''
    import argparse
    import distutils.sysconfig
    import pythran
    import numpy

    parser = argparse.ArgumentParser(
        prog='pythran-config',
        description='output build options for pythran-generated code',
        epilog="It's a megablast!")
    parser.add_argument('--compiler', action='store_true',
                        help='print default compiler')
    parser.add_argument('--cflags', action='store_true',
                        help='print compilation flags')
    parser.add_argument('--libs', action='store_true',
                        help='print linker flags')
    parser.add_argument('--no-python', action='store_true',
                        help='do not include Python-related flags')
    parser.add_argument('--verbose', '-v', action='count', default=0,
                        help=('verbose mode: [-v] prints warnings if pythranrc '
                              'has an invalid configuration; use '
                              '[-vv] for more information'))
    args = parser.parse_args(sys.argv[1:])
    args.python = not args.no_python
    output = []
    extension = pythran.config.make_extension(python=args.python)
    # Verbosity controls both the logger level and whether the pythranrc
    # configuration gets linted.
    if args.verbose >= 1:
        if args.verbose == 1:
            logger.setLevel(logging.WARNING)
        else:
            logger.setLevel(logging.INFO)
        lint_cfg(cfg)
    # At -vv each section is logged even when its flag was not requested;
    # only requested sections are appended to stdout output.
    if args.compiler or args.verbose >= 2:
        cxx = compiler() or 'c++'
        logger.info('CXX = '.rjust(10) + cxx)
        if args.compiler:
            output.append(cxx)
    if args.cflags or args.verbose >= 2:
        def fmt_define(define):
            # (name, value) pairs from define_macros; value may be None.
            name, value = define
            if value is None:
                return '-D' + name
            else:
                return '-D' + name + '=' + value
        cflags = []
        cflags.extend(fmt_define(define)
                      for define in extension['define_macros'])
        cflags.extend(('-I' + include)
                      for include in extension['include_dirs'])
        if args.python:
            # Python builds also need the numpy and CPython headers.
            cflags.append('-I' + numpy.get_include())
            cflags.append('-I' + distutils.sysconfig.get_python_inc())
        logger.info('CFLAGS = '.rjust(10) + ' '.join(cflags))
        if args.cflags:
            output.extend(cflags)
    if args.libs or args.verbose >= 2:
        ldflags = []
        ldflags.extend(('-L' + include)
                       for include in extension['library_dirs'])
        ldflags.extend(('-l' + include)
                       for include in extension['libraries'])
        if args.python:
            # Link against the CPython runtime library.
            ldflags.append('-L' + distutils.sysconfig.get_config_var('LIBPL'))
            ldflags.extend(distutils.sysconfig.get_config_var('LIBS').split())
            ldflags.append('-lpython' +
                           distutils.sysconfig.get_config_var('VERSION'))
        logger.info('LDFLAGS = '.rjust(10) + ' '.join(ldflags))
        if args.libs:
            output.extend(ldflags)
    if output:
        print(' '.join(output))
def _generate(cls, strategy, params):
    """Generate the object.

    Args:
        params (dict): attributes to use for generating the object
        strategy: the strategy to use
    """
    # Abstract factories have no model to build; fail early and loudly.
    if cls._meta.abstract:
        raise errors.FactoryError(
            "Cannot generate instances of abstract factory %(f)s; "
            "Ensure %(f)s.Meta.model is set and %(f)s.Meta.abstract "
            "is either not set or False." % dict(f=cls.__name__))
    return builder.StepBuilder(cls._meta, params, strategy).build()
def project_list_folder(object_id, input_params={}, always_retry=True, **kwargs):
    """Invokes the /project-xxxx/listFolder API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FlistFolder
    """
    resource = '/%s/listFolder' % object_id
    return DXHTTPRequest(resource, input_params,
                         always_retry=always_retry, **kwargs)
def compress(x, y):
    """Given an x, y coordinate, encode in "compressed format".

    Returned is always 33 bytes: a parity prefix byte (0x02 for even y,
    0x03 for odd y) followed by x as a 32-byte big-endian value.

    NOTE(fix): the previous version defined a ``wrap`` lambda that shadowed
    the parameter ``x`` and existed only as a py2/py3 shim — but
    ``binascii.unhexlify`` accepts an ASCII ``str`` on both Python 2 and 3,
    so the shim was dead weight and has been removed.
    """
    polarity = "02" if y % 2 == 0 else "03"
    return unhexlify("%s%064x" % (polarity, x))
def p_document_shorthand_with_fragments(self, p):
    """document : selection_set fragment_list"""
    # PLY grammar rule: the docstring above *is* the production definition
    # consumed by yacc — do not reword it.
    # Wrap the shorthand selection set in an anonymous Query and append the
    # trailing fragment definitions to form the Document.
    p[0] = Document(definitions=[Query(selections=p[1])] + p[2])
def connect(self):
    """Connects to the database server."""
    strict_sql_mode = ("NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,"
                       "STRICT_ALL_TABLES,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION")
    # Silence deprecation chatter emitted by the MySQL client during connect.
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', '.*deprecated.*')
        self._conn = client.connect(
            init_command=self.init_fun,
            sql_mode=strict_sql_mode,
            charset=config['connection.charset'],
            **self.conn_info)
        self._conn.autocommit(True)
def _indirect_jump_resolved(self, jump, jump_addr, resolved_by, targets):
    """Called when an indirect jump is successfully resolved.

    :param IndirectJump jump: The resolved indirect jump, or None if an
        IndirectJump instance is not available.
    :param int jump_addr: Address of the resolved indirect jump.
    :param IndirectJumpResolver resolved_by: The resolver used to resolve
        this indirect jump.
    :param list targets: List of indirect jump targets.
    :return: None
    """
    if jump is not None:
        addr = jump.addr
    else:
        addr = jump_addr
    l.debug('The indirect jump at %#x is successfully resolved by %s. It has %d targets.',
            addr, resolved_by, len(targets))
    self.kb.resolved_indirect_jumps.add(addr)
def phenotypesGenerator(self, request):
    """Returns a generator over the (phenotypes, nextPageToken) pairs
    defined by the (JSON string) request.
    """
    # TODO make paging work using SPARQL?
    compound_id = datamodel.PhenotypeAssociationSetCompoundId.parse(
        request.phenotype_association_set_id)
    dataset = self.getDataRepository().getDataset(compound_id.dataset_id)
    pa_set = dataset.getPhenotypeAssociationSet(
        compound_id.phenotypeAssociationSetId)
    phenotypes = [assoc.phenotype for assoc in pa_set.getAssociations(request)]
    return self._protocolListGenerator(request, phenotypes)
def map_field(field, func, dict_sequence):
    """Apply ``func`` to the value of ``field`` in every dict of
    ``dict_sequence``, storing the result back under the same key and
    yielding the mutated dict.

    Items for which ``func`` raises ValueError are silently dropped.
    """
    for record in dict_sequence:
        try:
            record[field] = func(record.get(field, None))
            yield record
        except ValueError:
            continue
def scheduled_function(f, hints=None):
    """The Noodles schedule function decorator.

    The decorated function will return a workflow instead of being applied
    immediately.  This workflow can then be passed to a job scheduler in
    order to be run on any architecture supporting the current python
    environment.
    """
    if hints is None:
        hints = {}
    if 'version' not in hints:
        # Derive a version hint from a hash of the function's source, when
        # the source is retrievable.
        try:
            source_bytes = inspect.getsource(f).encode()
        except Exception:
            pass
        else:
            hints['version'] = hashlib.md5(source_bytes).hexdigest()

    @wraps(f)
    def wrapped(*args, **kwargs):
        return PromisedObject(from_call(
            f, args, kwargs, deepcopy(hints),
            call_by_value=config['call_by_value']))

    # Add *(scheduled)* to the beginning of the docstring.
    if getattr(wrapped, '__doc__', None) is not None:
        wrapped.__doc__ = "*(scheduled)* " + wrapped.__doc__
    return wrapped
def _refresh_stats ( self ) :
"""Loads up the stats sheet created for this pipeline run and reads
those stats into memory""" | # regex identifies all possible stats files .
# regex = self . outfolder + " * _ stats . tsv "
# stats _ files = glob . glob ( regex )
# stats _ files . insert ( self . pipeline _ stats _ file ) # last one is the current pipeline
# for stats _ file in stats _ files :
stats_file = self . pipeline_stats_file
if os . path . isfile ( self . pipeline_stats_file ) :
with open ( stats_file , 'r' ) as stat_file :
for line in stat_file :
try : # Someone may have put something that ' s not 3 columns in the stats file
# if so , shame on him , but we can just ignore it .
key , value , annotation = line . split ( '\t' )
except ValueError :
print ( "WARNING: Each row in a stats file is expected to have 3 columns" )
if annotation . rstrip ( ) == self . name or annotation . rstrip ( ) == "shared" :
self . stats_dict [ key ] = value . strip ( ) |
def loadLabeledPoints(sc, path, minPartitions=None):
    """Load labeled points saved using RDD.saveAsTextFile.

    :param sc: Spark context
    :param path: file or directory path in any Hadoop-supported file
        system URI
    :param minPartitions: min number of partitions
    @return: labeled data stored as an RDD of LabeledPoint
    """
    # Fall back to a small partition count when none (or 0) is given.
    partitions = minPartitions or min(sc.defaultParallelism, 2)
    return callMLlibFunc("loadLabeledPoints", sc, path, partitions)
def highlight(self, selector, by=By.CSS_SELECTOR, loops=settings.HIGHLIGHTS,
              scroll=True):
    """This method uses fancy JavaScript to highlight an element.
    Used during demo_mode.
    @Params
    selector - the selector of the element to find
    by - the type of selector to search by (Default: CSS)
    loops - # of times to repeat the highlight animation
            (Default: 4. Each loop lasts for about 0.18s)
    scroll - the option to scroll to the element first (Default: True)
    """
    selector, by = self.__recalculate_selector(selector, by)
    element = self.find_element(selector, by=by,
                                timeout=settings.SMALL_TIMEOUT)
    if scroll:
        self.__slow_scroll_to_element(element)
    try:
        # Highlighting is done via CSS selectors only.
        selector = self.convert_to_css_selector(selector, by=by)
    except Exception:
        # Don't highlight if can't convert to CSS_SELECTOR
        return
    # A per-instance highlights setting overrides the loops argument.
    if self.highlights:
        loops = self.highlights
    if self.browser == 'ie':
        loops = 1
        # Override previous setting because IE is slow
    loops = int(loops)
    o_bs = ''
    # original_box_shadow — remembered so the animation can restore it.
    style = element.get_attribute('style')
    if style:
        if 'box-shadow: ' in style:
            box_start = style.find('box-shadow: ')
            box_end = style.find(';', box_start) + 1
            original_box_shadow = style[box_start:box_end]
            o_bs = original_box_shadow
    # jQuery-only pseudo-selectors (:contains / :first) can't be handled by
    # plain JS, so those go down the jQuery path.
    if ":contains" not in selector and ":first" not in selector:
        selector = re.escape(selector)
        selector = self.__escape_quotes_if_needed(selector)
        self.__highlight_with_js(selector, loops, o_bs)
    else:
        selector = self.__make_css_match_first_element_only(selector)
        selector = re.escape(selector)
        selector = self.__escape_quotes_if_needed(selector)
        try:
            self.__highlight_with_jquery(selector, loops, o_bs)
        except Exception:
            pass
            # JQuery probably couldn't load. Skip highlighting.
    time.sleep(0.065)
def do_resolved(self, subcmd, opts, *args):
    """Remove 'conflicted' state on working copy files or directories.

    usage:
        resolved PATH...

    Note: this subcommand does not semantically resolve conflicts or
    remove conflict markers; it merely removes the conflict-related
    artifact files and allows PATH to be committed again.

    ${cmd_option_list}
    """
    # NOTE: Python 2 code; the docstring above doubles as the cmdln help
    # text (${cmd_option_list} is expanded by the framework).
    # Stub implementation — just echoes the parsed options and arguments.
    print "'svn %s' opts: %s" % (subcmd, opts)
    print "'svn %s' args: %s" % (subcmd, args)
def honeycomb_lattice(a, b, spacing, alternating_sites=False):
    """Generate a honeycomb lattice.

    Args:
        a (Int): Number of lattice repeat units along x.
        b (Int): Number of lattice repeat units along y.
        spacing (Float): Distance between lattice sites.
        alternating_sites (Bool, optional): Label alternating sites with 'A'
            and 'B'. Defaults to False.

    Returns:
        (Lattice): The new lattice.

    Notes:
        The returned lattice is 3D periodic, but all sites and edges lie in
        the xy plane.
    """
    # Four sites per unit cell; either alternate 'A'/'B' sublattice labels
    # or a uniform 'L' label for all sites.
    if alternating_sites:
        site_labels = ['A', 'B', 'A', 'B']
    else:
        site_labels = ['L', 'L', 'L', 'L']
    # Honeycomb unit cell measures sqrt(3) x 3 in units of the site spacing;
    # zero extent along z (the lattice is planar).
    unit_cell_lengths = np.array([sqrt(3), 3.0, 0.0]) * spacing
    cell_lengths = unit_cell_lengths * np.array([a, b, 1.0])
    # 1-based site ids arranged on an (a, b, 4) grid. np.roll() on this id
    # grid below selects neighbour ids in adjacent cells, wrapping at the
    # edges — this is what makes the lattice periodic.
    grid = np.array(list(range(1, int(a * b * 4 + 1)))).reshape(a, b, 4, order='C')
    sites = []
    for i in range(a):
        for j in range(b):
            # site 1: bottom site of the cell; bonded to site 2 in this cell,
            # site 2 in the rolled cell along x, and site 4 in the rolled
            # cell along y.
            # NOTE(review): roll sign convention (+1 vs -1 mapping to
            # left/right neighbour) inferred from symmetry with sites 2/4 —
            # confirm against the lattice module's conventions.
            r = np.array([i * sqrt(3) * spacing, j * 3 * spacing, 0.0])
            neighbours = [grid[i, j, 1], np.roll(grid, +1, axis=0)[i, j, 1], np.roll(grid, +1, axis=1)[i, j, 3]]
            sites.append(lattice_site.Site(grid[i, j, 0], r, neighbours, 0.0, site_labels[0]))
            # site 2: offset by half a cell in x, 0.5*spacing up in y.
            r = np.array([i * sqrt(3) * spacing + sqrt(3) / 2 * spacing, (j * 3 + 0.5) * spacing, 0.0])
            neighbours = [grid[i, j, 0], grid[i, j, 2], np.roll(grid, -1, axis=0)[i, j, 0]]
            sites.append(lattice_site.Site(grid[i, j, 1], r, neighbours, 0.0, site_labels[1]))
            # site 3: same x as site 2, one spacing higher in y.
            r = np.array([i * sqrt(3) * spacing + sqrt(3) / 2 * spacing, (j * 3 + 1.5) * spacing, 0.0])
            neighbours = [grid[i, j, 1], grid[i, j, 3], np.roll(grid, -1, axis=0)[i, j, 3]]
            sites.append(lattice_site.Site(grid[i, j, 2], r, neighbours, 0.0, site_labels[2]))
            # site 4: top site of the cell; bonds back to site 1 of the cell
            # above via the roll along axis 1 (y-periodic wrap).
            r = np.array([i * sqrt(3) * spacing, (j * 3 + 2) * spacing, 0.0])
            neighbours = [grid[i, j, 2], np.roll(grid, +1, axis=0)[i, j, 2], np.roll(grid, -1, axis=1)[i, j, 0]]
            sites.append(lattice_site.Site(grid[i, j, 3], r, neighbours, 0.0, site_labels[3]))
    return lattice.Lattice(sites, cell_lengths=cell_lengths)
def pkg_contents(self):
    """Print packages contents."""
    # Expect a display flag followed by one or more package names;
    # anything else falls through to the usage message.
    display_flags = ("-d", "--display")
    if len(self.args) <= 1 or self.args[0] not in display_flags:
        usage("")
        return
    PackageManager(self.args[1:]).display()
def viewbox(self):
    """Return bounding box of the viewport.

    :return: ``(left, top, right, bottom)`` tuple.
    """
    # Collect the four viewport edges in the documented order.
    return tuple(getattr(self, edge) for edge in ('left', 'top', 'right', 'bottom'))
def item_wegobject_adapter(obj, request):
    """Adapter for rendering a list of
    :class:`crabpy.gateway.Wegobject` to json."""
    def _codelist(item):
        # aard / begin_bewerking / begin_organisatie all share this shape.
        return {'id': item.id, 'naam': item.naam, 'definitie': item.definitie}

    meta = obj.metadata
    return {
        'id': obj.id,
        'aard': _codelist(obj.aard),
        'centroid': obj.centroid,
        'bounding_box': obj.bounding_box,
        'metadata': {
            'begin_tijd': meta.begin_tijd,
            'begin_datum': meta.begin_datum,
            'begin_bewerking': _codelist(meta.begin_bewerking),
            'begin_organisatie': _codelist(meta.begin_organisatie),
        },
    }
def get_choice_selected_value(self):
    """Returns the default selection from a choice menu.

    Throws an error if this is not a choice parameter.
    """
    param_dto = self.dto[self.name]
    if 'choiceInfo' not in param_dto:
        raise GPException('not a choice parameter')
    # .get() yields None when no value has been selected, matching the
    # explicit None return of a missing 'selectedValue' key.
    return param_dto['choiceInfo'].get('selectedValue')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.