signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def can_unconsign(self):
    """bool: :const:`True` if :attr:`address` can unconsign edition
    :attr:`edition_number` of :attr:`piece_address`, else :const:`False`.

    The address can unconsign only when the last (non-loan) transaction in
    the edition's chain is a consignment of this edition to this address.
    On failure, :attr:`reason` is set to a human-readable explanation.
    """
    chain = BlockchainSpider.chain(self._tree, self.edition_number)
    if not chain:
        self.reason = 'Master edition not yet registered'
        return False
    # Loans do not transfer ownership, so drop them before inspecting the tail.
    last = BlockchainSpider.strip_loan(chain)[-1]
    consigned_to_us = (
        last['action'] == 'CONSIGN'
        and last['piece_address'] == self.piece_address
        and last['edition_number'] == self.edition_number
        and last['to_address'] == self.address
    )
    if not consigned_to_us:
        self.reason = 'Edition number {} is not consigned to {}'.format(
            self.edition_number, self.address)
        return False
    return True
def create_presentation(self):
    """Create the presentation.

    The audio track is mixed with the slides. The resulting file is saved
    as self.output.

    DownloadError is raised if some resources cannot be fetched.
    ConversionError is raised if the final video cannot be created.
    """
    # Fail fast: avoid wasting time and bandwidth when conversion would
    # be refused anyway because the target already exists.
    if os.path.exists(self.output) and not self.overwrite:
        raise ConversionError("File %s already exist and --overwrite not specified" % self.output)
    video = self.download_video()
    raw_slides = self.download_slides()
    # ffmpeg does not support SWF, so convert the slides to PNG first.
    png_slides = self._convert_slides(raw_slides)
    # One frame per second, positioned using the time-code information.
    frame_pattern = self._prepare_frames(png_slides)
    return self._assemble(video, frame_pattern)
def setDigitalMinimum(self, edfsignal, digital_minimum):
    """Sets the minimum digital value of signal edfsignal.

    Usually the value -32768 is used for EDF+ and -8388608 for BDF+.
    Usually this will be (-(digital_maximum + 1)).

    Parameters
    ----------
    edfsignal : int
        signal number (0-based index into self.channels)
    digital_minimum : int
        Sets the minimum digital value

    Raises
    ------
    ChannelDoesNotExist
        If ``edfsignal`` is not a valid channel index.

    Notes
    -----
    This function is optional and can be called only after opening a file
    in writemode and before the first sample write action.
    """
    # BUGFIX: the upper bound must be exclusive (``>=``). Channels are
    # indexed 0..n_channels-1, so the original ``edfsignal > self.n_channels``
    # let edfsignal == n_channels through and crashed with IndexError below
    # instead of raising ChannelDoesNotExist.
    if edfsignal < 0 or edfsignal >= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['digital_min'] = digital_minimum
    self.update_header()
def spin1z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2, chi_eff, chi_a):
    """Returns spin1z.

    Inverts the effective-spin parameterization: spin1z =
    (mass1 + mass2) / (2 * mass1) * (chi_eff - chi_a).
    """
    total_mass = mass1 + mass2
    return total_mass / (2.0 * mass1) * (chi_eff - chi_a)
def load_file(self, filepath):
    """Open any readable file and dispatch it to the matching parser.

    Records the path, extension and basename on the instance, reads the
    file's lines into ``self.file_content``, then calls the loader
    registered for this extension in ``self._load_funcs``.

    Parameters
    ----------
    filepath : :class:`str`
        The full path or a relative path to any type of file.

    Returns
    -------
    :class:`dict`
        Dictionary of molecular information extracted from the input file;
        contents vary with file type. The data is sorted into lists that
        contain one feature each (e.g. key atom_id: [atom_id_1, atom_id_2])
        and is updated with new data over the course of analysis.
    """
    self.file_path = filepath
    # os.path.splitext -> (root, ext); os.path.split -> (head, tail)
    self.file_type = os.path.splitext(filepath)[1]
    self.file_name = os.path.split(filepath)[1]
    with open(filepath) as source:
        self.file_content = source.readlines()
    # Loader lookup is keyed on the extension (including the leading dot).
    return self._load_funcs[self.file_type]()
def is_in_intervall(value, min_value, max_value, name='variable'):
    """Raise a ValueError if ``value`` is not in [min_value, max_value].

    Parameters
    ----------
    value : orderable
    min_value : orderable
    max_value : orderable
    name : str
        Name of the variable to print in the exception message.
    """
    if min_value <= value <= max_value:
        return
    raise ValueError('{}={} is not in [{}, {}]'.format(name, value, min_value, max_value))
def dashed_line(self, x1, y1, x2, y2, dash_length=1, space_length=1):
    """Draw a dashed line. Same interface as line() except:
    - dash_length: Length of the dash
    - space_length: Length of the space between dashes
    """
    # Switch the pen into dashed mode, draw, then reset to solid.
    self._set_dash(dash_length, space_length)
    self.line(x1, y1, x2, y2)
    # NOTE(review): if line() raises, the dash state is not reset -- a
    # try/finally would make the reset unconditional; confirm intent.
    self._set_dash()
def index_name(self):
    """Get the Elasticsearch index name associated to the campaign.

    The name template comes from the campaign's Elasticsearch export
    configuration; the report date is substituted in and the result is
    lowercased (Elasticsearch index names must be lowercase).
    """
    template = self.campaign.export.elasticsearch.index_name
    return template.format(date=self.report['date']).lower()
def _get_csv_from_section(sections, crumbs, csvs):
    """Get table name, variable name, and column values from paleo metadata.

    :param dict sections: Metadata
    :param str crumbs: Crumbs (breadcrumb path used for logging and table naming)
    :param dict csvs: Csv
    :return dict sections: Metadata (updated in place and returned)
    :return dict csvs: Csv (accumulated csv data)
    """
    logger_csvs.info("enter get_csv_from_section: {}".format(crumbs))
    _idx = 0
    try:
        # Process the tables in section.
        for _name, _section in sections.items():
            # Process each entry's sub-tables below, if they exist.
            if "measurementTable" in _section:
                sections[_name]["measurementTable"], csvs = _get_csv_from_table(
                    _section["measurementTable"], "{}{}{}".format(crumbs, _idx, "measurement"), csvs)
            if "model" in _section:
                sections[_name]["model"], csvs = _get_csv_from_model(
                    _section["model"], "{}{}{}".format(crumbs, _idx, "model"), csvs)
            _idx += 1
    except Exception as e:
        # Best-effort: log and report, but never propagate -- partial results
        # in (sections, csvs) are still returned.
        logger_csvs.error("get_csv_from_section: {}, {}".format(crumbs, e))
        print("Error: get_csv_from_section: {}, {}".format(crumbs, e))
    logger_csvs.info("exit get_csv_from_section: {}".format(crumbs))
    return sections, csvs
async def pre_handle(self, request: Request, responder: 'Responder'):
    """Start typing right when the message is received."""
    # Queue a Typing layer and flush it immediately so the user sees the
    # indicator before the real handler runs.
    # NOTE(review): send() is intentionally not awaited -- it appears to only
    # enqueue; flush() performs the actual delivery. Confirm against Responder.
    responder.send([lyr.Typing()])
    await responder.flush(request)
    # Clear the queued layers so the downstream handler starts clean.
    responder.clear()
    # Continue the middleware chain.
    await self.next(request, responder)
def send_OS_X_notify(title, content, img_path):
    """Send a macOS desktop notification.

    Degrades gracefully: if the pyobjc bridges (Foundation/AppKit/objc) are
    not importable, the function logs and returns without raising.

    :param title: notification title
    :param content: notification subtitle text
    :param img_path: path to an image shown with the notification, or None
    """
    try:
        from Foundation import (NSDate, NSUserNotification, NSUserNotificationCenter)
        from AppKit import NSImage
        import objc
    except ImportError:
        logger.info('failed to init OSX notify!')
        return

    def swizzle(cls, SEL, func):
        # Replace the Objective-C method SEL on cls with func, passing the
        # original implementation as the second argument.
        old_IMP = getattr(cls, SEL, None)
        if old_IMP is None:
            old_IMP = cls.instanceMethodForSelector_(SEL)

        def wrapper(self, *args, **kwargs):
            return func(self, old_IMP, *args, **kwargs)
        new_IMP = objc.selector(wrapper, selector=old_IMP.selector, signature=old_IMP.signature)
        objc.classAddMethod(cls, SEL.encode(), new_IMP)

    def swizzled_bundleIdentifier(self, original):
        # Masquerade as iTunes so the notification gets the iTunes icon.
        return 'com.apple.itunes'

    swizzle(objc.lookUpClass('NSBundle'), 'bundleIdentifier', swizzled_bundleIdentifier)
    notification = NSUserNotification.alloc().init()
    notification.setTitle_(title)
    notification.setSubtitle_(content)
    notification.setInformativeText_('')
    notification.setUserInfo_({})
    if img_path is not None:
        image = NSImage.alloc().initWithContentsOfFile_(img_path)
        # notification.setContentImage_(image)
        # Private API: shows the image in place of the app icon.
        notification.set_identityImage_(image)
    # Deliver immediately (zero seconds from now).
    notification.setDeliveryDate_(NSDate.dateWithTimeInterval_sinceDate_(0, NSDate.date()))
    NSUserNotificationCenter.defaultUserNotificationCenter().scheduleNotification_(notification)
    logger.info('send notify success!')
def transition_to_execute(self):
    """Transition to the execute state.

    Only valid from the ``add`` state.
    """
    # NOTE: assert is stripped under ``python -O``; the precondition is a
    # debugging aid, not input validation.
    assert self.state in [AQStateMachineStates.add]
    self.state = AQStateMachineStates.execute
def state_not_literal(self, value):
    """Parse a NOT_LITERAL regex opcode.

    Yields one random literal that differs from the negated character
    ``chr(value)``, drawn from ``self.literals``.
    """
    negate = chr(value)
    candidate = negate
    # Re-draw until we hit a literal other than the negated one.
    while candidate == negate:
        candidate = choice(self.literals)
    yield candidate
def data(self):
    """Data representation of the datasource sent to the frontend."""
    order_by_choices = []
    # self.column_names returns sorted column_names
    for s in self.column_names:
        s = str(s or '')
        # Each column yields an ascending and a descending choice; the key is
        # a JSON-encoded [column, ascending] pair the frontend sends back.
        order_by_choices.append((json.dumps([s, True]), s + ' [asc]'))
        order_by_choices.append((json.dumps([s, False]), s + ' [desc]'))
    # Map internal metric/column names to their human-readable labels.
    verbose_map = {'__timestamp': 'Time'}
    verbose_map.update({o.metric_name: o.verbose_name or o.metric_name for o in self.metrics})
    verbose_map.update({o.column_name: o.verbose_name or o.column_name for o in self.columns})
    return {
        # simple fields
        'id': self.id,
        'column_formats': self.column_formats,
        'description': self.description,
        'database': self.database.data,  # pylint: disable=no-member
        'default_endpoint': self.default_endpoint,
        'filter_select': self.filter_select_enabled,  # TODO deprecate
        'filter_select_enabled': self.filter_select_enabled,
        'name': self.name,
        'datasource_name': self.datasource_name,
        'type': self.type,
        'schema': self.schema,
        'offset': self.offset,
        'cache_timeout': self.cache_timeout,
        'params': self.params,
        'perm': self.perm,
        'edit_url': self.url,
        # sqla-specific
        'sql': self.sql,
        # one to many
        'columns': [o.data for o in self.columns],
        'metrics': [o.data for o in self.metrics],
        # TODO deprecate, move logic to JS
        'order_by_choices': order_by_choices,
        'owners': [owner.id for owner in self.owners],
        'verbose_map': verbose_map,
        'select_star': self.select_star,
    }
def helper_list(access_token, oid, path):
    """Helper function to list a URL path.

    Args:
        access_token (str): A valid Azure authentication token.
        oid (str): An OID; when non-empty it is appended to the path
            in the OData form ``path('oid')``.
        path (str): A URL path.

    Returns:
        HTTP response. JSON body.
    """
    if oid != "":
        path = "{0}('{1}')".format(path, oid)
    endpoint = ams_rest_endpoint + path
    return do_ams_get(endpoint, path, access_token)
def parse_rst_params(doc):
    """Parse a reStructuredText docstring and return a dictionary
    with parameter names and descriptions.

    >>> doc = '''
    ... :param foo: foo parameter
    ...     foo parameter
    ... :param bar: bar parameter
    ...
    ... :param baz: baz parameter
    ...     baz parameter
    ...     baz parameter
    ...
    ... Some text.
    ... '''
    >>> params = parse_rst_params(doc)
    >>> params['foo']
    'foo parameter foo parameter'
    >>> params['bar']
    'bar parameter'
    >>> params['baz']
    'baz parameter baz parameter baz parameter'
    """
    # Group 1 captures the field's leading indent; continuation lines must be
    # indented deeper than that (backreference \1 plus extra whitespace).
    param_re = re.compile(
        r"""^([ \t]*):param\ (?P<param>\w+):\ (?P<body>.*\n(\1[ \t]+\w.*\n)*)""",
        re.MULTILINE | re.VERBOSE)
    # Collapse each body (first line + indented continuations) into one
    # space-joined description string.
    return {
        match.group('param'): ' '.join(
            line.strip() for line in match.group('body').strip().split('\n'))
        for match in param_re.finditer(doc)
    }
def __get_ssh_credentials(vm_):
    """Get configured SSH credentials.

    Returns a ``(ssh_user, ssh_key)`` tuple. The username falls back to the
    local ``$USER`` environment variable, the key file to the GCE default
    ``~/.ssh/google_compute_engine``.
    """
    ssh_user = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default=os.getenv('USER'))
    ssh_key = config.get_cloud_config_value(
        'ssh_keyfile', vm_, __opts__,
        default=os.path.expanduser('~/.ssh/google_compute_engine'))
    return ssh_user, ssh_key
def get_bindings_for_keys(self, keys):
    """Return a list of key bindings that can handle this key.

    (This returns also inactive bindings, so the `filter` still has to be
    called, for checking it.)

    :param keys: tuple of keys.
    """
    def get():
        # Computed lazily; only runs on a cache miss for this key tuple.
        result = []
        for b in self.key_bindings:
            # Only bindings of exactly the same length can match.
            if len(keys) == len(b.keys):
                match = True
                any_count = 0
                for i, j in zip(b.keys, keys):
                    # Keys.Any is a wildcard position in the binding.
                    if i != j and i != Keys.Any:
                        match = False
                        break
                    if i == Keys.Any:
                        any_count += 1
                if match:
                    result.append((any_count, b))
        # Place bindings that have more 'Any' occurrences in them at the end,
        # i.e. most-specific bindings first.
        result = sorted(result, key=lambda item: -item[0])
        return [item[1] for item in result]
    return self._get_bindings_for_keys_cache.get(keys, get)
def almost_equals(self, other):
    """Compare transforms for approximate equality.

    :param other: Transform being compared.
    :type other: Affine
    :return: True if the absolute difference between each element of the
        respective transform matrices is < ``EPSILON``.
    """
    # Only the six affine coefficients participate in the comparison.
    return all(abs(self[i] - other[i]) < EPSILON for i in range(6))
def system_methodHelp(self, method_name: str) -> str:
    """Return the docstring of a registered method.

    system.methodHelp('add') => "Adds two integers together"

    Return:
        (str): the method's help text, or "" when the method is unknown.
    """
    method = None
    # Explicitly registered functions take precedence over instance lookup.
    if method_name in self.funcs:
        method = self.funcs[method_name]
    elif self.instance is not None:
        try:
            method = resolve_dotted_attribute(
                self.instance, method_name, self.allow_dotted_names)
        except AttributeError:
            # Unknown attribute: fall through and return the empty string.
            pass
    return "" if method is None else pydoc.getdoc(method)
def plot(data, output_dir_path='.', width=10, height=8):
    """Create two plots: 1) loss 2) accuracy.

    Args:
        data: Pandas dataframe in *the* format (coerced from any
            DataFrame-compatible input).
        output_dir_path: directory where the plot files are written.
        width: figure width.
        height: figure height.
    """
    frame = data if isinstance(data, pd.DataFrame) else pd.DataFrame(data)
    plot_accuracy(frame, output_dir_path=output_dir_path, width=width, height=height)
    plot_loss(frame, output_dir_path, width=width, height=height)
def clean_file(c_source, virtualenv_dirname):
    """Strip trailing whitespace and clean up "local" names in C source.

    These source files are autogenerated from the ``cython`` CLI.

    Args:
        c_source (str): Path to a ``.c`` source file.
        virtualenv_dirname (str): The name of the ``virtualenv``
            directory where Cython is installed (this is part of a
            relative path ``.nox/{NAME}/lib/...``).
    """
    with open(c_source, "r") as file_obj:
        contents = file_obj.read().rstrip()
    # Replace the path to the Cython include files so output is
    # machine-independent.
    py_version = "python{}.{}".format(*sys.version_info[:2])
    lib_path = os.path.join(
        ".nox", virtualenv_dirname, "lib", py_version, "site-packages", "")
    contents = contents.replace(lib_path, "")
    # Write the file back with every line stripped of trailing whitespace.
    cleaned = [line.rstrip() + "\n" for line in contents.split("\n")]
    with open(c_source, "w") as file_obj:
        file_obj.write("".join(cleaned))
def contour(c, subsample=1, size=10, color='g'):
    """Draw a contour on the current plot by scattering points.

    Parameters
    ----------
    c : :obj:`autolab_core.Contour`
        contour to draw
    subsample : int
        subsample rate for boundary pixels
    size : int
        size of scattered points
    color : :obj:`str`
        color of the points

    Raises
    ------
    ValueError
        If ``c`` is not a Contour.
    """
    if not isinstance(c, Contour):
        raise ValueError('Input must be of type Contour')
    # boundary_pixels is (row, col); scatter takes (x, y) so swap the axes.
    for idx in range(0, c.num_pixels, subsample):
        plt.scatter(c.boundary_pixels[idx, 1], c.boundary_pixels[idx, 0],
                    s=size, c=color)
def get_nameserver_detail_output_show_nameserver_nameserver_xlatedomain(self, **kwargs):
    """Auto Generated Code.

    Builds the XML request for get-nameserver-detail and passes it to the
    callback (defaults to ``self._callback``).

    Required kwargs: ``nameserver_portid``, ``nameserver_xlatedomain``.
    Optional kwargs: ``callback``.
    """
    config = ET.Element("config")
    get_nameserver_detail = ET.Element("get_nameserver_detail")
    # NOTE(review): the initial "config" element above is immediately
    # discarded by this rebinding -- this is how the generator emits code.
    config = get_nameserver_detail
    output = ET.SubElement(get_nameserver_detail, "output")
    show_nameserver = ET.SubElement(output, "show-nameserver")
    nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
    nameserver_portid_key.text = kwargs.pop('nameserver_portid')
    nameserver_xlatedomain = ET.SubElement(show_nameserver, "nameserver-xlatedomain")
    nameserver_xlatedomain.text = kwargs.pop('nameserver_xlatedomain')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def push_broks(self, broks):
    """Send a HTTP request to the satellite (POST /push_broks).

    Send broks to the satellite.

    :param broks: Brok list to send
    :type broks: list
    :return: True on success, False on failure
    :rtype: bool
    """
    logger.debug("[%s] Pushing %d broks", self.name, len(broks))
    # Synchronous POST (wait=True): the return value reflects delivery.
    return self.con.post('_push_broks', {'broks': broks}, wait=True)
def search(lines, pattern):
    """Return all lines that match the pattern.

    ``*`` in the pattern acts as a glob-style wildcard (it is translated
    to the regex ``.*``); matching is a substring search, not anchored.

    :param lines: iterable of strings to filter
    :param pattern: pattern with optional ``*`` wildcards
    :return: list of matching lines
    """
    regex = re.compile(pattern.replace("*", ".*"))
    return [line for line in lines if regex.search(line)]
def _parse_shard_list(shard_list, current_shard_list):
    """Parse a shard selection expression against the available shards.

    :param shard_list: format like: "1,5-10,20" (single ids and inclusive
        ranges, comma separated)
    :param current_shard_list: currently available shard ids (strings)
    :return: the selected shard ids (strings); the whole of
        ``current_shard_list`` when ``shard_list`` is empty
    :raises LogException: when the expression selects no available shard
    """
    if not shard_list:
        # Empty selection means "all currently available shards".
        return current_shard_list
    target_shards = []
    for n in shard_list.split(","):
        n = n.strip()
        # Single shard id -- kept only if it is actually available.
        if n.isdigit() and n in current_shard_list:
            target_shards.append(n)
        elif n:
            # Possible "start-end" range; silently ignored when malformed.
            rng = n.split("-")
            if len(rng) == 2:
                s = rng[0].strip()
                e = rng[1].strip()
                if s.isdigit() and e.isdigit():
                    for x in range(int(s), int(e) + 1):
                        if str(x) in current_shard_list:
                            target_shards.append(str(x))
    logger.info("parse_shard, shard_list: '{0}' current shard '{1}' result: '{2}'".format(shard_list, current_shard_list, target_shards))
    if not target_shards:
        raise LogException("InvalidParameter", "There's no available shard with settings {0}".format(shard_list))
    return target_shards
def FileHacks(self):
    """Hacks to make the filesystem look normal."""
    if sys.platform == "win32":
        import win32api  # pylint: disable=g-import-not-at-top
        # Make the filesystem look like the topmost level are the drive letters.
        if self.path == "/":
            self.files = win32api.GetLogicalDriveStrings().split("\x00")
            # Remove empty strings and strip trailing backslashes.
            self.files = [drive.rstrip("\\") for drive in self.files if drive]
        # This regex will match the various windows devices. Raw hard disk devices
        # must be considered files, however in windows, if we try to list them as
        # directories this also works. Since the code above distinguished between
        # files and directories using the file listing property, we must force
        # treating raw devices as files.
        elif re.match(r"/*\\\\.\\[^\\]+\\?$", self.path) is not None:
            # Special case: windows devices can't seek to the end so just lie
            # about the size.
            self.size = 0x7fffffffffffffff
            # Windows raw devices can be opened in two incompatible modes. With a
            # trailing \ they look like a directory, but without they are the raw
            # device. In GRR we only support opening devices in raw mode so ensure
            # that we never append a \ to raw device name.
            self.path = self.path.rstrip("\\")
            # In windows raw devices must be accessed using sector alignment.
            self.alignment = 512
    elif sys.platform == "darwin":
        # On Mac, raw disk devices are also not seekable to the end and have no
        # size so we use the same approach as on Windows.
        if re.match("/dev/r?disk.*", self.path):
            self.size = 0x7fffffffffffffff
            self.alignment = 512
def split_by_fname_file(self, fname: PathOrStr, path: PathOrStr = None) -> 'ItemLists':
    "Split the data by using the names in `fname` for the validation set. `path` will override `self.path`."
    # Resolve the directory first (fall back to self.path), then read the
    # newline-separated names and delegate to the generic file splitter.
    path = Path(ifnone(path, self.path))
    valid_names = loadtxt_str(path / fname)
    return self.split_by_files(valid_names)
def find_visible_birthdays(request, data):
    """Return only the birthdays visible to the current user.

    Staff-like users (teachers, eighth office, eighth admins) see
    everything; everyone else only sees users who opted in as public.
    Filters ``data`` in place and returns it.
    """
    user = request.user
    if user and (user.is_teacher or user.is_eighthoffice or user.is_eighth_admin):
        return data
    for day in ('today', 'tomorrow'):
        data[day]['users'] = [u for u in data[day]['users'] if u['public']]
    return data
def namedb_name_update(cur, opcode, input_opdata, only_if={}, constraints_ignored=[]):
    """Update an existing name in the database.

    If non-empty, only update the given fields.
    DO NOT CALL THIS METHOD DIRECTLY.
    """
    # NOTE(review): Python 2 code (``except Exception, e``, ``unicode``).
    # NOTE(review): mutable default arguments (only_if, constraints_ignored)
    # are shared across calls -- callers must not mutate them.
    # Work on a copy so the caller's opdata is never modified.
    opdata = copy.deepcopy(input_opdata)
    namedb_name_fields_check(opdata)
    mutate_fields = op_get_mutate_fields(opcode)
    if opcode not in OPCODE_CREATION_OPS:
        assert 'name' not in mutate_fields, "BUG: 'name' listed as a mutate field for '%s'" % (opcode)
    # reduce opdata down to the given fields....
    must_equal = namedb_update_must_equal(opdata, mutate_fields)
    must_equal += ['name', 'block_number']
    for ignored in constraints_ignored:
        if ignored in must_equal:
            # ignore this constraint
            must_equal.remove(ignored)
    try:
        query, values = namedb_update_prepare(cur, ['name', 'block_number'], opdata, "name_records", must_equal=must_equal, only_if=only_if)
    except Exception, e:
        # A failure to even build the query is unrecoverable: abort the process.
        log.exception(e)
        log.error("FATAL: failed to update name '%s'" % opdata['name'])
        os.abort()
    namedb_query_execute(cur, query, values)
    try:
        # Exactly one row must have been touched, or the DB is inconsistent.
        assert cur.rowcount == 1, "Updated %s row(s)" % cur.rowcount
    except Exception, e:
        log.exception(e)
        log.error("FATAL: failed to update name '%s'" % opdata['name'])
        # Reconstruct the query with its values interpolated, for debugging.
        log.error("Query: %s", "".join(["%s %s" % (frag, "'%s'" % val if type(val) in [str, unicode] else val) for (frag, val) in zip(query.split("?"), values + ("",))]))
        os.abort()
    return True
def _setup_pailgun(self):
    """Sets up a PailgunServer instance."""
    # Constructs and returns a runnable PantsRunner for each accepted request.
    def runner_factory(sock, arguments, environment):
        return self._runner_class.create(
            sock, arguments, environment, self.services, self._scheduler_service,
        )

    # Plumb the daemon's lifecycle lock to the `PailgunServer` to safeguard teardown.
    # This indirection exists to allow the server to be created before PantsService.setup
    # has been called to actually initialize the `services` field.
    @contextmanager
    def lifecycle_lock():
        with self.services.lifecycle_lock:
            yield

    return PailgunServer(self._bind_addr, runner_factory, lifecycle_lock,
                         self._request_complete_callback)
def p_ex_expression(tok):
    """ex_expression : OP_EXISTS cmp_expression
                     | cmp_expression"""
    # NOTE: the docstring above IS the PLY grammar rule -- do not edit it.
    if len(tok) == 3:
        # Matched "OP_EXISTS cmp_expression": wrap in a unary operation node.
        tok[0] = UnaryOperationRule(tok[1], tok[2])
    else:
        # Bare cmp_expression: pass the child node through unchanged.
        tok[0] = tok[1]
def ckw03(handle, begtim, endtim, inst, ref, avflag, segid, nrec, sclkdp, quats, avvs, nints, starts):
    """Add a type 3 segment to a C-kernel.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw03_c.html

    :param handle: Handle of an open CK file.
    :type handle: int
    :param begtim: The beginning encoded SCLK of the segment.
    :type begtim: float
    :param endtim: The ending encoded SCLK of the segment.
    :type endtim: float
    :param inst: The NAIF instrument ID code.
    :type inst: int
    :param ref: The reference frame of the segment.
    :type ref: str
    :param avflag: True if the segment will contain angular velocity.
    :type avflag: bool
    :param segid: Segment identifier.
    :type segid: str
    :param nrec: Number of pointing records.
    :type nrec: int
    :param sclkdp: Encoded SCLK times.
    :type sclkdp: Array of floats
    :param quats: Quaternions representing instrument pointing.
    :type quats: Nx4-Element Array of floats
    :param avvs: Angular velocity vectors.
    :type avvs: Nx3-Element Array of floats
    :param nints: Number of intervals.
    :type nints: int
    :param starts: Encoded SCLK interval start times.
    :type starts: Array of floats
    """
    # Marshal every Python value into the ctypes form CSPICE expects.
    handle = ctypes.c_int(handle)
    begtim = ctypes.c_double(begtim)
    endtim = ctypes.c_double(endtim)
    inst = ctypes.c_int(inst)
    ref = stypes.stringToCharP(ref)
    # SpiceBoolean is an int in CSPICE, hence c_int for the flag.
    avflag = ctypes.c_int(avflag)
    segid = stypes.stringToCharP(segid)
    sclkdp = stypes.toDoubleVector(sclkdp)
    quats = stypes.toDoubleMatrix(quats)
    avvs = stypes.toDoubleMatrix(avvs)
    nrec = ctypes.c_int(nrec)
    starts = stypes.toDoubleVector(starts)
    nints = ctypes.c_int(nints)
    libspice.ckw03_c(handle, begtim, endtim, inst, ref, avflag, segid, nrec,
                     sclkdp, quats, avvs, nints, starts)
def assert_subclass_of(typ, allowed_types  # type: Union[Type, Tuple[Type]]
                       ):
    """An inlined version of subclass_of(var_types)(value) without 'return True': it does not return anything in case of
    success, and raises a IsWrongType exception in case of failure.
    Used in validate and validation/validator.

    :param typ: the type to check
    :param allowed_types: the type(s) to enforce. If a tuple of types is provided it is considered alternate types:
        one match is enough to succeed. If None, type will not be enforced
    :return:
    """
    if not issubclass(typ, allowed_types):
        try:
            # more than 1? Probe index 1 to distinguish the three cases below.
            allowed_types[1]
            # Tuple with >= 2 entries: report the whole alternative set.
            # (IsWrongType is not IndexError/TypeError, so it propagates.)
            raise IsWrongType(wrong_value=typ, ref_type=allowed_types, help_msg='Value should be a subclass of any of {ref_type}')
        except IndexError:
            # Single-element tuple: unwrap it for a cleaner error message.
            allowed_types = allowed_types[0]
        except TypeError:
            # Not subscriptable: allowed_types is a plain type, keep as-is.
            pass
        raise IsWrongType(wrong_value=typ, ref_type=allowed_types)
def dirs(self, pattern=None):
    """D.dirs() -> List of this directory's subdirectories.

    The elements of the list are Path objects.
    This does not walk recursively into subdirectories
    (but see :meth:`walkdirs`).

    With the optional `pattern` argument, this only lists
    directories whose names match the given pattern. For
    example, ``d.dirs('build-*')``.
    """
    subdirs = []
    for entry in self.listdir(pattern):
        if entry.isdir():
            subdirs.append(entry)
    return subdirs
def get_version(cls, settings: Dict[str, Any], path: str) -> Optional[str]:
    """Generate the version string to be used in static URLs.

    ``settings`` is the `Application.settings` dictionary and ``path``
    is the relative location of the requested asset on the filesystem.

    The returned value should be a string, or ``None`` if no version
    could be determined.

    .. versionchanged:: 3.1
        This method was previously recommended for subclasses to override;
        `get_content_version` is now preferred as it allows the base
        class to handle caching of the result.
    """
    # Resolve the asset relative to the configured static root, then
    # delegate to the (cached) version computation.
    abs_path = cls.get_absolute_path(settings["static_path"], path)
    return cls._get_cached_version(abs_path)
def reshape_nd(data_or_shape, ndim):
    """Return image array or shape with at least ndim dimensions.

    Prepend 1s to the shape as necessary.

    >>> reshape_nd(numpy.empty(1), 2).shape
    (1, 1)
    >>> reshape_nd(numpy.empty((2, 3)), 3).shape
    (1, 2, 3)
    >>> reshape_nd(numpy.empty((3, 4, 5)), 3).shape
    (3, 4, 5)
    >>> reshape_nd((2, 3), 3)
    (1, 2, 3)
    """
    # Accept either a plain shape tuple or an array-like with .shape/.reshape.
    is_shape = isinstance(data_or_shape, tuple)
    shape = data_or_shape if is_shape else data_or_shape.shape
    missing = ndim - len(shape)
    if missing <= 0:
        # Already has enough dimensions; return the input untouched.
        return data_or_shape
    padded = (1,) * missing + tuple(shape)
    return padded if is_shape else data_or_shape.reshape(padded)
def _check_categorical_option_type ( option_name , option_value , possible_values ) :
"""Check whether or not the requested option is one of the allowed values .""" | err_msg = '{0} is not a valid option for {1}. ' . format ( option_value , option_name )
err_msg += ' Expected one of: ' . format ( possible_values )
err_msg += ', ' . join ( map ( str , possible_values ) )
if option_value not in possible_values :
raise ToolkitError ( err_msg ) |
def run_it():
    """Search and download torrents until the user says it so."""
    initialize()
    parser = get_parser()
    args = None
    first_parse = True
    # Main interactive loop: first iteration uses the CLI arguments,
    # subsequent ones re-prompt the user until 'q'/'Q' exits.
    while (True):
        if first_parse is True:
            first_parse = False
            args = parser.parse_args()
        else:
            print(textwrap.dedent('''\
                Search again like in the beginning.
                -- You can either choose best rated or list mode.
                -- This time, you can insert the search string without double quotes.
                Remember the list mode options!
                0: torrent project.
                1: the pirate bay.
                2: 1337x.
                3: eztv.
                4: limetorrents.
                5: isohunt.
                '''))
            print('Or.. if you want to exit just write "' + Colors.LRED + 'Q' + Colors.ENDC + '" or "' + Colors.LRED + 'q' + Colors.ENDC + '".')
            # Strip quotes so users may paste quoted search strings.
            input_parse = input('>> ').replace("'", "").replace('"', '')
            if input_parse in ['Q', 'q']:
                sys.exit(1)
            # Re-parse the free-form input as if it were CLI arguments
            # (at most 3 whitespace-separated chunks).
            args = parser.parse_args(input_parse.split(' ', 2))
        if args.str_search.strip() == "":
            print('Please insert an appropiate non-empty string.')
        else:
            # One full search/select/download cycle per valid query.
            auto = AutoPy(*insert(args))
            auto.get_content()
            auto.select_torrent()
            auto.download_torrent()
def render(self, container, rerender=False):
    """Flow the flowables into the containers that have been added to this
    chain.

    :param container: the container to render into
    :param rerender: when True, clear the container and restore the state
        saved for this chain's first container on the current page
    """
    if rerender:
        container.clear()
        if not self._rerendering:
            # Restore saved state on this chain's 1st container on this page.
            self._state = copy(self._fresh_page_state)
            self._rerendering = True
    try:
        self.done = False
        self.flowables.flow(container, last_descender=None, state=self._state)
        # All flowables have been rendered.
        if container == self.last_container:
            self._init_state()
            # Reset state for the next rendering loop.
            self.done = True
    except PageBreakException as exc:
        # Remember where to resume and snapshot it for possible re-rendering.
        self._state = exc.flowable_state
        self._fresh_page_state = copy(self._state)
        raise
    except EndOfContainer as e:
        self._state = e.flowable_state
        if container == self.last_container:
            # Save state for when ReflowRequired occurs.
            self._fresh_page_state = copy(self._state)
    except ReflowRequired:
        # The page must be rendered again from the saved fresh-page state.
        self._rerendering = False
        raise
def catalog_datacenters(consul_url=None, token=None):
    '''Return list of available datacenters from catalog.

    :param consul_url: The Consul server URL.
    :return: The list of available datacenters.

    CLI Example:

    .. code-block:: bash

        salt '*' consul.catalog_datacenters
    '''
    # Fall back to the configured URL when none was passed explicitly.
    if not consul_url:
        consul_url = _get_config()
    if not consul_url:
        log.error('No Consul URL found.')
        return {'message': 'No Consul URL found.', 'res': False}
    return _query(consul_url=consul_url, function='catalog/datacenters', token=token)
def _get_query ( self , cursor ) :
'''Query tempalte for source Solr , sorts by id by default .''' | query = { 'q' : '*:*' , 'sort' : 'id desc' , 'rows' : self . _rows , 'cursorMark' : cursor }
if self . _date_field :
query [ 'sort' ] = "{} asc, id desc" . format ( self . _date_field )
if self . _per_shard :
query [ 'distrib' ] = 'false'
return query |
def FromString(cls, string_rep):
    """Create a DataStream from a string representation.

    The format for stream designators when encoded as strings is:
    [system] (buffered|unbuffered|constant|input|count|output) <integer>

    Args:
        string_rep (str): The string representation to turn into a
            DataStream

    Raises:
        ArgumentError: on a malformed designator (wrong part count,
            unparseable id, or unknown stream type).
    """
    rep = str(string_rep)
    parts = rep.split()
    # 2 parts = "<type> <id>"; 3 parts = "system <type> <id>".
    if len(parts) > 3:
        raise ArgumentError("Too many whitespace separated parts of stream designator", input_string=string_rep)
    elif len(parts) == 3 and parts[0] != u'system':
        raise ArgumentError("Too many whitespace separated parts of stream designator", input_string=string_rep)
    elif len(parts) < 2:
        raise ArgumentError("Too few components in stream designator", input_string=string_rep)
    # Now actually parse the string.
    if len(parts) == 3:
        system = True
        stream_type = parts[1]
        stream_id = parts[2]
    else:
        system = False
        stream_type = parts[0]
        stream_id = parts[1]
    try:
        # Base 0 accepts decimal, hex (0x...) and octal (0o...) ids.
        stream_id = int(stream_id, 0)
    except ValueError as exc:
        raise ArgumentError("Could not convert stream id to integer", error_string=str(exc), stream_id=stream_id)
    try:
        stream_type = cls.StringToType[stream_type]
    except KeyError:
        raise ArgumentError("Invalid stream type given", stream_type=stream_type, known_types=cls.StringToType.keys())
    return DataStream(stream_type, stream_id, system)
def ensure_readable(self):
    """Make sure the location exists and is readable, raising ValueError otherwise."""
    self.ensure_exists()
    if self.context.is_readable(self.directory):
        return
    # Not readable: report differently depending on whether sudo could help.
    if self.context.have_superuser_privileges:
        raise ValueError("The directory %s isn't readable!" % self)
    raise ValueError(compact("""
        The directory {location} isn't readable, most likely
        because of permissions. Consider using the --use-sudo
        option.
    """, location=self))
def check_actors(self, actors):
    """Performs checks on the actors that are to be used. Raises an exception if invalid setup.

    :param actors: the actors to check
    :type actors: list
    """
    super(Flow, self).check_actors(actors)
    first = self.first_active
    if first is None:
        return
    if not base.is_source(first):
        raise Exception("First active actor is not a source: " + first.full_name)
def acquire(self):
    '''Returns an available instance.

    Returns:
        browser from pool, if available

    Raises:
        NoBrowsersAvailable if none available
    '''
    with self._lock:
        # Pool is full when every slot is already handed out.
        if len(self._in_use) < self.size:
            instance = self._fresh_browser()
            self._in_use.add(instance)
            return instance
        raise NoBrowsersAvailable
def parse_map_file(mapFNH):
    """Opens a QIIME mapping file and stores the contents in a dictionary keyed on SampleID
    (default) or a user-supplied one. The only required fields are SampleID,
    BarcodeSequence, LinkerPrimerSequence (in that order), and Description
    (which must be the final field).

    :type mapFNH: str
    :param mapFNH: Either the full path to the map file or an open file handle

    :rtype: tuple, dict
    :return: A tuple of header line for mapping file and a map associating each line of
             the mapping file with the appropriate sample ID (each value of the map also
             contains the sample ID). An OrderedDict is used so the returned map keeps
             the same order as the input file.

    Example data:
    #SampleID BarcodeSequence LinkerPrimerSequence State Description
    11.V13 ACGCTCGACA GTTTGATCCTGGCTCAG Disease Rat_Oral
    """
    sample_map = OrderedDict()
    header = None
    with file_handle(mapFNH) as map_file:
        for row in map_file:
            # Capture the header, then skip it (and any other comment line).
            if row.startswith("#SampleID"):
                header = row.strip().split("\t")
            if row.startswith("#") or not row:
                continue
            columns = row.strip().split("\t")
            sample_map[columns[0]] = columns
    return header, sample_map
def init(filename, order=3, tokenizer=None):
    """Initialize a brain. This brain's file must not already exist.

    Keyword arguments:
    order -- Order of the forward/reverse Markov chains (integer)
    tokenizer -- One of Cobe, MegaHAL (default Cobe). See documentation
                 for cobe.tokenizers for details. (string)
    """
    log.info("Initializing a cobe brain: %s" % filename)
    if tokenizer is None:
        tokenizer = "Cobe"
    elif tokenizer not in ("Cobe", "MegaHAL"):
        # Unknown names fall back to the default tokenizer with a log note.
        log.info("Unknown tokenizer: %s. Using CobeTokenizer", tokenizer)
        tokenizer = "Cobe"
    graph = Graph(sqlite3.connect(filename))
    with trace_us("Brain.init_time_us"):
        graph.init(order, tokenizer)
def reqAccountSummary(self, reqId, groupName, tags):
    """reqAccountSummary(EClientSocketBase self, int reqId, IBString const & groupName, IBString const & tags)

    SWIG-generated wrapper: forwards the call directly to the native
    EClientSocketBase_reqAccountSummary implementation.
    """
    return _swigibpy.EClientSocketBase_reqAccountSummary(self, reqId, groupName, tags)
def create_init_path(init_path, uid=-1, gid=-1):
    """Create the init path (as an empty file) if it does not exist.

    :param init_path: path of the file to create
    :param uid: owner uid for a newly created file (-1 leaves it unchanged)
    :param gid: owner gid for a newly created file (-1 leaves it unchanged)
    """
    if not os.path.exists(init_path):
        # Create an empty file, then hand ownership to the requested uid/gid.
        with open(init_path, 'wb'):
            pass
        os.chown(init_path, uid, gid)
def site(self, action):
    """Returns site query string for the given action."""
    viewdays = 7
    base = self.uri + self.endpoint
    query = None
    if action == 'siteinfo':
        # meta=siteviews
        query = (base + '?action=query'
                 '&meta=siteinfo|siteviews'
                 '&siprop=general|statistics'
                 '&list=mostviewed&pvimlimit=max'
                 + '&pvisdays=%d' % viewdays)
        self.set_status('query', 'siteinfo|siteviews|mostviewed')
    elif action == 'sitematrix':
        query = base + '?action=sitematrix'
        self.set_status('sitematrix', 'all')
    elif action == 'sitevisitors':
        # meta=siteviews
        query = (base + '?action=query'
                 '&meta=siteviews&pvismetric=uniques'
                 + '&pvisdays=%d' % viewdays)
        self.set_status('query', 'siteviews:uniques')
    if query is None:
        raise ValueError("Could not form query")
    return query + '&format=json&formatversion=2'
def user_exists(username, token_manager=None, app_url=defaults.APP_URL):
    """check if the user exists with the specified username"""
    headers = token_manager.get_access_token_headers()
    auth_url = environment.get_auth_url(app_url=app_url)
    url = "%s/api/v1/accounts?username=%s" % (auth_url, username)
    response = requests.get(url, headers=headers)
    code = response.status_code
    # 200 -> account found, 404 -> no such account; anything else is an error.
    if code == 200:
        return True
    if code == 404:
        return False
    raise JutException('Error %s: %s' % (code, response.text))
def tree_iterator(self, visited=None, path=None):
    '''Generator function that traverse the dr tree start from this node (self).

    :param visited: set of nodes already yielded; shared across the
        recursion so each node is produced at most once.
    :param path: optional list that accumulates visited nodes; note it is
        only appended at this level and is NOT propagated to children.
    '''
    if visited is None:
        visited = set()
    if self not in visited:
        if path and isinstance(path, list):
            path.append(self)
        visited.add(self)
        yield self
    if not hasattr(self, 'dterms'):
        # NOTE(review): this yields a bare None when the node has no
        # 'dterms' attribute; if the consumer keeps iterating, the `for`
        # below would raise AttributeError — confirm whether `return`
        # was intended here.
        yield
    for dterm in self.dterms:
        if hasattr(self, dterm):
            child = getattr(self, dterm)
            # Recurse only into children that look like tree nodes.
            if hasattr(child, 'dterms') or hasattr(child, 'terms'):
                for node in child.tree_iterator(visited):
                    yield node
def refresh(self, props, body):
    """The refresh method called when the on-demand refresh service receives
    a message; calls the parent actor's sniff method if the message is
    compliant with what is expected.

    :param props: the message properties (must contain an 'OPERATION' key)
    :param body: the message body (unused here)
    :return:
    """
    LOGGER.debug("InjectorCachedComponent.refresh")
    operation = props['OPERATION']
    if operation == "REFRESH":
        if self.parent_actor_ref is not None:
            parent_actor = self.parent_actor_ref.proxy()
            # .get() blocks until the sniff call completes on the actor.
            parent_actor.sniff().get()
    else:
        # Any operation other than REFRESH is rejected.
        LOGGER.error("InjectorCachedComponent.refresh - Unsupported operation " + str(operation))
def get_instance(self, payload):
    """Build an instance of ChallengeInstance from an API payload.

    Thin factory: forwards the payload plus the sids captured in this
    object's solution context.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.authy.v1.service.entity.factor.challenge.ChallengeInstance
    :rtype: twilio.rest.authy.v1.service.entity.factor.challenge.ChallengeInstance
    """
    return ChallengeInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        identity=self._solution['identity'],
        factor_sid=self._solution['factor_sid'],
    )
def check_finished(self):
    """Poll all processes and handle any finished processes.

    Returns True if anything changed (so the caller knows to log/refresh).
    Finished processes fall into two groups: normal completions (record
    status, returncode and captured output) and processes we deliberately
    stopped/killed (re-queue or remove them instead of marking failed).
    """
    changed = False
    # Iterate over a snapshot of the keys: entries are deleted during the loop.
    for key in list(self.processes.keys()):
        # Poll process and check if it finished.
        process = self.processes[key]
        process.poll()
        if process.returncode is not None:
            # If a process is terminated by `stop` or `kill` we want to
            # queue it again instead of closing it as failed.
            if key not in self.stopping:
                # Get std_out and err_out.
                output, error_output = process.communicate()
                descriptor = self.descriptors[key]
                descriptor['stdout'].seek(0)
                descriptor['stderr'].seek(0)
                # Re-read from the descriptors; this overwrites the
                # communicate() results gathered above.
                output = get_descriptor_output(descriptor['stdout'], key, handler=self)
                error_output = get_descriptor_output(descriptor['stderr'], key, handler=self)
                # Mark queue entry as finished and save returncode.
                self.queue[key]['returncode'] = process.returncode
                if process.returncode != 0:
                    self.queue[key]['status'] = 'failed'
                else:
                    self.queue[key]['status'] = 'done'
                # Add outputs to queue.
                self.queue[key]['stdout'] = output
                self.queue[key]['stderr'] = error_output
                self.queue[key]['end'] = str(datetime.now().strftime("%H:%M"))
                self.queue.write()
                changed = True
            else:
                self.stopping.remove(key)
                if key in self.to_remove:
                    # Stopped for removal: drop the entry entirely.
                    self.to_remove.remove(key)
                    del self.queue[key]
                else:
                    if key in self.to_stash:
                        self.to_stash.remove(key)
                        self.queue[key]['status'] = 'stashed'
                    else:
                        self.queue[key]['status'] = 'queued'
                    # Reset timing info so the entry can run again.
                    self.queue[key]['start'] = ''
                    self.queue[key]['end'] = ''
                    self.queue.write()
            self.clean_descriptor(key)
            del self.processes[key]
    # If anything should be logged we return True.
    return changed
def bootstrap_options(self):
    """The post-bootstrap options, computed from the env, args, and fully discovered Config.

    Re-computing options after Config has been fully expanded allows us to pick up bootstrap values
    (such as backends) from a config override file, for example.

    Because this can be computed from the in-memory representation of these values, it is not part
    of the object's identity.
    """
    return self.parse_bootstrap_options(self.env, self.bootstrap_args, self.config)
def pretty_print_error(err_json):
    """Pretty print Flask-Potion error messages for the user."""
    # Special case validation errors
    if len(err_json) == 1 and "validationOf" in err_json[0]:
        required_fields = ", ".join(err_json[0]["validationOf"]["required"])
        return "Validation error. Requires properties: {}.".format(required_fields)
    # General error handling: join all individual messages.
    combined = "; ".join(err.get("message", "") for err in err_json)
    # Fallback when nothing useful was provided.
    return combined if combined else "Bad request."
def helpEvent(self, event):
    """Displays a tool tip for the given help event.

    :param event | <QHelpEvent>
    """
    item = self.itemAt(event.scenePos())
    # Original checked `item and item and ...`; the duplicate test was redundant.
    if item and item.toolTip():
        parent = self.parent()
        rect = item.path().boundingRect()
        # Anchor the tip just below the item under the cursor, converted
        # from scene to global coordinates.
        point = event.scenePos()
        point.setY(item.pos().y() + rect.bottom())
        point = parent.mapFromScene(point)
        point = parent.mapToGlobal(point)
        XPopupWidget.showToolTip(item.toolTip(), point=point, parent=parent)
        event.accept()
    else:
        super(XCalendarScene, self).helpEvent(event)
def run(bam_file, data, out_dir):
    """Run coverage QC analysis.

    Computes average depth, mapped/duplicate/on-target read statistics and
    goleft indexcov plots for a BAM file, returning a dict with a "metrics"
    mapping plus optional "base"/"secondary" output files.
    """
    out = dict()
    out_dir = utils.safe_makedir(out_dir)
    # Pick the target regions: explicit coverage file > variant regions > whole genome.
    if dd.get_coverage(data) and dd.get_coverage(data) not in ["None"]:
        merged_bed_file = bedutils.clean_file(dd.get_coverage_merged(data), data, prefix="cov-", simple=True)
        target_name = "coverage"
    elif dd.get_coverage_interval(data) != "genome":
        merged_bed_file = dd.get_variant_regions_merged(data) or dd.get_sample_callable(data)
        target_name = "variant_regions"
    else:
        merged_bed_file = None
        target_name = "genome"
    avg_depth = cov.get_average_coverage(target_name, merged_bed_file, data)
    if target_name == "coverage":
        out_files = cov.coverage_region_detailed_stats(target_name, merged_bed_file, data, out_dir)
    else:
        out_files = []
    out['Avg_coverage'] = avg_depth
    samtools_stats_dir = os.path.join(out_dir, os.path.pardir, 'samtools')
    from bcbio.qc import samtools
    samtools_stats = samtools.run(bam_file, data, samtools_stats_dir)["metrics"]
    out["Total_reads"] = total_reads = int(samtools_stats["Total_reads"])
    out["Mapped_reads"] = mapped = int(samtools_stats["Mapped_reads"])
    out["Mapped_paired_reads"] = int(samtools_stats["Mapped_paired_reads"])
    out['Duplicates'] = dups = int(samtools_stats["Duplicates"])
    # Guard percentage calculations against zero denominators.
    if total_reads:
        out["Mapped_reads_pct"] = 100.0 * mapped / total_reads
    if mapped:
        out['Duplicates_pct'] = 100.0 * dups / mapped
    if dd.get_coverage_interval(data) == "genome":
        mapped_unique = mapped - dups
    else:
        mapped_unique = readstats.number_of_mapped_reads(data, bam_file, keep_dups=False)
    out['Mapped_unique_reads'] = mapped_unique
    if merged_bed_file:
        ontarget = readstats.number_of_mapped_reads(data, bam_file, keep_dups=False, bed_file=merged_bed_file, target_name=target_name)
        out["Ontarget_unique_reads"] = ontarget
        if mapped_unique:
            out["Ontarget_pct"] = 100.0 * ontarget / mapped_unique
            out['Offtarget_pct'] = 100.0 * (mapped_unique - ontarget) / mapped_unique
            if dd.get_coverage_interval(data) != "genome":
                # Skip padded calculation for WGS even if the "coverage" file is specified;
                # the padded statistic makes only sense for exomes and panels.
                padded_bed_file = bedutils.get_padded_bed_file(out_dir, merged_bed_file, 200, data)
                ontarget_padded = readstats.number_of_mapped_reads(data, bam_file, keep_dups=False, bed_file=padded_bed_file, target_name=target_name + "_padded")
                out["Ontarget_padded_pct"] = 100.0 * ontarget_padded / mapped_unique
        if total_reads:
            out['Usable_pct'] = 100.0 * ontarget / total_reads
    indexcov_files = _goleft_indexcov(bam_file, data, out_dir)
    out_files += [x for x in indexcov_files if x and utils.file_exists(x)]
    out = {"metrics": out}
    if len(out_files) > 0:
        out["base"] = out_files[0]
        out["secondary"] = out_files[1:]
    return out
def _split_keys_v1(joined):
    """Split two keys out a string created by _join_keys_v1."""
    # partition() keeps everything after the first '::' in one piece.
    pieces = joined.partition('::')
    return _decode_v1(pieces[0]), _decode_v1(pieces[2])
def _extract_line_features(self):
    """Parse raw log lines and convert each into a dictionary of extracted features."""
    for raw_line in self.line_iterator:
        features = {'raw': raw_line}
        match = COMPILED_AUTH_LOG_REGEX.match(raw_line)
        # Non-matching lines are kept with only the raw text.
        if match:
            features['timestamp'] = self._to_epoch(match.group(1))
            features['hostname'] = match.group(2)
            features['program'] = match.group(3)
            features['processid'] = match.group(4)
            features['message'] = match.group(5)
        self.parsed_lines.append(features)
def run_command(self, scan_id, host, cmd):
    """Run a single command via SSH and return the content of stdout or
    None in case of an Error. A scan error is issued in the latter
    case.

    For logging into 'host', the scan options 'port', 'username',
    'password' and 'ssh_timeout' are used.

    :param scan_id: id of the scan this command belongs to
    :param host: hostname or IP address to log into
    :param cmd: shell command to execute remotely
    :return: list of stdout lines, or None on connection/auth failure
    """
    ssh = paramiko.SSHClient()
    # Auto-accept unknown host keys (no strict host key checking).
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    options = self.get_scan_options(scan_id)
    port = int(options['port'])
    timeout = int(options['ssh_timeout'])
    # For backward compatibility, consider the legacy mode to get
    # credentials as scan_option.
    # First and second modes should be removed in future releases.
    # On the third case it receives the credentials as a subelement of
    # the <target>.
    credentials = self.get_scan_credentials(scan_id, host)
    if ('username_password' in options and ':' in options['username_password']):
        username, password = options['username_password'].split(':', 1)
    elif 'username' in options and options['username']:
        username = options['username']
        password = options['password']
    elif credentials:
        cred_params = credentials.get('ssh')
        username = cred_params.get('username', '')
        password = cred_params.get('password', '')
    else:
        # No usable credential source found: record and raise.
        self.add_scan_error(scan_id, host=host, value='Erroneous username_password value')
        raise ValueError('Erroneous username_password value')
    try:
        ssh.connect(hostname=host, username=username, password=password, timeout=timeout, port=port)
    except (paramiko.ssh_exception.AuthenticationException, socket.error) as err:
        # Errors: No route to host, connection timeout, authentication
        # failure etc,.
        self.add_scan_error(scan_id, host=host, value=str(err))
        return None
    _, stdout, _ = ssh.exec_command(cmd)
    result = stdout.readlines()
    ssh.close()
    return result
def parse_value(self, querydict):
    """extract value

    Extract the value from querydict and convert it to a native type;
    missing and empty values result in None.
    """
    raw = self.field.get_value(querydict)
    # Treat absent, sentinel-empty and blank values the same way.
    if raw in (None, fields.empty, ''):
        return None
    return self.field.to_internal_value(raw)
def create_client(self, addr, timeout):
    """Create client(s) based on addr"""
    def build(a):
        # Each client gets the same receive timeout applied to its socket.
        client = Client(a)
        client.socket._set_recv_timeout(timeout)
        return client

    if ',' not in addr:
        return build(addr)
    # Comma-separated list: return a dict of address -> client.
    stripped = [piece.strip() for piece in addr.split(',')]
    return {a: build(a) for a in stripped}
def read_release_version():
    """Read the release version from ``_version.py``.

    :return: the version string, or ``None`` if ``_version.py`` is
        missing/unreadable or contains no ``__version__`` assignment.
    """
    import re
    dirname = os.path.abspath(os.path.dirname(__file__))
    try:
        # Context manager ensures the file is closed even on error
        # (the original leaked the handle and swallowed all exceptions).
        with open(os.path.join(dirname, "_version.py"), "rt") as f:
            for line in f:
                m = re.match("__version__ = '([^']+)'", line)
                if m:
                    return m.group(1)
    except OSError:
        # File missing or unreadable: no release version available.
        return None
    return None
def reset(self):
    """Clears `nick` and `own_ids`, sets `center` to `world.center`,
    and then calls `cells_changed()`.
    """
    self.nick = ''
    self.own_ids.clear()
    self.center = self.world.center
    self.cells_changed()
def _random_ipv4_address_from_subnet ( self , subnet , network = False ) :
"""Produces a random IPv4 address or network with a valid CIDR
from within a given subnet .
: param subnet : IPv4Network to choose from within
: param network : Return a network address , and not an IP address""" | address = str ( subnet [ self . generator . random . randint ( 0 , subnet . num_addresses - 1 , ) ] , )
if network :
address += '/' + str ( self . generator . random . randint ( subnet . prefixlen , subnet . max_prefixlen , ) )
address = str ( ip_network ( address , strict = False ) )
return address |
def area(self):
    """The surface area of the primitive extrusion.

    Calculated from polygon and height to avoid mesh creation.

    Returns
    ----------
    area : float, surface area of 3D extrusion
    """
    prim = self.primitive
    # Lateral surface: perimeter times (absolute) height.
    side_area = abs(prim.height * prim.polygon.length)
    # Plus the two end caps.
    cap_area = 2 * prim.polygon.area
    return side_area + cap_area
def logout_service_description(self):
    """Logout service description."""
    label = 'Logout from ' + self.name
    # Append the auth type, when configured, for disambiguation.
    if self.auth_type:
        label = label + ' (' + self.auth_type + ')'
    return {
        "@id": self.logout_uri,
        "profile": self.profile_base + 'logout',
        "label": label,
    }
def inline(self) -> str:
    """Return endpoint string

    :return:
    """
    # Only include the parts (server, port) that are actually set.
    parts = [str(piece) for piece in (self.server, self.port) if piece]
    return ESSubscribtionEndpoint.API + " " + " ".join(parts)
def add_stats(self, args):
    """Callback to add motif statistics."""
    bg_name, stats = args
    logger.debug("Stats: %s %s", bg_name, stats)
    # Nest results as stats[motif_id][bg_name].
    for motif_id, value in stats.items():
        self.stats.setdefault(motif_id, {})[bg_name] = value
def collapse_times():
    """Make copies of everything, assign to global shortcuts so functions work
    on them, extract the times, then restore the running stacks.
    """
    # Save the live stacks so they can be restored afterwards.
    orig_ts = f.timer_stack
    orig_ls = f.loop_stack
    # Work on deep copies so collapsing does not disturb live timing state.
    copy_ts = _copy_timer_stack()
    copy_ls = copy.deepcopy(f.loop_stack)
    f.timer_stack = copy_ts
    f.loop_stack = copy_ls
    f.refresh_shortcuts()
    # Collapse until only the root timer remains and no loop is open.
    while (len(f.timer_stack) > 1) or f.t.in_loop:
        _collapse_subdivision()
    timer_pub.stop()
    collapsed_times = f.r
    # Restore the original (live) stacks.
    f.timer_stack = orig_ts
    # (loops throw error if not same object!)
    f.loop_stack = orig_ls
    f.refresh_shortcuts()
    return collapsed_times
def lastAnchor(self, block, column):
    '''Find the last open bracket before the current line.
    Return (block, column, char) or (None, None, None)
    '''
    best = (None, None, None)
    bestPos = -1
    # Check each opening bracket type and keep the one closest to the cursor.
    for openChar in '({[':
        try:
            foundBlock, foundColumn = self.findBracketBackward(block, column, openChar)
        except ValueError:
            continue
        pos = foundBlock.position() + foundColumn
        if pos > bestPos:
            bestPos = pos
            best = (foundBlock, foundColumn, openChar)
    return best
def set_out(self, que_out, num_followers):
    """Record the output queue and the number of parallel follower tasks."""
    self._num_followers = num_followers
    self._que_out = que_out
def SetCacheMode(mode):
    """Set the Configure cache mode. mode must be one of "auto", "force",
    or "cache".
    """
    global cache_mode
    if mode == "auto":
        cache_mode = AUTO
        return
    if mode == "force":
        cache_mode = FORCE
        return
    if mode == "cache":
        cache_mode = CACHE
        return
    raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode)
def permission_required(perm, login_url=None):
    """Replacement for django.contrib.auth.decorators.permission_required that
    returns 403 Forbidden if the user is already logged in.
    """
    def check(user):
        return user.has_perm(perm)
    return user_passes_test(check, login_url=login_url)
async def connect(self, server_info, proto_code=None, *, use_tor=False, disable_cert_verify=False, proxy=None, short_term=False):
    '''Start connection process.
    Destination must be specified in a ServerInfo() record (first arg).

    :param server_info: ServerInfo record describing the destination.
    :param proto_code: protocol key; defaults to the server's first
        advertised protocol.
    :param use_tor: True, or a (host, port) tuple, to route via a SOCKS5
        Tor proxy.
    :param disable_cert_verify: skip SSL certificate validation.
    :param proxy: pre-built aiosocks proxy address, if any.
    :param short_term: when True, no keepalive task is started.
    '''
    self.server_info = server_info
    if not proto_code:
        # Default to the server's first advertised protocol.
        proto_code, *_ = server_info.protocols
    self.proto_code = proto_code
    logger.debug("Connecting to: %r" % server_info)
    if proto_code == 'g':  # websocket
        # to do this, we'll need a websockets implementation that
        # operates more like a asyncio.Transport
        # maybe: `asyncws` or `aiohttp`
        raise NotImplementedError('sorry no WebSocket transport yet')
    hostname, port, use_ssl = server_info.get_port(proto_code)
    if use_tor:
        if have_aiosocks:
            # Connect via Tor proxy, assumed to be on localhost:9050
            # unless a tuple is given with another host/port combo.
            try:
                socks_host, socks_port = use_tor
            except TypeError:
                socks_host, socks_port = 'localhost', 9050
            # basically no-one has .onion SSL certificates, and
            # pointless anyway.
            disable_cert_verify = True
            assert not proxy, "Sorry not yet supporting proxy->tor->dest"
            logger.debug(" .. using TOR")
            proxy = aiosocks.Socks5Addr(socks_host, int(socks_port))
        else:
            logger.debug("Error: want to use tor, but no aiosocks module.")
    if use_ssl == True and disable_cert_verify:
        # Create a more liberal SSL context that won't
        # object to self-signed certicates. This is
        # very bad on public Internet, but probably ok
        # over Tor
        use_ssl = ssl.create_default_context()
        use_ssl.check_hostname = False
        use_ssl.verify_mode = ssl.CERT_NONE
        logger.debug(" .. SSL cert check disabled")

    async def _reconnect():
        # Closure captures hostname/port/use_ssl/proxy from the outer call.
        if self.protocol:
            return
        # race/duplicate work
        if proxy:
            if have_aiosocks:
                transport, protocol = await aiosocks.create_connection(StratumProtocol, proxy=proxy, proxy_auth=None, remote_resolve=True, ssl=use_ssl, dst=(hostname, port))
            else:
                # NOTE(review): if aiosocks is missing here, transport/protocol
                # are never bound and the lines below raise NameError — confirm
                # whether this path should raise explicitly instead.
                logger.debug("Error: want to use proxy, but no aiosocks module.")
        else:
            transport, protocol = await self.loop.create_connection(StratumProtocol, host=hostname, port=port, ssl=use_ssl)
        self.protocol = protocol
        protocol.client = self
        # capture actual values used
        self.actual_connection = dict(hostname=hostname, port=int(port), ssl=bool(use_ssl), tor=bool(proxy))
        self.actual_connection['ip_addr'] = transport.get_extra_info('peername', default=['unknown'])[0]
        if not short_term:
            self.ka_task = self.loop.create_task(self._keepalive())
        logger.debug("Connected to: %r" % server_info)

    # close whatever we had
    if self.protocol:
        self.protocol.close()
        self.protocol = None
    self.reconnect = _reconnect
    await self.reconnect()
def makedirs(p):
    """A makedirs that avoids a race conditions for multiple processes attempting to create the same directory."""
    try:
        os.makedirs(p, settings.FILE_UPLOAD_PERMISSIONS)
    except OSError:
        # Perhaps someone beat us to the punch?
        if os.path.isdir(p):
            return
        # Nope, must be something else...
        raise
def _parse_udiff(self):
    """Parse the unified diff and return data for the template.

    Returns a list of per-file dicts with 'filename', 'old_revision',
    'new_revision' and 'chunks' (lists of per-line dicts with lineno,
    action and text). Written against a Python 2 iterator (.next()).
    """
    lineiter = self.lines
    files = []
    try:
        line = lineiter.next()
        # skip first context
        skipfirst = True
        while 1:
            # continue until we found the old file
            if not line.startswith('--- '):
                line = lineiter.next()
                continue
            chunks = []
            filename, old_rev, new_rev = self._extract_rev(line, lineiter.next())
            files.append({'filename': filename, 'old_revision': old_rev, 'new_revision': new_rev, 'chunks': chunks})
            line = lineiter.next()
            while line:
                match = self._chunk_re.match(line)
                if not match:
                    break
                lines = []
                chunks.append(lines)
                old_line, old_end, new_line, new_end = [int(x or 1) for x in match.groups()[:-1]]
                old_line -= 1
                new_line -= 1
                context = len(match.groups()) == 5
                old_end += old_line
                new_end += new_line
                if context:
                    if not skipfirst:
                        lines.append({'old_lineno': '...', 'new_lineno': '...', 'action': 'context', 'line': line})
                    else:
                        skipfirst = False
                line = lineiter.next()
                while old_line < old_end or new_line < new_end:
                    if line:
                        command, line = line[0], line[1:]
                    else:
                        command = ' '
                    affects_old = affects_new = False
                    # ignore those if we don't expect them
                    if command in '#@':
                        continue
                    elif command == '+':
                        affects_new = True
                        action = 'add'
                    elif command == '-':
                        affects_old = True
                        action = 'del'
                    else:
                        affects_old = affects_new = True
                        action = 'unmod'
                    old_line += affects_old
                    new_line += affects_new
                    lines.append({'old_lineno': affects_old and old_line or '', 'new_lineno': affects_new and new_line or '', 'action': action, 'line': line})
                    line = lineiter.next()
    except StopIteration:
        pass
    # highlight inline changes
    for diff_file in files:
        # BUG FIX: the original iterated the leftover `chunks` variable
        # (only the LAST file's chunks) instead of each file's own chunks.
        for chunk in diff_file['chunks']:
            chunk_iter = iter(chunk)
            try:
                while 1:
                    line = chunk_iter.next()
                    if line['action'] != 'unmod':
                        nextline = chunk_iter.next()
                        if nextline['action'] == 'unmod' or nextline['action'] == line['action']:
                            continue
                        self.differ(line, nextline)
            except StopIteration:
                pass
    return files
def add(self, *args):
    """Add a path template and handler.

    :param name: Optional. If specified, allows reverse path lookup with
        :meth:`reverse`.
    :param template: A string or :class:`~potpy.template.Template`
        instance used to match paths against. Strings will be wrapped in a
        Template instance.
    :param handler: A callable or :class:`~potpy.router.Route` instance
        which will handle calls for the given path. See
        :meth:`potpy.router.Router.add` for details.
    """
    # With three or more positional args, the first is the optional name.
    if len(args) > 2:
        name, template = args[0], args[1]
        rest = args[2:]
    else:
        name = None
        template = args[0]
        rest = args[1:]
    if isinstance(template, tuple):
        # (template_string, type_converters) pair.
        template_str, type_converters = template
        template = Template(template_str, **type_converters)
    elif not isinstance(template, Template):
        template = Template(template)
    if name:
        self._templates[name] = template
    super(PathRouter, self).add(template, *rest)
def namedtuple(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of tuple with named fields.

    This is a patched version of collections.namedtuple from the stdlib.
    Unlike the latter, it accepts non-identifier strings as field names.
    All values are accessible through dict syntax. Fields whose names are
    identifiers are also accessible via attribute syntax as in ordinary namedtuples, alongside traditional
    indexing. This feature is needed as SDMX allows field names
    to contain '-'.

    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessable by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # Normalize the field-name spec into a list of strings.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    typename = str(typename)
    # Validate names: must be strings and not Python keywords.
    for name in [typename] + field_names:
        if type(name) != str:
            raise TypeError('Type names and field names must be strings')
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             'keyword: %r' % name)
    # BUG FIX: the original interpolated the loop-leaked `name` (the last
    # field name) into this message instead of the offending `typename`.
    if not _isidentifier(typename):
        raise ValueError('Type names must be valid '
                         'identifiers: %r' % typename)
    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             '%r' % name)
        if name in seen:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen.add(name)
    # Positional argument aliases used inside the generated class source.
    arg_names = ['_' + str(i) for i in range(len(field_names))]
    # Fill-in the class template
    class_definition = _class_template.format(
        typename=typename,
        field_names=tuple(field_names),
        num_fields=len(field_names),
        arg_list=repr(tuple(arg_names)).replace("'", "")[1:-1],
        repr_fmt=', '.join(_repr_template.format(name=name) for name in field_names),
        field_defs='\n'.join(_field_template.format(index=index, name=name)
                             for index, name in enumerate(field_names)
                             if _isidentifier(name)))
    # Execute the template string in a temporary namespace and support
    # tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(__name__='namedtuple_%s' % typename)
    exec(class_definition, namespace)
    result = namespace[typename]
    result._source = class_definition
    if verbose:
        print(result._source)
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    return result
def get_email_link(application):
    """Retrieve a link that can be emailed to the applicant."""
    # don't use secret_token unless we have to
    applicant_can_login = (application.content_type.model == 'person'
                           and application.applicant.has_usable_password())
    if applicant_can_login:
        url = '%s/applications/%d/' % (settings.REGISTRATION_BASE_URL, application.pk)
        return url, False
    url = '%s/applications/%s/' % (settings.REGISTRATION_BASE_URL, application.secret_token)
    return url, True
def fetch(self):
    """Fetch a FieldValueInstance from the API.

    Issues a GET against this resource's URI and wraps the payload.

    :returns: Fetched FieldValueInstance
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
    """
    params = values.of({})
    payload = self._version.fetch('GET', self._uri, params=params, )
    return FieldValueInstance(
        self._version,
        payload,
        assistant_sid=self._solution['assistant_sid'],
        field_type_sid=self._solution['field_type_sid'],
        sid=self._solution['sid'],
    )
def destroy(self, request, pk=None, parent_lookup_organization=None):
    '''Remove a user from an organization.'''
    member = get_object_or_404(User, pk=pk)
    organization = get_object_or_404(SeedOrganization, pk=parent_lookup_organization)
    # Permission check happens against the organization, not the user.
    self.check_object_permissions(request, organization)
    organization.users.remove(member)
    return Response(status=status.HTTP_204_NO_CONTENT)
def requestViewMenu(self, point):
    """Emits the itemMenuRequested and viewMenuRequested signals
    for the item found at the given view position.

    :param point: <QPoint> position within the gantt view widget
    """
    view_item = self.uiGanttVIEW.itemAt(point)
    if not view_item:
        # Nothing under the cursor: no signals to emit.
        return
    global_pos = self.uiGanttVIEW.mapToGlobal(point)
    tree_item = view_item.treeItem()
    # Emit the view-level signal first, then the item-level one,
    # preserving the original notification order.
    self.viewMenuRequested.emit(view_item, global_pos)
    self.itemMenuRequested.emit(tree_item, global_pos)
def _persist(self) -> None:
    """Persists the current data group"""
    store = self._store
    # No-op when no backing store was configured.
    if store:
        store.save(self._key, self._snapshot)
def get_by_start_id(self, start_id):
    """:yield: Log entries starting from :start_id: and ending 200 entries
               after.  In most cases easier to call than the paginate one
               because there is no need to keep track of the already read
               entries in a specific page.
    """
    url = '/scans/%s/log?id=%s' % (self.scan_id, start_id)
    code, page = self.conn.send_request(url, method='GET')
    if code != 200:
        # Surface both the HTTP code and any server-provided message.
        raise APIException('Failed to retrieve scan log. Received HTTP'
                           ' response code %s. Message: "%s"'
                           % (code, page.get('message', 'None')))
    entries = page.get('entries', None)
    if entries is None:
        raise APIException('Could not retrieve log entries attribute')
    for raw_entry in entries:
        yield LogEntry.from_entry_dict(raw_entry)
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
    """Return the three tracked metrics as a name -> value mapping.

    * ``dpd_acc`` -- fraction of examples (among those that actually have
      DPD output) where our best action sequence is in the DPD-provided
      set.  An easy-to-compute lower bound on denotation accuracy for
      that subset.
    * ``denotation_acc`` -- fraction of examples with the correct
      denotation.  This is the usual "accuracy" number to report; make
      sure it is computed on the full data, not just the subset with DPD
      output (pass ``keep_if_no_dpd=True`` to the dataset reader for
      validation data, but not for training data).
    * ``lf_percent`` -- fraction of decodes that produce a finished
      logical form at all (the decoder can loop or run out of time
      steps on very long forms).

    :param reset: forwarded to each underlying metric's ``get_metric``.
    """
    metrics = {
        'dpd_acc': self._action_sequence_accuracy.get_metric(reset),
        'denotation_acc': self._denotation_accuracy.get_metric(reset),
        'lf_percent': self._has_logical_form.get_metric(reset),
    }
    return metrics
def preprocess_batch(images_batch, preproc_func=None):
    """Build a preprocessing graph for a batch from a function that
    processes a single image.

    :param images_batch: A tensor for an image batch.
    :param preproc_func: (optional function) A function that takes in a
        tensor and returns a preprocessed input.  When omitted, the
        batch is returned unchanged.
    """
    if preproc_func is None:
        return images_batch
    with tf.variable_scope('preprocess'):
        # Split the batch into single-image tensors, preprocess each one,
        # then re-assemble along the batch axis.
        batch_size = int(images_batch.shape[0])
        processed = []
        for img in tf.split(images_batch, batch_size):
            single = tf.reshape(img, img.shape[1:])
            processed.append(tf.expand_dims(preproc_func(single), axis=0))
        return tf.concat(processed, axis=0)
def romanize(text: str) -> str:
    """Render Thai words in the Latin alphabet ("romanization") using the
    Royal Thai General System of Transcription (RTGS), the official
    system published by the Royal Institute of Thailand.

    :param str text: Thai text to be romanized
    :return: A string of Thai words rendered in the Latin alphabet.
    """
    # Tokenize first so each word is romanized independently.
    return "".join(_romanize(word) for word in word_tokenize(text))
def dtrajs(self):
    """get discrete trajectories"""
    if not self._estimated:
        # Lazily run the parametrization before exposing results.
        self.logger.info("not yet parametrized, running now.")
        self.parametrize()
    final_stage = self._chain[-1]
    return final_stage.dtrajs
def _check_descendant(self, item):
    """Check the boxes of item's descendants."""
    # Depth-first walk: check each child, then recurse into its subtree.
    for child in self.get_children(item):
        self.change_state(child, "checked")
        self._check_descendant(child)
def get_stats(self, start=None, stop=None, step=10):
    """Get stats of a monitored machine

    :param start: Time formatted as integer, from when to fetch stats (default now)
    :param stop: Time formatted as integer, until when to fetch stats (default +10 seconds)
    :param step: Step to fetch stats (default 10 seconds)
    :returns: A dict of stats
    """
    # BUG FIX: the old signature used start=int(time()), stop=int(time())+10,
    # which Python evaluates ONCE at import time -- every later call without
    # arguments silently queried the module-load timestamp, not "now".
    # Using None sentinels computes the documented defaults at call time.
    if start is None:
        start = int(time())
    if stop is None:
        stop = start + 10
    payload = {'v': 2, 'start': start, 'stop': stop, 'step': step}
    data = json.dumps(payload)
    req = self.request(self.mist_client.uri + "/clouds/" + self.cloud.id +
                       "/machines/" + self.id + "/stats", data=data)
    stats = req.get().json()
    return stats
def mongodump(mongo_user, mongo_password, mongo_dump_directory_path, database=None, silent=False):
    """Runs mongodump using the provided credentials on the running mongod
    process.

    WARNING: This function will delete the contents of the provided
    directory before it runs.

    :param mongo_user: username passed to ``mongodump -u``
    :param mongo_password: password passed to ``mongodump -p``
    :param mongo_dump_directory_path: output directory; removed first if it exists
    :param database: optional single database to dump (``--db``)
    :param silent: when True, add ``--quiet`` and run the command silently
    """
    import shlex  # local import: only needed here

    if path.exists(mongo_dump_directory_path):
        # If a backup dump already exists, delete it so old collections
        # are not merged into the new dump.
        rmtree(mongo_dump_directory_path)
    # SECURITY FIX: the command line was previously built by raw string
    # interpolation, so a password or path containing shell metacharacters
    # could break or inject into the command.  Quote every value.
    # NOTE(review): the password still appears in the process list; consider
    # --config / environment-based auth if that matters in this deployment.
    parts = ['mongodump']
    if silent:
        parts.append('--quiet')
    parts += ['-u', shlex.quote(mongo_user),
              '-p', shlex.quote(mongo_password),
              '-o', shlex.quote(mongo_dump_directory_path)]
    if database:
        parts += ['--db', shlex.quote(database)]
    call(' '.join(parts), silent=silent)
def forever(self, key, value):
    """Store an item in the cache indefinitely.

    :param key: The cache key
    :type key: str

    :param value: The value to store
    :type value: mixed
    """
    # Serialize before storing; no TTL is set, so the key never expires.
    self._redis.set(self._prefix + key, self.serialize(value))
def khash(*args):
    '''Hash the given arguments, mapping None to a fixed sentinel so that
    None is handled the same way across runs (which is good :)).'''
    total = 0
    for arg in args:
        # None has no stable hash of its own; substitute a fixed sentinel.
        total += hash(-13371337 if arg is None else arg)
    return hash(str(total))
def create_translation(self, context_id, static_ip, remote_ip, notes):
    """Creates an address translation on a tunnel context.

    :param int context_id: The id-value representing the context instance.
    :param string static_ip: The IP address value representing the
        internal side of the translation entry.
    :param string remote_ip: The IP address value representing the remote
        side of the translation entry.
    :param string notes: The notes to supply with the translation entry.
    :return dict: Mapping of properties for the new translation entry.
    """
    template = {
        'customerIpAddress': remote_ip,
        'internalIpAddress': static_ip,
        'notes': notes,
    }
    return self.context.createAddressTranslation(template, id=context_id)
def dsc_comment(self, comment):
    """Emit a comment into the PostScript output for this surface.

    The comment must conform to the PostScript Language Document
    Structuring Conventions (DSC); see that manual for the available
    comments and their meanings.  In particular, ``%%IncludeFeature``
    gives a device-independent way to control printer features, for
    which the PostScript Printer Description Files Specification is a
    useful reference.

    Constraints on *comment* (violating either puts the surface into an
    error state, but no further conformance checking is done):

    * it must begin with a percent character (``%``);
    * its total length, including initial percent characters, must not
      exceed 255 bytes;
    * it should not have a trailing newline.

    Comments can be emitted into three DSC sections, selected by when
    this method is called:

    * header -- after the surface is created, before
      :meth:`dsc_begin_setup`;
    * Setup -- after :meth:`dsc_begin_setup`, before
      :meth:`dsc_begin_page_setup`;
    * PageSetup -- after :meth:`dsc_begin_page_setup`.

    Header and Setup comments apply to the whole document; PageSetup
    comments apply to a single page.  :meth:`dsc_begin_page_setup` only
    needs to be called for the first page: after
    :meth:`~Surface.show_page` or :meth:`~Surface.copy_page`, comments
    go to the PageSetup section of the current page (though calling it
    on every page is harmless and may simplify calling code).

    Cairo generates several comments itself; applications must not
    manually emit any of the following:

    * Header: ``%!PS-Adobe-3.0``, ``%%Creator``, ``%%CreationDate``,
      ``%%Pages``, ``%%BoundingBox``, ``%%DocumentData``,
      ``%%LanguageLevel``, ``%%EndComments``.
    * Setup: ``%%BeginSetup``, ``%%EndSetup``.
    * PageSetup: ``%%BeginPageSetup``, ``%%PageBoundingBox``,
      ``%%EndPageSetup``.
    * Other: ``%%BeginProlog``, ``%%EndProlog``, ``%%Page``,
      ``%%Trailer``, ``%%EOF``.
    """
    encoded = _encode_string(comment)
    cairo.cairo_ps_surface_dsc_comment(self._pointer, encoded)
    # Raise if the comment put the surface into an error state.
    self._check_status()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.