signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def delete(self, force=False):
    """Delete this volume, optionally removing its blockers first.

    Volumes cannot be deleted if either a) they are attached to a device,
    or b) they have any snapshots. When ``force`` is True the volume is
    detached and all dependent snapshots are deleted before the base
    class delete() is invoked.
    """
    if force:
        # Clear both deletion blockers: detach first, then drop snapshots.
        self.detach()
        self.delete_all_snapshots()
    try:
        super(CloudBlockStorageVolume, self).delete()
    except exc.VolumeNotAvailable:
        # Notify the user? Record it somewhere?
        # For now, just re-raise.
        raise
def setup_parser():
    """Sets up the argument parser and returns it

    :returns: the parser
    :rtype: :class:`optparse.OptionParser`
    :raises: None
    """
    parser = optparse.OptionParser(usage="""\
usage: %prog [options] -o <output_path> <module_path> [exclude_path, ...]
Look recursively in <module_path> for Python modules and packages and create
one reST file with automodule directives per package in the <output_path>.
The <exclude_path>s can be files and/or directories that will be excluded
from generation.
Note: By default this script will not overwrite already created files.""")
    # (flags, keyword arguments) for every supported option; registered in a
    # single loop below so the table stays easy to scan.
    option_table = [
        (('-o', '--output-dir'),
         dict(action='store', dest='destdir', default='',
              help='Directory to place all output')),
        (('-d', '--maxdepth'),
         dict(action='store', dest='maxdepth', type='int', default=4,
              help='Maximum depth of submodules to show in the TOC (default: 4)')),
        (('-f', '--force'),
         dict(action='store_true', dest='force',
              help='Overwrite existing files')),
        (('-l', '--follow-links'),
         dict(action='store_true', dest='followlinks', default=False,
              help='Follow symbolic links. Powerful when combined '
                   'with collective.recipe.omelette.')),
        (('-n', '--dry-run'),
         dict(action='store_true', dest='dryrun',
              help='Run the script without creating files')),
        (('-e', '--separate'),
         dict(action='store_true', dest='separatemodules',
              help='Put documentation for each module on its own page')),
        (('-P', '--private'),
         dict(action='store_true', dest='includeprivate',
              help='Include "_private" modules')),
        (('-T', '--no-toc'),
         dict(action='store_true', dest='notoc',
              help="Don't create a table of contents file")),
        (('-E', '--no-headings'),
         dict(action='store_true', dest='noheadings',
              help="Don't create headings for the module/package "
                   "packages (e.g. when the docstrings already contain "
                   "them)")),
        (('-s', '--suffix'),
         dict(action='store', dest='suffix', default='rst',
              help='file suffix (default: rst)')),
        (('-F', '--full'),
         dict(action='store_true', dest='full',
              help='Generate a full project with sphinx-quickstart')),
        (('-H', '--doc-project'),
         dict(action='store', dest='header',
              help='Project name (default: root module name)')),
        (('-A', '--doc-author'),
         dict(action='store', dest='author', type='str',
              help='Project author(s), used when --full is given')),
        (('-V', '--doc-version'),
         dict(action='store', dest='version',
              help='Project version, used when --full is given')),
        (('-R', '--doc-release'),
         dict(action='store', dest='release',
              help='Project release, used when --full is given, '
                   'defaults to --doc-version')),
    ]
    for flags, kwargs in option_table:
        parser.add_option(*flags, **kwargs)
    return parser
def query(options, collection_name, num_to_skip, num_to_return, query,
          field_selector=None):
    """Get a **query** message."""
    # Assemble the OP_QUERY wire payload: flags, fully-qualified collection
    # name, skip/return counts, then the BSON-encoded query document.
    parts = [
        struct.pack("<I", options),
        bson._make_c_string(collection_name),
        struct.pack("<i", num_to_skip),
        struct.pack("<i", num_to_return),
        bson.BSON.encode(query),
    ]
    if field_selector is not None:
        parts.append(bson.BSON.encode(field_selector))
    # 2004 is the OP_QUERY opcode.
    return __pack_message(2004, b"".join(parts))
def AddStop(self, lat, lng, name, stop_id=None):
    """Add a stop to this schedule.

    Args:
      lat: Latitude of the stop as a float or string
      lng: Longitude of the stop as a float or string
      name: Name of the stop, which will appear in the feed
      stop_id: stop_id of the stop or None, in which case a unique id is picked

    Returns:
      A new Stop object
    """
    # Pick a unique id when the caller did not supply one.
    if stop_id is None:
        stop_id = util.FindUniqueId(self.stops)
    new_stop = self._gtfs_factory.Stop(
        stop_id=stop_id, lat=lat, lng=lng, name=name)
    self.AddStopObject(new_stop)
    return new_stop
def mix(self, cls):
    """Returns a subclass of `cls` mixed with `self.mixins`.

    :param cls: The base class to mix into
    :type cls: `class`
    """
    if hasattr(cls, 'unmixed_class'):
        # `cls` is itself the product of a previous mix(): recover the real
        # base class and merge its existing mixins with ours (keeping the old
        # order, appending only mixins not already present).
        base_class = cls.unmixed_class
        old_mixins = cls.__bases__[1:]  # Skip the original unmixed class
        mixins = old_mixins + tuple(
            mixin for mixin in self._mixins if mixin not in old_mixins)
    else:
        base_class = cls
        mixins = self._mixins
    # Cache key identifies the exact (base, mixin-tuple) combination.
    mixin_key = (base_class, mixins)
    if mixin_key not in _CLASS_CACHE:
        # Only lock if we're about to make a new class
        with _CLASS_CACHE_LOCK:
            # Use setdefault so that if someone else has already
            # created a class before we got the lock, we don't
            # overwrite it
            return _CLASS_CACHE.setdefault(
                mixin_key,
                type(base_class.__name__ + str('WithMixins'),  # type() requires native str
                     (base_class,) + mixins,
                     {'unmixed_class': base_class}))
    else:
        return _CLASS_CACHE[mixin_key]
def upload_resumable(self, fd, filesize, filehash, unit_hash, unit_id,
                     unit_size, quick_key=None, action_on_duplicate=None,
                     mtime=None, version_control=None, folder_key=None,
                     filedrop_key=None, path=None, previous_hash=None):
    """upload/resumable

    http://www.mediafire.com/developers/core_api/1.3/upload/#resumable
    """
    # Chunk metadata travels in custom x-* headers.
    headers = {
        'x-filesize': str(filesize),
        'x-filehash': filehash,
        'x-unit-hash': unit_hash,
        'x-unit-id': str(unit_id),
        'x-unit-size': str(unit_size),
    }
    params = QueryParams({
        'quick_key': quick_key,
        'action_on_duplicate': action_on_duplicate,
        'mtime': mtime,
        'version_control': version_control,
        'folder_key': folder_key,
        'filedrop_key': filedrop_key,
        'path': path,
        'previous_hash': previous_hash,
    })
    upload_info = {"fd": fd, "filename": "chunk"}
    return self.request('upload/resumable', params,
                        action_token_type="upload",
                        upload_info=upload_info, headers=headers)
def bool(self, state):
    """Returns the Boolean evaluation of the clause with respect to a given state

    Parameters
    ----------
    state : dict
        Key-value mapping describing a Boolean state or assignment

    Returns
    -------
    boolean
        The evaluation of the clause with respect to the given state or assignment
    """
    # Fold the conjunction, short-circuiting on the first falsy literal.
    result = 1
    for literal, polarity in self:
        truth = state[literal] if polarity == 1 else not state[literal]
        result = result and truth
        if not result:
            return result
    return result
def get_object(context):
    """Get an object from the context or view."""
    view = context.get('view')
    # The view is more reliable than an 'object' variable in the context;
    # works when the view is a SingleObjectMixin.
    obj = getattr(view, 'object', None) if view else None
    if obj is None:
        obj = context.get('object', None)
    return obj
def download_static_assets(doc, destination, base_url,
                           request_fn=make_request, url_blacklist=[],
                           js_middleware=None, css_middleware=None,
                           derive_filename=_derive_filename):
    """Download all static assets referenced from an HTML page.

    The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
    audio clips.

    Args:
        doc: The HTML page source as a string or BeautifulSoup instance.
        destination: The folder to download the static assets to!
        base_url: The base URL where assets will be downloaded from.
        request_fn: The function to be called to make requests, passed to
            ricecooker.utils.html.download_file(). Pass in a custom one for
            custom caching logic.
        url_blacklist: A list of keywords of files to not include in
            downloading. Will do substring matching, so e.g. 'acorn.js' will
            match '/some/path/to/acorn.js'.
        js_middleware: If specified, JS content will be passed into this
            callback which is expected to return JS content with any
            modifications.
        css_middleware: If specified, CSS content will be passed into this
            callback which is expected to return CSS content with any
            modifications.

    Return the modified page HTML with links rewritten to the locations of the
    downloaded static files, as a BeautifulSoup object. (Call str() on it to
    extract the raw HTML.)
    """
    # NOTE(review): mutable default `url_blacklist=[]` — harmless here since
    # the list is only read, never mutated, but confirm before extending.
    if not isinstance(doc, BeautifulSoup):
        doc = BeautifulSoup(doc, "html.parser")

    # Helper function to download all assets for a given CSS selector.
    def download_assets(selector, attr, url_middleware=None,
                        content_middleware=None, node_filter=None):
        nodes = doc.select(selector)
        for i, node in enumerate(nodes):
            if node_filter:
                if not node_filter(node):
                    # Filtered out: blank the attribute so the reference is
                    # dead rather than pointing at a live remote URL.
                    src = node[attr]
                    node[attr] = ''
                    print(' Skipping node with src ', src)
                    continue
            # Inline data: URIs need no downloading.
            if node[attr].startswith('data:'):
                continue
            url = urljoin(base_url, node[attr])
            if _is_blacklisted(url, url_blacklist):
                print(' Skipping downloading blacklisted url', url)
                node[attr] = ""
                continue
            if url_middleware:
                url = url_middleware(url)
            filename = derive_filename(url)
            # Rewrite the node to point at the local copy before downloading.
            node[attr] = filename
            print(" Downloading", url, "to filename", filename)
            download_file(url, destination, request_fn=request_fn,
                          filename=filename,
                          middleware_callbacks=content_middleware)

    def js_content_middleware(content, url, **kwargs):
        if js_middleware:
            content = js_middleware(content, url, **kwargs)
        # Polyfill localStorage and document.cookie as iframes can't access
        # them
        return (content
                .replace("localStorage", "_localStorage")
                .replace('document.cookie.split', '"".split')
                .replace('document.cookie', 'window._document_cookie'))

    def css_node_filter(node):
        # Only <link rel="stylesheet"> nodes are CSS; skip icons, preloads etc.
        return "stylesheet" in node["rel"]

    def css_content_middleware(content, url, **kwargs):
        if css_middleware:
            content = css_middleware(content, url, **kwargs)
        file_dir = os.path.dirname(urlparse(url).path)

        # Download linked fonts and images
        def repl(match):
            src = match.group(1)
            if src.startswith('//localhost'):
                return 'url()'
            # Don't download data: files
            if src.startswith('data:'):
                return match.group(0)
            src_url = urljoin(base_url, os.path.join(file_dir, src))
            if _is_blacklisted(src_url, url_blacklist):
                print(' Skipping downloading blacklisted url', src_url)
                return 'url()'
            derived_filename = derive_filename(src_url)
            download_file(src_url, destination, request_fn=request_fn,
                          filename=derived_filename)
            return 'url("%s")' % derived_filename

        return _CSS_URL_RE.sub(repl, content)

    # Download all linked static assets.
    download_assets("img[src]", "src")  # Images
    download_assets("link[href]", "href",
                    content_middleware=css_content_middleware,
                    node_filter=css_node_filter)  # CSS
    download_assets("script[src]", "src",
                    content_middleware=js_content_middleware)  # JS
    download_assets("source[src]", "src")  # Potentially audio
    download_assets("source[srcset]", "srcset")  # Potentially audio

    # ... and also run the middleware on CSS/JS embedded in the page source to
    # get linked files.
    for node in doc.select('style'):
        node.string = css_content_middleware(node.get_text(), url='')
    for node in doc.select('script'):
        if not node.attrs.get('src'):
            node.string = js_content_middleware(node.get_text(), url='')

    return doc
def prepare(self):
    """Method to check if the impact function can be run.

    :return: A tuple with the status of the IF and an error message if
        needed.
        The status is PREPARE_SUCCESS if everything was fine.
        The status is PREPARE_FAILED_BAD_INPUT if the client should fix
        something.
        The status is PREPARE_FAILED_INSUFFICIENT_OVERLAP if the client
        should fix the analysis extent.
        The status is PREPARE_FAILED_BAD_CODE if something went wrong
        from the code.
    :rtype: (int, m.Message)
    """
    self._provenance_ready = False
    # save layer reference before preparing.
    # used to display it in maps
    original_exposure = self.exposure
    original_hazard = self.hazard
    original_aggregation = self.aggregation
    try:
        # --- Exposure layer is compulsory and must pass validation. ---
        if not self.exposure:
            message = generate_input_error_message(
                tr('The exposure layer is compulsory'),
                m.Paragraph(tr(
                    'The impact function needs an exposure layer to run. '
                    'You must provide it.')))
            return PREPARE_FAILED_BAD_INPUT, message
        status, message = check_input_layer(self.exposure, 'exposure')
        if status != PREPARE_SUCCESS:
            return status, message
        # --- Hazard layer is compulsory and must pass validation. ---
        if not self.hazard:
            message = generate_input_error_message(
                tr('The hazard layer is compulsory'),
                m.Paragraph(tr(
                    'The impact function needs a hazard layer to run. '
                    'You must provide it.')))
            return PREPARE_FAILED_BAD_INPUT, message
        status, message = check_input_layer(self.hazard, 'hazard')
        if status != PREPARE_SUCCESS:
            return status, message
        # --- Aggregation layer vs. requested extent/CRS combinations. ---
        if self.aggregation:
            # An aggregation layer defines the analysis extent and CRS,
            # so an explicit extent/CRS or exposure-view-only mode would
            # conflict with it.
            if self._requested_extent:
                message = generate_input_error_message(
                    tr('Error with the requested extent'),
                    m.Paragraph(tr(
                        'Requested Extent must be null when an '
                        'aggregation is provided.')))
                return PREPARE_FAILED_BAD_INPUT, message
            if self._crs:
                message = generate_input_error_message(
                    tr('Error with the requested extent'),
                    m.Paragraph(tr(
                        'Requested Extent CRS must be null when an '
                        'aggregation is provided.')))
                return PREPARE_FAILED_BAD_INPUT, message
            if self.use_exposure_view_only:
                message = generate_input_error_message(
                    tr('Error with the requested extent'),
                    m.Paragraph(tr(
                        'Use exposure view only can not be set to True if '
                        'you use an aggregation layer.')))
                return PREPARE_FAILED_BAD_INPUT, message
            status, message = check_input_layer(self.aggregation, 'aggregation')
            # Capture provenance inputs here so they are available for the
            # success path in the `else` clause below.
            aggregation_source = full_layer_uri(self.aggregation)
            aggregation_keywords = copy_layer_keywords(
                self.aggregation.keywords)
            if status != PREPARE_SUCCESS:
                return status, message
        else:
            aggregation_source = None
            aggregation_keywords = None
            # Without an aggregation layer a CRS is mandatory.
            if not self._crs:
                message = generate_input_error_message(
                    tr('Error with the requested CRS'),
                    m.Paragraph(tr(
                        'CRS must be set when you don\'t use an '
                        'aggregation layer. It will be used for the '
                        'analysis CRS.')))
                return PREPARE_FAILED_BAD_INPUT, message
            if self.requested_extent and self.use_exposure_view_only:
                message = generate_input_error_message(
                    tr('Error with the requested extent'),
                    m.Paragraph(tr(
                        'Requested Extent must be null when you use the '
                        'exposure view only.')))
                return PREPARE_FAILED_BAD_INPUT, message
        # We need to check if the hazard is OK to run on the exposure.
        hazard_keywords = self.hazard.keywords
        exposure_key = self.exposure.keywords['exposure']
        if not active_thresholds_value_maps(hazard_keywords, exposure_key):
            warning_heading = m.Heading(
                tr('Incompatible exposure/hazard'), **WARNING_STYLE)
            warning_message = tr(
                'The hazard layer is not set up for this kind of '
                'exposure. In InaSAFE, you need to define keywords in the '
                'hazard layer for each exposure type that you want to use '
                'with the hazard.')
            suggestion_heading = m.Heading(
                tr('Suggestion'), **SUGGESTION_STYLE)
            suggestion = tr(
                'Please select the hazard layer in the legend and then '
                'run the keyword wizard to define the needed keywords for '
                '{exposure_type} exposure.').format(
                    exposure_type=exposure_key)
            message = m.Message()
            message.add(warning_heading)
            message.add(warning_message)
            message.add(suggestion_heading)
            message.add(suggestion)
            return PREPARE_FAILED_BAD_INPUT, message
        status, message = self._compute_analysis_extent()
        if status != PREPARE_SUCCESS:
            return status, message
        # Set the name
        hazard_name = get_name(self.hazard.keywords.get('hazard'))
        exposure_name = get_name(self.exposure.keywords.get('exposure'))
        hazard_geometry_name = get_name(geometry_type(self.hazard))
        exposure_geometry_name = get_name(geometry_type(self.exposure))
        self._name = tr(
            '{hazard_type} {hazard_geometry} On {exposure_type} '
            '{exposure_geometry}').format(
                hazard_type=hazard_name,
                hazard_geometry=hazard_geometry_name,
                exposure_type=exposure_name,
                exposure_geometry=exposure_geometry_name).title()
        # Set the title
        if self.exposure.keywords.get('exposure') == 'population':
            self._title = tr('need evacuation')
        else:
            self._title = tr('be affected')
        # Register every pre-processor whose condition matches this function.
        for pre_processor in pre_processors:
            if pre_processor['condition'](self):
                self._preprocessors.append(pre_processor)
    except Exception as e:
        if self.debug_mode:
            # We run in debug mode, we do not want to catch the exception.
            # You should download the First Aid plugin for instance.
            raise
        else:
            message = get_error_message(e)
            return PREPARE_FAILED_BAD_CODE, message
    else:
        # Everything was fine.
        self._is_ready = True
        set_provenance(self._provenance, provenance_exposure_layer,
                       full_layer_uri(self.exposure))
        # reference to original layer being used
        set_provenance(self._provenance, provenance_exposure_layer_id,
                       original_exposure.id())
        set_provenance(self._provenance, provenance_exposure_keywords,
                       copy_layer_keywords(self.exposure.keywords))
        set_provenance(self._provenance, provenance_hazard_layer,
                       full_layer_uri(self.hazard))
        # reference to original layer being used
        set_provenance(self._provenance, provenance_hazard_layer_id,
                       original_hazard.id())
        set_provenance(self._provenance, provenance_hazard_keywords,
                       copy_layer_keywords(self.hazard.keywords))
        # reference to original layer being used
        if original_aggregation:
            set_provenance(self._provenance, provenance_aggregation_layer_id,
                           original_aggregation.id())
        else:
            set_provenance(self._provenance, provenance_aggregation_layer_id,
                           None)
        set_provenance(self._provenance, provenance_aggregation_layer,
                       aggregation_source)
        set_provenance(self._provenance, provenance_aggregation_keywords,
                       aggregation_keywords)
        # Set output layer expected
        self._output_layer_expected = self._compute_output_layer_expected()
        return PREPARE_SUCCESS, None
def _safe_unicode(o):
    """Returns an equivalent unicode object, trying harder to avoid
    dependencies on the Python default encoding.

    :param o: any object (typically an exception or message) to stringify
    :return: a unicode/str representation that never raises
    """
    def clean(s):
        # Replace anything outside printable ASCII with '?'.
        return u''.join([c if c in ASCII_PRINTABLE else '?' for c in s])
    if USING_PYTHON2:
        # FIX: the original used bare `except:` clauses, which also swallow
        # SystemExit/KeyboardInterrupt; narrowed to `except Exception`.
        try:
            return unicode(o)
        except Exception:
            try:
                s = str(o)
                try:
                    return s.decode("utf-8")
                except Exception:
                    # Fall back to a sanitized prefix plus a hex dump of the
                    # raw bytes so no information is silently lost.
                    return (clean(s[:2048]) + u" [Raw error message: "
                            + unicode(s.encode("hex"), 'utf-8') + u"]")
            except Exception:
                return u"(Unable to decode Python exception message)"
    else:
        # Python 3: str() never depends on a default byte encoding.
        return str(o)
def _extract_submission(self, filename):
    """Extracts submission and moves it into self._extracted_submission_dir.

    :param filename: path to the submission archive (zip, tar or tar.gz)
    :return: True on success, False otherwise
    """
    # verify filesize
    file_size = os.path.getsize(filename)
    if file_size > MAX_SUBMISSION_SIZE_ZIPPED:
        logging.error('Submission archive size %d is exceeding limit %d',
                      file_size, MAX_SUBMISSION_SIZE_ZIPPED)
        return False
    # determine archive type
    extract_command_tmpl = get_extract_command_template(filename)
    if not extract_command_tmpl:
        logging.error('Input file has to be zip, tar or tar.gz archive; however '
                      'found: %s', filename)
        return False
    # extract archive inside a network-less busybox container: archive dir is
    # mounted as /input_dir, the scratch dir as /output_dir
    submission_dir = os.path.dirname(filename)
    submission_basename = os.path.basename(filename)
    logging.info('Extracting archive %s', filename)
    retval = shell_call(
        ['docker', 'run', '--network=none',
         '-v', '{0}:/input_dir'.format(submission_dir),
         '-v', '{0}:/output_dir'.format(self._tmp_extracted_dir),
         'busybox:1.27.2'] + extract_command_tmpl,
        src=os.path.join('/input_dir', submission_basename),
        dst='/output_dir')
    if not retval:
        logging.error('Failed to extract submission from file %s', filename)
        return False
    if not make_directory_writable(self._tmp_extracted_dir):
        return False
    # find submission root, ignoring macOS resource-fork directories
    root_dir = self._tmp_extracted_dir
    root_dir_content = [d for d in os.listdir(root_dir) if d != '__MACOSX']
    if (len(root_dir_content) == 1
            and os.path.isdir(os.path.join(root_dir, root_dir_content[0]))):
        logging.info('Looks like submission root is in subdirectory "%s" of '
                     'the archive', root_dir_content[0])
        root_dir = os.path.join(root_dir, root_dir_content[0])
    # Move files to self._extracted_submission_dir.
    # At this point self._extracted_submission_dir does not exist,
    # so following command will simply rename root_dir into
    # self._extracted_submission_dir
    if not shell_call(['mv', root_dir, self._extracted_submission_dir]):
        # FIX: the original adjacent literals 'Can' 't ...' rendered as
        # "Cant move ..." — restore the intended apostrophe.
        logging.error("Can't move submission files from root directory")
        return False
    return True
def update_reward(self, new_reward):
    """Updates reward value for policy.

    :param new_reward: New reward to save.
    """
    # Run the graph op that stores the reward, feeding the value through the
    # model's `new_reward` placeholder.
    self.sess.run(self.model.update_reward,
                  feed_dict={self.model.new_reward: new_reward})
def _create(archive, compression, cmd, format, verbosity, filenames):
    """Create an LZMA or XZ archive with the lzma Python module."""
    if len(filenames) > 1:
        raise util.PatoolError('multi-file compression not supported in Python lzma')
    try:
        with lzma.LZMAFile(archive, mode='wb',
                           **_get_lzma_options(format, preset=9)) as lzmafile:
            source_path = filenames[0]
            # Stream the single input file through in fixed-size chunks.
            with open(source_path, 'rb') as srcfile:
                while True:
                    chunk = srcfile.read(READ_SIZE_BYTES)
                    if not chunk:
                        break
                    lzmafile.write(chunk)
    except Exception as err:
        msg = "error creating %s: %s" % (archive, err)
        raise util.PatoolError(msg)
    return None
def tk_to_dagcircuit(circ: Circuit, _qreg_name: str = "q") -> DAGCircuit:
    """Convert a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit` to a
    :py:class:`qiskit.DAGCircuit`. Requires that the circuit only contains
    :py:class:`OpType`\\ s from the qelib set.

    :param circ: A circuit to be converted
    :param _qreg_name: name given to the single quantum register of the DAG
    :return: The converted circuit
    """
    dc = DAGCircuit()
    qreg = QuantumRegister(circ.n_qubits(), name=_qreg_name)
    dc.add_qreg(qreg)
    # Route the circuit into time slices; vertices within one slice are
    # independent and can be emitted in any order.
    grid = circ._int_routing_grid()
    slices = _grid_to_slices(grid)
    qubits = _grid_to_qubits(grid, qreg)
    in_boundary = circ._get_boundary()[0]
    out_boundary = circ._get_boundary()[1]
    for s in slices:
        for v in s:
            o = circ._unsigned_to_op(v)
            qargs = [qubits[(v, i)] for i in range(o.get_n_inputs())]
            name, cargs, params = _translate_ops(circ, v)
            if cargs:
                # Make sure the DAG has classical registers for these bits.
                _extend_cregs(dc, cargs)
            if name:
                # Register the gate in the DAG's basis before applying it.
                dc.add_basis_element(name, o.get_n_inputs(),
                                     number_classical=len(cargs),
                                     number_parameters=len(params))
            ins = Instruction(name, list(map(_normalise_param_out, params)),
                              qargs, cargs)
            dc.apply_operation_back(ins, qargs=qargs, cargs=cargs)
    # Rewire the DAG output nodes so that outputs line up with the tket
    # circuit's input boundary ordering.
    tk2dg_outs = {}
    for v in out_boundary:
        tk2dg_outs[v] = dc.output_map[qubits[(v, 0)]]
    for i, v in enumerate(out_boundary):
        dc.multi_graph.node[tk2dg_outs[v]]["wire"] = [
            qubits[(in_boundary[i], 0)]]
        dc.output_map[qubits[(in_boundary[i], 0)]] = tk2dg_outs[v]
    return dc
def get(self, columns=None):
    """Execute the query as a "select" statement.

    :param columns: The columns to get
    :type columns: list

    :rtype: orator.Collection
    """
    models = self.get_models(columns)
    # Eager-load any relationships that were marked for eager loading, which
    # solves the N+1 query issue for the developers.
    if len(models) > 0:
        models = self.eager_load_relations(models)
    return self._model.new_collection(models)
def forced_insert(self):
    """Insert tokens if self.insert_till hasn't been reached yet

    Will respect self.inserted_line and make sure token is inserted before it
    Returns True if it appends anything or if it reached the insert_till token
    """
    # NOTE(review): when self.insert_till is falsy this method falls through
    # and implicitly returns None — confirm callers only test truthiness.
    # If we have any tokens we are waiting for
    if self.insert_till:
        # Determine where to append this token: -1 means "at the end",
        # otherwise count back past the already-inserted line.
        append_at = -1
        if self.inserted_line:
            append_at = -self.inserted_line + 1
        # Reset insert_till if we found it
        if self.current.tokenum == self.insert_till[0] and \
                self.current.value == self.insert_till[1]:
            self.insert_till = None
        else:
            # Adjust self.adjust_indent_at to take into account the new token
            for index, value in enumerate(self.adjust_indent_at):
                if value < len(self.result) - append_at:
                    self.adjust_indent_at[index] = value + 1
            # Insert the new token
            self.result.insert(
                append_at, (self.current.tokenum, self.current.value))
        # We appended the token
        return True
def Debugger_setScriptSource(self, scriptId, scriptSource, **kwargs):
    """Function path: Debugger.setScriptSource
    Domain: Debugger
    Method name: setScriptSource

    Parameters:
        Required arguments:
            'scriptId' (type: Runtime.ScriptId) -> Id of the script to edit.
            'scriptSource' (type: string) -> New content of the script.
        Optional arguments:
            'dryRun' (type: boolean) -> If true the change will not actually
                be applied. Dry run may be used to get result description
                without actually modifying the code.
    Returns:
        'callFrames' (type: array) -> New stack trace in case editing has
            happened while VM was stopped.
        'stackChanged' (type: boolean) -> Whether current call stack was
            modified after applying the changes.
        'asyncStackTrace' (type: Runtime.StackTrace) -> Async stack trace,
            if any.
        'exceptionDetails' (type: Runtime.ExceptionDetails) -> Exception
            details if any.

    Description: Edits JavaScript source live.
    """
    # Validate argument types up front; messages are part of the contract.
    assert isinstance(scriptSource, (str,)), \
        "Argument 'scriptSource' must be of type '['str']'. Received type: '%s'" % type(scriptSource)
    if 'dryRun' in kwargs:
        assert isinstance(kwargs['dryRun'], (bool,)), \
            "Optional argument 'dryRun' must be of type '['bool']'. Received type: '%s'" % type(kwargs['dryRun'])
    expected = ['dryRun']
    passed_keys = list(kwargs.keys())
    assert all([(key in expected) for key in passed_keys]), \
        "Allowed kwargs are ['dryRun']. Passed kwargs: %s" % passed_keys
    # Forward the validated call to the synchronous command dispatcher.
    return self.synchronous_command(
        'Debugger.setScriptSource',
        scriptId=scriptId, scriptSource=scriptSource, **kwargs)
def explode(self, hostgroups, contactgroups):
    """Explode hosts with hostgroups, contactgroups::

    * Add contact from contactgroups to host contacts
    * Add host into their hostgroups as hostgroup members

    :param hostgroups: Hostgroups to explode
    :type hostgroups: alignak.objects.hostgroup.Hostgroups
    :param contactgroups: Contactgorups to explode
    :type contactgroups: alignak.objects.contactgroup.Contactgroups
    :return: None
    """
    # Templates first: pull every contact of their contact_groups into
    # their own contact property.
    for template in list(self.templates.values()):
        self.explode_contact_groups_into_contacts(template, contactgroups)
    # Then real hosts: same contact expansion, plus registering the host
    # as a member of each of its hostgroups.
    for host in self:
        self.explode_contact_groups_into_contacts(host, contactgroups)
        if hasattr(host, 'host_name') and hasattr(host, 'hostgroups'):
            host_name = host.host_name
            for group_name in host.hostgroups:
                hostgroups.add_member(host_name, group_name.strip())
def val_to_json(key, val, mode="summary", step=None):
    """Converts a wandb datatype to its JSON representation"""
    converted = val
    typename = util.get_full_typename(val)
    if util.is_matplotlib_typename(typename):
        # This handles plots with images in it because plotly doesn't support it
        # TODO: should we handle a list of plots?
        val = util.ensure_matplotlib_figure(val)
        if any(len(ax.images) > 0 for ax in val.axes):
            PILImage = util.get_module(
                "PIL.Image",
                required="Logging plots with images requires pil: pip install pillow")
            # Rasterize the figure so the embedded images survive conversion.
            buf = six.BytesIO()
            val.savefig(buf)
            val = Image(PILImage.open(buf))
        else:
            converted = util.convert_plots(val)
    elif util.is_plotly_typename(typename):
        converted = util.convert_plots(val)
    # Normalize a single media object to a one-element list so the sequence
    # handling below covers both cases.
    if isinstance(val, IterableMedia):
        val = [val]
    # NOTE(review): collections.Sequence moved to collections.abc in 3.3 and
    # was removed from `collections` in Python 3.10 — confirm the supported
    # Python versions before upgrading this code.
    if isinstance(val, collections.Sequence) and len(val) > 0:
        is_media = [isinstance(v, IterableMedia) for v in val]
        if all(is_media):
            cwd = wandb.run.dir if wandb.run else "."
            if step is None:
                step = "summary"
            # Dispatch on the first element's media type; mixed lists are
            # rejected below.
            if isinstance(val[0], Image):
                converted = Image.transform(val, cwd,
                                            "{}_{}.jpg".format(key, step))
            elif isinstance(val[0], Audio):
                converted = Audio.transform(val, cwd, key, step)
            elif isinstance(val[0], Html):
                converted = Html.transform(val, cwd, key, step)
            elif isinstance(val[0], Object3D):
                converted = Object3D.transform(val, cwd, key, step)
        elif any(is_media):
            raise ValueError("Mixed media types in the same list aren't supported")
    elif isinstance(val, Histogram):
        converted = Histogram.transform(val)
    elif isinstance(val, Graph):
        if mode == "history":
            raise ValueError("Graphs are only supported in summary")
        converted = Graph.transform(val)
    elif isinstance(val, Table):
        converted = Table.transform(val)
    return converted
def create_client():
    """Create a new client driver. The driver is automatically created in
    before_request function."""
    # The driver already exists iff this request's client id is registered.
    return jsonify({'Success': g.client_id in drivers})
def apply_bios_properties_filter(settings, filter_to_be_applied):
    """Applies the filter to return the dict of filtered BIOS properties.

    :param settings: dict of BIOS settings on which filter to be applied.
    :param filter_to_be_applied: list of keys to be applied as filter.
    :returns: A dictionary of filtered BIOS settings.
    """
    # With nothing to filter (or nothing to filter by), hand back the input
    # unchanged.
    if not settings or not filter_to_be_applied:
        return settings
    filtered = {}
    for key in filter_to_be_applied:
        if key in settings:
            filtered[key] = settings[key]
    return filtered
def can_aquire_user_lock(repository_path, session_token):
    """Allow a user to acquire the lock if no other user is currently using it, if the original
    user is returning, presumably after a network error, or if the lock has expired."""
    # NOTE ALWAYS use within lock access callback
    user_file_path = cpjoin(repository_path, 'user_file')
    # No lock file at all -> lock is free.
    if not os.path.isfile(user_file_path):
        return True
    with open(user_file_path, 'r') as fd2:
        content = fd2.read()
    # Empty or unparsable lock file is treated as no lock.
    if len(content) == 0:
        return True
    try:
        res = json.loads(content)
    except ValueError:
        return True
    # Grant when the lock has expired, or the same session is returning.
    if res['expires'] < int(time.time()):
        return True
    if res['session_token'] == session_token:
        return True
    return False
def freeze_encrypt(dest_dir, zip_filename, config, opt):
    """Encrypts the zip file"""
    pgp_keys = grok_keys(config)
    # Default prefix is derived from the secretfile's parent directory name,
    # unless the user supplied an explicit prefix.
    icefile_prefix = "aomi-%s" % \
        os.path.basename(os.path.dirname(opt.secretfile))
    if opt.icefile_prefix:
        icefile_prefix = opt.icefile_prefix
    timestamp = time.strftime("%H%M%S-%m-%d-%Y",
                              datetime.datetime.now().timetuple())
    ice_file = "%s/%s-%s.ice" % (dest_dir, icefile_prefix, timestamp)
    if not encrypt(zip_filename, ice_file, pgp_keys):
        raise aomi.exceptions.GPG("Unable to encrypt zipfile")
    return ice_file
def add(self, name, target):
    """Bind *name* to *target* (a Table or SyntheticTable) in this scope."""
    allowed = (table.Table, SyntheticTable)
    if not isinstance(target, allowed):
        raise TypeError(type(target), target)
    # Refusing to rebind an existing name is critical for avoiding cycles.
    if name in self:
        raise ScopeCollisionError('scope already has', name)
    self.names[name] = target
def get_allowed_units(self, database, username, relation_id=None):
    """Get the set of units with access grants for ``database``/``username``.

    Typically used to provide shared-db relations with the list of units
    that have been granted access to the given database.

    :param database: database name to check grants against.
    :param username: MySQL username the grants were created for.
    :param relation_id: optional relation id scoping the related units.
    :returns: set of unit names that have at least one matching grant.
    """
    self.connect(password=self.get_mysql_root_password())
    allowed_units = set()
    for unit in related_units(relation_id):
        settings = relation_get(rid=relation_id, unit=unit)
        # First check for setting with prefix, then without.
        for attr in ["%s_hostname" % (database), 'hostname']:
            hosts = settings.get(attr, None)
            if hosts:
                break
        if hosts:
            # hostname can be a json-encoded list of hostnames
            try:
                hosts = json.loads(hosts)
            except ValueError:
                # Plain string hostname: wrap in a list.
                hosts = [hosts]
        else:
            # Fall back to the unit's private address.
            hosts = [settings['private-address']]
        if hosts:
            for host in hosts:
                host = self.normalize_address(host)
                if self.grant_exists(database, username, host):
                    log("Grant exists for host '%s' on db '%s'"
                        % (host, database), level=DEBUG)
                    if unit not in allowed_units:
                        allowed_units.add(unit)
                else:
                    log("Grant does NOT exist for host '%s' on db '%s'"
                        % (host, database), level=DEBUG)
        else:
            log("No hosts found for grant check", level=INFO)
    return allowed_units
def user_exists_p(login, connector):
    """Return True when *login* exists in the connected Pulp environment."""
    response = connector.get('/users/' + login + '/')
    return response.status_code == Constants.PULP_GET_OK
def showEvent(self, event):
    """Qt show-event handler: start the waiting spinner when shown."""
    QWidget.showEvent(self, event)
    self.spinner.start()
def football_data(season='1617', data_set='football_data'):
    """Football data from English games since 1993.

    Downloads data from football-data.co.uk for the given season and
    returns a design matrix X (league, match day, home team, away team)
    with responses Y (home score, away score).

    :param season: two 2-digit years, e.g. '1617' for 2016/17.
    :param data_set: base name of the registered data resource to copy
        and specialise for this season.
    """
    league_dict = {'E0': 0, 'E1': 1, 'E2': 2, 'E3': 3, 'EC': 4}

    def league2num(string):
        # Map a league code (possibly bytes) to its integer id.
        if isinstance(string, bytes):
            string = string.decode('utf-8')
        return league_dict[string]

    def football2num(string):
        # Map a team name to a stable integer id, growing the shared
        # football_dict (module-level) as new teams appear.
        if isinstance(string, bytes):
            string = string.decode('utf-8')
        if string in football_dict:
            return football_dict[string]
        else:
            football_dict[string] = len(football_dict) + 1
            return len(football_dict) + 1

    def datestr2num(s):
        # Convert a dd/mm/yy date field to a matplotlib date number.
        import datetime
        from matplotlib.dates import date2num
        return date2num(datetime.datetime.strptime(s.decode('utf-8'),
                                                   '%d/%m/%y'))
    data_set_season = data_set + '_' + season
    # Clone the base resource and point its URL at the requested season.
    data_resources[data_set_season] = copy.deepcopy(data_resources[data_set])
    data_resources[data_set_season]['urls'][0] += season + '/'
    start_year = int(season[0:2])
    end_year = int(season[2:4])  # NOTE(review): currently unused — confirm
    files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv']
    # EC (Conference) data is only fetched for some seasons — presumably
    # the range where football-data.co.uk publishes it; TODO confirm.
    if start_year > 4 and start_year < 93:
        files += ['EC.csv']
    data_resources[data_set_season]['files'] = [files]
    if not data_available(data_set_season):
        download_data(data_set_season)
    start = True
    for file in reversed(files):
        filename = os.path.join(data_path, data_set_season, file)
        # Rewrite each file to temp.csv with blank rows removed so
        # np.loadtxt does not choke on empty lines.
        writename = os.path.join(data_path, data_set_season, 'temp.csv')
        input = open(filename, encoding='ISO-8859-1')  # NOTE: shadows builtin
        output = open(writename, 'w')
        writer = csv.writer(output)
        for row in csv.reader(input):
            if any(field.strip() for field in row):
                writer.writerow(row)
        input.close()
        output.close()
        table = np.loadtxt(writename, skiprows=1, usecols=(0, 1, 2, 3, 4, 5),
                           converters={0: league2num, 1: datestr2num,
                                       2: football2num, 3: football2num},
                           delimiter=',')
        if start:
            X = table[:, :4]
            Y = table[:, 4:]
            start = False
        else:
            X = np.append(X, table[:, :4], axis=0)
            Y = np.append(Y, table[:, 4:], axis=0)
    return data_details_return(
        {'X': X, 'Y': Y,
         'covariates': [discrete(league_dict, 'league'),
                        datenum('match_day'),
                        discrete(football_dict, 'home team'),
                        discrete(football_dict, 'away team')],
         'response': [integer('home score'), integer('away score')]},
        data_set)
def _verified_iv_length ( iv_length , algorithm_suite ) : # type : ( int , AlgorithmSuite ) - > int
"""Verify an IV length for an algorithm suite .
: param int iv _ length : IV length to verify
: param AlgorithmSuite algorithm _ suite : Algorithm suite to verify against
: return : IV length
: rtype : int
: raises SerializationError : if IV length does not match algorithm suite""" | if iv_length != algorithm_suite . iv_len :
raise SerializationError ( "Specified IV length ({length}) does not match algorithm IV length ({algorithm})" . format ( length = iv_length , algorithm = algorithm_suite ) )
return iv_length |
def equals(self, other):
    """Ensures :attr:`subject` is equal to *other*."""
    comparison_args = (self._subject, other)
    self._run(unittest_case.assertEqual, comparison_args)
    return ChainInspector(self._subject)
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
      formatter_mediator (FormatterMediator): mediates the interactions
          between formatters and other components, such as storage and
          Windows EventLog resources.
      event (EventObject): event.

    Returns:
      tuple(str, str): formatted message string and short message string.

    Raises:
      WrongFormatter: if the event object cannot be formatted by the
          formatter.
    """
    if self.DATA_TYPE != event.data_type:
      raise errors.WrongFormatter(
          'Unsupported data type: {0:s}.'.format(event.data_type))
    event_values = event.CopyToDict()
    page_transition_type = event_values.get('page_transition_type', None)
    if page_transition_type is not None:
      # Map the numeric transition type onto (short, long) descriptions.
      page_transition, page_transition_long = self._PAGE_TRANSITIONS.get(
          page_transition_type, self._UNKNOWN_PAGE_TRANSITION)
      if page_transition_long:
        event_values['page_transition'] = '{0:s} - {1:s}'.format(
            page_transition, page_transition_long)
      else:
        event_values['page_transition'] = page_transition
    visit_source = event_values.get('visit_source', None)
    if visit_source is not None:
      event_values['visit_source'] = self._VISIT_SOURCE.get(
          visit_source, 'UNKNOWN')
    # Collect extra annotations about how the URL was visited.
    extras = []
    url_hidden = event_values.get('url_hidden', False)
    if url_hidden:
      extras.append('(url hidden)')
    typed_count = event_values.get('typed_count', 0)
    if typed_count == 0:
      extras.append('(URL not typed directly - no typed count)')
    elif typed_count == 1:
      extras.append('(type count {0:d} time)'.format(typed_count))
    else:
      extras.append('(type count {0:d} times)'.format(typed_count))
    event_values['extra'] = ' '.join(extras)
    return self._ConditionalFormatMessages(event_values)
def destroy(self):
    '''Tear down the minion.

    Idempotent: returns immediately when already stopped. Detaches and
    closes the publish channel and stops periodic callbacks.
    '''
    if self._running is False:
        return
    self._running = False
    if hasattr(self, 'schedule'):
        del self.schedule
    if hasattr(self, 'pub_channel') and self.pub_channel is not None:
        # Detach the receive callback before closing the channel.
        self.pub_channel.on_recv(None)
        if hasattr(self.pub_channel, 'close'):
            self.pub_channel.close()
        del self.pub_channel
    if hasattr(self, 'periodic_callbacks'):
        for cb in six.itervalues(self.periodic_callbacks):
            cb.stop()
def add_pipeline(subparsers):
    """Pipeline subcommands."""
    pipeline_parser = subparsers.add_parser(
        'pipeline', help=add_pipeline.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # With no subcommand given, print the pipeline help.
    pipeline_parser.set_defaults(func=pipeline_parser.print_help)
    pipeline_subparsers = pipeline_parser.add_subparsers(title='Pipelines')
    pipeline_full_parser = pipeline_subparsers.add_parser(
        'app', help=runner.prepare_app_pipeline.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    pipeline_full_parser.set_defaults(func=runner.prepare_app_pipeline)
    pipeline_onetime_parser = pipeline_subparsers.add_parser(
        'onetime', help=runner.prepare_onetime_pipeline.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    pipeline_onetime_parser.set_defaults(func=runner.prepare_onetime_pipeline)
    # Only the onetime pipeline takes an environment argument.
    add_env(pipeline_onetime_parser)
def unlock(self, request, *args, **kwargs):
    """Unlocks the considered topic and redirects the user to the
    success URL."""
    self.object = self.get_object()
    success_url = self.get_success_url()
    self.object.status = Topic.TOPIC_UNLOCKED
    self.object.save()
    messages.success(self.request, self.success_message)
    return HttpResponseRedirect(success_url)
def _get_cpu_info_from_sysctl():
    '''Returns the CPU info gathered from sysctl.

    Returns {} if sysctl is not found, exits non-zero, or any parsing
    step fails.
    '''
    try:
        # Just return {} if there is no sysctl
        if not DataSource.has_sysctl():
            return {}
        # If sysctl fails return {}
        returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
        if output is None or returncode != 0:
            return {}
        # Various fields
        vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
        processor_brand = _get_field(True, output, None, None,
                                     'machdep.cpu.brand_string')
        cache_size = _get_field(False, output, None, None,
                                'machdep.cpu.cache.size')
        stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
        model = _get_field(False, output, int, 0, 'machdep.cpu.model')
        family = _get_field(False, output, int, 0, 'machdep.cpu.family')
        # Flags
        flags = _get_field(False, output, None, '',
                           'machdep.cpu.features').lower().split()
        flags.extend(_get_field(False, output, None, '',
                                'machdep.cpu.leaf7_features').lower().split())
        flags.extend(_get_field(False, output, None, '',
                                'machdep.cpu.extfeatures').lower().split())
        flags.sort()
        # Convert from GHz/MHz string to Hz
        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
        hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
        hz_actual = _to_decimal_string(hz_actual)
        info = {
            'vendor_id_raw': vendor_id,
            'brand_raw': processor_brand,
            'hz_advertised_friendly': _hz_short_to_friendly(hz_advertised, scale),
            'hz_actual_friendly': _hz_short_to_friendly(hz_actual, 0),
            'hz_advertised': _hz_short_to_full(hz_advertised, scale),
            'hz_actual': _hz_short_to_full(hz_actual, 0),
            'l2_cache_size': _to_friendly_bytes(cache_size),
            'stepping': stepping,
            'model': model,
            'family': family,
            'flags': flags,
        }
        # Drop empty fields.
        info = {k: v for k, v in info.items() if v}
        return info
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; best-effort "return {} on failure" behavior
        # is otherwise preserved.
        return {}
def wgan(cls, data: DataBunch, generator: nn.Module, critic: nn.Module,
         switcher: Callback = None, clip: float = 0.01, **learn_kwargs):
    "Create a WGAN from `data`, `generator` and `critic`."
    # WGAN uses no generator-side loss (NoopLoss) and the Wasserstein
    # loss for the critic; `clip` bounds the critic's weights.
    return cls(data, generator, critic, NoopLoss(), WassersteinLoss(),
               switcher=switcher, clip=clip, **learn_kwargs)
def docs_client(self):
    """A DocsClient singleton, used to look up spreadsheets by name.

    Lazily created (including the Google login) on first access and
    cached on the instance.
    """
    if not hasattr(self, '_docs_client'):
        client = DocsClient()
        client.ClientLogin(self.google_user, self.google_password, SOURCE_NAME)
        self._docs_client = client
    return self._docs_client
def arcsine_sqrt_transform(rel_abd):
    """Apply the variance-stabilizing arcsine square root transform.

    Takes the proportion data from relative_abundance() and maps each
    proportion p to asin(sqrt(p)).

    :param rel_abd: nested dict {col_id: {row_id: proportion}}.
    :returns: nested dict of the same shape with transformed values.
    """
    transformed = {}
    for col_id, rows in rel_abd.items():
        transformed[col_id] = {
            row_id: math.asin(math.sqrt(proportion))
            for row_id, proportion in rows.items()
        }
    return transformed
def pxe(hostname, timeout=20, username=None, password=None):
    '''Connect to the Dell DRAC, set the boot order to PXE
    and power cycle the system to PXE boot.

    CLI Example:

    .. code-block:: bash

        salt-run drac.pxe example.com
    '''
    _cmds = [
        'racadm config -g cfgServerInfo -o cfgServerFirstBootDevice pxe',
        'racadm config -g cfgServerInfo -o cfgServerBootOnce 1',
        'racadm serveraction powercycle',
    ]
    client = __connect(hostname, timeout, username, password)
    if isinstance(client, paramiko.SSHClient):
        for i, cmd in enumerate(_cmds, 1):
            log.info('Executing command %s', i)
            (stdin, stdout, stderr) = client.exec_command(cmd)
            # NOTE(review): the info/error messages below look swapped —
            # the success branch logs "Executing command" — confirm intent.
            if 'successful' in stdout.readline():
                log.info('Executing command: %s', cmd)
            else:
                log.error('Unable to execute: %s', cmd)
                return False
    return True
def confirm(text='', title='', buttons=['OK', 'Cancel']):
    """Displays a message box with OK and Cancel buttons and returns the
    text of the button clicked on.

    NOTE(review): the ``buttons`` texts are never passed to the native
    call — the box always shows OK/Cancel and ``buttons`` only labels
    the return value. The mutable default argument is also a known
    anti-pattern (harmless here since ``buttons`` is not mutated).
    """
    retVal = messageBoxFunc(
        0, text, title,
        MB_OKCANCEL | MB_ICONQUESTION | MB_SETFOREGROUND | MB_TOPMOST)
    # IDOK == 1, IDCANCEL == 2 for the Win32 MessageBox API.
    if retVal == 1 or len(buttons) == 1:
        return buttons[0]
    elif retVal == 2:
        return buttons[1]
    else:
        assert False, 'Unexpected return value from MessageBox: %s' % (retVal)
def set(self, key: bytes, value: bytes) -> Tuple[Hash32]:
    """Set ``key`` to ``value`` in the sparse merkle tree.

    Returns all updated node hashes in root -> leaf order.
    """
    validate_is_bytes(key)
    validate_length(key, self._key_size)
    validate_is_bytes(value)
    path = to_int(key)
    node = value
    _, branch = self._get(key)
    proof_update = []  # keep track of proof updates
    target_bit = 1
    # branch is in root -> leaf order, so flip
    for sibling_node in reversed(branch):
        # Set
        node_hash = keccak(node)
        proof_update.append(node_hash)
        self.db[node_hash] = node
        # Update: concatenate with the sibling on the side selected by
        # the current bit of the path.
        if (path & target_bit):
            node = sibling_node + node_hash
        else:
            node = node_hash + sibling_node
        target_bit <<= 1
    # Finally, update root hash
    self.root_hash = keccak(node)
    self.db[self.root_hash] = node
    # updates need to be in root -> leaf order, so flip back
    return tuple(reversed(proof_update))
def _decode ( self , obj , context ) :
"""Get the python representation of the obj""" | return b'' . join ( map ( int2byte , [ c + 0x60 for c in bytearray ( obj ) ] ) ) . decode ( "utf8" ) |
def histogram(a, bins):
    """Compute the histogram of a set of data.

    :param a: input data (a bare scalar is treated as a one-element list).
    :param bins: monotonically increasing sequence of bin edges.
    :type a: list | tuple
    :type bins: list
    :return: (counts, bins) where counts has ``len(bins) - 1`` entries.
    :raises ValueError: if ``bins`` does not increase monotonically.
    """
    if any(map(lambda x: x < 0, diff(bins))):
        raise ValueError('bins must increase monotonically.')
    try:
        sa = sorted(a)
    except TypeError:
        # Perhaps just a single value? Treat as a list and carry on.
        sa = sorted([a])
    # Cumulative counts of values falling at each interior bin edge,
    # mirroring np.searchsorted(sa, bins[:-1], 'left').
    nl = list(accumulate(
        [Counter(map(lambda x: bisect_left(bins[:-1], x), sa))[i]
         for i in range(len(bins) - 1)]))
    # Count for the last edge, mirroring np.searchsorted(sa, bins[-1],
    # 'right'). NOTE: previously indexed bins[1] — a typo that only
    # coincided with bins[-1] for two-edge histograms.
    nr = Counter(map(lambda x: bisect_right([bins[-1]], x), sa))[1]
    n = list(nl) + [nr]
    return diff(n), bins
async def set_contents(self, **params):
    """Writes user content metadata to the database.

    Accepts (directly, or json-encoded under a ``message`` key):
        - public_key (required; must belong to an existing account)
        - txid, hash, coinid, access, cid

    Returns ``{"result": "ok"}`` on success, or an error dict with an
    HTTP-style status code.
    """
    if params.get("message"):
        # The payload may arrive json-encoded under "message".
        params = json.loads(params.get("message", "{}"))
    if not params:
        return {"error": 400, "reason": "Missed required fields"}
    txid = params.get("txid")
    public_key = params.get("public_key")
    _hash = params.get("hash")
    coinid = params.get("coinid")
    access = params.get("access")
    cid = params.get("cid")
    # Try to get account
    account = await self.collection.find_one({"public_key": public_key})
    # Return error if it does not exist
    if not account:
        return {"error": 404, "reason": "Account was not found"}
    # NOTE(review): assumes a module-level mongo `client`; `coinid`
    # selects the database and `access` the collection — confirm.
    database = client[coinid]
    content_collection = database[access]
    await content_collection.insert_one(
        {"owner": public_key, "cid": cid, "txid": txid, "hash": _hash})
    # Read back to verify the write landed.
    success = await content_collection.find_one({"txid": txid})
    if not success:
        return {"error": 500,
                "reason": "Error while writing content to database"}
    else:
        return {"result": "ok"}
def _loadConfig(self):
    """Load configuration from a predictable location.

    Reads ``<config path>/<name>.yml`` when present; otherwise loads the
    default configuration and persists it there for next time.

    :return: the config.
    """
    configPath = path.join(self._getConfigPath(), self._name + ".yml")
    if os.path.exists(configPath):
        self.logger.warning("Loading config from " + configPath)
        with open(configPath, 'r') as yml:
            # FullLoader avoids arbitrary object construction.
            return yaml.load(yml, Loader=yaml.FullLoader)
    defaultConfig = self.loadDefaultConfig()
    self._storeConfig(defaultConfig, configPath)
    return defaultConfig
def compare_contract_versions(
        proxy: ContractProxy,
        expected_version: str,
        contract_name: str,
        address: Address,
) -> None:
    """Compare version strings of a contract.

    If not matching raise ContractVersionMismatch. Also may raise
    AddressWrongContract if the contract contains no code.
    """
    assert isinstance(expected_version, str)
    try:
        deployed_version = proxy.contract.functions.contract_version().call()
    except BadFunctionCallOutput:
        # Calling a non-contract address yields undecodable output.
        raise AddressWrongContract('')
    # NOTE(review): '_' appears to be a pre-release marker normalized to
    # '0' so the dotted parts parse as ints — confirm.
    deployed_version = deployed_version.replace('_', '0')
    expected_version = expected_version.replace('_', '0')
    deployed = [int(x) for x in deployed_version.split('.')]
    expected = [int(x) for x in expected_version.split('.')]
    if deployed != expected:
        raise ContractVersionMismatch(
            f'Provided {contract_name} contract ({to_normalized_address(address)}) '
            f'version mismatch. Expected: {expected_version} Got: {deployed_version}',
        )
def get_all_prerequisites(self):
    """Return all unique (order-only) prerequisites across all batches
    of this Executor."""
    result = SCons.Util.UniqueList([])
    for target in self.get_all_targets():
        prereqs = target.prerequisites
        if prereqs is not None:
            result.extend(prereqs)
    return result
def setup(self):
    """Setup the networks.

    Setup only does stuff if there are no networks; this is so it only
    runs once at the start of the experiment. It first calls the same
    function in the super (see experiments.py in dallinger). Then it
    adds a DrawingSource to each network.
    """
    if not self.networks():
        super(IteratedDrawing, self).setup()
        for net in self.networks():
            self.models.DrawingSource(network=net)
def _vector_coef_op_right(func):
    """Decorator for operator overloading when VectorCoefs is on the right.

    The wrapped operator is applied element-wise to both internal
    coefficient vectors; only scalar (numeric) operands are supported.
    """
    @wraps(func)
    def verif(self, vcoef):
        if isinstance(vcoef, numbers.Number):
            return VectorCoefs(
                func(self, self.scoef1._vec, vcoef),
                func(self, self.scoef2._vec, vcoef),
                self.nmax, self.mmax)
        else:
            raise TypeError(err_msg['no_combi_VC'])
    return verif
def full_s(self):
    """Get the full singular value matrix of self.

    Returns:
        Matrix: the singular values embedded (top-left) in a zero
        matrix of this matrix's full shape.
    """
    x = np.zeros((self.shape), dtype=np.float32)
    x[:self.s.shape[0], :self.s.shape[0]] = self.s.as_2d
    s = Matrix(x=x, row_names=self.row_names, col_names=self.col_names,
               isdiagonal=False, autoalign=False)
    return s
def gateway(self):
    """Return the detail of the gateway from the Honeywell TCC EMEA API.

    :raises requests.HTTPError: on a non-2xx response.
    """
    url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway'
    response = requests.get(url, headers=self._headers())
    response.raise_for_status()
    return response.json()
def validate(self, model, checks=None):
    """Use the defined schema to validate the given table.

    :param model: unused here; kept for interface compatibility.
    :param checks: optional list of custom goodtables checks. The old
        ``checks=[]`` mutable default is replaced with a ``None``
        sentinel (same behavior, avoids the shared-default pitfall).
    """
    if checks is None:
        checks = []
    records = self.data.to_dict("records")
    self.evaluate_report(
        validate(records, headers=list(records[0]), preset='table',
                 schema=self.schema, order_fields=True,
                 custom_checks=checks))
def cached_query(qs, timeout=None):
    """Cache a queryset's evaluated results and return them.

    :param qs: queryset to evaluate (via ``list``) and cache.
    :param timeout: cache timeout; previously this argument was silently
        ignored because the call hard-coded ``timeout=None``.
    """
    cache_key = generate_cache_key(qs)
    return get_cached(cache_key, list, args=(qs,), timeout=timeout)
async def send_script(self, conn_id, data):
    """Send a script to this IOTile device.

    Args:
        conn_id (int): A unique identifier that will refer to this
            connection.
        data (bytes): the script to send to the device.
    """
    self._ensure_connection(conn_id, True)
    connection_string = self._get_property(conn_id, "connection_string")
    # The whole script is sent as a single base64-encoded fragment.
    msg = dict(connection_string=connection_string, fragment_count=1,
               fragment_index=0, script=base64.b64encode(data))
    await self._send_command(OPERATIONS.SEND_SCRIPT, msg,
                             COMMANDS.SendScriptResponse)
def _parse_date_columns(data_frame, parse_dates):
    """Coerce the requested (and tz-aware) columns of ``data_frame`` to
    datetime.

    Supports both string formatted and integer timestamp columns.

    :param data_frame: DataFrame whose columns may need conversion.
    :param parse_dates: column names, optionally mapped to format
        strings, to parse as dates.
    :return: the DataFrame with converted columns.
    """
    parse_dates = _process_parse_dates_argument(parse_dates)
    # we want to coerce datetime64_tz dtypes for now to UTC
    # we could in theory do a 'nice' conversion from a FixedOffset tz
    # GH11216
    for col_name, df_col in data_frame.iteritems():
        if is_datetime64tz_dtype(df_col) or col_name in parse_dates:
            try:
                fmt = parse_dates[col_name]
            except TypeError:
                # parse_dates is a list, not a dict: no explicit format.
                fmt = None
            data_frame[col_name] = _handle_date_column(df_col, format=fmt)
    return data_frame
def define_lattice_from_file(self, filename, cell_lengths):
    """Set up the simulation lattice from a file containing site data.

    Uses `init_lattice.lattice_from_sites_file`, which defines the site
    file spec.

    Args:
        filename (Str): sites file filename.
        cell_lengths (List(x,y,z)): cell lengths for the simulation cell.

    Returns:
        None
    """
    self.lattice = init_lattice.lattice_from_sites_file(
        filename, cell_lengths=cell_lengths)
def get_meta_value_for(snapshot, key, default=None):
    """Return the metadata value stored under *key* for *snapshot*,
    falling back to *default* when absent."""
    return get_snapshot_metadata(snapshot).get(key, default)
def get_attributes(file, *, attributes=None, mime_type=None,
                   force_document=False, voice_note=False,
                   video_note=False, supports_streaming=False):
    """Get a list of attributes for the given file and
    the mime type as a tuple ([attribute], mime_type).
    """
    # Note: ``file.name`` works for :tl:`InputFile` and some `IOBase` streams
    name = file if isinstance(file, str) else getattr(file, 'name', 'unnamed')
    if mime_type is None:
        mime_type = mimetypes.guess_type(name)[0]
    attr_dict = {types.DocumentAttributeFilename:
                 types.DocumentAttributeFilename(os.path.basename(name))}
    if is_audio(file):
        m = _get_metadata(file)
        if m:
            attr_dict[types.DocumentAttributeAudio] = \
                types.DocumentAttributeAudio(
                    voice=voice_note,
                    title=m.get('title') if m.has('title') else None,
                    performer=m.get('author') if m.has('author') else None,
                    duration=int(m.get('duration').seconds
                                 if m.has('duration') else 0))
    if not force_document and is_video(file):
        m = _get_metadata(file)
        if m:
            doc = types.DocumentAttributeVideo(
                round_message=video_note,
                w=m.get('width') if m.has('width') else 0,
                h=m.get('height') if m.has('height') else 0,
                duration=int(m.get('duration').seconds
                             if m.has('duration') else 0),
                supports_streaming=supports_streaming)
        else:
            # No metadata available: fall back to minimal 1x1 dimensions.
            doc = types.DocumentAttributeVideo(
                0, 1, 1, round_message=video_note,
                supports_streaming=supports_streaming)
        attr_dict[types.DocumentAttributeVideo] = doc
    if voice_note:
        if types.DocumentAttributeAudio in attr_dict:
            attr_dict[types.DocumentAttributeAudio].voice = True
        else:
            attr_dict[types.DocumentAttributeAudio] = \
                types.DocumentAttributeAudio(0, voice=True)
    # Now override the attributes if any. As we have a dict of
    # {cls: instance}, we can override any class with the list
    # of attributes provided by the user easily.
    if attributes:
        for a in attributes:
            attr_dict[type(a)] = a
    # Ensure we have a mime type, any; but it cannot be None
    # 'The "octet-stream" subtype is used to indicate that a body
    # contains arbitrary binary data.'
    if not mime_type:
        mime_type = 'application/octet-stream'
    return list(attr_dict.values()), mime_type
def purge(name, delete_key=True):
    '''Destroy the named VM.

    :param name: name of the virtual machine to purge.
    :param delete_key: also delete the minion's salt key when True.
    :returns: 'good' on success, 'fail' when the VM cannot be found, or
        an error message string when the salt client call fails.
    '''
    ret = {}
    client = salt.client.get_local_client(__opts__['conf_file'])
    data = vm_info(name, quiet=True)
    if not data:
        __jid_event__.fire_event(
            {'error': 'Failed to find VM {0} to purge'.format(name)},
            'progress')
        return 'fail'
    host = next(six.iterkeys(data))
    try:
        cmd_ret = client.cmd_iter(host, 'virt.purge', [name, True],
                                  timeout=600)
    except SaltClientError as client_error:
        return 'Virtual machine {0} could not be purged: {1}'.format(
            name, client_error)
    for comp in cmd_ret:
        ret.update(comp)
    if delete_key:
        log.debug('Deleting key %s', name)
        skey = salt.key.Key(__opts__)
        skey.delete_key(name)
    __jid_event__.fire_event(
        {'message': 'Purged VM {0}'.format(name)}, 'progress')
    return 'good'
def attr(self, key):
    """Returns the attribute string for corresponding input key from the
    symbol.

    This function only works for non-grouped symbols.

    Example
    -------
    >>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
    >>> data.attr('mood')
    'angry'

    Parameters
    ----------
    key : str
        The key corresponding to the desired attribute.

    Returns
    -------
    value : str
        The desired attribute value, returns ``None`` if the attribute
        does not exist.
    """
    ret = ctypes.c_char_p()
    success = ctypes.c_int()
    check_call(_LIB.MXSymbolGetAttr(
        self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))
    # The C API sets `success` non-zero when the attribute was found.
    if success.value != 0:
        return py_str(ret.value)
    else:
        return None
def sanitize(self):
    '''Check if the current settings conform to the LISP specifications
    and fix them where possible.

    :raises ValueError: on an invalid nonce or record.
    '''
    super(MapReferralMessage, self).sanitize()
    # WARNING: http://tools.ietf.org/html/draft-ietf-lisp-ddt-00
    # does not define this field so the description is taken from
    # http://tools.ietf.org/html/draft-ietf-lisp-24
    #
    # Nonce: An 8-octet random value created by the sender of the Map-
    # Request. This nonce will be returned in the Map-Reply. The
    # security of the LISP mapping protocol depends critically on the
    # strength of the nonce in the Map-Request message. The nonce
    # SHOULD be generated by a properly seeded pseudo-random (or strong
    # random) source. See [RFC4086] for advice on generating security-
    # sensitive random data.
    if len(bytes(self.nonce)) != 8:
        raise ValueError('Invalid nonce')
    # Map-Referral Records: When the M bit is set, this field is the size
    # of a single "Record" in the Map-Reply format. This Map-Reply record
    # contains the EID-to-RLOC mapping entry associated with the Source
    # EID. This allows the ETR which will receive this Map-Request to
    # cache the data if it chooses to do so.
    for record in self.records:
        if not isinstance(record, MapReferralRecord):
            raise ValueError('Invalid record')
        record.sanitize()
def newick(self):
    '''Newick string conversion starting at this ``Node`` object.

    Returns:
        ``str``: Newick string conversion starting at this ``Node``
        object.
    '''
    node_to_str = dict()
    # Build strings bottom-up so each parent can join its children.
    for node in self.traverse_postorder():
        if node.is_leaf():
            if node.label is None:
                node_to_str[node] = ''
            else:
                node_to_str[node] = str(node.label)
        else:
            out = ['(']
            for c in node.children:
                out.append(node_to_str[c])
                if c.edge_length is not None:
                    # Print integral edge lengths without a ".0" suffix.
                    if isinstance(c.edge_length, int):
                        l_str = str(c.edge_length)
                    elif isinstance(c.edge_length, float) \
                            and c.edge_length.is_integer():
                        l_str = str(int(c.edge_length))
                    else:
                        l_str = str(c.edge_length)
                    out.append(':%s' % l_str)
                out.append(',')
                del node_to_str[c]  # free child entries as we go
            out.pop()  # trailing comma
            out.append(')')
            if node.label is not None:
                out.append(str(node.label))
            node_to_str[node] = ''.join(out)
    return node_to_str[self]
def lookup_token(self):
    """Convenience method: look up the vault token.

    :returns: the token lookup payload as a dict.
    :raises VaultException: when Vault reports errors in the payload.
    :raises requests.HTTPError: on a non-2xx response.
    """
    url = _url_joiner(self._vault_url, 'v1/auth/token/lookup-self')
    resp = requests.get(url, headers=self._headers)
    resp.raise_for_status()
    data = resp.json()
    if data.get('errors'):
        raise VaultException(
            u'Error looking up Vault token: {}'.format(data['errors']))
    return data
def preprocess_x_y(x, y):
    """Preprocess x, y input data into list-of-lists style.

    A flat pair of sequences is wrapped into one-element tuples of
    sequences; already-nested input is returned unchanged.

    :raises ValueError: when ``x`` is not an iterable, indexable object.
    """
    def _sequence_like(obj):
        # "iterable and slicable": has both __iter__ and __getitem__.
        return hasattr(obj, "__iter__") and hasattr(obj, "__getitem__")

    if not _sequence_like(x):
        raise ValueError("invalid input!")
    if _sequence_like(x[0]):
        return x, y
    return (x,), (y,)
def get_default_location(self):
    """Return the default location, expanding every entry in
    ``self.distdefault`` via ``get_location``."""
    locations = []
    for entry in self.distdefault:
        locations += self.get_location(entry)
    return locations
def check_has(priority=BaseCheck.HIGH, gname=None):
    """Decorator to wrap a function to check if a dataset has given
    attributes.

    :param priority: check priority attached to each attribute result.
    :param gname: optional group name passed through to attr_check.
    """
    def _inner(func):
        def _dec(s, ds):
            attr_process = kvp_convert(func(s, ds))
            ret_val = []
            # could potentially run tests in parallel if we eliminated
            # side effects on `ret_val`
            for kvp in attr_process.items():
                # function mutates ret_val
                attr_check(kvp, ds, priority, ret_val, gname)
            return ret_val
        return wraps(func)(_dec)
    return _inner
def get_events(self, **kwargs):
    """Retrieve events from server.

    :param force: when True, ask the server to bypass its cache.
    :returns: the list under the response's 'event' key, or False when
        the response is malformed.
    """
    force = kwargs.pop('force', False)
    response = api.request_sync_events(self.blink, self.network_id,
                                       force=force)
    try:
        return response['event']
    except (TypeError, KeyError):
        _LOGGER.error("Could not extract events: %s", response,
                      exc_info=True)
        return False
def auto_scroll(self, thumbkey):
    """Scroll the window to the thumb identified by ``thumbkey``.

    No-op unless the GUI is up and the auto-scroll checkbox is set.
    """
    if not self.gui_up:
        return
    # force scroll to bottom of thumbs, if checkbox is set
    scrollp = self.w.auto_scroll.get_state()
    if not scrollp:
        return
    bnch = self.thumb_dict[thumbkey]
    # override X parameter because we only want to scroll vertically
    pan_x, pan_y = self.c_view.get_pan()
    self.c_view.panset_xy(pan_x, bnch.image.y)
def polygon(self, vertexes, attr=0, row=None):
    """Add lines for the (x, y) vertexes of a closed polygon."""
    closed = vertexes + [vertexes[0]]  # close the ring
    self.polylines.append((closed, attr, row))
def pop(self, n=1, raw=False, delete=True):
    """Pop up to *n* messages from the sharded status queue.

    :param int n: number of messages to return as part of peek.
    :param bool raw: should message content be returned as is (no parsing).
    :param bool delete: should message be deleted after pop. default is True
        as this is expected of a q.
    """
    def _pop_specific_q(_q, _n):
        # Drain up to _n messages from a single backing queue into the
        # enclosing `result` list (closure). Returns True if this queue
        # yielded at least one message.
        has_messages = False
        for m in _q.service.get_messages(_q.name, num_messages=_n):
            if m is not None:
                has_messages = True
                result.append(m if raw else self._deserialize_message(m))
                if delete:
                    _q.service.delete_message(_q.name, m.id, m.pop_receipt)
                # short circuit to prevent unneeded work
                if len(result) == n:
                    return True
        return has_messages

    q_services = self._get_q_services()
    # Shuffle so the load is spread across the backing queues.
    random.shuffle(q_services)
    # Ask each queue for an equal share (rounded up).
    per_q = int(n / len(q_services)) + 1
    result = []
    non_empty_qs = []
    for q in q_services:
        if _pop_specific_q(q, per_q):
            non_empty_qs.append(q)
        if len(result) == n:
            return result
    # in-case queues aren't balanced, and we didn't get enough messages,
    # iterate again and this time get all that we can
    for q in non_empty_qs:
        _pop_specific_q(q, n)
        if len(result) == n:
            return result
    # because we ask for n / len(qs) + 1, we might get more messages
    # than requested
    return result
def get_anchor_labels(anchors, gt_boxes, crowd_boxes):
    """Label each anchor as fg/bg/ignore.

    Args:
        anchors: Ax4 float
        gt_boxes: Bx4 float, non-crowd
        crowd_boxes: Cx4 float

    Returns:
        anchor_labels: (A,) int. Each element is {-1, 0, 1}
        anchor_boxes: Ax4. Contains the target gt_box for each anchor
            when the anchor is fg.
    """
    # This function will modify labels and return the filtered inds
    def filter_box_label(labels, value, max_num):
        # Randomly demote labels == value down to at most max_num entries,
        # marking the demoted ones as ignore (-1).
        curr_inds = np.where(labels == value)[0]
        if len(curr_inds) > max_num:
            disable_inds = np.random.choice(
                curr_inds, size=(len(curr_inds) - max_num), replace=False)
            labels[disable_inds] = -1  # ignore them
            curr_inds = np.where(labels == value)[0]
        return curr_inds

    NA, NB = len(anchors), len(gt_boxes)
    assert NB > 0  # empty images should have been filtered already
    box_ious = np_iou(anchors, gt_boxes)  # NA x NB
    ious_argmax_per_anchor = box_ious.argmax(axis=1)  # NA,
    ious_max_per_anchor = box_ious.max(axis=1)
    ious_max_per_gt = np.amax(box_ious, axis=0, keepdims=True)  # 1xNB
    # for each gt, find all those anchors (including ties) that has the
    # max ious with it
    anchors_with_max_iou_per_gt = np.where(box_ious == ious_max_per_gt)[0]
    # Setting NA labels: 1--fg 0--bg -1--ignore
    anchor_labels = -np.ones((NA,), dtype='int32')  # NA,
    # the order of setting neg/pos labels matter
    anchor_labels[anchors_with_max_iou_per_gt] = 1
    anchor_labels[ious_max_per_anchor >= cfg.RPN.POSITIVE_ANCHOR_THRESH] = 1
    anchor_labels[ious_max_per_anchor < cfg.RPN.NEGATIVE_ANCHOR_THRESH] = 0
    # label all non-ignore candidate boxes which overlap crowd as ignore
    if crowd_boxes.size > 0:
        cand_inds = np.where(anchor_labels >= 0)[0]
        cand_anchors = anchors[cand_inds]
        ioas = np_ioa(crowd_boxes, cand_anchors)
        overlap_with_crowd = cand_inds[
            ioas.max(axis=0) > cfg.RPN.CROWD_OVERLAP_THRESH]
        anchor_labels[overlap_with_crowd] = -1
    # Subsample fg labels: ignore some fg if fg is too many
    target_num_fg = int(cfg.RPN.BATCH_PER_IM * cfg.RPN.FG_RATIO)
    fg_inds = filter_box_label(anchor_labels, 1, target_num_fg)
    # Keep an image even if there is no foreground anchors
    # if len(fg_inds) == 0:
    #     raise MalformedData("No valid foreground for RPN!")
    # Subsample bg labels. num_bg is not allowed to be too many
    old_num_bg = np.sum(anchor_labels == 0)
    if old_num_bg == 0:
        # No valid bg in this image, skip.
        raise MalformedData("No valid background for RPN!")
    target_num_bg = cfg.RPN.BATCH_PER_IM - len(fg_inds)
    filter_box_label(anchor_labels, 0, target_num_bg)  # ignore return values
    # Set anchor boxes: the best gt_box for each fg anchor
    anchor_boxes = np.zeros((NA, 4), dtype='float32')
    fg_boxes = gt_boxes[ious_argmax_per_anchor[fg_inds], :]
    anchor_boxes[fg_inds, :] = fg_boxes
    # assert len(fg_inds) + np.sum(anchor_labels == 0) == cfg.RPN.BATCH_PER_IM
    return anchor_labels, anchor_boxes
def circ_rayleigh(alpha, w=None, d=None):
    """Rayleigh test for non-uniformity of circular data.

    Parameters
    ----------
    alpha : np.array
        Sample of angles in radians.
    w : np.array
        Number of incidences in case of binned angle data.
    d : float
        Spacing (in radians) of bin centers for binned data. If supplied,
        a correction factor is used to correct for bias in the estimation
        of r.

    Returns
    -------
    z : float
        Z-statistic
    pval : float
        P-value

    Notes
    -----
    The Rayleigh test asks how large the resultant vector length R must be
    to indicate a non-uniform distribution (Fisher 1995).

    H0: the population is uniformly distributed around the circle.
    HA: the population is not distributed uniformly around the circle.

    Assumptions: (1) the distribution has only one mode and (2) the data is
    sampled from a von Mises distribution.

    Examples
    --------
    >>> from pingouin import circ_rayleigh
    >>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
    >>> z, pval = circ_rayleigh(x)
    >>> print(z, pval)
    1.236 0.3048435876500138
    >>> circ_rayleigh(x, w=[.1, .2, .3, .4, .5], d=0.2)
    (0.278, 0.8069972000769801)
    """
    alpha = np.array(alpha)
    if w is None:
        r = circ_r(alpha)
        n = len(alpha)
    else:
        # BUG FIX: the original compared lengths with `is not`, which tests
        # object identity, not equality -- it only worked by accident via
        # CPython's small-int caching. Use `!=` for value comparison.
        if len(alpha) != len(w):
            raise ValueError("Input dimensions do not match")
        r = circ_r(alpha, w, d)
        n = np.sum(w)
    # Compute Rayleigh's statistic
    R = n * r
    z = (R ** 2) / n
    # Compute p value using approximation in Zar (1999), p. 617
    pval = np.exp(np.sqrt(1 + 4 * n + 4 * (n ** 2 - R ** 2)) - (1 + 2 * n))
    return np.round(z, 3), pval
def increment_all_elements(input_list: list):
    """Return a new list where every element is incremented by 1.

    Example usage:
    >>> increment_all_elements([1, 2, 3])
    [2, 3, 4]
    >>> increment_all_elements([5, 3, 5, 2, 3, 3, 9, 0, 123])
    [6, 4, 6, 3, 4, 4, 10, 1, 124]

    :param input_list: Original list whose elements are to be incremented
    :return: A new list with all elements incremented by 1
    """
    incremented = []
    for value in input_list:
        incremented.append(value + 1)
    return incremented
def get_video_info_for_course_and_profiles(course_id, profiles):
    """Return a dict of edx_video_ids with a dict of requested profiles.

    Args:
        course_id (str): id of the course
        profiles (list): list of profile_names

    Returns:
        (dict): all the profiles attached to a specific edx_video_id::

            edx_video_id: {
                'duration': length of the video in seconds,
                'profiles': {
                    profile_name: {
                        'url': url of the encoding,
                        'file_size': size of the file in bytes,
                    },
                },
            }

    Raises:
        ValInternalError: if the database query fails for any reason.
    """
    # In case someone passes in a key (VAL doesn't really understand
    # opaque keys)
    course_id = six.text_type(course_id)
    try:
        encoded_videos = EncodedVideo.objects.filter(
            profile__profile_name__in=profiles,
            video__courses__course_id=course_id).select_related()
    except Exception:
        error_message = u"Could not get encoded videos for course: {0}".format(course_id)
        logger.exception(error_message)
        raise ValInternalError(error_message)
    # DRF serializers were causing extra queries for some reason...
    return_dict = {}
    for enc_vid in encoded_videos:
        # Add duration to edx_video_id
        return_dict.setdefault(enc_vid.video.edx_video_id, {}).update(
            {"duration": enc_vid.video.duration, })
        # Add profile information to edx_video_id's profiles
        return_dict[enc_vid.video.edx_video_id].setdefault("profiles", {}).update(
            {enc_vid.profile.profile_name: {
                "url": enc_vid.url,
                "file_size": enc_vid.file_size, }})
    return return_dict
def print_intervals(intervals):
    """Print the repr of every interval, concatenated on a single line."""
    print("".join(repr(interval) for interval in intervals))
def filter_queryset(self, request, queryset, view):
    """Filter the queryset down to objects the requesting user may view."""
    user = request.user
    # pylint: disable=protected-access
    meta = queryset.model._meta
    app_label = meta.app_label
    model_name = meta.model_name
    kwargs = {}
    # Some models delegate their permissions to a related model; remap
    # both the permission name and the lookup filter accordingly.
    if model_name == 'storage':
        model_name = 'data'
        kwargs['perms_filter'] = 'data__pk__in'
    elif model_name == 'relation':
        model_name = 'collection'
        kwargs['perms_filter'] = 'collection__pk__in'
    permission = '{}.view_{}'.format(app_label, model_name)
    return get_objects_for_user(user, permission, queryset, **kwargs)
def _check_value_mapping(layer, exposure_key=None):
    """Loop over the exposure type field and check if the value map is correct.

    Unmapped unique values found in the layer are appended to the "other"
    class of the classification (when the classification defines one), and
    the resulting value map / classification are written back to the
    layer's keywords.

    :param layer: The layer
    :type layer: QgsVectorLayer

    :param exposure_key: The exposure key.
    :type exposure_key: str
    """
    index = layer.fields().lookupField(exposure_type_field['field_name'])
    unique_exposure = layer.uniqueValues(index)
    if layer.keywords['layer_purpose'] == layer_purpose_hazard['key']:
        # Hazard layers keep per-exposure value maps, so the key is required.
        if not exposure_key:
            message = tr('Hazard value mapping missing exposure key.')
            raise InvalidKeywordsForProcessingAlgorithm(message)
        value_map = active_thresholds_value_maps(layer.keywords, exposure_key)
    else:
        value_map = layer.keywords.get('value_map')
    if not value_map:
        # The exposure do not have a value_map, we can skip the layer.
        return layer
    if layer.keywords['layer_purpose'] == layer_purpose_hazard['key']:
        if not exposure_key:
            message = tr('Hazard classification is missing exposure key.')
            raise InvalidKeywordsForProcessingAlgorithm(message)
        classification = active_classification(layer.keywords, exposure_key)
    else:
        classification = layer.keywords['classification']
    exposure_classification = definition(classification)
    other = None
    if exposure_classification['key'] != data_driven_classes['key']:
        # Convention: the last class of a static classification is "other".
        other = exposure_classification['classes'][-1]['key']
    exposure_mapped = []
    for group in list(value_map.values()):
        exposure_mapped.extend(group)
    # Values present in the layer but absent from the mapping fall through
    # to the "other" class.
    diff = list(unique_exposure - set(exposure_mapped))
    if other in list(value_map.keys()):
        value_map[other].extend(diff)
    else:
        value_map[other] = diff
    layer.keywords['value_map'] = value_map
    layer.keywords['classification'] = classification
    return layer
def create_config_file(filename):
    """Create main configuration file if it doesn't exist.

    Three paths are possible:
      * an old-style ``.tksrc`` file exists next to *filename*: offer to
        convert it in place;
      * no configuration exists: interactively prompt for the values and
        render the sample config template;
      * *filename* already exists: apply any pending config conversions.
    """
    import textwrap
    from six.moves.urllib import parse
    if not os.path.exists(filename):
        old_default_config_file = os.path.join(
            os.path.dirname(filename), '.tksrc')
        if os.path.exists(old_default_config_file):
            upgrade = click.confirm(
                "\n".join(textwrap.wrap(
                    "It looks like you recently updated Taxi. Some "
                    "configuration changes are required. You can either let "
                    "me upgrade your configuration file or do it "
                    "manually.")) +
                "\n\nProceed with automatic configuration "
                "file upgrade?", default=True)
            if upgrade:
                # Convert the legacy config and remove the old file.
                settings = Settings(old_default_config_file)
                settings.convert_to_4()
                with open(filename, 'w') as config_file:
                    settings.config.write(config_file)
                os.remove(old_default_config_file)
                return
            else:
                print("Ok then.")
                sys.exit(0)
        welcome_msg = "Welcome to Taxi!"
        click.secho(welcome_msg, fg='green', bold=True)
        click.secho('=' * len(welcome_msg) + '\n', fg='green', bold=True)
        click.echo(click.wrap_text(
            "It looks like this is the first time you run Taxi. You will need "
            "a configuration file ({}) in order to proceed. Please answer a "
            "few questions to create your configuration file.".format(
                filename)) + '\n')
        config = pkg_resources.resource_string(
            'taxi', 'etc/taxirc.sample').decode('utf-8')
        context = {}
        available_backends = plugins_registry.get_available_backends()
        context['backend'] = click.prompt(
            "Backend you want to use (choices are %s)" % ', '.join(
                available_backends),
            type=click.Choice(available_backends))
        context['username'] = click.prompt("Username or token")
        # Password is percent-encoded since it ends up in a backend URI.
        context['password'] = parse.quote(click.prompt(
            "Password (leave empty if you're using"
            " a token)", hide_input=True, default=''), safe='')
        # Password can be empty in case of token auth so the ':' separator
        # is not included in the template config, so we add it if the user
        # has set a password
        if context['password']:
            context['password'] = ':' + context['password']
        context['hostname'] = click.prompt(
            "Hostname of the backend (eg. timesheets.example.com)",
            type=Hostname())
        editor = Editor().get_editor()
        context['editor'] = click.prompt(
            "Editor command to edit your timesheets", default=editor)
        templated_config = config.format(**context)
        directory = os.path.dirname(filename)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(filename, 'w') as f:
            f.write(templated_config)
    else:
        # Config exists: run any pending format conversions.
        settings = Settings(filename)
        conversions = settings.needed_conversions
        if conversions:
            for conversion in conversions:
                conversion()
            settings.write_config()
def _get_numbers_from_tokens(tokens: List[Token]) -> List[Tuple[str, str]]:
    """Finds numbers in the input tokens and returns them as strings.

    We do some simple heuristic number recognition, finding ordinals and
    cardinals expressed as text ("one", "first", etc.), as well as numerals
    ("7th", "3rd"), months (mapping "july" to 7), and units ("1ghz").

    We also handle year ranges expressed as decade or centuries ("1800s" or
    "1950s"), adding the endpoints of the range as possible numbers to
    generate.

    We return a list of tuples, where each tuple is the
    (number_string, token_text) for a number found in the input tokens.
    """
    numbers = []
    for i, token in enumerate(tokens):
        # `number` stays None unless this token is recognized as numeric.
        number: Union[int, float, None] = None
        token_text = token.text
        text = token.text.replace(',', '').lower()
        if text in NUMBER_WORDS:
            number = NUMBER_WORDS[text]
        magnitude = 1
        # A following magnitude word ("million", ...) scales the number.
        if i < len(tokens) - 1:
            next_token = tokens[i + 1].text.lower()
            if next_token in ORDER_OF_MAGNITUDE_WORDS:
                magnitude = ORDER_OF_MAGNITUDE_WORDS[next_token]
                token_text += ' ' + tokens[i + 1].text
        is_range = False
        # "1800s"/"1950s": trailing 's' after a '0' marks a decade/century.
        if len(text) > 1 and text[-1] == 's' and text[-2] == '0':
            is_range = True
            text = text[:-1]
        # We strip out any non-digit characters, to capture things like
        # '7th', or '1ghz'. The way we're doing this could lead to false
        # positives for something like '1e2', but we'll take that risk.
        # It shouldn't be a big deal.
        text = ''.join(text[i] for i, char in enumerate(text)
                       if char in NUMBER_CHARACTERS)
        try:
            # We'll use a check for float(text) to find numbers, because
            # text.isdigit() doesn't catch things like "-3" or "0.07".
            number = float(text)
        except ValueError:
            pass
        if number is not None:
            number = number * magnitude
            if '.' in text:
                number_string = '%.3f' % number
            else:
                number_string = '%d' % number
            numbers.append((number_string, token_text))
            if is_range:
                # TODO(mattg): both numbers in the range will have the same
                # text, and so the linking score won't have any way to
                # differentiate them... We should figure out a better way to
                # handle this.
                num_zeros = 1
                while text[-(num_zeros + 1)] == '0':
                    num_zeros += 1
                numbers.append((str(int(number + 10 ** num_zeros)), token_text))
    return numbers
def measure_float_put(self, measure, value):
    """Associate the Float *measure* with *value* in the measurement map.

    Negative values are still recorded, but a warning is logged
    (recording them should become an error in a later release).
    """
    if value < 0:
        logger.warning("Cannot record negative values")
    self._measurement_map[measure] = value
def uncomment_line(line, prefix):
    """Remove *prefix* (and one following space, if any) from *line*.

    The line is returned unchanged when the prefix is empty or absent.
    """
    if not prefix:
        return line
    # Prefer stripping "prefix + space" before the bare prefix.
    for candidate in (prefix + ' ', prefix):
        if line.startswith(candidate):
            return line[len(candidate):]
    return line
def _format_api_docs_link_message(self, task_class):
    """Format a message referring the reader to the full API docs.

    Parameters
    ----------
    task_class : ``lsst.pipe.base.Task``-type
        The Task class.

    Returns
    -------
    nodes : `list` of docutils nodes
        Docutils nodes showing a link to the full API docs.
    """
    fullname = '{0.__module__}.{0.__name__}'.format(task_class)
    p_node = nodes.paragraph()
    _ = 'See the '
    p_node += nodes.Text(_, _)
    # Build a :py:class: cross-reference to the fully qualified class name;
    # the '~' prefix makes Sphinx display only the last component.
    xref = PyXRefRole()
    xref_nodes, _ = xref(
        'py:class', '~' + fullname, '~' + fullname,
        self.lineno, self.state.inliner)
    p_node += xref_nodes
    _ = ' API reference for complete details.'
    p_node += nodes.Text(_, _)
    # Wrap the paragraph in a "seealso" admonition.
    seealso_node = seealso()
    seealso_node += p_node
    return [seealso_node]
def from_lt(rsize, ltm, ltv):
    """Compute the corner location and pixel size in units of unbinned pixels.

    .. note:: Translated from ``calacs/lib/fromlt.c``.

    Parameters
    ----------
    rsize : int
        Reference pixel size. Usually 1.
    ltm, ltv : tuple of float
        See :func:`get_lt`.

    Returns
    -------
    bin : tuple of int
        Pixel size in X and Y.
    corner : tuple of int
        Corner of subarray in X and Y.
    """
    xsize = rsize / ltm[0]
    ysize = rsize / ltm[1]
    xcorner = (xsize - rsize) - xsize * ltv[0]
    ycorner = (ysize - rsize) - ysize * ltv[1]
    # Round off to the nearest integer.
    return (_nint(xsize), _nint(ysize)), (_nint(xcorner), _nint(ycorner))
def init_cursor(self):
    """Position the cursor appropriately.

    The cursor is set to either the beginning of the oplog, or
    wherever it was last left off.

    Returns the cursor and True if the cursor is empty.
    """
    timestamp = self.read_last_checkpoint()
    if timestamp is None or self.only_dump:
        if self.collection_dump:
            # dump collection and update checkpoint
            timestamp = self.dump_collection()
            if self.only_dump:
                LOG.info("Finished dump. Exiting.")
                # Clear the timestamp so we return an empty cursor below
                # and stop the thread.
                timestamp = None
                self.running = False
            self.update_checkpoint(timestamp)
            if timestamp is None:
                return None, True
        else:
            # Collection dump disabled:
            # Return cursor to beginning of oplog but do not set the
            # checkpoint. The checkpoint will be set after an operation
            # has been applied.
            cursor = self.get_oplog_cursor()
            return cursor, self._cursor_empty(cursor)
    cursor = self.get_oplog_cursor(timestamp)
    cursor_empty = self._cursor_empty(cursor)
    if cursor_empty:
        # rollback, update checkpoint, and retry
        LOG.debug("OplogThread: Initiating rollback from "
                  "get_oplog_cursor")
        self.update_checkpoint(self.rollback())
        return self.init_cursor()
    first_oplog_entry = next(cursor)
    oldest_ts_long = util.bson_ts_to_long(self.get_oldest_oplog_timestamp())
    checkpoint_ts_long = util.bson_ts_to_long(timestamp)
    if checkpoint_ts_long < oldest_ts_long:
        # We've fallen behind, the checkpoint has fallen off the oplog
        return None, True
    cursor_ts_long = util.bson_ts_to_long(first_oplog_entry["ts"])
    if cursor_ts_long > checkpoint_ts_long:
        # The checkpoint is not present in this oplog and the oplog
        # did not rollover. This means that we connected to a new
        # primary which did not replicate the checkpoint and which has
        # new changes in its oplog for us to process.
        # rollback, update checkpoint, and retry
        LOG.debug("OplogThread: Initiating rollback from "
                  "get_oplog_cursor: new oplog entries found but "
                  "checkpoint is not present")
        self.update_checkpoint(self.rollback())
        return self.init_cursor()
    # first entry has been consumed
    return cursor, cursor_empty
def path_from_structure(cls, ndivsm, structure):
    """Build a k-path input from a structure.

    See ``_path`` for the meaning of the variables.
    """
    comment = "K-path generated automatically from structure"
    return cls._path(ndivsm, structure=structure, comment=comment)
def formatted_str_to_val(data, format, enum_set=None):
    """Return an unsigned integer representation of *data* per *format*.

    :param data: a string holding the value to convert
    :param format: a string holding a format which will be used to convert
        the data string
    :param enum_set: an iterable of enums which are used as part of the
        conversion process

    Given a string (not a wirevector!) convert it to an unsigned integer
    ready for input to the simulation environment. Handles signed values
    (two's complement), hex, binary, unsigned, and enum member names::

        formatted_str_to_val('2', 's3') == 2    # 0b010
        formatted_str_to_val('-1', 's3') == 7   # 0b111
        formatted_str_to_val('101', 'b3') == 5
        formatted_str_to_val('5', 'u3') == 5
        formatted_str_to_val('a', 'x3') == 10
        formatted_str_to_val('ADD', 'e3/Ctl', [Ctl]) == Ctl.ADD.value
    """
    fmt_type = format[0]
    bitwidth = int(format[1:].split('/')[0])
    bitmask = (1 << bitwidth) - 1
    if fmt_type == 's':
        # Signed: wrap negatives into the two's-complement range.
        return int(data) & bitmask
    if fmt_type == 'x':
        return int(data, 16)
    if fmt_type == 'b':
        return int(data, 2)
    if fmt_type == 'u':
        rval = int(data)
        if rval < 0:
            raise PyrtlError('unsigned format requested, but negative value provided')
        return rval
    if fmt_type == 'e':
        # Enum: look up the member named *data* on the matching enum class.
        enumname = format.split('/')[1]
        matching_enums = [e for e in enum_set if e.__name__ == enumname]
        if not matching_enums:
            raise PyrtlError('enum "{}" not found in passed enum_set "{}"'.format(enumname, enum_set))
        return getattr(matching_enums[0], data).value
    raise PyrtlError('unknown format type {}'.format(format))
def put_annotation(self, key, value):
    """Annotate segment or subsegment with a key-value pair.

    Annotations will be indexed for later search query.

    :param str key: annotation key
    :param object value: annotation value. Any type other than
        string/number/bool will be dropped
    """
    self._check_ended()
    if not isinstance(key, string_types):
        log.warning("ignoring non string type annotation key with type %s.",
                    type(key))
        return
    if not isinstance(value, annotation_value_types):
        log.warning("ignoring unsupported annotation value type %s.",
                    type(value))
        return
    # Keys are restricted to a whitelist of characters.
    if not all(character in _valid_annotation_key_characters
               for character in key):
        log.warning("ignoring annnotation with unsupported characters in key: '%s'.",
                    key)
        return
    self.annotations[key] = value
def p_default_clause(self, p):
    """default_clause : DEFAULT COLON source_elements"""
    # NOTE: the docstring above is the grammar production consumed by the
    # PLY parser generator -- it is behavior, not documentation.
    p[0] = self.asttypes.Default(elements=p[3])
    p[0].setpos(p)
def initialize_snapshot(self):
    """Copy the DAG into ``self.snapshot`` after validating the copy.

    Raises:
        DagobahError: if the copied graph fails validation.
    """
    logger.debug('Initializing DAG snapshot for job {0}'.format(self.name))
    if self.snapshot is not None:
        # FIX: was `logging.warn(...)` -- a deprecated alias that also logs
        # to the root logger, inconsistent with the module `logger` used above.
        logger.warning("Attempting to initialize DAG snapshot without " +
                       "first destroying old snapshot.")
    # Deep-copy so later mutations of self.graph don't affect the snapshot.
    snapshot_to_validate = deepcopy(self.graph)
    is_valid, reason = self.validate(snapshot_to_validate)
    if not is_valid:
        raise DagobahError(reason)
    self.snapshot = snapshot_to_validate
def set_topic_attributes(self, topic, attr_name, attr_value):
    """Set a mutable attribute of a Topic.

    :type topic: string
    :param topic: The ARN of the topic.

    :type attr_name: string
    :param attr_name: The name of the attribute you want to set.
        Only a subset of the topic's attributes are mutable.
        Valid values: Policy | DisplayName

    :type attr_value: string
    :param attr_value: The new value for the attribute.
    """
    params = {'ContentType': 'JSON',
              'TopicArn': topic,
              'AttributeName': attr_name,
              'AttributeValue': attr_value}
    response = self.make_request('SetTopicAttributes', params, '/', 'GET')
    body = response.read()
    if response.status != 200:
        # Log the failure and surface it as a ResponseError.
        boto.log.error('%s %s' % (response.status, response.reason))
        boto.log.error('%s' % body)
        raise self.ResponseError(response.status, response.reason, body)
    return json.loads(body)
def inSignJoy(self):
    """Return whether the object is placed in its sign of joy."""
    joy_sign = props.object.signJoy[self.obj.id]
    return joy_sign == self.obj.sign
def get_original(mod_name, item_name):
    """Retrieve the original object from a module.

    If the object has not been patched, then that object will still be
    retrieved.

    :param item_name: A string or sequence of strings naming the
        attribute(s) on the module ``mod_name`` to return.
    :return: The original value if a string was given for ``item_name`` or
        a sequence of original values if a sequence was passed.
    """
    if isinstance(item_name, string_types):
        # Single name: unwrap the one-element result.
        return _get_original(mod_name, [item_name])[0]
    return _get_original(mod_name, item_name)
def async_aldb_loaded_callback(self):
    """Release the ALDB load lock once loading has completed."""
    lock = self.aldb_load_lock
    if lock.locked():
        lock.release()
    _LOGGING.info('ALDB Loaded')
def biopax_process_pc_pathsbetween():
    """Process PathwayCommons paths between genes, return INDRA Statements."""
    if request.method == 'OPTIONS':
        # CORS preflight: reply with an empty body.
        return {}
    payload = json.loads(request.body.read().decode('utf-8'))
    genes = payload.get('genes')
    bp = biopax.process_pc_pathsbetween(genes)
    return _stmts_from_proc(bp)
def update_image_member(self, img_id, status):
    """Update the membership status of this user's project on an image.

    Must be called by the user whose project_id is in the members for the
    image. If called by the owner of the image, an InvalidImageMember
    exception will be raised.

    Valid values for 'status': pending, accepted, rejected. Any other
    value raises InvalidImageMemberStatus.
    """
    if status not in ("pending", "accepted", "rejected"):
        raise exc.InvalidImageMemberStatus("The status value must be one "
                "of 'accepted', 'rejected', or 'pending'. Received: '%s'"
                % status)
    project_id = self.api.identity.tenant_id
    uri = "/%s/%s/members/%s" % (self.uri_base, img_id, project_id)
    body = {"status": status}
    try:
        # Response payload is not used; removed the unused local bindings
        # (`resp`, `resp_body`) the original left behind.
        self.api.method_put(uri, body=body)
    except exc.NotFound:
        raise exc.InvalidImageMember("The update member request could not "
                "be completed. No member request for that image was found.")
def seek(self, offset):
    """Shift to a given record (by index) in the original file.

    :param offset: number of record
    :raises IndexError: if *offset* is outside the recorded shift table
    """
    if self._shifts:
        if 0 <= offset < len(self._shifts):
            current_pos = self._file.tell()
            new_pos = self._shifts[offset]
            if current_pos != new_pos:
                if current_pos == self._shifts[-1]:
                    # reached the end of the file: rebuild the reader
                    # generator and the line iterator from scratch.
                    self._data = self.__reader()
                    self.__file = iter(self._file.readline, '')
                    self._file.seek(0)
                    # prime the freshly created generator
                    next(self._data)
                    if offset:
                        # move not to the beginning of the file
                        self._file.seek(new_pos)
                else:
                    if not self.__already_seeked:
                        if self._shifts[0] < current_pos:
                            # in the middle of the file: tell the reader
                            # generator a seek happened (via .send).
                            self._data.send(True)
                            self.__already_seeked = True
                    self._file.seek(new_pos)
        else:
            raise IndexError('invalid offset')
    else:
        # seeking is unsupported without a shift table
        raise self._implement_error
def ipa_substrings(unicode_string, single_char_parsing=False):
    """Return a list of (non-empty) substrings of the given string.

    Each substring is either the longest Unicode string starting at the
    current index representing a (known) valid IPA character, or a single
    Unicode character (which is not IPA valid).

    Return ``None`` if ``unicode_string`` is ``None``.

    :param str unicode_string: the Unicode string to be parsed
    :param bool single_char_parsing: if ``True``, parse one Unicode
        character at a time instead of performing the greedy
        longest-match parsing
    :rtype: list of str
    """
    return split_using_dictionary(
        string=unicode_string,
        dictionary=UNICODE_TO_IPA,
        max_key_length=UNICODE_TO_IPA_MAX_KEY_LENGTH,
        single_char_parsing=single_char_parsing,
    )
def yield_pair_gradients(self, index1, index2):
    """Yield pairs (s'(r_ij), grad_i v(bar{r}_ij)) for the pair (index1, index2).

    Terms yielded (presumably charge-charge, dipole-dipole, and
    charge-dipole interactions of an electrostatic pair potential --
    TODO confirm against the enclosing class):
    """
    # inverse squared distance for this pair
    d_2 = 1 / self.distances[index1, index2] ** 2
    if self.charges is not None:
        c1 = self.charges[index1]
        c2 = self.charges[index2]
        # charge-charge term; gradient part is zero here
        yield - c1 * c2 * d_2, np.zeros(3)
    if self.dipoles is not None:
        d_4 = d_2 ** 2
        d_6 = d_2 ** 3
        delta = self.deltas[index1, index2]
        p1 = self.dipoles[index1]
        p2 = self.dipoles[index2]
        # dipole-dipole terms
        yield - 3 * d_4 * np.dot(p1, p2), np.zeros(3)
        yield 15 * d_6, p1 * np.dot(p2, delta) + p2 * np.dot(p1, delta)
        if self.charges is not None:
            # charge-dipole cross terms (note the sign asymmetry on p1)
            yield - 3 * c1 * d_4, p2
            yield - 3 * c2 * d_4, - p1
def write_row(dictionary, card, log):
    """Render a single 80-column card-image row, newline terminated.

    The row header (log and card identifiers) is placed first, then every
    field defined for *card* is filled from *dictionary*.
    """
    rowhdr = {'card': card, 'log': log}
    # Work on a list of 1-char strings: strings are immutable,
    # so a mutable buffer is needed for in-place slicing.
    row = [' '] * 80
    # Make the row header.
    for field in ('log', 'card'):
        start, stop, item = _put_field(cols(0), field, rowhdr[field])
        if item is not None:
            row[start:stop] = list(item)
    # Now make the rest of the row.
    for field in cols(card):
        start, stop, item = _put_field(cols(card), field, dictionary.get(field))
        if item is not None:
            row[start:stop] = list(item)
    return ''.join(row) + '\n'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.