signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def instruction_DEC_memory(self, opcode, ea, m):
    """Decrement a value read from memory and write it back.

    Delegates the decrement (and CPU flag handling) to ``self.DEC`` and
    returns ``(ea, value)`` where ``value`` is masked to one byte.
    """
    decremented = self.DEC(m)
    return ea, decremented & 0xff
def _f90repr ( self , value ) :
"""Convert primitive Python types to equivalent Fortran strings .""" | if isinstance ( value , bool ) :
return self . _f90bool ( value )
elif isinstance ( value , numbers . Integral ) :
return self . _f90int ( value )
elif isinstance ( value , numbers . Real ) :
return self . _f90float ( value )
elif isinstance ( value , numbers . Complex ) :
return self . _f90complex ( value )
elif isinstance ( value , basestring ) :
return self . _f90str ( value )
elif value is None :
return ''
else :
raise ValueError ( 'Type {0} of {1} cannot be converted to a Fortran' ' type.' . format ( type ( value ) , value ) ) |
def run_ideal_classifier(args=None):
    """Create and train an ideal bag-of-words classifier on random objects.

    Builds random objects with an object machine, converts each object's
    sensations into a "word" vocabulary, stores bag-of-words object
    representations, then measures classification accuracy as a function
    of the number of sensations.

    :param args: optional dict of parameters (numObjects, numLocations,
        numFeatures, numPoints, trialNum, useLocation, numColumns); the
        same dict is updated in place with the results and returned.
    :return: ``args`` augmented with accuracy, numTouches,
        convergencePoint and classificationOutcome.
    """
    # A mutable default argument ({}) would be shared and mutated across
    # calls; create a fresh dict instead.
    if args is None:
        args = {}
    numObjects = args.get("numObjects", 10)
    numLocations = args.get("numLocations", 10)
    numFeatures = args.get("numFeatures", 10)
    numPoints = args.get("numPoints", 10)
    trialNum = args.get("trialNum", 42)
    useLocation = args.get("useLocation", 1)
    numColumns = args.get("numColumns", 1)
    objects = createObjectMachine(
        machineType="simple",
        numInputBits=20,
        sensorInputSize=150,
        externalInputSize=2400,
        numCorticalColumns=numColumns,
        numFeatures=numFeatures,
        numLocations=numLocations,
        seed=trialNum)
    random.seed(trialNum)
    objects.createRandomObjects(
        numObjects, numPoints=numPoints,
        numLocations=numLocations, numFeatures=numFeatures)
    objectSDRs = objects.provideObjectsToLearn()
    # list() so the names can be indexed under Python 3 as well.
    objectNames = list(objectSDRs.keys())
    featureWidth = objects.sensorInputSize
    locationWidth = objects.externalInputSize
    # Total number of sensations across all objects.
    numInputVectors = numPoints * numObjects
    if useLocation:
        inputWidth = featureWidth + locationWidth
    else:
        inputWidth = featureWidth
    # Create the "training" dataset: one binary row per sensation.
    data = np.zeros((numInputVectors, inputWidth))
    label = np.zeros((numInputVectors, numObjects))
    k = 0
    for i in range(numObjects):
        numSensations = len(objectSDRs[objectNames[i]])
        for j in range(numSensations):
            activeBitsFeature = np.array(list(objectSDRs[objectNames[i]][j][0][1]))
            data[k, activeBitsFeature] = 1
            if useLocation:
                activeBitsLocation = np.array(list(objectSDRs[objectNames[i]][j][0][0]))
                data[k, featureWidth + activeBitsLocation] = 1
            label[k, i] = 1
            k += 1
    # Enumerate the number of distinct "words".
    # Note: this could be done much more easily if we simply used the
    # location/feature pairs stored in the object machine.
    wordList = np.zeros((0, inputWidth), dtype='int32')
    featureList = np.zeros((numInputVectors,))
    for i in range(numInputVectors):
        index = findWordInVocabulary(data[i, :], wordList)
        if index is not None:
            featureList[i] = index
        else:
            newWord = np.zeros((1, inputWidth), dtype='int32')
            newWord[0, :] = data[i, :]
            wordList = np.concatenate((wordList, newWord))
            featureList[i] = wordList.shape[0] - 1
    numWords = wordList.shape[0]
    # Convert objects to vectorized bag-of-words representations.
    storedObjectRepresentations = np.zeros((numObjects, numWords), dtype=np.int32)
    k = 0
    for i in range(numObjects):
        numSensations = len(objectSDRs[objectNames[i]])
        for j in range(numSensations):
            index = findWordInVocabulary(data[k, :], wordList)
            storedObjectRepresentations[i, index] += 1
            k += 1
    # Create a random order of sensations for each object.
    objectSensations = []
    for i in range(numObjects):
        senseList = []
        wordIndices = np.where(storedObjectRepresentations[i, :])[0]
        # An object can contain multiple instances of a word, in which
        # case we add all of them.
        for w in wordIndices:
            senseList.extend(storedObjectRepresentations[i, w] * [w])
        random.shuffle(senseList)
        objectSensations.append(senseList)
    # Accuracy as a function of the number of sensations.
    accuracyList = []
    classificationOutcome = np.zeros((numObjects, numPoints + 1))
    for sensationNumber in range(1, numPoints + 1):
        bowVectorsTest = np.zeros((numObjects, numWords), dtype=np.int32)
        for objectId in range(numObjects):
            sensations = objectSensations[objectId]
            numPointsToInclude = computeUniquePointsSensed(
                numColumns, len(sensations), sensationNumber)
            for j in range(numPointsToInclude):
                index = sensations[j]
                bowVectorsTest[objectId, index] += 1
        # Count the number of correct classifications.  A correct
        # classification is where object i is unambiguously recognized.
        numCorrect = 0
        for i in range(numObjects):
            overlaps = classifierPredict(bowVectorsTest[i, :], storedObjectRepresentations)
            bestOverlap = max(overlaps)
            outcome = ((overlaps[i] == bestOverlap)
                       and len(np.where(overlaps == bestOverlap)[0]) == 1)
            numCorrect += outcome
            classificationOutcome[i, sensationNumber] = outcome
        accuracy = float(numCorrect) / numObjects
        accuracyList.append(accuracy)
    convergencePoint = np.zeros((numObjects,))
    for i in range(numObjects):
        if np.max(classificationOutcome[i, :]) > 0:
            convergencePoint[i] = locateConvergencePoint(classificationOutcome[i, :])
        else:
            # Never converged: report one step past the final sensation
            # (was hard-coded to 11, i.e. the default numPoints + 1).
            convergencePoint[i] = numPoints + 1
    args.update({"accuracy": accuracyList})
    # Was hard-coded range(1, 11); generalized to the actual point count.
    args.update({"numTouches": list(range(1, numPoints + 1))})
    args.update({"convergencePoint": np.mean(convergencePoint)})
    args.update({"classificationOutcome": classificationOutcome})
    # Python 3 print functions (trailing-comma print is Python 2 only).
    print("objects={}, features={}, locations={}, distinct words={}, "
          "numColumns={}".format(numObjects, numFeatures, numLocations,
                                 numWords, numColumns), end=' ')
    print("==> convergencePoint:", args["convergencePoint"])
    return args
async def emit(self, record: LogRecord):  # type: ignore
    """Write a formatted log record to the underlying stream writer.

    Lazily initialises the writer on first use; any failure while
    formatting or writing is routed to ``handleError``.
    """
    if self.writer is None:
        self.writer = await self._init_writer()
    try:
        payload = (self.format(record) + self.terminator).encode()
        self.writer.write(payload)
        await self.writer.drain()
    except Exception:
        await self.handleError(record)
def MODE(self, setmode):
    """Switch the operating mode.

    BOOST simply turns the boost flag on; AUTO/MANU first clear an
    active boost, then apply the requested control mode.
    """
    if setmode == self.BOOST_MODE:
        self.actionNodeData('BOOST_MODE', True)
        return
    if setmode in (self.AUTO_MODE, self.MANU_MODE):
        # Leaving boost: clear the flag before changing the control mode.
        if self.getAttributeData("BOOST_MODE"):
            self.actionNodeData('BOOST_MODE', False)
        self.actionNodeData('CONTROL_MODE', setmode)
def convert_metadata(self):
    """Convert the layer's metadata and write it next to the output file.

    Invoked when the OK button is clicked: reads the layer's current
    keywords, converts them (optionally forcing a target exposure),
    copies every sidecar file found next to the input layer to the
    output location, then writes the converted ISO 19115 metadata.
    """
    # Converter parameters.
    converter_parameter = {}
    # Target exposure (only when the combo box is active).
    if self.target_exposure_combo_box.isEnabled():
        exposure_index = self.target_exposure_combo_box.currentIndex()
        exposure_key = self.target_exposure_combo_box.itemData(
            exposure_index, Qt.UserRole)
        converter_parameter['exposure'] = exposure_key
    # Metadata: convert via the module-level convert_metadata helper.
    current_metadata = self.keyword_io.read_keywords(self.layer)
    old_metadata = convert_metadata(current_metadata, **converter_parameter)
    # Input paths.
    input_layer_path = self.layer.source()
    input_directory_path = os.path.dirname(input_layer_path)
    input_file_name = os.path.basename(input_layer_path)
    input_base_name = os.path.splitext(input_file_name)[0]
    input_extension = os.path.splitext(input_file_name)[1]
    # Output paths.
    output_path = self.output_path_line_edit.text()
    output_directory_path = os.path.dirname(output_path)
    output_file_name = os.path.basename(output_path)
    output_base_name = os.path.splitext(output_file_name)[0]
    # Copy all related files, if they exist.
    extensions = [
        # Vector layer sidecars ('.qpj' was missing its leading dot,
        # which built a bogus "<name>qpj" filename).
        '.shp', '.geojson', '.qml', '.shx', '.dbf', '.prj', '.qpj',
        # Raster layer sidecars.
        '.tif', '.tiff', '.asc',
    ]
    for extension in extensions:
        source_path = os.path.join(
            input_directory_path, input_base_name + extension)
        if not os.path.exists(source_path):
            continue
        target_path = os.path.join(
            output_directory_path, output_base_name + extension)
        QFile.copy(source_path, target_path)
    # Replace the metadata with the converted (old-style) one.
    output_file_path = os.path.join(
        output_directory_path, output_base_name + input_extension)
    write_iso19115_metadata(output_file_path, old_metadata, version_35=True)
def child_task(self):
    '''Child process entry point - this holds all the GUI elements.'''
    # The child only uses its own pipe ends; close the parent's.
    self.parent_pipe_send.close()
    self.parent_pipe_recv.close()
    # Import GUI modules only in the child so the parent never loads wx.
    import wx_processguard
    from wx_loader import wx
    from wxconsole_ui import ConsoleFrame
    application = wx.App(False)
    application.frame = ConsoleFrame(state=self, title=self.title)
    application.frame.Show()
    application.MainLoop()
def process_row(cls, data, column_map):
    """Normalise one row of Rekall output into a plain dict.

    Falsy values become '-'; list values take their second element;
    dicts carrying a UnixTimeStamp become datetimes (epoch zero becomes
    '-').  Keys are renamed through ``column_map``.
    """
    row = {}
    # `iteritems` is Python 2 only; `items` works on both dict styles.
    for key, value in data.items():
        if not value:
            value = '-'
        elif isinstance(value, list):
            value = value[1]
        elif isinstance(value, dict):
            if 'type_name' in value:
                if 'UnixTimeStamp' in value['type_name']:
                    value = datetime.datetime.utcfromtimestamp(value['epoch'])
                    # Epoch zero means "no timestamp".
                    if value == datetime.datetime(1970, 1, 1, 0, 0):
                        value = '-'
        # Assume the value is somehow well formed when we get here.
        row[column_map[key]] = value
    return row
def deleteSettings(self, groupName=None):
    """Delete registry items from the persistent store.

    Falls back to this object's ``settingsGroupName`` when no group
    name is given.
    """
    targetGroup = groupName or self.settingsGroupName
    settings = QtCore.QSettings()
    logger.info("Deleting {} from: {}".format(targetGroup, settings.fileName()))
    removeSettingsGroup(targetGroup)
def get_input_score_start_range_metadata(self):
    """Gets the metadata for the input score start range.

    return: (osid.Metadata) - metadata for the input score start range
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceForm.get_group_metadata_template
    metadata = dict(self._mdata['input_score_start_range'])
    metadata['existing_decimal_values'] = self._my_map['inputScoreStartRange']
    return Metadata(**metadata)
def view_slow_sources(token, dstore, maxrows=20):
    """Return an rst table with the ``maxrows`` slowest sources."""
    info = dstore['source_info'].value
    # Sort ascending by calc_time, then reverse so the slowest come first.
    info.sort(order='calc_time')
    slowest_first = info[::-1]
    return rst_table(slowest_first[:maxrows])
def click(self, x, y):
    """Simulate a left-click within the window screen.

    Args:
        x, y: int, pixel distance from the window's (left, top) origin.

    Returns:
        None
    """
    # Python 3 print function (the statement form is Python 2 only).
    print('click at', x, y)
    self._input_left_mouse(x, y)
def _flatten_ancestors ( self , include_part_of = True ) :
"""Determines and stores all ancestors of each GO term .
Parameters
include _ part _ of : bool , optional
Whether to include ` ` part _ of ` ` relations in determining
ancestors .
Returns
None""" | def get_all_ancestors ( term ) :
ancestors = set ( )
for id_ in term . is_a :
ancestors . add ( id_ )
ancestors . update ( get_all_ancestors ( self . terms [ id_ ] ) )
if include_part_of :
for id_ in term . part_of :
ancestors . add ( id_ )
ancestors . update ( get_all_ancestors ( self . terms [ id_ ] ) )
return ancestors
for term in self . terms . values ( ) :
term . ancestors = get_all_ancestors ( term ) |
def cleanup_logger(self):
    """Close and detach this object's log handler.

    After this call, writing to ``self.log`` ends up being discarded.
    """
    handler = self.log_handler
    handler.close()
    self.log.removeHandler(handler)
def get_terms(self, kwargs):
    """Check URL parameters for slug and/or version to pull the right
    TermsAndConditions object."""
    slug = kwargs.get("slug")
    version = kwargs.get("version")
    if slug and version:
        matched = TermsAndConditions.objects.filter(
            slug=slug, version_number=version).latest('date_active')
        return [matched]
    if slug:
        return [TermsAndConditions.get_active(slug)]
    # List view: every active terms document the user has not agreed to.
    return TermsAndConditions.get_active_terms_not_agreed_to(self.request.user)
def get_dataset_key(self, key, **kwargs):
    """Get the fully qualified `DatasetID` matching `key`.

    See `satpy.readers.get_key` for more information about kwargs.
    """
    available_ids = self.ids.keys()
    return get_key(key, available_ids, **kwargs)
def compute_freq_cross(self, csd, asd, output='coherence'):
    """Compute cross-spectrum, gain, phase shift and/or coherence.

    Parameters
    ----------
    csd : list of dict with 'data' key as instance of ChanFreq
        cross-spectral density, one channel
    asd : list of dict with 'data' key as instance of ChanFreq
        autospectral density, two channels
    output : str
        'coherence' or 'gainphase'

    Returns
    -------
    tuple of list of dict with 'data' key as instance of ChanFreq
        if coherence, tuple contains one list;
        if gainphase, tuple contains: xgain, ygain, phase,
        where xgain is the gain with x as input and y as output
    """
    if output == 'coherence':
        coh_list = []
        for i in range(len(csd)):
            # New ChanFreq holder sharing the csd frequency/channel axes.
            dat = ChanFreq()
            dat.data = empty(1, dtype='O')
            dat.data[0] = empty((1, csd[i]['data'].number_of('freq')[0]), dtype='f')
            dat.axis['freq'] = empty(1, dtype='O')
            dat.axis['freq'][0] = csd[i]['data'].axis['freq'][0]
            dat.axis['chan'] = csd[i]['data'].axis['chan']
            # Copy the source dict so metadata tags along with the result.
            newdict = dict(csd[i])
            newdict['data'] = dat
            Pxy = csd[i]['data'].data[0][0]
            Pxx = asd[i]['data'].data[0][0]
            Pyy = asd[i]['data'].data[0][1]
            # Magnitude-squared coherence: |Pxy|^2 / (Pxx * Pyy).
            Cxy = abs(Pxy) ** 2 / Pxx / Pyy
            # ms coherence
            dat.data[0][0, :] = Cxy
            coh_list.append(newdict)
        out = (coh_list, )
    elif output == 'gainphase':
        xg_list = []
        yg_list = []
        ph_list = []
        for i in range(len(csd)):
            # One ChanFreq each for x-gain, y-gain and phase, all sharing
            # the csd frequency axis.
            xgain = ChanFreq()
            xgain.data = empty(1, dtype='O')
            xgain.data[0] = empty((1, csd[i]['data'].number_of('freq')[0]), dtype='f')
            xgain.axis['freq'] = empty(1, dtype='O')
            xgain.axis['freq'][0] = csd[i]['data'].axis['freq'][0]
            xgain.axis['chan'] = empty(1, dtype='O')
            ygain = ChanFreq()
            ygain.data = empty(1, dtype='O')
            ygain.data[0] = empty((1, csd[i]['data'].number_of('freq')[0]), dtype='f')
            ygain.axis['freq'] = empty(1, dtype='O')
            ygain.axis['freq'][0] = csd[i]['data'].axis['freq'][0]
            ygain.axis['chan'] = empty(1, dtype='O')
            phase = ChanFreq()
            phase.data = empty(1, dtype='O')
            phase.data[0] = empty((1, csd[i]['data'].number_of('freq')[0]), dtype='f')
            phase.axis['freq'] = empty(1, dtype='O')
            phase.axis['freq'][0] = csd[i]['data'].axis['freq'][0]
            phase.axis['chan'] = empty(1, dtype='O')
            # Label channels as "input-->output" pairs.
            xchan = asd[i]['data'].axis['chan'][0][0]
            ychan = asd[i]['data'].axis['chan'][0][1]
            xgain.axis['chan'][0] = asarray(['-->'.join((xchan, ychan))], dtype='U')
            ygain.axis['chan'][0] = asarray(['-->'.join((ychan, xchan))], dtype='U')
            phase.axis['chan'][0] = asarray(['-->'.join((xchan, ychan))], dtype='U')
            Pxy = csd[i]['data'].data[0][0]
            Pxx = asd[i]['data'].data[0][0]
            Pyy = asd[i]['data'].data[0][1]
            # Transfer functions in each direction.
            Hx = Pxy / Pxx
            Hy = Pxy / Pyy
            xgain.data[0][0, :] = abs(Hx)
            ygain.data[0][0, :] = abs(Hy)
            phase.data[0][0, :] = angle(Hx, deg=True)
            # phase is same in both directions, since Pxx and Pyy are real
            xg_dict = dict(csd[i])
            xg_dict['data'] = xgain
            xg_list.append(xg_dict)
            yg_dict = dict(csd[i])
            yg_dict['data'] = ygain
            yg_list.append(yg_dict)
            ph_dict = dict(csd[i])
            ph_dict['data'] = phase
            ph_list.append(ph_dict)
        out = (xg_list, yg_list, ph_list)
    return out
def _remove_observer ( self , signal , observer ) :
"""Remove an observer to a valid signal .
Parameters
signal : str
a valid signal .
observer : @ func
an obervation function to be removed .""" | if observer in self . _observers [ signal ] :
self . _observers [ signal ] . remove ( observer ) |
def get_executions(self, **kwargs):
    """Retrieve the executions related to the current service.

    .. versionadded:: 1.13

    :param kwargs: (optional) additional search keyword arguments to
        limit the search even further.
    :return: list of ServiceExecutions associated to the current service.
    """
    client = self._client
    return client.service_executions(
        service=self.id, scope=self.scope_id, **kwargs)
def chpl_type_name(self):
    """Return 'iterator', 'method' or '' depending on the object type."""
    objtype = self.objtype
    # Only *method-like object types produce a name at all.
    if not objtype.endswith('method'):
        return ''
    if objtype.startswith('iter'):
        return 'iterator'
    if objtype == 'method':
        return 'method'
    return ''
def download_file(filename: str):
    """Download the specified project file if it exists.

    Returns a Flask file response, or ('', 204) when the file or the
    project context is unavailable.
    """
    project = cd.project.get_internal_project()
    source_directory = project.source_directory if project else None
    # Missing filename, project or source directory: nothing to send.
    if not filename or not project or not source_directory:
        return '', 204
    path = os.path.realpath(os.path.join(
        source_directory, '..', '__cauldron_downloads', filename))
    if not os.path.exists(path):
        return '', 204
    return flask.send_file(path, mimetype=mimetypes.guess_type(path)[0])
def search_tags(self, series_search_text=None, response_type=None, params=None):
    """Request the FRED tags for a series search.

    `<https://research.stlouisfed.org/docs/api/fred/series_search_tags.html>`_

    :arg str series_search_text: The words to match against economic data series. Required.
    :arg str response_type: File extension of response. Options are 'xml', 'json',
        'dict', 'df', 'numpy', 'csv', 'tab', 'pipe'. Required.
    :arg dict params: optional additional request parameters, e.g.
        realtime_start/realtime_end ("YYYY-MM-DD"), limit (1-1000),
        offset (>= 0), order_by ('series_count', 'popularity', 'created',
        'name', 'group_id'), sort_order ('asc', 'desc'), tag_names
        ("income;bea"), tag_group_id ('freq', 'gen', 'geo', 'geot',
        'rls', 'seas', 'src'), tag_search_text.
    """
    path = '/series/search/tags?'
    # Guard against a missing params dict: the original dereferenced the
    # None default unconditionally and raised TypeError.
    if params is None:
        params = {}
    params['series_search_text'] = series_search_text
    response_type = response_type if response_type else self.response_type
    if response_type != 'xml':
        params['file_type'] = 'json'
    response = _get_request(self.url_root, self.api_key, path, response_type,
                            params, self.ssl_verify)
    return response
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame.
    """
    code = frame.f_code
    args, varargs, varkw = getargs(code)
    return args, varargs, varkw, frame.f_locals
def get_user(self, screen_name):
    """Perform the usufy search.

    :param screen_name: nickname to be searched.
    :return: list of processed user profiles (empty when not found).
    """
    # Connect to the API and check the rate limits first.
    api = self._connectToAPI()
    self._rate_limit_status(api=api, mode="get_user")
    raw_profiles = []
    try:
        user = api.get_user(screen_name)
        # Grab the raw JSON payload via user._json.
        raw_profiles.append(user._json)
    except tweepy.error.TweepError as e:
        # Best effort: an unresolvable user simply yields no results.
        pass
    # Extract the information from each profile.
    return [self._processUser(profile) for profile in raw_profiles]
def load(file='/etc/pf.conf', noop=False):
    '''Load a ruleset from the specific file, overwriting the currently
    loaded ruleset.

    file:
        Full path to the file containing the ruleset.
    noop:
        Don't actually load the rules, just parse them.

    CLI example:

    .. code-block:: bash

        salt '*' pf.load /etc/pf.conf.d/lockdown.conf
    '''
    # We cannot precisely determine whether loading the ruleset implied
    # any changes, so assume it always does.
    ret = {'changes': True}
    cmd = ['pfctl', '-f', file]
    if noop:
        # Parse-only mode never changes anything.
        ret['changes'] = False
        cmd.append('-n')
    result = __salt__['cmd.run_all'](
        cmd, output_loglevel='trace', python_shell=False)
    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Problem loading the ruleset from {0}'.format(file),
            info={'errors': [result['stderr']], 'changes': False})
    return ret
def check_item_type_uniqueness(tag, previous_tags):
    """Check the uniqueness of the 'item type' for an object.

    Returns a truthy value when the tag's item type conflicts with the
    item type of the object's existing tags.
    """
    if not tag.id:
        # Creating: fail when the new item type differs from the previous
        # ones (comparing against the first of them is enough).
        fail = previous_tags and tag.item_type != previous_tags[0].item_type
    else:
        # Modifying: with a single previous tag (the one being modified)
        # there is nothing to conflict with, so do not fail.
        fail = (previous_tags.count() > 1
                and tag.item_type != previous_tags[0].item_type)
    return fail
def openParametersDialog(params, title=None):
    '''Open a dialog to enter parameters.

    Parameters are passed as a list of Parameter objects.
    Returns a dict with param names as keys and param values as values,
    or None if the dialog was cancelled.
    '''
    # Force a normal arrow cursor while the modal dialog is up.
    QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
    dialog = ParametersDialog(params, title)
    dialog.exec_()
    QApplication.restoreOverrideCursor()
    return dialog.values
def add_func(self, function, outputs=None, weight=None, inputs_kwargs=False,
             inputs_defaults=False, filters=None, input_domain=None,
             await_domain=None, await_result=None, inp_weight=None,
             out_weight=None, description=None, inputs=None, function_id=None,
             **kwargs):
    """Add a single function node to the dispatcher (deferred).

    :param function: Data node estimation function (callable, optional).
    :param outputs: Ordered results (data node ids) returned by the
        function (list, optional).
    :param weight: Node weight coefficient used by the dispatch algorithm
        to estimate the minimum workflow (float/int, optional).
    :param inputs_kwargs: Include kwargs as inputs? (bool)
    :param inputs_defaults: Set default values from the signature? (bool)
    :param filters: Functions invoked after the main function, in order
        (list of callables, optional).
    :param input_domain: Callable taking the same inputs as ``function``
        and returning True when they satisfy the domain; on False the
        dispatch algorithm does not pass through the node (optional).
    :param await_domain: If True, wait for all input results before
        running ``input_domain``; a number is used as ``Future.result``
        timeout [default: True].  Used with async/parallel execution.
    :param await_result: If True, wait for output results before
        assigning them to the workflow; a number is used as
        ``Future.result`` timeout [default: False].
    :param inp_weight: Edge weights from data nodes to the function node,
        keyed by data node id (dict, optional).
    :param out_weight: Edge weights from the function node to data nodes,
        keyed by data node id (dict, optional).
    :param description: Function node's description (str, optional).
    :param inputs: Ordered arguments (data node ids) needed by the
        function; defaults to the function signature (list, optional).
    :param function_id: Function node id; defaults to
        ``<fun.__name__>`` (str, optional).
    :param kwargs: Additional node attributes as key=value pairs.
    :return: Self.
    :rtype: BlueDispatcher
    """
    # NOTE: `_call_kw(locals())` captures every explicit parameter above,
    # so no new local variables may be introduced before this call.
    kwargs.update(_call_kw(locals()))
    # The actual node creation is deferred until the blueprint is
    # replayed on a real dispatcher.
    self.deferred.append(('add_func', kwargs))
    return self
def tagfunc(nargs=None, ndefs=None, nouts=None):
    """Decorator factory producing a tagged-function wrapper."""
    def wrapper(f):
        # Wrap f in a FunctionWithTag while preserving its metadata.
        tagged = FunctionWithTag(f, nargs=nargs, nouts=nouts, ndefs=ndefs)
        return wraps(f)(tagged)
    return wrapper
def _nonzero_intersection ( m , m_hat ) :
"""Count the number of nonzeros in and between m and m _ hat .
Returns
m _ nnz : number of nonzeros in m ( w / o diagonal )
m _ hat _ nnz : number of nonzeros in m _ hat ( w / o diagonal )
intersection _ nnz : number of nonzeros in intersection of m / m _ hat
( w / o diagonal )""" | n_features , _ = m . shape
m_no_diag = m . copy ( )
m_no_diag [ np . diag_indices ( n_features ) ] = 0
m_hat_no_diag = m_hat . copy ( )
m_hat_no_diag [ np . diag_indices ( n_features ) ] = 0
m_hat_nnz = len ( np . nonzero ( m_hat_no_diag . flat ) [ 0 ] )
m_nnz = len ( np . nonzero ( m_no_diag . flat ) [ 0 ] )
intersection_nnz = len ( np . intersect1d ( np . nonzero ( m_no_diag . flat ) [ 0 ] , np . nonzero ( m_hat_no_diag . flat ) [ 0 ] ) )
return m_nnz , m_hat_nnz , intersection_nnz |
def list_all_free_item_coupons ( cls , ** kwargs ) :
"""List FreeItemCoupons
Return a list of FreeItemCoupons
This method makes a synchronous HTTP request by default . To make an
asynchronous HTTP request , please pass async = True
> > > thread = api . list _ all _ free _ item _ coupons ( async = True )
> > > result = thread . get ( )
: param async bool
: param int page : page number
: param int size : page size
: param str sort : page order
: return : page [ FreeItemCoupon ]
If the method is called asynchronously ,
returns the request thread .""" | kwargs [ '_return_http_data_only' ] = True
if kwargs . get ( 'async' ) :
return cls . _list_all_free_item_coupons_with_http_info ( ** kwargs )
else :
( data ) = cls . _list_all_free_item_coupons_with_http_info ( ** kwargs )
return data |
def pre_render(self):
    """Finalise the SVG root just before rendering."""
    self.add_styles()
    self.add_scripts()
    graph = self.graph
    self.root.set('viewBox', '0 0 %d %d' % (graph.width, graph.height))
    if graph.explicit_size:
        # Explicit sizing also pins the width/height attributes.
        self.root.set('width', str(graph.width))
        self.root.set('height', str(graph.height))
def get_and_union_features(features):
    """Get and combine features in a :class:`FeatureUnion`.

    Args:
        features: a feature name, a ``Features`` instance, or a list of
            either (optionally as ``(name, Features)`` tuples).  When
            more than one is given, a :class:`FeatureUnion` is
            automatically constructed.

    Returns:
        :class:`FeatureUnion` or ``Features``
    """
    if not features:
        raise ValueError('invalid `features`: may not be null')
    if isinstance(features, string_):
        # A single feature name.
        return get_feature(features)
    if isinstance(features, (list, tuple)):
        first = features[0]
        if isinstance(first, tuple):
            # Already (name, Features) pairs.
            return FeatureUnion(features)
        if isinstance(first, string_):
            # Feature names: resolve each one.
            return FeatureUnion([(name, get_feature(name)) for name in features])
        # Feature instances: auto-name them.
        return make_union(*features)
    # A single Features instance.
    return features
async def dataSources(loop=None, executor=None):
    """Return a dictionary mapping available DSNs to their descriptions.

    :param loop: asyncio compatible event loop
    :param executor: instance of custom ThreadPoolExecutor; the default
        executor is used when not supplied
    :return dict: mapping of dsn to driver description
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    # The blocking ODBC call runs in the executor to keep the loop free.
    return await loop.run_in_executor(executor, _dataSources)
def get_channelstate_closing(
        chain_state: ChainState,
        payment_network_id: PaymentNetworkID,
        token_address: TokenAddress,
) -> List[NettingChannelState]:
    """Return the state of closing channels in a token network."""
    def is_closing(channel_state):
        return channel.get_status(channel_state) == CHANNEL_STATE_CLOSING

    return get_channelstate_filter(
        chain_state,
        payment_network_id,
        token_address,
        is_closing,
    )
def deleted_user_delete ( self , id , ** kwargs ) :
"https : / / developer . zendesk . com / rest _ api / docs / core / users # permanently - delete - user" | api_path = "/api/v2/deleted_users/{id}.json"
api_path = api_path . format ( id = id )
return self . call ( api_path , method = "DELETE" , ** kwargs ) |
def create_application(self, full_layout=True):
    """Make the application object and the buffers.

    :param full_layout: when True build the full example/toolbar layout,
        otherwise the simplified tutorial layout.
    :return: a configured ``Application`` instance.
    """
    layout_manager = LayoutManager(self)
    # Pick the layout variant: full (with lexers) or tutorial.
    if full_layout:
        layout = layout_manager.create_layout(ExampleLexer, ToolbarLexer)
    else:
        layout = layout_manager.create_tutorial_layout()
    # One buffer per informational pane; the read-only ones are display-only.
    buffers = {
        DEFAULT_BUFFER: Buffer(is_multiline=True),
        'description': Buffer(is_multiline=True, read_only=True),
        'parameter': Buffer(is_multiline=True, read_only=True),
        'examples': Buffer(is_multiline=True, read_only=True),
        'bottom_toolbar': Buffer(is_multiline=True),
        'example_line': Buffer(is_multiline=True),
        'default_values': Buffer(),
        'symbols': Buffer(),
        'progress': Buffer(is_multiline=False)
    }
    # The buffer the user types into, with history-based auto-suggestions.
    writing_buffer = Buffer(
        history=self.history,
        auto_suggest=AutoSuggestFromHistory(),
        enable_history_search=True,
        completer=self.completer,
        complete_while_typing=Always()
    )
    return Application(
        mouse_support=False,
        style=self.style,
        buffer=writing_buffer,
        on_input_timeout=self.on_input_timeout,
        key_bindings_registry=InteractiveKeyBindings(self).registry,
        layout=layout,
        buffers=buffers,
    )
def make_uhs(hmap, info):
    r"""Make Uniform Hazard Spectra curves for each location.

    :param hmap:
        array of shape (N, M, P) -- N sites, M intensity measure types,
        P probabilities of exceedance
    :param info:
        a dictionary with keys poes, imtls, uhs_dt
    :returns:
        a composite array of length N containing uniform hazard spectra
    """
    uhs = numpy.zeros(len(hmap), info['uhs_dt'])
    imts = list(info['imtls'])
    for p, poe in enumerate(info['poes']):
        spectrum = uhs[str(poe)]  # structured view: writes go into uhs
        for m, imt in enumerate(imts):
            # only spectral IMTs (PGA / SA(..)) contribute to a UHS
            if imt.startswith(('PGA', 'SA')):
                spectrum[imt] = hmap[:, m, p]
    return uhs
def _convert_xml_to_queue_messages(response, decode_function, require_encryption, key_encryption_key, resolver, content=None):
    '''Deserialize a QueueMessagesList XML response into QueueMessage objects.

    Expected response body:
    <?xml version="1.0" encoding="utf-8"?>
    <QueueMessagesList>
        <QueueMessage>
            <MessageId>string-message-id</MessageId>
            <InsertionTime>insertion-time</InsertionTime>
            <ExpirationTime>expiration-time</ExpirationTime>
            <PopReceipt>opaque-string-receipt-data</PopReceipt>
            <TimeNextVisible>time-next-visible</TimeNextVisible>
            <DequeueCount>integer</DequeueCount>
            <MessageText>message-body</MessageText>
        </QueueMessage>
    </QueueMessagesList>

    :param content: pre-decoded message content to attach instead of parsing
        MessageText (used when the service echoes a message back).
    :return: list of QueueMessage, or None when there is no response body.
    '''
    if response is None or response.body is None:
        return None
    messages = list()
    list_element = ETree.fromstring(response.body)
    for message_element in list_element.findall('QueueMessage'):
        message = QueueMessage()
        message.id = message_element.findtext('MessageId')
        dequeue_count = message_element.findtext('DequeueCount')
        if dequeue_count is not None:
            message.dequeue_count = _to_int(dequeue_count)
        # content is not returned for put_message
        if content is not None:
            message.content = content
        else:
            message.content = message_element.findtext('MessageText')
            # Decrypt first (when a key or resolver is supplied), then apply
            # the caller's decode_function to the resulting plaintext.
            if (key_encryption_key is not None) or (resolver is not None):
                message.content = _decrypt_queue_message(message.content, require_encryption, key_encryption_key, resolver)
            message.content = decode_function(message.content)
        # Timestamps come back as text; parse them into datetimes.
        message.insertion_time = parser.parse(message_element.findtext('InsertionTime'))
        message.expiration_time = parser.parse(message_element.findtext('ExpirationTime'))
        message.pop_receipt = message_element.findtext('PopReceipt')
        # TimeNextVisible is optional in the payload.
        time_next_visible = message_element.find('TimeNextVisible')
        if time_next_visible is not None:
            message.time_next_visible = parser.parse(time_next_visible.text)
        # Add message to list
        messages.append(message)
    return messages
def divergence(u, v, dx, dy):
    r"""Calculate the horizontal divergence of the horizontal wind.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind
    v : (M, N) ndarray
        y component of the wind
    dx : float or ndarray
        Grid spacing(s) in the x-direction. If an array, there should be one
        item less than the size of `u` along the applicable axis.
    dy : float or ndarray
        Grid spacing(s) in the y-direction. If an array, there should be one
        item less than the size of `u` along the applicable axis.

    Returns
    -------
    (M, N) ndarray
        The horizontal divergence (du/dx + dv/dy)

    See Also
    --------
    vorticity

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either
    leading dimensions of (x, y) or trailing dimensions of (y, x), depending
    on the value of ``dim_order``.
    """
    # y varies along axis -2, x along axis -1
    dvdy = first_derivative(v, delta=dy, axis=-2)
    dudx = first_derivative(u, delta=dx, axis=-1)
    return dudx + dvdy
def _onInstanceAttribute(self, name, line, pos, absPosition, level):
    """Memorizes a class instance attribute (ignoring duplicates)."""
    # Instance attributes appear in member functions only, so a function is
    # already on the object stack; the owning class sits one level above it,
    # hence the -1.
    classAttributes = self.objectsStack[level - 1].instanceAttributes
    if any(attribute.name == name for attribute in classAttributes):
        return
    classAttributes.append(InstanceAttribute(name, line, pos, absPosition))
def delete(self, method, uri, params=None, data=None, headers=None, auth=None, timeout=None, allow_redirects=False):
    """Delete a resource.

    Raises the client exception on any non-2xx response; returns True when
    the server answered 204 No Content.
    """
    response = self.request(
        method, uri, params=params, data=data, headers=headers,
        auth=auth, timeout=timeout, allow_redirects=allow_redirects,
    )
    if not 200 <= response.status_code < 300:
        raise self.exception(method, uri, response, 'Unable to delete record')
    return response.status_code == 204
def execute_command(self, command, tab=None):  # TODO DBUS_ONLY
    """Execute `command` in `tab`. If tab is None, the command runs in the
    currently selected tab. Command should end with '\n'; if it does not,
    a newline is appended.
    """
    # TODO CONTEXTMENU this has to be rewritten and only serves the
    # dbus interface, maybe this should be moved to dbusinterface.py
    if not self.get_notebook().has_page():
        self.add_tab()
    # Keep the exact last-character check: the command must be non-empty.
    if command[-1] != '\n':
        command = command + '\n'
    terminal = self.get_notebook().get_current_terminal()
    terminal.feed_child(command)
def has_activity(graph: BELGraph, node: BaseEntity) -> bool:
    """Return True if any of the node's edges carries a molecular activity."""
    result = _node_has_modifier(graph, node, ACTIVITY)
    return result
def get_state_tuple(state, state_m=None):
    """Generates a tuple that holds the state as yaml-strings and its meta data in a dictionary.

    The tuple consists of:
    [0] json_str for state,
    [1] dict of child_state tuples,
    [2] dict of model_meta-data of self and elements
    [3] path of state in state machine
    [4] script_text
    [5] file system path
    [6] semantic data
    # states-meta - [state-, transitions-, data_flows-, outcomes-, inputs-, outputs-, scopes, states-meta]

    :param rafcon.core.states.state.State state: The state that should be stored
    :param state_m: optional state model used to extract meta data; when None
        the meta-data dict is left empty
    :return: state_tuple tuple
    """
    # Serialize the state itself to JSON (deterministic via sort_keys).
    state_str = json.dumps(state, cls=JSONObjectEncoder, indent=4, check_circular=False, sort_keys=True)
    state_tuples_dict = {}
    if isinstance(state, ContainerState):
        # Recursively capture every child state, keyed by its state id.
        for child_state_id, child_state in state.states.items():
            state_tuples_dict[child_state_id] = get_state_tuple(child_state)
    state_meta_dict = {} if state_m is None else get_state_element_meta(state_m)
    # Only execution states carry a script body.
    script_content = state.script.script if isinstance(state, ExecutionState) else None
    # deepcopy so later mutations of the state's semantic data do not leak
    # into the stored tuple.
    state_tuple = (state_str, state_tuples_dict, state_meta_dict, state.get_path(), script_content, state.file_system_path, copy.deepcopy(state.semantic_data))
    return state_tuple
def init_app(self, app):
    """Do setup that requires a Flask app.

    :param app: The application to initialize.
    :type app: Flask
    """
    secrets = self.load_secrets(app)
    self.client_secrets = list(secrets.values())[0]
    secrets_cache = DummySecretsCache(secrets)

    # Default configuration options; values already set in app.config win.
    defaults = {
        'OIDC_SCOPES': ['openid', 'email'],
        'OIDC_GOOGLE_APPS_DOMAIN': None,
        'OIDC_ID_TOKEN_COOKIE_NAME': 'oidc_id_token',
        'OIDC_ID_TOKEN_COOKIE_PATH': '/',
        'OIDC_ID_TOKEN_COOKIE_TTL': 7 * 86400,  # 7 days
        # should ONLY be turned off for local debugging
        'OIDC_COOKIE_SECURE': True,
        'OIDC_VALID_ISSUERS': self.client_secrets.get('issuer') or GOOGLE_ISSUERS,
        'OIDC_CLOCK_SKEW': 60,  # 1 minute
        'OIDC_REQUIRE_VERIFIED_EMAIL': False,
        'OIDC_OPENID_REALM': None,
        'OIDC_USER_INFO_ENABLED': True,
        'OIDC_CALLBACK_ROUTE': '/oidc_callback',
        'OVERWRITE_REDIRECT_URI': False,
        "OIDC_EXTRA_REQUEST_AUTH_PARAMS": {},
        # Configuration for resource servers
        'OIDC_RESOURCE_SERVER_ONLY': False,
        'OIDC_RESOURCE_CHECK_AUD': False,
        # We use client_secret_post, because that's what the Google
        # oauth2client library defaults to
        'OIDC_INTROSPECTION_AUTH_METHOD': 'client_secret_post',
        'OIDC_TOKEN_TYPE_HINT': 'access_token',
    }
    for key, value in defaults.items():
        app.config.setdefault(key, value)

    if 'openid' not in app.config['OIDC_SCOPES']:
        raise ValueError('The value "openid" must be in the OIDC_SCOPES')

    # register callback route and cookie-setting decorator
    if not app.config['OIDC_RESOURCE_SERVER_ONLY']:
        app.route(app.config['OIDC_CALLBACK_ROUTE'])(self._oidc_callback)
        app.before_request(self._before_request)
        app.after_request(self._after_request)

    # Initialize oauth2client
    self.flow = flow_from_clientsecrets(
        app.config['OIDC_CLIENT_SECRETS'],
        scope=app.config['OIDC_SCOPES'],
        cache=secrets_cache)
    assert isinstance(self.flow, OAuth2WebServerFlow)

    # create signers using the Flask secret key
    self.extra_data_serializer = JSONWebSignatureSerializer(
        app.config['SECRET_KEY'], salt='flask-oidc-extra-data')
    self.cookie_serializer = JSONWebSignatureSerializer(
        app.config['SECRET_KEY'], salt='flask-oidc-cookie')

    # An explicit credentials store is optional.
    try:
        self.credentials_store = app.config['OIDC_CREDENTIALS_STORE']
    except KeyError:
        pass
def _create_validate_config(vrn_file, rm_file, rm_interval_file, base_dir, data):
    """Create a bcbio.variation configuration input for validation."""
    truth_call = {"file": str(rm_file), "name": "ref", "type": "grading-ref",
                  "fix-sample-header": True, "remove-refcalls": True}
    analysis_intervals = get_analysis_intervals(data, vrn_file, base_dir)
    if analysis_intervals:
        analysis_intervals = shared.remove_lcr_regions(analysis_intervals, [data])
    if rm_interval_file:
        truth_call["intervals"] = rm_interval_file
    eval_call = {"file": vrn_file, "name": "eval", "remove-refcalls": True}
    exp = {"sample": data["name"][-1],
           "ref": dd.get_ref_file(data),
           "approach": "grade",
           "calls": [truth_call, eval_call]}
    if analysis_intervals:
        exp["intervals"] = os.path.abspath(analysis_intervals)
    # Prefer the aligned BAM when present, falling back to the work BAM.
    for bam_key in ("align_bam", "work_bam"):
        if data.get(bam_key):
            exp["align"] = data[bam_key]
            break
    return {"dir": {"base": base_dir, "out": "work", "prep": "work/prep"},
            "experiments": [exp]}
def main():
    '''main routine: list storage accounts in a subscription and report
    the storage-account quota usage.'''
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit('Error: Expecting azurermconfig.json in current folder')
    # Service-principal credentials come from the local config file.
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    # list storage accounts per sub
    sa_list = azurerm.list_storage_accounts_sub(access_token, subscription_id)
    # print(sa_list)
    for sta in sa_list['value']:
        # name, primary location, and resource group (derived from the id)
        print(sta['name'] + ', ' + sta['properties']['primaryLocation'] + ', ' + rgfromid(sta['id']))
    # get storage account quota
    quota_info = azurerm.get_storage_usage(access_token, subscription_id)
    used = quota_info['value'][0]['currentValue']
    limit = quota_info["value"][0]["limit"]
    print('\nUsing ' + str(used) + ' accounts out of ' + str(limit) + '.')
def cut_range(string):
    """A custom argparse 'type' to deal with sequence ranges such as 5:500.

    Returns a 0-based slice corresponding to the 1-indexed selection defined
    by the string.
    """
    bad_range = argparse.ArgumentTypeError(
        "{0} is not a valid, 1-indexed range.".format(string))
    pieces = string.split(':')
    if len(pieces) == 1:
        stop = int(pieces[0])
        start = stop
    elif len(pieces) == 2:
        # empty endpoints become None (open-ended slice)
        start, stop = (int(piece) if piece else None for piece in pieces)
    else:
        raise bad_range
    # 1-indexed ranges start at 1, and stop may not precede start.
    if start == 0 or (stop or sys.maxsize) < (start or 0):
        raise bad_range
    # Convert from 1-indexed to 0-indexed
    if start is not None and start > 0:
        start -= 1
    return slice(start, stop)
def definition_to_json(source):
    """Convert a bytecode.yaml file into a prepared bytecode.json.

    Jawa internally uses a YAML file to define all bytecode opcodes, operands,
    runtime exceptions, default transforms, etc...

    However since JSON is available in the python stdlib and YAML is not, we
    process this YAML file before distribution to prevent adding an unnecessary
    dependency.
    """
    try:
        import yaml
    except ImportError:
        click.echo('The pyyaml module could not be found and is required'
                   ' to use this command.', err=True)
        return
    # safe_load: the definition file is plain data, and calling yaml.load
    # without an explicit Loader is deprecated (and unsafe on untrusted input).
    y = yaml.safe_load(source)
    for k, v in y.items():
        # We guarantee some keys should always exist to make life easier for
        # developers.
        v.setdefault('operands', None)
        v.setdefault('can_be_wide', False)
        v.setdefault('transform', {})
        v['mnemonic'] = k
    click.echo(json.dumps(y, indent=4, sort_keys=True))
def reset_logging_framework():
    """After fork, ensure any logging.Handler locks are recreated, as a variety of
    threads in the parent may have been using the logging package at the moment
    of fork.

    It is not possible to solve this problem in general; see
    https://github.com/dw/mitogen/issues/150 for a full discussion.
    """
    # Replace the logging module's global lock, which a parent thread may
    # have been holding at the moment of fork.
    logging._lock = threading.RLock()
    # The root logger does not appear in the loggerDict.
    for name in [None] + list(logging.Logger.manager.loggerDict):
        for handler in logging.getLogger(name).handlers:
            handler.createLock()
    # Strip mitogen's own LogHandler from the root logger in the child.
    # NOTE(review): presumably the parent's forwarding handler must not be
    # inherited by the forked child -- confirm against mitogen docs.
    root = logging.getLogger()
    root.handlers = [handler for handler in root.handlers if not isinstance(handler, mitogen.core.LogHandler)]
def _ctype_dict(param_dict):
    """Returns ctype arrays for keys and values (converted to strings) in a dictionary"""
    assert isinstance(param_dict, dict), \
        "unexpected type for param_dict: " + str(type(param_dict))
    # Convert keys and stringified values into parallel c_char_p arrays.
    keys = [c_str(key) for key in param_dict.keys()]
    vals = [c_str(str(value)) for value in param_dict.values()]
    return (c_array(ctypes.c_char_p, keys), c_array(ctypes.c_char_p, vals))
def analyzeParameters(expName, suite):
    """Analyze the impact of each list parameter in this experiment.

    For every swept (list-valued) parameter, print mean/min/max of the final
    "testerror" value across the experiments sharing each parameter value.
    """
    print("\n================", expName, "=====================")
    try:
        expParams = suite.get_params(expName)
        pprint.pprint(expParams)
        for p in ["boost_strength", "k", "learning_rate", "weight_sparsity",
                  "k_inference_factor", "boost_strength_factor",
                  "c1_out_channels", "c1_k", "learning_rate_factor",
                  "batches_in_epoch", ]:
            if p in expParams and type(expParams[p]) == list:
                print("\n", p)
                for v1 in expParams[p]:
                    # Retrieve the last totalCorrect from each experiment
                    # Print them sorted from best to worst
                    values, params = suite.get_values_fix_params(
                        expName, 0, "testerror", "last", **{p: v1})
                    v = np.array(values)
                    # was a bare `except:`, which would also swallow
                    # KeyboardInterrupt/SystemExit; narrowed to Exception
                    try:
                        print("Average/min/max for", p, v1, "=",
                              v.mean(), v.min(), v.max())
                        # sortedIndices = v.argsort()
                        # for i in sortedIndices[::-1]:
                        #     print(v[i], params[i]["name"])
                    except Exception:
                        print("Can't compute stats for", p)
    except Exception:
        print("Couldn't load experiment", expName)
def used_in_func(statement: str, filename: str = '<string>', mode: str = 'exec'):
    '''Parse a Python statement and analyze the symbols used. The result
    will be used to determine what variables a step depends upon.

    :raises RuntimeError: when the statement cannot be parsed or analyzed;
        the original error is chained as ``__cause__``.
    '''
    try:
        return get_used_in_func(ast.parse(statement, filename, mode))
    except Exception as e:
        # chain the original error so tracebacks keep the root cause
        raise RuntimeError(f'Failed to parse statement: {statement} {e}') from e
def send_auto(self, payload, tries=1, timeout=60):
    '''Detect the encryption type based on the payload'''
    # Fall back to cleartext / empty load when the payload omits them.
    body = payload.get('load', {})
    enc_type = payload.get('enc', 'clear')
    return self.send(enc_type, body, tries, timeout)
def get(self, addresses):
    """Returns the value in this context, or None, for each address in
    addresses. Useful for gets on the context manager.

    Args:
        addresses (list of str): The addresses to return values for, if
            within this context.

    Returns:
        results (list of bytes): The values in state for these addresses.
    """
    with self._lock:
        def _read(address):
            # validate each address immediately before reading it, matching
            # the original validate/read interleaving
            self.validate_read(address)
            return self._get(address)
        return [_read(address) for address in addresses]
def idngram2stats(input_file, output_file, n=3, fof_size=50, verbosity=2, ascii_input=False):
    """Lists the frequency-of-frequencies for each of the 2-grams, ..., n-grams,
    which can enable the user to choose appropriate cut-offs, and to specify
    appropriate memory requirements with the spec_num parameter in idngram2lm.

    :raises ConversionError: when the idngram2stats tool exits non-zero.
    """
    cmd = ['idngram2stats']
    if n:
        cmd.extend(['-n', n])
    if fof_size:
        # bugfix: was cmd.extend(['-fof_size'], fof_size) -- extend() takes a
        # single iterable, so that raised TypeError and dropped the value
        cmd.extend(['-fof_size', fof_size])
    if verbosity:
        # bugfix: same extend() misuse as above
        cmd.extend(['-verbosity', verbosity])
    if ascii_input:
        # bugfix: was cmd.append(['-ascii_input']), which appended a list
        cmd.append('-ascii_input')
    # Ensure that every parameter is of type 'str'
    cmd = [str(x) for x in cmd]
    with open(input_file, 'r') as input_f:
        with open(output_file, 'w+') as output_f:
            with output_to_debuglogger() as err_f:
                exitcode = subprocess.call(cmd, stdin=input_f, stdout=output_f, stderr=err_f)
    logger = logging.getLogger(__name__)
    logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
    if exitcode != 0:
        raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode))
def retrieve_styles(self, asset_url_path):
    """Get style URLs from the source HTML page and specified cached
    asset base URL."""
    # The base URL must end with a slash before being joined with style names.
    base_path = asset_url_path if asset_url_path.endswith('/') else asset_url_path + '/'
    self.style_urls.extend(self._get_style_urls(base_path))
def start(
        self,
        phone=lambda: input('Please enter your phone (or bot token): '),
        password=lambda: getpass.getpass('Please enter your password: '),
        *,
        bot_token=None,
        force_sms=False,
        code_callback=None,
        first_name='New User',
        last_name='',
        max_attempts=3):
    """Convenience method to interactively connect and sign in if required,
    also taking into consideration that 2FA may be enabled in the account.

    If the phone doesn't belong to an existing account (and will hence
    `sign_up` for a new one), **you are agreeing to Telegram's
    Terms of Service. This is required and your account
    will be banned otherwise.** See https://telegram.org/tos
    and https://core.telegram.org/api/terms.

    Example usage:
        >>> client = ...
        >>> client.start(phone)
        Please enter the code you received: 12345
        Please enter your password: *******
        (You are now logged in)

    If the event loop is already running, this method returns a
    coroutine that you should await on your own code; otherwise
    the loop is ran until said coroutine completes.

    Args:
        phone (`str` | `int` | `callable`):
            The phone (or callable without arguments to get it)
            to which the code will be sent. If a bot-token-like
            string is given, it will be used as such instead.
            The argument may be a coroutine.

        password (`str`, `callable`, optional):
            The password for 2 Factor Authentication (2FA).
            This is only required if it is enabled in your account.
            The argument may be a coroutine.

        bot_token (`str`):
            Bot Token obtained by `@BotFather <https://t.me/BotFather>`_
            to log in as a bot. Cannot be specified with ``phone`` (only
            one of either allowed).

        force_sms (`bool`, optional):
            Whether to force sending the code request as SMS.
            This only makes sense when signing in with a `phone`.

        code_callback (`callable`, optional):
            A callable that will be used to retrieve the Telegram
            login code. Defaults to `input()`.
            The argument may be a coroutine.

        first_name (`str`, optional):
            The first name to be used if signing up. This has no
            effect if the account already exists and you sign in.

        last_name (`str`, optional):
            Similar to the first name, but for the last. Optional.

        max_attempts (`int`, optional):
            How many times the code/password callback should be
            retried or switching between signing in and signing up.

    Returns:
        This `TelegramClient`, so initialization
        can be chained with ``.start()``.
    """
    # Default to reading the login code interactively from stdin.
    if code_callback is None:
        def code_callback():
            return input('Please enter the code you received: ')
    elif not callable(code_callback):
        raise ValueError(
            'The code_callback parameter needs to be a callable '
            'function that returns the code you received by Telegram.'
        )
    # At least one of phone/bot_token is required; both at once is only
    # rejected when phone is a concrete value (a callable has not produced
    # a phone yet, so it may still defer to the bot token).
    if not phone and not bot_token:
        raise ValueError('No phone number or bot token provided.')
    if phone and bot_token and not callable(phone):
        raise ValueError('Both a phone and a bot token provided, '
                         'must only provide one of either')
    coro = self._start(
        phone=phone,
        password=password,
        bot_token=bot_token,
        force_sms=force_sms,
        code_callback=code_callback,
        first_name=first_name,
        last_name=last_name,
        max_attempts=max_attempts
    )
    # Return the coroutine for the caller to await when a loop is already
    # running; otherwise drive it to completion synchronously.
    return (
        coro if self.loop.is_running()
        else self.loop.run_until_complete(coro)
    )
def create_project_transfer(self, project_id, to_user_ids):
    """Send POST request to initiate transfer of a project to the specified user ids.

    :param project_id: str uuid of the project
    :param to_user_ids: list of user uuids to receive the project
    :return: requests.Response containing the successful result
    """
    payload = {"to_users[][id]": to_user_ids}
    url = "/projects/" + project_id + "/transfers"
    return self._post(url, payload, content_type=ContentType.form)
def get_op_statistic(self, name):
    """Get the :class:`~pywbem.OperationStatistic` object for an operation
    name, creating a new one if none exists yet for that name.

    Parameters:
        name (string): Name of the operation.

    Returns:
        :class:`~pywbem.OperationStatistic`: The operation statistic for the
        specified operation name. If this statistics container is disabled,
        a dummy operation statistic object is returned.
    """
    if not self.enabled:
        return self._disabled_stats
    try:
        return self._op_stats[name]
    except KeyError:
        # first time this operation is seen: create and register its stats
        stats = OperationStatistic(self, name)
        self._op_stats[name] = stats
        return stats
def to_iso8601(dt, tz=None):
    """Returns an ISO-8601 representation of a given datetime instance.

    >>> to_iso8601(datetime.datetime.now())
    '2014-10-01T23:21:33.718508Z'

    :param dt: a :class:`~datetime.datetime` instance
    :param tz: a :class:`~datetime.tzinfo` to use; if None - use a default one
    """
    if tz is not None:
        dt = dt.replace(tzinfo=tz)
    result = dt.isoformat()
    # Naive datetime objects usually don't have info about timezone.
    # Let's assume it's UTC and add Z to the end.
    has_offset = re.match(r'.*(Z|[+-]\d{2}:\d{2})$', result) is not None
    return result if has_offset else result + 'Z'
def github_authenticated(cls, func):
    """Does user authentication, creates SSH keys if needed and injects "_user" attribute
    into class/object bound to the decorated function.

    Don't call any other methods of this class manually, this should be everything you need.
    """
    def inner(func_cls, *args, **kwargs):
        if not cls._gh_module:
            # PyGithub is optional: without it the wrapped command still runs,
            # just without any Github setup.
            logger.warning('PyGithub not installed, skipping Github auth procedures.')
        elif not func_cls._user:
            # authenticate user, possibly also creating authentication for future use
            # (bytes login on py2, text on py3)
            login = kwargs['login'].encode(utils.defenc) if not six.PY3 else kwargs['login']
            func_cls._user = cls._get_github_user(login, kwargs['ui'])
            if func_cls._user is None:
                msg = 'Github authentication failed, skipping Github command.'
                logger.warning(msg)
                return (False, msg)
            # create an ssh key for pushing if we don't have one
            if not cls._github_ssh_key_exists():
                cls._github_create_ssh_key()
            # next, create ~/.ssh/config entry for the key, if system username != GH login
            if cls._ssh_key_needs_config_entry():
                cls._create_ssh_config_entry()
        return func(func_cls, *args, **kwargs)
    return inner
def _align_backtrack(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
    """Perform a BWA alignment using 'aln' backtrack algorithm.

    Generates per-end .sai files with `bwa aln` (skipping ones that already
    exist), then pipes `bwa sampe` (paired) or `bwa samse` (single-end)
    output into the post-alignment BAM writer.
    """
    bwa = config_utils.get_program("bwa", data["config"])
    config = data["config"]
    sai1_file = "%s_1.sai" % os.path.splitext(out_file)[0]
    # Empty string (not None) when unpaired so the command template below
    # interpolates cleanly.
    sai2_file = "%s_2.sai" % os.path.splitext(out_file)[0] if pair_file else ""
    if not utils.file_exists(sai1_file):
        with file_transaction(data, sai1_file) as tx_sai1_file:
            _run_bwa_align(fastq_file, ref_file, tx_sai1_file, config)
    if sai2_file and not utils.file_exists(sai2_file):
        with file_transaction(data, sai2_file) as tx_sai2_file:
            _run_bwa_align(pair_file, ref_file, tx_sai2_file, config)
    with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
        # sampe for paired-end reads, samse for single-end
        align_type = "sampe" if sai2_file else "samse"
        # NOTE: the template is filled from locals(), so the variable names
        # above must match the placeholders here.
        cmd = ("unset JAVA_HOME && {bwa} {align_type} -r '{rg_info}' {ref_file} {sai1_file} {sai2_file} " "{fastq_file} {pair_file} | ")
        cmd = cmd.format(**locals()) + tobam_cl
        do.run(cmd, "bwa %s" % align_type, data)
    return out_file
def create_subtask(client, task_id, title, completed=False):
    '''Creates a subtask with the given title under the task with the given ID'''
    _check_title_length(title, client.api)
    fields = {
        'task_id': int(task_id) if task_id else None,
        'title': title,
        'completed': completed,
    }
    # drop unset fields before posting
    payload = {key: value for key, value in fields.items() if value is not None}
    response = client.authenticated_request(client.api.Endpoints.SUBTASKS, 'POST', data=payload)
    return response.json()
def _spoken_representation_L1(lst_lst_char):
    """Join the spoken form of each character: a single space between
    characters of a word, nine spaces between words (empty words still
    produce a word separator, matching the original behavior).
    """
    inter_char = ' '
    inter_word = inter_char * 9
    spoken_words = (
        inter_char.join(_char_to_string_morse(c) for c in word)
        for word in lst_lst_char
    )
    return inter_word.join(spoken_words)
def _process_cities_file(self, file, city_country_mapping):
    """Iterate over cities info and extract useful data.

    Returns a dict with:
      - all_regions: every region encountered
      - regions / cities: entries restricted to IPGEOBASE_ALLOWED_COUNTRIES
        (when that setting is non-empty)
      - city_region_mapping: initialized empty; not populated here
    """
    data = {'all_regions': list(), 'regions': list(), 'cities': list(), 'city_region_mapping': dict()}
    allowed_countries = settings.IPGEOBASE_ALLOWED_COUNTRIES
    for geo_info in self._line_to_dict(file, field_names=settings.IPGEOBASE_CITIES_FIELDS):
        # Resolve the country for this city. NOTE(review): data['all_regions']
        # is passed into the helper -- presumably it may be consulted/mutated
        # there; confirm against _get_country_code_for_city.
        country_code = self._get_country_code_for_city(geo_info['city_id'], city_country_mapping, data['all_regions'])
        new_region = {'name': geo_info['region_name'], 'country__code': country_code}
        if new_region not in data['all_regions']:
            data['all_regions'].append(new_region)
        # Skip cities outside the allowed-country whitelist (when configured).
        if allowed_countries and country_code not in allowed_countries:
            continue
        if new_region not in data['regions']:
            data['regions'].append(new_region)
        data['cities'].append({'region__name': geo_info['region_name'], 'name': geo_info['city_name'], 'id': geo_info['city_id'], 'latitude': Decimal(geo_info['latitude']), 'longitude': Decimal(geo_info['longitude'])})
    return data
def get_buffers_of_type(self, t):
    """returns currently open buffers for a given subclass of
    :class:`~alot.buffers.Buffer`.

    :param t: Buffer class
    :type t: alot.buffers.Buffer
    :rtype: list
    """
    return list(filter(lambda buf: isinstance(buf, t), self.buffers))
def generate_password(self) -> list:
    """Generate a list of random characters."""
    charset = self._get_password_characters()
    # Both a configured length and a non-empty character set are required.
    if self.passwordlen is None or not charset:
        raise ValueError("Can't generate password: character set is "
                         "empty or passwordlen isn't set")
    generated = [randchoice(charset) for _ in range(self.passwordlen)]
    self.last_result = generated
    return generated
def _evaluate(x, y, weights):
    '''Fit the parameters needed by 'function' through curve fitting.

    Filters (x, y) to valid indices, fits the module-level `function`, and
    returns (fitParams, fn, valid_index_mask). Falls back to a smoothed fit
    when the curve fit does not converge.
    '''
    i = _validI(x, y, weights)
    xx = x[i]
    y = y[i]
    try:
        fitParams = _fit(xx, y)
        # bound noise fn to min defined y value:
        minY = function(xx[0], *fitParams)
        fitParams = np.insert(fitParams, 0, minY)
        # NOTE(review): the minY default argument is never used inside the
        # lambda body; fitParams is captured by closure instead -- confirm
        # whether binding it as a default was the original intent.
        fn = lambda x, minY=minY: boundedFunction(x, *fitParams)
    except RuntimeError:
        # curve fit failed to converge: fall back to a smoothed function
        print("couldn't fit noise function with filtered indices, use polynomial fit instead")
        fitParams = None
        fn = smooth(xx, y, weights[i])
    return fitParams, fn, i
def date_this_year(self, before_today=True, after_today=False):
    """Gets a Date object for the current year.

    :param before_today: include days in current year before today
    :param after_today: include days in current year after today
    :example Date('2012-04-04')
    :return Date
    """
    today = date.today()
    # Neither side requested: only today itself remains.
    if not before_today and not after_today:
        return today
    year_start = today.replace(month=1, day=1)
    next_year_start = date(today.year + 1, 1, 1)
    lower = year_start if before_today else today
    upper = next_year_start if after_today else today
    return self.date_between_dates(lower, upper)
def is_file_ignored(opts, fname):
    '''If file_ignore_regex or file_ignore_glob were given in config,
    compare the given file path against all of them and return True
    on the first match.'''
    # `or ()` keeps falsy config values (None, empty list) as a no-op loop.
    for pattern in opts['file_ignore_regex'] or ():
        if re.search(pattern, fname):
            log.debug('File matching file_ignore_regex. Skipping: %s', fname)
            return True
    for pattern in opts['file_ignore_glob'] or ():
        if fnmatch.fnmatch(fname, pattern):
            log.debug('File matching file_ignore_glob. Skipping: %s', fname)
            return True
    return False
def issue_command(self, command, args=None, dry_run=False, comment=None):
    """Issue the given command

    :param str command: Either a fully-qualified XTCE name or an alias in the
                        format ``NAMESPACE/NAME``.
    :param dict args: named arguments (if the command requires these)
    :param bool dry_run: If ``True`` the command is not actually issued. This
                         can be used to check if the server would generate
                         errors when preparing the command (for example
                         because an argument is missing).
    :param str comment: Comment attached to the command.
    :return: An object providing access to properties of the newly issued
             command.
    :rtype: .IssuedCommand
    """
    # Build the protobuf request for the REST endpoint.
    req = rest_pb2.IssueCommandRequest()
    req.sequenceNumber = SequenceGenerator.next()
    req.origin = socket.gethostname()
    req.dryRun = dry_run
    if comment:
        req.comment = comment
    if args:
        # Each named argument becomes an assignment entry; values are
        # stringified for the wire format.
        for key in args:
            assignment = req.assignment.add()
            assignment.name = key
            assignment.value = str(args[key])
    command = adapt_name_for_rest(command)
    url = '/processors/{}/{}/commands{}'.format(self._instance, self._processor, command)
    response = self._client.post_proto(url, data=req.SerializeToString())
    # Decode the protobuf response into the wrapper object.
    proto = rest_pb2.IssueCommandResponse()
    proto.ParseFromString(response.content)
    return IssuedCommand(proto, self)
def lwd(self, astr_startPath, **kwargs):
    """Change to *astr_startPath*, record the cwd, and report it.

    :param astr_startPath: directory to ``cd`` into.
    :return: dict with 'status' (bool, whether the cd succeeded) and
             'cwd' (the current working directory, treeRecurse format).
    """
    if self.cd(astr_startPath)['status']:
        self.l_lwd.append(self.cwd())
        return {'status': True, 'cwd': self.cwd()}
    # Bug fix: the original implicitly returned None when cd() failed,
    # despite the documented dict contract; report the failure instead.
    return {'status': False, 'cwd': self.cwd()}
def get_output_original(self):
    """Build the status-bar output dict without third-party libraries.

    Reads a millidegree temperature from ``self.file`` and chooses a
    color either from the dynamic palette or the static alert threshold.
    """
    with open(self.file, "r") as handle:
        temp = float(handle.read().strip()) / 1000
    if self.dynamic_color:
        perc = int(self.percentage(int(temp), self.alert_temp))
        # Clamp to the last palette index.
        perc = min(perc, 99)
        color = self.colors[perc]
    else:
        color = self.color if temp < self.alert_temp else self.alert_color
    return {"full_text": self.format.format(temp=temp), "color": color}
def get_package_data(name, extlist):
    """Return data files for package *name* with extensions in *extlist*.

    :param name: package directory to walk.
    :param extlist: iterable of extensions (e.g. ``['.txt']``) to keep.
    :return: list of matching file paths relative to *name*.
    """
    flist = []
    for dirpath, _dirnames, filenames in os.walk(name):
        for fname in filenames:
            # Skip hidden files; keep only whitelisted extensions.
            if not fname.startswith('.') and osp.splitext(fname)[1] in extlist:
                # os.path.relpath replaces the original manual offset
                # slicing (which computed the offset with os.pathsep where
                # os.sep was meant; both are length 1, so it happened to
                # work).
                flist.append(osp.relpath(osp.join(dirpath, fname), name))
    return flist
def cleanup_codra_edus(self):
    """Strip leading/trailing '_!' CODRA EDU delimiters in-place and
    unescape their doubled quotes, for every leaf of ``self.tree``."""
    for pos in self.tree.treepositions('leaves'):
        cleaned = EDU_START_RE.sub("", self.tree[pos])
        cleaned = TRIPLE_ESCAPE_RE.sub('"', cleaned)
        self.tree[pos] = EDU_END_RE.sub("", cleaned)
def drop_unused_terms(self):
    '''Drop every term that has no entry in the priors, then reindex.

    Returns
    PriorFactory
    '''
    unused = set(self.term_doc_mat.get_terms()) - set(self.priors.index)
    self.term_doc_mat = self.term_doc_mat.remove_terms(unused)
    self._reindex_priors()
    return self
def get_self(self):
    """GetSelf.
    Read identity of the home tenant request user.
    :rtype: :class:`<IdentitySelf> <azure.devops.v5_0.identity.models.IdentitySelf>`
    """
    response = self._send(
        http_method='GET',
        location_id='4bb02b5b-c120-4be2-b68e-21f7c50a4b82',
        version='5.0',
    )
    return self._deserialize('IdentitySelf', response)
def default(self, obj):  # pylint: disable=method-hidden
    """Serialize objects exposing ``strftime`` (as an ISO-8601 UTC string)
    or ``get_public_dict``; otherwise defer to the stock JSONEncoder."""
    if hasattr(obj, 'strftime'):
        return obj.strftime("%Y-%m-%dT%H:%M:%SZ")
    if hasattr(obj, 'get_public_dict'):
        return obj.get_public_dict()
    return json.JSONEncoder.default(self, obj)
def transform(odtfile, debug=False, parsable=False, outputdir=None):
    """Convert an ODT file to CNXML.

    :param odtfile: path (or file object) of the ODT document to convert.
    :param debug: when True, record a marker in the error log before each
        pipeline pass.
    :param parsable: when True, keep the escaped (valid-XML) output
        instead of running the final RED-unescape pass.
    :param outputdir: optional directory where intermediate pass output
        and extracted drawings are written for inspection.
    :return: tuple ``(cnxml, images, errors)`` where *images* maps
        filename -> image bytes and *errors* is a list of log entries.
    """
    # Mapping of images extracted from the ODT file (and their bits).
    images = {}
    # Log of errors and warnings, e.g. the text produced by XSLT should be
    #   {'level': 'WARNING',
    #    'msg': 'Headings without text between them are not allowed',
    #    'id': 'import-auto-id2376'}
    # so a marker can be placed near the cnxml where issues arose.
    errors = []
    # Renamed from `zip` (shadowed the builtin) to odt_zip.
    odt_zip = zipfile.ZipFile(odtfile, 'r')
    content = odt_zip.read('content.xml')
    xml = etree.fromstring(content)

    def appendLog(xslDoc):
        # Collect structured log entries emitted by an XSLT pass.
        if hasattr(xslDoc, 'error_log'):
            for entry in xslDoc.error_log:
                # Entries are of the form:
                # {'level': 'ERROR', 'id': 'id1234', 'msg': 'Descriptive message'}
                text = entry.message
                try:
                    # Renamed from `dict` (shadowed the builtin).
                    parsed = json.loads(text)
                    errors.append(parsed)
                except ValueError:
                    errors.append({u'level': u'CRITICAL', u'id': u'(none)', u'msg': unicode(text)})

    def injectStyles(xml):
        # HACK - need to find the object location from the manifest...
        strStyles = odt_zip.read('styles.xml')
        parser = etree.XMLParser()
        parser.feed(strStyles)
        stylesXml = parser.close()
        for i, obj in enumerate(STYLES_XPATH(stylesXml)):
            xml.append(obj)
        return xml

    # All MathML is stored in separate files "Object#/content.xml".
    # This converter includes the MathML by looking up the file in the zip.
    def mathIncluder(xml):
        for i, obj in enumerate(MATH_XPATH(xml)):
            strMathPath = MATH_HREF_XPATH(obj)[0]
            # Or obj.get('{%s}href' % XLINK_NS)
            if strMathPath[0] == '#':
                strMathPath = strMathPath[1:]
            if strMathPath[0] == '.':
                # Remove leading './'; Zip doesn't like it.
                strMathPath = strMathPath[2:]
            # HACK - need to find the object location from the manifest...
            strMathPath = os.path.join(strMathPath, 'content.xml')
            strMath = odt_zip.read(strMathPath)
            math = etree.parse(StringIO(strMath)).getroot()
            # Replace the reference to the Math with the actual MathML.
            obj.getparent().replace(obj, math)
        return xml

    def imagePuller(xml):
        for i, obj in enumerate(IMAGE_XPATH(xml)):
            strPath = IMAGE_HREF_XPATH(obj)[0]
            strName = IMAGE_NAME_XPATH(obj)[0]
            fileNeedEnding = (strName.find('.') == -1)
            if fileNeedEnding:
                strName = strName + strPath[strPath.index('.'):]
            if strPath[0] == '#':
                strPath = strPath[1:]
            if strPath[0] == '.':
                # Remove leading './'; Zip doesn't like it.
                strPath = strPath[2:]
            images[strName] = odt_zip.read(strPath)
            # Later on, an XSL pass will convert the draw:frame to a
            # c:image and set the @src correctly.
        return xml

    def drawPuller(xml):
        styles = DRAW_STYLES_XPATH(xml)
        empty_odg_dirname = os.path.join(dirname, 'empty_odg_template')
        temp_dirname = tempfile.mkdtemp()
        for i, obj in enumerate(DRAW_XPATH(xml)):
            # Copy everything except content.xml from the empty ODG
            # (OOo Draw) template into a new zipfile.
            odg_filename = DRAW_FILENAME_PREFIX + str(i) + '.odg'
            png_filename = DRAW_FILENAME_PREFIX + str(i) + '.png'
            # Add the PNG filename as an attribute to the parent node;
            # the child (obj) itself is moved into the ODG below.
            parent = obj.getparent()
            parent.attrib['ooo_drawing'] = png_filename
            odg_zip = zipfile.ZipFile(os.path.join(temp_dirname, odg_filename), 'w', zipfile.ZIP_DEFLATED)
            for root, dirs, files in os.walk(empty_odg_dirname):
                for name in files:
                    # Copy everything inside the ZIP except content.xml
                    # and styles.xml (those are generated below).
                    if name not in ('content.xml', 'styles.xml'):
                        sourcename = os.path.join(root, name)
                        # http://stackoverflow.com/a/1193171/756056
                        # Path name inside the ZIP file;
                        # empty_odg_template is the root folder.
                        arcname = os.path.join(root[len(empty_odg_dirname):], name)
                        odg_zip.write(sourcename, arcname)
            content = etree.parse(os.path.join(empty_odg_dirname, 'content.xml'))
            # Inject content styles into the empty OOo Draw content.xml.
            content_style_xpath = etree.XPath('/office:document-content/office:automatic-styles', namespaces=NAMESPACES)
            content_styles = content_style_xpath(content)
            for style in styles:
                content_styles[0].append(deepcopy(style))
            # Inject the drawing into the empty OOo Draw content.xml.
            content_page_xpath = etree.XPath('/office:document-content/office:body/office:drawing/draw:page', namespaces=NAMESPACES)
            content_page = content_page_xpath(content)
            content_page[0].append(obj)
            # Write the modified content.xml.
            odg_zip.writestr('content.xml', etree.tostring(content, xml_declaration=True, encoding='UTF-8'))
            # Copy styles.xml from the odt to the odg without modification.
            odg_zip.writestr('styles.xml', odt_zip.read('styles.xml'))
            odg_zip.close()
            # TODO: Better error handling in the future.
            try:
                # Convert every odg to png via an OOo macro.
                command = '/usr/bin/soffice -headless -nologo -nofirststartwizard "macro:///Standard.Module1.SaveAsPNG(%s,%s)"' % (os.path.join(temp_dirname, odg_filename), os.path.join(temp_dirname, png_filename))
                os.system(command)
                # Save every image to memory.
                images[png_filename] = open(os.path.join(temp_dirname, png_filename), 'r').read()
                if outputdir is not None:
                    shutil.copy(os.path.join(temp_dirname, odg_filename), os.path.join(outputdir, odg_filename))
                    shutil.copy(os.path.join(temp_dirname, png_filename), os.path.join(outputdir, png_filename))
            except Exception:
                # Best-effort: a failed conversion must not abort the whole
                # transform (narrowed from a bare except).
                pass
        # Delete the temporary directory.
        shutil.rmtree(temp_dirname)
        return xml

    # Reparse after XSL because the RED-escape pass injects arbitrary XML.
    def redParser(xml):
        xsl = makeXsl('pass1_odt2red-escape.xsl')
        result = xsl(xml)
        appendLog(xsl)
        try:
            xml = etree.fromstring(etree.tostring(result))
        except etree.XMLSyntaxError as e:
            msg = str(e)
            xml = makeXsl('pass1_odt2red-failed.xsl')(xml, message="'%s'" % msg.replace("'", '"'))
            xml = xml.getroot()
        return xml

    def replaceSymbols(xml):
        xmlstr = etree.tostring(xml)
        xmlstr = symbols.replace(xmlstr)
        return etree.fromstring(xmlstr)

    PIPELINE = [
        drawPuller,       # gets OOo Draw objects out of odt, generates odg (OOo Draw) files
        replaceSymbols,
        injectStyles,     # include styles.xml because it contains list numbering info
        makeXsl('pass2_odt-normalize.xsl'),  # Run 2x to fix headings: in the
        makeXsl('pass2_odt-normalize.xsl'),  # worst case all headings are 9 and
                                             # need to be 1. See (testbed) southwood__Lesson_2.doc
        makeXsl('pass2_odt-collapse-spans.xsl'),  # Collapse adjacent spans (for RED)
        redParser,        # makeXsl('pass1_odt2red-escape.xsl')
        makeXsl('pass4_odt-headers.xsl'),
        imagePuller,      # Need to run before math because both have a <draw:image> (see xpath)
        mathIncluder,
        makeXsl('pass7_odt2cnxml.xsl'),
        makeXsl('pass8_cnxml-cleanup.xsl'),
        makeXsl('pass8.5_cnxml-cleanup.xsl'),
        makeXsl('pass9_id-generation.xsl'),
        makeXsl('pass10_processing-instruction-logger.xsl'),
    ]
    # "xml" variable gets replaced during each iteration.
    passNum = 0
    for xslDoc in PIPELINE:
        if debug:
            errors.append("DEBUG: Starting pass %d" % passNum)
        xml = xslDoc(xml)
        appendLog(xslDoc)
        if outputdir is not None:
            writeXMLFile(os.path.join(outputdir, 'pass%d.xml' % passNum), xml)
        passNum += 1
    # In most cases (EIP) invalid XML is preferable over valid but escaped XML.
    if not parsable:
        xml = (makeXsl('pass11_red-unescape.xsl'))(xml)
    return (xml, images, errors)
def download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):
    """Stream a (possibly GB-sized) file over https into *data_path*.

    Uses ``stream=True`` and a reasonable chunk size to be able to
    download large files; skips the download when a local file of the
    expected size already exists.

    :param url: remote URL (Dropbox ``?dl=0`` links are rewritten to
        force a direct download).
    :param filename: local filename; derived from the URL when None.
    :param size: expected size in bytes; read from Content-Length when None.
    :return: path of the downloaded (or already-present) file.
    """
    if filename is None:
        filename = dropbox_basename(url)
    file_path = os.path.join(data_path, filename)
    if url.endswith('?dl=0'):
        # Flip the Dropbox "preview" flag for a noninteractive download.
        url = url[:-1] + '1'
    if verbose:
        # NOTE(review): tqdm_prog is assigned but never used below —
        # presumably leftover from a progress-bar version; TODO confirm.
        tqdm_prog = tqdm
        print('requesting URL: {}'.format(url))
    else:
        tqdm_prog = no_tqdm
    r = requests.get(url, stream=True, allow_redirects=True, timeout=5)
    if size is None:
        size = r.headers.get('Content-Length', None)
    # Bug fix: Content-Length is a *string*; normalize to int so the
    # size comparison below can succeed (it previously always failed,
    # forcing a re-download on every call).
    if size is not None:
        size = int(size)
    print('remote size: {}'.format(size))
    stat = path_status(file_path)
    print('local size: {}'.format(stat.get('size', None)))
    # TODO: check md5 or get the right size of remote file
    if stat.get('type') == 'file' and stat.get('size') == size:
        r.close()
        return file_path
    print('Downloading to {}'.format(file_path))
    with open(file_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=chunk_size):
            if chunk:  # filter out keep-alive chunks
                f.write(chunk)
    r.close()
    return file_path
def write_transparency ( selection ) :
"""writes transparency as rgba to ~ / . Xresources""" | global themefile , transparency , prefix
if themefile == "" :
return
lines = themefile . split ( '\n' )
for line in lines :
if 'background' in line . lower ( ) :
try :
background = line . split ( ':' ) [ 1 ] . replace ( ' ' , '' )
background = background . replace ( '\t' , '' )
break
except :
msg = ( 'Cannot determine background color from themefile. ' 'Defaulting to: #000000' )
print msg
background = '#000000'
break
else :
background = '#000000'
background = hex_to_rgb ( background )
fd , tmpfile = tempfile . mkstemp ( )
if exists ( XRESOURCES ) :
old = open ( XRESOURCES )
new = os . fdopen ( fd , 'w' )
for line in old :
lline = line . lower ( )
if 'depth' in lline :
continue
elif 'rgba' in lline :
continue
elif line == '\n' :
continue
else :
os . write ( fd , line )
os . write ( fd , '\n%s.depth:\t32' % prefix )
os . write ( fd , '\n%s.background:\trgba:%s/%s\n' % ( prefix , background , decimal_to_alpha ( transparency ) ) )
old . close ( )
new . close ( )
move ( tmpfile , XRESOURCES ) |
def _read_lnk(tokens):
    """Read and return the pred's Lnk object, if a lnk is specified.

    Recognizes <FROM:TO>, <FROM#TO>, <TOK...> and <@EDGE> forms; returns
    None when no lnk (or an empty <>) is present. Consumes the lnk
    tokens, including the closing '>', from the deque.
    """
    lnk = None
    if tokens[0] == '<':
        tokens.popleft()  # we just checked this is a left angle
        if tokens[0] == '>':
            # empty <> brackets; same as no lnk specified
            pass
        elif tokens[0] == '@':
            # edge lnk: ['@', EDGE, ...]; edge lnks only have one number
            tokens.popleft()  # remove the @
            lnk = Lnk.edge(tokens.popleft())
        elif tokens[1] == ':':
            # character span lnk: [FROM, ':', TO, ...]
            lnk = Lnk.charspan(tokens.popleft(), tokens[1])
            tokens.popleft()  # this should be the colon
            tokens.popleft()  # and this is the cto
        elif tokens[1] == '#':
            # chart vertex range lnk: [FROM, '#', TO, ...]
            lnk = Lnk.chartspan(tokens.popleft(), tokens[1])
            tokens.popleft()  # this should be the hash
            tokens.popleft()  # and this is the to vertex
        else:
            # tokens lnk: [(TOK,)+ ...]
            lnkdata = []
            while tokens[0] != '>':
                lnkdata.append(int(tokens.popleft()))
            lnk = Lnk.tokens(lnkdata)
        _read_literals(tokens, '>')
    return lnk
def authenticate(self, credentials):
    """Log in to the server and remember *credentials* in ``authset``.

    Can raise ConnectionFailure or OperationFailure.

    :Parameters:
      - `credentials`: A MongoCredential.
    """
    auth.authenticate(credentials, self)
    self.authset.add(credentials)
def network_interface_create_or_update(name, ip_configurations, subnet, virtual_network, resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Create or update a network interface within a specified resource group.

    :param name: The name of the network interface to create.

    :param ip_configurations: A list of dictionaries representing valid
        NetworkInterfaceIPConfiguration objects. The 'name' key is required
        at minimum. At least one IP Configuration must be present.

    :param subnet: The name of the subnet assigned to the network interface.

    :param virtual_network: The name of the virtual network assigned to
        the subnet.

    :param resource_group: The resource group name assigned to the
        virtual network.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.network_interface_create_or_update test-iface0 [{'name': 'testipconfig1'}] testsubnet testnet testgroup
    '''
    # Default the location to that of the resource group when not given.
    if 'location' not in kwargs:
        rg_props = __salt__['azurearm_resource.resource_group_get'](resource_group, **kwargs)
        if 'error' in rg_props:
            log.error('Unable to determine location from resource group specified.')
            return False
        kwargs['location'] = rg_props['location']

    netconn = __utils__['azurearm.get_client']('network', **kwargs)

    # Use NSG name to link to the ID of an existing NSG.
    if kwargs.get('network_security_group'):
        nsg = network_security_group_get(
            name=kwargs['network_security_group'],
            resource_group=resource_group,
            **kwargs
        )
        if 'error' not in nsg:
            kwargs['network_security_group'] = {'id': str(nsg['id'])}

    # Use VM name to link to the ID of an existing VM.
    if kwargs.get('virtual_machine'):
        vm_instance = __salt__['azurearm_compute.virtual_machine_get'](
            name=kwargs['virtual_machine'],
            resource_group=resource_group,
            **kwargs
        )
        if 'error' not in vm_instance:
            kwargs['virtual_machine'] = {'id': str(vm_instance['id'])}

    # Loop through IP Configurations and build each dictionary to pass to
    # model creation.
    if isinstance(ip_configurations, list):
        subnet = subnet_get(
            name=subnet,
            virtual_network=virtual_network,
            resource_group=resource_group,
            **kwargs
        )
        if 'error' not in subnet:
            subnet = {'id': str(subnet['id'])}
            for ipconfig in ip_configurations:
                if 'name' in ipconfig:
                    ipconfig['subnet'] = subnet
                    if isinstance(ipconfig.get('application_gateway_backend_address_pools'), list):
                        # TODO: Add ID lookup for referenced object names
                        pass
                    if isinstance(ipconfig.get('load_balancer_backend_address_pools'), list):
                        # TODO: Add ID lookup for referenced object names
                        pass
                    if isinstance(ipconfig.get('load_balancer_inbound_nat_rules'), list):
                        # TODO: Add ID lookup for referenced object names
                        pass
                    if ipconfig.get('public_ip_address'):
                        pub_ip = public_ip_address_get(
                            name=ipconfig['public_ip_address'],
                            resource_group=resource_group,
                            **kwargs
                        )
                        if 'error' not in pub_ip:
                            ipconfig['public_ip_address'] = {'id': str(pub_ip['id'])}

    try:
        nicmodel = __utils__['azurearm.create_object_model'](
            'network',
            'NetworkInterface',
            ip_configurations=ip_configurations,
            **kwargs
        )
    except TypeError as exc:
        result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
        return result

    try:
        interface = netconn.network_interfaces.create_or_update(
            resource_group_name=resource_group,
            network_interface_name=name,
            parameters=nicmodel
        )
        interface.wait()
        nic_result = interface.result()
        result = nic_result.as_dict()
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        result = {'error': str(exc)}
    except SerializationError as exc:
        result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}

    return result
def install(cls, handler, fmt, programname=None, style=DEFAULT_FORMAT_STYLE):
    """Install the :class:`ProgramNameFilter` (only if needed).

    :param fmt: The log format string to check for ``%(programname)``.
    :param style: One of the characters ``%``, ``{`` or ``$`` (defaults to
                  :data:`DEFAULT_FORMAT_STYLE`).
    :param handler: The logging handler on which to install the filter.
    :param programname: Refer to :func:`__init__()`.

    If `fmt` is given the filter will only be installed if `fmt` uses the
    ``programname`` field. If `fmt` is not given the filter is installed
    unconditionally.
    """
    needed = True
    if fmt:
        needed = FormatStringParser(style=style).contains_field(fmt, 'programname')
    if needed:
        handler.addFilter(cls(programname))
def all(self):
    """Return every stored entry for this profile attribute.

    :rtype: list(dict)
    """
    key = self._attr[0]
    return self.profile.data.get(key, [])
def to_xml(self, tag_name="buyer"):
    '''Returns an XMLi representation of the object.

    @param tag_name: str Tag name
    @return: Element
    '''
    # Validate required attributes before building any XML.
    for attr_name, attr_value in {"name": self.name, "address": self.address}.items():
        if is_empty_or_none(attr_value):
            raise ValueError("'%s' attribute cannot be empty or None." % attr_name)
    if self.__require_id and is_empty_or_none(self.identifier):
        raise ValueError("identifier attribute cannot be empty or None.")
    doc = Document()
    root = doc.createElement(tag_name)
    self._create_text_node(root, "id", self.identifier)
    self._create_text_node(root, "name", self.name, True)
    if self.phone:
        self._create_text_node(root, "phone", self.phone, True)
    root.appendChild(self.address.to_xml())
    return root
def _fetch_stock_data ( self , stock_list ) :
"""获取股票信息""" | pool = multiprocessing . pool . ThreadPool ( len ( stock_list ) )
try :
res = pool . map ( self . get_stocks_by_range , stock_list )
finally :
pool . close ( )
return [ d for d in res if d is not None ] |
def get_country(similar=False, **kwargs):
    """Get a country for pycountry.

    With ``similar=True`` a substring search on the country name is
    performed; otherwise an exact pycountry lookup. Returns None (and
    logs) when nothing matches.
    """
    result_country = None
    try:
        if not similar:
            result_country = countries.get(**kwargs)
        else:
            wanted = kwargs.get('name', '')
            for country in countries:
                if wanted in country.name:
                    result_country = country
                    break
    except Exception as ex:
        msg = ('Country not found in pycountry with params introduced'
               ' - {}'.format(ex))
        logger.error(msg, params=kwargs)
    return result_country
def init_from_datastore(self):
    """Initializes batches by reading from the datastore."""
    self._data = {}
    # First pass: one entry per batch entity, keyed by the last path element.
    for entity in self._datastore_client.query_fetch(kind=self._entity_kind_batches):
        batch_id = entity.key.flat_path[-1]
        batch = dict(entity)
        batch['images'] = {}
        self._data[batch_id] = batch
    # Second pass: attach each image entity to its parent batch.
    for entity in self._datastore_client.query_fetch(kind=self._entity_kind_images):
        batch_id = entity.key.flat_path[-3]
        image_id = entity.key.flat_path[-1]
        self._data[batch_id]['images'][image_id] = dict(entity)
def get_move_data(move):
    """Return the data for the given move name. Check moves.json in the same directory.

    :param move: move name used as a key into moves.json.
    :raises KeyError: if *move* is not present in moves.json.
    """
    moves_path = path.join(path.dirname(__file__), 'moves.json')
    try:
        f = open(moves_path, 'r')
    except IOError:
        # moves.json is not cached yet: download it, then retry the open.
        get_moves()
        f = open(moves_path, 'r')
    # Bug fix: the original returned from inside a `finally` block, which
    # silently swallowed any in-flight exception (and raised NameError on
    # `f` when the retry path failed). Read after the try/except instead.
    with f:
        return json.load(f)[move]
def set_readable_web_pdf(self, value):
    '''Setter for the readable-web-PDF backend.

    :param value: a ReadableWebPDF instance, or None to clear it.
    :raises TypeError: if *value* is neither None nor a ReadableWebPDF.
    '''
    # Check for None first so the isinstance test is short-circuited when
    # clearing the attribute (the original's `isinstance(...) is False`
    # form always evaluated isinstance, even for None).
    if value is not None and not isinstance(value, ReadableWebPDF):
        raise TypeError("The type of __readable_web_pdf must be ReadableWebPDF.")
    self.__readable_web_pdf = value
def getConfiguration(self):
    """Get the current configuration number for this device."""
    config = c_int()
    mayRaiseUSBError(
        libusb1.libusb_get_configuration(self.__handle, byref(config)),
    )
    return config.value
def svg_data_uri(self, xmldecl=False, encode_minimal=False, omit_charset=False, nl=False, **kw):
    """Converts the QR Code into a SVG data URI.

    The XML declaration is omitted by default (set ``xmldecl`` to ``True``
    to enable it), further the newline is omitted by default (set ``nl``
    to ``True`` to enable it).

    Aside from the missing ``out`` parameter and the different ``xmldecl``
    and ``nl`` default values and the additional parameters
    ``encode_minimal`` and ``omit_charset`` this method uses the same
    parameters as the usual SVG serializer.

    :param bool xmldecl: Indicates if the XML declaration should be
        serialized (default: ``False``)
    :param bool encode_minimal: Indicates if the resulting data URI should
        use minimal percent encoding (disabled by default).
    :param bool omit_charset: Indicates if the ``;charset=...`` should be
        omitted (disabled by default)
    :rtype: str
    """
    return writers.as_svg_data_uri(
        self.matrix,
        self._version,
        xmldecl=xmldecl,
        nl=nl,
        encode_minimal=encode_minimal,
        omit_charset=omit_charset,
        **kw
    )
def match_via_correlation(image, template, raw_tolerance=1, normed_tolerance=0.9):
    """Matching algorithm based on normalised cross correlation.

    Using this matching prevents false positives occurring for bright
    patches in the image.
    """
    img_h, img_w = image.shape
    tmpl_h, tmpl_w = template.shape
    # FFT-based convolution enables fast matching of large images.
    correlation = fftconvolve(image, template[::-1, ::-1])
    # fftconvolve returns an image of width (tmpl_w-1) + img_w + (tmpl_w-1),
    # likewise for height, so trim back to the valid region.
    correlation = correlation[tmpl_h - 1:img_h, tmpl_w - 1:img_w]
    # Find image regions which are potentially matches.
    match_position_dict = get_tiles_at_potential_match_regions(
        image, template, correlation, raw_tolerance=raw_tolerance)
    # Bright spots in images can lead to false positives; the
    # normalisation carried out here eliminates those.
    return normalise_correlation(
        match_position_dict, correlation, template, normed_tolerance=normed_tolerance)
def register_button_handler(self, button_handler, buttons):
    """Register a handler function which will be called when a button is pressed.

    :param button_handler:
        A function called with the pressed Button as its sole argument
        whenever any of the specified buttons is pressed.
    :param buttons:
        A single :class:`approxeng.input.Button` or a list of them (in
        controller implementations these are exposed as constants such as
        SixAxis.BUTTON_CIRCLE).
    :return:
        A no-arg function which removes this registration.
    """
    button_list = buttons if isinstance(buttons, list) else [buttons]
    for button in button_list:
        state = self.buttons.get(button)
        if state is not None:
            state.button_handlers.append(button_handler)

    def deregister():
        # Undo exactly the registrations made above.
        for button in button_list:
            state = self.buttons.get(button)
            if state is not None:
                state.button_handlers.remove(button_handler)

    return deregister
def get_probs(self, sampler=None, rerun=None, store=True):
    """Return measurement probabilities, using the cache unless a rerun
    is requested (passing an explicit sampler implies a rerun)."""
    if rerun is None:
        rerun = sampler is not None
    if not rerun and self._probs is not None:
        return self._probs
    active_sampler = sampler if sampler is not None else self.vqe.sampler
    probs = active_sampler(self.circuit, range(self.circuit.n_qubits))
    if store:
        self._probs = probs
    return probs
def get_next(self):
    """Get the billing cycle after this one. May return None."""
    later_cycles = BillingCycle.objects.filter(date_range__gt=self.date_range)
    return later_cycles.order_by('date_range').first()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.