idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
242,800
def flush(self):
    """Empty the local queue and send its elements to be executed remotely."""
    for elem in self:
        # Futures that originated on another worker lose their local
        # bookkeeping entry before being shipped out.
        if elem.id[0] != scoop.worker:
            elem._delete()
        self.socket.sendFuture(elem)
    # The queue containers are now empty locally.
    self.ready.clear()
    self.movable.clear()
Empty the local queue and send its elements to be executed remotely .
57
13
242,801
def updateQueue(self):
    """Process inbound communication buffer.

    Updates the local queue with elements from the broker.
    """
    for future in self.socket.recvFuture():
        if future._ended():
            # If the answer is coming back, update its entry
            try:
                thisFuture = scoop._control.futureDict[future.id]
            except KeyError:
                # Already received?
                scoop.logger.warn('{0}: Received an unexpected future: '
                                  '{1}'.format(scoop.worker, future.id))
                continue
            # Copy the remote result into the locally-known future object.
            thisFuture.resultValue = future.resultValue
            thisFuture.exceptionValue = future.exceptionValue
            thisFuture.executor = future.executor
            thisFuture.isDone = future.isDone
            # Execute standard callbacks here (on parent)
            thisFuture._execute_callbacks(CallbackType.standard)
            self.append(thisFuture)
            future._delete()
        elif future.id not in scoop._control.futureDict:
            # First time we see this future: register it, then queue it.
            scoop._control.futureDict[future.id] = future
            self.append(scoop._control.futureDict[future.id])
        else:
            self.append(scoop._control.futureDict[future.id])
Process inbound communication buffer . Updates the local queue with elements from the broker .
247
16
242,802
def sendResult(self, future):
    """Send back results to broker for distribution to parent task."""
    # Drop the greenlet reference first: greenlets cannot be pickled.
    future.greenlet = None
    assert future._ended(), "The results are not valid"
    self.socket.sendResult(future)
Send back results to broker for distribution to parent task .
46
11
242,803
def shutdown(self):
    """Shutdown the resources used by the queue."""
    self.socket.shutdown()
    if scoop:
        if scoop.DEBUG:
            # Persist collected statistics before the worker goes away.
            from scoop import _debug
            _debug.writeWorkerDebug(
                scoop._control.debug_stats,
                scoop._control.QueueLength,
            )
Shutdown the resources used by the queue
53
10
242,804
def redirectSTDOUTtoDebugFile():
    """Redirects the stdout and stderr of the current process to a file."""
    import sys
    kwargs = {}
    if sys.version_info >= (3,):
        # Python 3 text-mode files take an explicit encoding argument.
        kwargs["encoding"] = "utf8"
    sys.stdout = open(
        os.path.join(
            getDebugDirectory(),
            "{0}.stdout".format(getDebugIdentifier()),
        ),
        "w",
        1,  # Buffering by line
        **kwargs
    )
    sys.stderr = open(
        os.path.join(
            getDebugDirectory(),
            "{0}.stderr".format(getDebugIdentifier()),
        ),
        "w",
        1,  # Buffering by line
        **kwargs
    )
Redirects the stdout and stderr of the current process to a file .
155
18
242,805
def writeWorkerDebug(debugStats, queueLength, path_suffix=""):
    """Serialize the execution data using pickle and write it into the
    debug directory.

    :param debugStats: statistics object to dump to the *-STATS file.
    :param queueLength: queue-length history to dump to the *-QUEUE file.
    :param path_suffix: optional subdirectory inside the debug directory.
    """
    createDirectory(path_suffix)
    # The origin worker gets a distinguishing filename prefix.
    origin_prefix = "origin-" if scoop.IS_ORIGIN else ""
    statsFilename = os.path.join(
        getDebugDirectory(),
        path_suffix,
        "{1}worker-{0}-STATS".format(getDebugIdentifier(), origin_prefix)
    )
    lengthFilename = os.path.join(
        getDebugDirectory(),
        path_suffix,
        "{1}worker-{0}-QUEUE".format(getDebugIdentifier(), origin_prefix)
    )
    with open(statsFilename, 'wb') as f:
        pickle.dump(debugStats, f)
    with open(lengthFilename, 'wb') as f:
        pickle.dump(queueLength, f)
Serialize the execution data using pickle and writes it into the debug directory .
186
16
242,806
def main():
    """Execution of the SCOOP module.

    Parses its command-line arguments and launches needed resources.
    Exits the process with the root task's exit code when it is non-zero.
    """
    # Generate a argparse parser and parse the command-line arguments
    parser = makeParser()
    args = parser.parse_args()
    # Get a list of resources to launch worker(s) on
    hosts = utils.getHosts(args.hostfile, args.hosts)
    if args.n:
        n = args.n
    else:
        n = utils.getWorkerQte(hosts)
    assert n >= 0, (
        "Scoop couldn't determine the number of worker to start.\n"
        "Use the '-n' flag to set it manually."
    )
    if not args.external_hostname:
        args.external_hostname = [utils.externalHostname(hosts)]
    # Launch SCOOP
    thisScoopApp = ScoopApp(hosts, n, args.b,
                            args.verbose if not args.quiet else 0,
                            args.python_interpreter,
                            args.external_hostname[0],
                            args.executable, args.args, args.tunnel,
                            args.path, args.debug, args.nice,
                            utils.getEnv(), args.profile,
                            args.pythonpath[0], args.prolog[0],
                            args.backend, args.rsh, args.ssh_executable)
    rootTaskExitCode = False
    interruptPreventer = Thread(target=thisScoopApp.close)
    try:
        rootTaskExitCode = thisScoopApp.run()
    except Exception as e:
        logging.error('Error while launching SCOOP subprocesses:')
        logging.error(traceback.format_exc())
        rootTaskExitCode = -1
    finally:
        # This should not be interrupted (ie. by a KeyboadInterrupt)
        # The only cross-platform way to do it I found was by using a thread.
        interruptPreventer.start()
        interruptPreventer.join()
    # Exit with the proper exit code
    if rootTaskExitCode:
        sys.exit(rootTaskExitCode)
Execution of the SCOOP module . Parses its command-line arguments and launches needed resources .
431
21
242,807
def initLogging(self):
    """Configures the logger.

    Returns:
        A logger named after the concrete class of *self*; the root logging
        level is derived from ``self.verbose`` (0 = WARNING, 1 = INFO,
        2 or more = DEBUG).
    """
    verbose_levels = {
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.DEBUG,
    }
    # Clamp verbosity into [0, 2]: the original indexed the dict directly,
    # so a verbosity of 3 ("-vvv") raised KeyError instead of meaning DEBUG.
    level = verbose_levels[min(max(self.verbose, 0), 2)]
    logging.basicConfig(
        level=level,
        format="[%(asctime)-15s] %(module)-9s %(levelname)-7s %(message)s"
    )
    return logging.getLogger(self.__class__.__name__)
Configures the logger .
100
5
242,808
def divideHosts ( self , hosts , qty ) : maximumWorkers = sum ( host [ 1 ] for host in hosts ) # If specified amount of workers is greater than sum of each specified. if qty > maximumWorkers : index = 0 while qty > maximumWorkers : hosts [ index ] = ( hosts [ index ] [ 0 ] , hosts [ index ] [ 1 ] + 1 ) index = ( index + 1 ) % len ( hosts ) maximumWorkers += 1 # If specified amount of workers if lower than sum of each specified. elif qty < maximumWorkers : while qty < maximumWorkers : maximumWorkers -= hosts [ - 1 ] [ 1 ] if qty > maximumWorkers : hosts [ - 1 ] = ( hosts [ - 1 ] [ 0 ] , qty - maximumWorkers ) maximumWorkers += hosts [ - 1 ] [ 1 ] else : del hosts [ - 1 ] # Checking if the broker if externally routable if self . externalHostname in utils . loopbackReferences and len ( hosts ) > 1 and not self . tunnel : raise Exception ( "\n" "Could not find route from external worker to the " "broker: Unresolvable hostname or IP address.\n " "Please specify your externally routable hostname " "or IP using the --external-hostname parameter or " "use the --tunnel flag." ) return hosts
Divide processes among hosts .
299
6
242,809
def showHostDivision(self, headless):
    """Show the worker distribution over the hosts.

    :param headless: True when the pool runs headless; affects how the
        origin worker is accounted for in the displayed counts.
    """
    # Fix: the original header string read 'Worker d--istribution: ',
    # an apparent typo for 'distribution'.
    scoop.logger.info('Worker distribution: ')
    for worker, number in self.worker_hosts:
        # The first entry of worker_hosts also hosts the origin worker.
        first_worker = (worker == self.worker_hosts[0][0])
        scoop.logger.info(' {0}:\t{1} {2}'.format(
            worker,
            number - 1 if first_worker or headless else str(number),
            "+ origin" if first_worker or headless else "",
        ))
Show the worker distribution over the hosts .
115
8
242,810
def setWorkerInfo(self, hostname, workerAmount, origin):
    """Sets the worker information for the current host.

    :param hostname: host the worker runs on.
    :param workerAmount: number of workers to launch on that host.
    :param origin: True when this worker is the origin worker.
    """
    scoop.logger.debug('Initialising {0}{1} worker {2} [{3}].'.format(
        "local" if hostname in utils.localHostnames else "remote",
        " origin" if origin else "",
        self.workersLeft,
        hostname,
    ))
    add_args, add_kwargs = self._setWorker_args(origin)
    # Configure the most recently appended worker object.
    self.workers[-1].setWorker(*add_args, **add_kwargs)
    self.workers[-1].setWorkerAmount(workerAmount)
Sets the worker information for the current host .
140
10
242,811
def close(self):
    """Subprocess cleanup.

    Terminates every spawned worker and broker subprocess.
    """
    # Give time to flush data if debug was on
    if self.debug:
        time.sleep(10)
    # Terminate workers
    for host in self.workers:
        host.close()
    # Terminate the brokers
    for broker in self.brokers:
        try:
            broker.close()
        except AttributeError:
            # Broker was not started (probably mislaunched)
            pass
    scoop.logger.info('Finished cleaning spawned subprocesses.')
Subprocess cleanup .
96
4
242,812
def processConfig(self, worker_config):
    """Update the pool configuration with a worker configuration.

    A single headless worker makes the whole pool headless, which in turn
    triggers the network discovery advertisement.
    """
    self.config['headless'] |= worker_config.get("headless", False)
    if self.config['headless']:
        # Launch discovery process
        if not self.discovery_thread:
            self.discovery_thread = discovery.Advertise(
                port=",".join(str(a) for a in self.getPorts()),
            )
Update the pool configuration with a worker configuration .
94
9
242,813
def main(self):
    """Bootstrap an arbitrary script.

    If no arguments were passed, use the discovery module to search for
    and connect to a broker.
    """
    if self.args is None:
        self.parse()
    self.log = utils.initLogging(self.verbose)
    # Change to the desired directory
    if self.args.workingDirectory:
        os.chdir(self.args.workingDirectory)
    if not self.args.brokerHostname:
        self.log.info("Discovering SCOOP Brokers on network...")
        pools = discovery.Seek()
        if not pools:
            self.log.error("Could not find a SCOOP Broker broadcast.")
            sys.exit(-1)
        self.log.info("Found a broker named {name} on {host} port "
                      "{ports}".format(
                          name=pools[0].name,
                          host=pools[0].host,
                          ports=pools[0].ports,
                      ))
        self.args.brokerHostname = pools[0].host
        self.args.taskPort = pools[0].ports[0]
        # NOTE(review): metaPort is also assigned ports[0], not ports[1] —
        # confirm this is intentional.
        self.args.metaPort = pools[0].ports[0]
        self.log.debug("Using following addresses:\n{brokerAddress}\n"
                       "{metaAddress}".format(
                           brokerAddress=self.args.brokerAddress,
                           metaAddress=self.args.metaAddress,
                       ))
        # A self-discovered worker acts as the origin.
        self.args.origin = True
    self.setScoop()
    self.run()
Bootstrap an arbitrary script . If no arguments were passed , use the discovery module to search for and connect to a broker .
294
24
242,814
def makeParser(self):
    """Generate the argparse parser object containing the bootloader
    accepted parameters.

    Stores the parser on ``self.parser``.
    """
    self.parser = argparse.ArgumentParser(
        description='Starts the executable.',
        prog=("{0} -m scoop.bootstrap").format(sys.executable))
    self.parser.add_argument('--origin',
                             help="To specify that the worker is the origin",
                             action='store_true')
    self.parser.add_argument('--brokerHostname',
                             help="The routable hostname of a broker",
                             default="")
    self.parser.add_argument('--externalBrokerHostname',
                             help="Externally routable hostname of local "
                                  "worker",
                             default="")
    self.parser.add_argument('--taskPort',
                             help="The port of the broker task socket",
                             type=int)
    self.parser.add_argument('--metaPort',
                             help="The port of the broker meta socket",
                             type=int)
    self.parser.add_argument('--size',
                             help="The size of the worker pool",
                             type=int,
                             default=1)
    self.parser.add_argument('--nice',
                             help="Adjust the niceness of the process",
                             type=int,
                             default=0)
    self.parser.add_argument('--debug',
                             help="Activate the debug",
                             action='store_true')
    self.parser.add_argument('--profile',
                             help="Activate the profiler",
                             action='store_true')
    self.parser.add_argument('--workingDirectory',
                             help="Set the working directory for the "
                                  "execution",
                             default=os.path.expanduser("~"))
    self.parser.add_argument('--backend',
                             help="Choice of communication backend",
                             choices=['ZMQ', 'TCP'],
                             default='ZMQ')
    self.parser.add_argument('executable',
                             nargs='?',
                             help='The executable to start with scoop')
    self.parser.add_argument('args',
                             nargs=argparse.REMAINDER,
                             help='The arguments to pass to the executable',
                             default=[])
    self.parser.add_argument('--verbose', '-v',
                             action='count',
                             help=("Verbosity level of this launch script"
                                   "(-vv for more)"),
                             default=0)
Generate the argparse parser object containing the bootloader accepted parameters
545
13
242,815
def parse(self):
    """Generate an argparse parser and parse the command-line arguments."""
    # Build the parser lazily on first use.
    if self.parser is None:
        self.makeParser()
    parsed = self.parser.parse_args()
    self.args = parsed
    self.verbose = parsed.verbose
Generate an argparse parser and parse the command-line arguments
43
13
242,816
def setScoop(self):
    """Setup the SCOOP constants.

    Copies the parsed command-line arguments into the module-level scoop
    configuration and optionally renices the current process.

    :raises ImportError: when --nice is requested but psutil is missing.
    """
    scoop.IS_RUNNING = True
    scoop.IS_ORIGIN = self.args.origin
    scoop.BROKER = BrokerInfo(
        self.args.brokerHostname,
        self.args.taskPort,
        self.args.metaPort,
        # Fall back on the plain broker hostname when no external one given.
        self.args.externalBrokerHostname
        if self.args.externalBrokerHostname
        else self.args.brokerHostname,
    )
    scoop.SIZE = self.args.size
    scoop.DEBUG = self.args.debug
    scoop.MAIN_MODULE = self.args.executable
    scoop.CONFIGURATION = {
        # No executable means the pool runs headless.
        'headless': not bool(self.args.executable),
        'backend': self.args.backend,
    }
    scoop.WORKING_DIRECTORY = self.args.workingDirectory
    scoop.logger = self.log
    if self.args.nice:
        if not psutil:
            scoop.logger.error("psutil not installed.")
            raise ImportError("psutil is needed for nice functionnality.")
        p = psutil.Process(os.getpid())
        # NOTE(review): set_nice() is the pre-2.0 psutil API; newer psutil
        # exposes this as p.nice(value) — confirm the supported version.
        p.set_nice(self.args.nice)
    if scoop.DEBUG or self.args.profile:
        from scoop import _debug
        if scoop.DEBUG:
            _debug.createDirectory()
Setup the SCOOP constants .
275
7
242,817
def myFunc(parameter):
    """This function will be executed on the remote host even if it was
    not available at launch.
    """
    print('Hello World from {0}!'.format(scoop.worker))
    # Shared constants are reachable from any worker.
    myVar = shared.getConst('myVar')
    print(myVar[2])
    # The parameter travels like in any ordinary function call.
    return parameter + 1
This function will be executed on the remote host even if it was not available at launch .
61
18
242,818
def sendResult(self, future):
    """Send a terminated future back to its parent."""
    # Work on a shallow copy so the caller keeps its future intact.
    outbound = copy.copy(future)
    # Remove the (now) extraneous elements from future class
    outbound.callable = None
    outbound.args = None
    outbound.kargs = None
    outbound.greenlet = None
    if not outbound.sendResultBack:
        # Don't reply back the result if it isn't asked
        outbound.resultValue = None
    payload = pickle.dumps(outbound, pickle.HIGHEST_PROTOCOL)
    self._sendReply(outbound.id.worker, payload)
Send a terminated future back to its parent .
108
9
242,819
def getSize(string):
    """This function opens a web site and then calculates the total size
    of the page in bytes. This is for the sake of the example. Do not use
    this technique in real code as it is not a very bright way to do this.
    """
    try:
        # We open the web page with a 1 second timeout and stream it
        # line by line, accumulating the byte count.
        with urllib.request.urlopen(string, None, 1) as f:
            total = 0
            for line in f:
                total += len(line)
            return total
    except (urllib.error.URLError, socket.timeout):
        # Unreachable or too slow: report a size of zero.
        return 0
This function opens a web site and then calculates the total size of the page in bytes . This is for the sake of the example . Do not use this technique in real code as it is not a very bright way to do this .
68
47
242,820
def getValue(words):
    """Computes the sum of the values of the words.

    :param words: iterable of strings.
    :returns: the total value of every letter of every word.
    """
    # shared.getConst will evaluate to the dictionary broadcasted by
    # the root Future.  Fetch it once instead of once per letter as the
    # original did — the constant does not change between lookups.
    lettersValue = shared.getConst('lettersValue')
    value = 0
    for word in words:
        for letter in word:
            value += lettersValue[letter]
    return value
Computes the sum of the values of the words .
54
11
242,821
def _run_module_code(code, init_globals=None,
                     mod_name=None, mod_fname=None,
                     mod_loader=None, pkg_name=None):
    """Helper to run code in new namespace with sys modified.

    Returns a copy of the resulting module globals.
    """
    with _ModifiedArgv0(mod_fname):
        with _TempModule(mod_name) as temp_module:
            mod_globals = temp_module.module.__dict__
            _run_code(code, mod_globals, init_globals,
                      mod_name, mod_fname, mod_loader, pkg_name)
    # Copy the globals of the temporary module, as they
    # may be cleared when the temporary module goes away
    return mod_globals.copy()
Helper to run code in new namespace with sys modified
157
10
242,822
def run_module(mod_name, init_globals=None,
               run_name=None, alter_sys=False):
    """Execute a module's code without importing it.

    Returns the resulting top-level namespace dictionary.
    """
    mod_name, loader, code, fname = _get_module_details(mod_name)
    if run_name is None:
        run_name = mod_name
    pkg_name = mod_name.rpartition('.')[0]
    if alter_sys:
        return _run_module_code(code, init_globals, run_name,
                                fname, loader, pkg_name)
    else:
        # Leave the sys module alone
        return _run_code(code, {}, init_globals, run_name,
                         fname, loader, pkg_name)
Execute a module's code without importing it
159
9
242,823
def _get_importer(path_name):
    """Python version of PyImport_GetImporter C API function.

    Returns the cached importer for path_name, consulting sys.path_hooks
    on a cache miss; returns None for paths the import machinery cannot
    handle at all.
    """
    cache = sys.path_importer_cache
    try:
        importer = cache[path_name]
    except KeyError:
        # Not yet cached. Flag as using the
        # standard machinery until we finish
        # checking the hooks
        cache[path_name] = None
        for hook in sys.path_hooks:
            try:
                importer = hook(path_name)
                break
            except ImportError:
                pass
        else:
            # The following check looks a bit odd. The trick is that
            # NullImporter throws ImportError if the supplied path is a
            # *valid* directory entry (and hence able to be handled
            # by the standard import machinery)
            try:
                importer = imp.NullImporter(path_name)
            except ImportError:
                return None
        cache[path_name] = importer
    return importer
Python version of PyImport_GetImporter C API function
172
12
242,824
def run_path(path_name, init_globals=None, run_name=None):
    """Execute code located at the specified filesystem location.

    Returns the resulting top-level namespace dictionary.
    """
    if run_name is None:
        run_name = "<run_path>"
    importer = _get_importer(path_name)
    if isinstance(importer, imp.NullImporter):
        # Not a valid sys.path entry, so run the code directly
        # execfile() doesn't help as we want to allow compiled files
        code = _get_code_from_file(path_name)
        return _run_module_code(code, init_globals, run_name, path_name)
    else:
        # Importer is defined for path, so add it to
        # the start of sys.path
        sys.path.insert(0, path_name)
        try:
            # Here's where things are a little different from the run_module
            # case. There, we only had to replace the module in sys while the
            # code was running and doing so was somewhat optional. Here, we
            # have no choice and we have to remove it even while we read the
            # code. If we don't do this, a __loader__ attribute in the
            # existing __main__ module may prevent location of the new module.
            main_name = "__main__"
            saved_main = sys.modules[main_name]
            del sys.modules[main_name]
            try:
                mod_name, loader, code, fname = _get_main_module_details()
            finally:
                sys.modules[main_name] = saved_main
            pkg_name = ""
            with _ModifiedArgv0(path_name):
                with _TempModule(run_name) as temp_module:
                    mod_globals = temp_module.module.__dict__
                    return _run_code(code, mod_globals, init_globals,
                                     run_name, fname, loader,
                                     pkg_name).copy()
        finally:
            try:
                sys.path.remove(path_name)
            except ValueError:
                pass
Execute code located at the specified filesystem location
431
9
242,825
def maxTreeDepthDivide(rootValue, currentDepth=0, parallelLevel=2):
    """Finds a tree node that represents rootValue and computes the max
    depth of this tree branch.

    This function will emit new futures until currentDepth == parallelLevel.
    """
    thisRoot = shared.getConst('myTree').search(rootValue)
    if currentDepth >= parallelLevel:
        # Deep enough: finish the computation serially on this worker.
        return thisRoot.maxDepth(currentDepth)
    else:
        # Base case
        if not any([thisRoot.left, thisRoot.right]):
            return currentDepth
        if not all([thisRoot.left, thisRoot.right]):
            # Only one child: no point splitting into two futures.
            return thisRoot.maxDepth(currentDepth)
        # Parallel recursion
        return max(
            futures.map(
                maxTreeDepthDivide,
                [thisRoot.left.payload,
                 thisRoot.right.payload,
                 ],
                cycle([currentDepth + 1]),
                cycle([parallelLevel]),
            )
        )
Finds a tree node that represents rootValue and computes the max depth of this tree branch . This function will emit new futures until currentDepth = parallelLevel
163
32
242,826
def insert(self, value):
    """Insert a value in the tree."""
    # An empty node (or an equal payload) simply takes the value.
    if not self.payload or value == self.payload:
        self.payload = value
        return
    if value <= self.payload:
        # Smaller-or-equal values belong in the left subtree.
        if self.left:
            self.left.insert(value)
        else:
            self.left = BinaryTreeNode(value)
    else:
        # Larger values belong in the right subtree.
        if self.right:
            self.right.insert(value)
        else:
            self.right = BinaryTreeNode(value)
Insert a value in the tree
86
6
242,827
def maxDepth(self, currentDepth=0):
    """Compute the depth of the longest branch of the tree."""
    children = (self.left, self.right)
    # A leaf terminates the branch at the current depth.
    if not any(children):
        return currentDepth
    # Otherwise the deepest existing child decides the result.
    depths = [node.maxDepth(currentDepth + 1) for node in children if node]
    return max([0] + depths)
Compute the depth of the longest branch of the tree
68
11
242,828
def search(self, value):
    """Find an element in the tree.

    Returns the node holding value, or None when it is absent.
    """
    if self.payload == value:
        return self
    # Recurse into the side that may contain the value, if it exists.
    if value <= self.payload:
        if self.left:
            return self.left.search(value)
    elif self.right:
        return self.right.search(value)
    return None
Find an element in the tree
58
6
242,829
def createZMQSocket(self, sock_type):
    """Create a socket of the given sock_type and deactivate message
    dropping.
    """
    sock = self.ZMQcontext.socket(sock_type)
    sock.setsockopt(zmq.LINGER, LINGER_TIME)
    sock.setsockopt(zmq.IPV4ONLY, 0)
    # Remove message dropping
    sock.setsockopt(zmq.SNDHWM, 0)
    sock.setsockopt(zmq.RCVHWM, 0)
    try:
        sock.setsockopt(zmq.IMMEDIATE, 1)
    except (AttributeError, zmq.ZMQError):
        # This parameter was recently added by new libzmq versions: older
        # pyzmq lacks the constant (AttributeError) and older libzmq may
        # reject the option (ZMQError).  The original bare "except:" also
        # swallowed KeyboardInterrupt/SystemExit, which we no longer do.
        pass
    # Don't accept unroutable messages
    if sock_type == zmq.ROUTER:
        sock.setsockopt(zmq.ROUTER_MANDATORY, 1)
    return sock
Create a socket of the given sock_type and deactivate message dropping
183
14
242,830
def _reportFutures(self):
    """Sends futures status updates to broker at intervals of
    scoop.TIME_BETWEEN_STATUS_REPORTS seconds.

    Is intended to be run by a separate thread.
    """
    try:
        while True:
            time.sleep(scoop.TIME_BETWEEN_STATUS_REPORTS)
            # Collect the ids of every queued, ready and running future.
            fids = set(x.id for x in scoop._control.execQueue.movable)
            fids.update(set(x.id for x in scoop._control.execQueue.ready))
            fids.update(set(
                x.id for x in scoop._control.execQueue.inprogress
            ))
            self.socket.send_multipart([
                STATUS_UPDATE,
                pickle.dumps(fids),
            ])
    except AttributeError:
        # The process is being shut down.
        pass
Sends futures status updates to broker at intervals of scoop . TIME_BETWEEN_STATUS_REPORTS seconds . Is intended to be run by a separate thread .
146
36
242,831
def _sendReply(self, destination, fid, *args):
    """Send a REPLY directly to its destination. If it doesn't work,
    launch it back to the broker.
    """
    # Try to send the result directly to its parent
    self.addPeer(destination)
    try:
        self.direct_socket.send_multipart([
            destination,
            REPLY,
        ] + list(args),
            flags=zmq.NOBLOCK)
    except zmq.error.ZMQError as e:
        # Fallback on Broker routing if no direct connection possible
        scoop.logger.debug(
            "{0}: Could not send result directly to peer {1}, routing through "
            "broker.".format(scoop.worker, destination)
        )
        self.socket.send_multipart([
            REPLY,
        ] + list(args) + [
            destination,
        ])
    # Tell the broker the future identified by fid is done.
    self.socket.send_multipart([
        STATUS_DONE,
        fid,
    ])
Send a REPLY directly to its destination . If it doesn't work , launch it back to the broker .
178
21
242,832
def _startup(rootFuture, *args, **kargs):
    """Initializes the SCOOP environment.

    Runs the controller greenlet on rootFuture and returns its result
    (None when a Shutdown was requested).
    """
    import greenlet
    global _controller
    _controller = greenlet.greenlet(control.runController)
    try:
        result = _controller.switch(rootFuture, *args, **kargs)
    except scoop._comm.Shutdown:
        result = None
        control.execQueue.shutdown()
    return result
Initializes the SCOOP environment .
78
8
242,833
def _recursiveReduce(mapFunc, reductionFunc, scan, *iterables):
    """Generates the recursive reduction tree. Used by mapReduce.

    Splits the iterables in two halves, reduces each half (spawning a
    future when a half holds more than one element), then combines the
    two partial results with reductionFunc.  When scan is true, returns
    the list of running partial results instead of the final value.
    """
    if iterables:
        # Split every iterable at the same midpoint.
        half = min(len(x) // 2 for x in iterables)
        data_left = [list(x)[:half] for x in iterables]
        data_right = [list(x)[half:] for x in iterables]
    else:
        data_left = data_right = [[]]
    # Submit the left and right parts of the reduction
    out_futures = [None, None]
    out_results = [None, None]
    for index, data in enumerate([data_left, data_right]):
        if any(len(x) <= 1 for x in data):
            # Single element: apply the map locally, no future needed.
            out_results[index] = mapFunc(*list(zip(*data))[0])
        else:
            out_futures[index] = submit(
                _recursiveReduce, mapFunc, reductionFunc, scan, *data
            )
    # Wait for the results
    for index, future in enumerate(out_futures):
        if future:
            out_results[index] = future.result()
    # Apply a scan if needed
    if scan:
        last_results = copy.copy(out_results)
        if type(out_results[0]) is not list:
            out_results[0] = [out_results[0]]
        else:
            last_results[0] = out_results[0][-1]
        if type(out_results[1]) is list:
            out_results[0].extend(out_results[1][:-1])
            last_results[1] = out_results[1][-1]
        out_results[0].append(reductionFunc(*last_results))
        return out_results[0]
    return reductionFunc(*out_results)
Generates the recursive reduction tree . Used by mapReduce .
405
13
242,834
def _createFuture(func, *args, **kwargs):
    """Helper function to create a future.

    :param func: callable to execute remotely.
    :raises AssertionError: when func is not callable.
    """
    assert callable(func), (
        "The provided func parameter is not a callable."
    )
    if scoop.IS_ORIGIN and "SCOOP_WORKER" not in sys.modules:
        sys.modules["SCOOP_WORKER"] = sys.modules["__main__"]
    # If function is a lambda or class method, share it (or its parent object)
    # beforehand
    lambdaType = type(lambda: None)
    funcIsLambda = isinstance(func, lambdaType) and func.__name__ == '<lambda>'
    # Determine if function is a method. Methods derived from external
    # languages such as C++ aren't detected by ismethod.
    funcIsMethod = ismethod(func)
    if funcIsLambda or funcIsMethod:
        from .shared import SharedElementEncapsulation
        func = SharedElementEncapsulation(func)
    return Future(control.current.id, func, *args, **kwargs)
Helper function to create a future .
224
7
242,835
def _waitAny ( * children ) : n = len ( children ) # check for available results and index those unavailable for index , future in enumerate ( children ) : if future . exceptionValue : raise future . exceptionValue if future . _ended ( ) : future . _delete ( ) yield future n -= 1 else : future . index = index future = control . current while n > 0 : # wait for remaining results; switch to controller future . stopWatch . halt ( ) childFuture = _controller . switch ( future ) future . stopWatch . resume ( ) if childFuture . exceptionValue : raise childFuture . exceptionValue # Only yield if executed future was in children, otherwise loop if childFuture in children : childFuture . _delete ( ) yield childFuture n -= 1
Waits on any child Future created by the calling Future .
161
12
242,836
def wait(fs, timeout=-1, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.

    Using this function may prevent a worker from executing.

    :param fs: sequence of futures to wait upon.
    :param timeout: negative = block, 0 = non-blocking poll, positive =
        block for at most that many seconds.
    :param return_when: FIRST_COMPLETED, FIRST_EXCEPTION or ALL_COMPLETED.
    :returns: a named 2-tuple of sets (done, not_done).
    """
    DoneAndNotDoneFutures = namedtuple('DoneAndNotDoneFutures', 'done not_done')
    if timeout < 0:
        # Negative timeout means blocking.
        if return_when == FIRST_COMPLETED:
            next(_waitAny(*fs))
        elif return_when in [ALL_COMPLETED, FIRST_EXCEPTION]:
            for _ in _waitAll(*fs):
                pass
        done = set(f for f in fs if f.done())
        not_done = set(fs) - done
        return DoneAndNotDoneFutures(done, not_done)
    elif timeout == 0:
        # Zero-value entry means non-blocking
        control.execQueue.flush()
        control.execQueue.updateQueue()
        done = set(f for f in fs if f._ended())
        not_done = set(fs) - done
        return DoneAndNotDoneFutures(done, not_done)
    else:
        # Any other value means blocking for a given time.
        done = set()
        start_time = time.time()
        while time.time() - start_time < timeout:
            # Flush futures on local queue (to be executed remotely)
            control.execQueue.flush()
            # Block until data arrives (to free CPU time)
            # NOTE(review): _poll receives the elapsed time rather than
            # the remaining timeout — confirm this is intended.
            control.execQueue.socket._poll(time.time() - start_time)
            # Update queue
            control.execQueue.updateQueue()
            for f in fs:
                if f._ended():
                    done.add(f)
            not_done = set(fs) - done
            if return_when == FIRST_COMPLETED and len(done) > 0:
                break
            if len(not_done) == 0:
                break
        return DoneAndNotDoneFutures(done, not_done)
Wait for the futures in the given sequence to complete . Using this function may prevent a worker from executing .
403
21
242,837
def advertiseBrokerWorkerDown(exctype, value, traceback):
    """Hook advertising the broker if an impromptu shutdown is occurring."""
    # Skip the queue teardown when a shutdown was already requested.
    if not scoop.SHUTDOWN_REQUESTED:
        execQueue.shutdown()
    # Delegate to the default hook for the usual traceback display.
    sys.__excepthook__(exctype, value, traceback)
Hook advertising the broker if an impromptu shutdown is occurring .
55
15
242,838
def delFutureById(futureId, parentId):
    """Delete future on id basis."""
    try:
        del futureDict[futureId]
    except KeyError:
        pass
    try:
        # Also drop the reference the parent keeps to this child.
        # NOTE(review): assumes .children supports `del children[future]`
        # (i.e. it is a dict keyed by future objects) — confirm.
        toDel = [a for a in futureDict[parentId].children if a.id == futureId]
        for f in toDel:
            del futureDict[parentId].children[f]
    except KeyError:
        pass
Delete future on id basis
78
5
242,839
def delFuture(afuture):
    """Delete future afuture."""
    # Remove the future itself; it may already be gone.
    futureDict.pop(afuture.id, None)
    # Remove the parent's reference to it, if still present.
    try:
        del futureDict[afuture.parentId].children[afuture]
    except KeyError:
        pass
Delete future afuture
49
4
242,840
def runFuture(future):
    """Callable greenlet in charge of running tasks.

    Executes future.callable, records timing/debug statistics, fires the
    universal callbacks and returns the (now ended) future.
    """
    global debug_stats
    global QueueLength
    if scoop.DEBUG:
        init_debug()  # in case _control is imported before scoop.DEBUG was set
        debug_stats[future.id]['start_time'].append(time.time())
    future.waitTime = future.stopWatch.get()
    future.stopWatch.reset()
    # Get callback Group ID and assign the broker-wide unique executor ID
    try:
        uniqueReference = [cb.groupID for cb in future.callback][0]
    except IndexError:
        uniqueReference = None
    future.executor = (scoop.worker, uniqueReference)
    try:
        future.resultValue = future.callable(*future.args, **future.kargs)
    except BaseException as err:
        # Store the exception instead of propagating it: the parent decides.
        future.exceptionValue = err
        future.exceptionTraceback = str(traceback.format_exc())
        scoop.logger.debug(
            "The following error occured on a worker:\n%r\n%s",
            err,
            traceback.format_exc(),
        )
    future.executionTime = future.stopWatch.get()
    future.isDone = True
    # Update the worker inner work statistics
    if future.executionTime != 0. and hasattr(future.callable, '__name__'):
        execStats[hash(future.callable)].appendleft(future.executionTime)
    # Set debugging informations if needed
    if scoop.DEBUG:
        t = time.time()
        debug_stats[future.id]['end_time'].append(t)
        debug_stats[future.id].update({
            'executionTime': future.executionTime,
            'worker': scoop.worker,
            'creationTime': future.creationTime,
            'callable': str(future.callable.__name__)
                        if hasattr(future.callable, '__name__')
                        else 'No name',
            'parent': future.parentId
        })
        QueueLength.append((t, len(execQueue), execQueue.timelen(execQueue)))
    # Run callback (see http://www.python.org/dev/peps/pep-3148/#future-objects)
    future._execute_callbacks(CallbackType.universal)
    # Delete references to the future
    future._delete()
    return future
Callable greenlet in charge of running tasks .
512
10
242,841
def runController(callable_, *args, **kargs):
    """Callable greenlet implementing controller logic.

    Runs the root future (on the origin worker) or picks up remote work
    (on slave workers) until the root future has ended, then returns its
    result value.
    """
    global execQueue
    # initialize and run root future
    rootId = (-1, 0)
    # initialise queue
    if execQueue is None:
        execQueue = FutureQueue()
        sys.excepthook = advertiseBrokerWorkerDown
        if scoop.DEBUG:
            from scoop import _debug
            _debug.redirectSTDOUTtoDebugFile()
        # TODO: Make that a function
        # Wait until we received the main module if we are a headless slave
        headless = scoop.CONFIGURATION.get("headless", False)
        if not scoop.MAIN_MODULE:
            # If we're not the origin and still don't have our main_module,
            # wait for it and then import it as module __main___
            main = scoop.shared.getConst('__MAIN_MODULE__',
                                         timeout=float('inf'))
            directory_name = tempfile.mkdtemp()
            os.chdir(directory_name)
            scoop.MAIN_MODULE = main.writeFile(directory_name)
            from .bootstrap.__main__ import Bootstrap as SCOOPBootstrap
            newModule = SCOOPBootstrap.setupEnvironment()
            sys.modules['__main__'] = newModule
        elif scoop.IS_ORIGIN and headless and scoop.MAIN_MODULE:
            # We're the origin, share our main_module
            scoop.shared.setConst(
                __MAIN_MODULE__=scoop.encapsulation.ExternalEncapsulation(
                    scoop.MAIN_MODULE,
                )
            )
            # TODO: use modulefinder to share every local dependency of
            # main module
    # launch future if origin or try to pickup a future if slave worker
    if scoop.IS_ORIGIN:
        future = Future(rootId, callable_, *args, **kargs)
    else:
        future = execQueue.pop()
    future.greenlet = greenlet.greenlet(runFuture)
    future = future._switch(future)
    if scoop.DEBUG:
        lastDebugTs = time.time()
    # Loop until the root future (on the origin) has ended.
    while (not scoop.IS_ORIGIN
            or future.parentId != rootId
            or not future._ended()):
        if (scoop.DEBUG
                and time.time() - lastDebugTs
                    > scoop.TIME_BETWEEN_PARTIALDEBUG):
            _debug.writeWorkerDebug(
                debug_stats,
                QueueLength,
                "debug/partial-{0}".format(round(time.time(), -1))
            )
            lastDebugTs = time.time()
        # process future
        if future._ended():
            # future is finished
            if future.id[0] != scoop.worker:
                # future is not local
                execQueue.sendResult(future)
                future = execQueue.pop()
            else:
                # future is local, parent is waiting
                if future.index is not None:
                    try:
                        parent = futureDict[future.parentId]
                    except KeyError:
                        # Job has no parent here (probably children restart)
                        future = execQueue.pop()
                    else:
                        if parent.exceptionValue is None:
                            # Resume the parent with the finished child.
                            future = parent._switch(future)
                        else:
                            future = execQueue.pop()
                else:
                    future = execQueue.pop()
        else:
            # future is in progress; run next future from pending execution queue.
            future = execQueue.pop()
        if not future._ended() and future.greenlet is None:
            # initialize if the future hasn't started
            future.greenlet = greenlet.greenlet(runFuture)
            future = future._switch(future)
    execQueue.shutdown()
    if future.exceptionValue:
        print(future.exceptionTraceback)
        sys.exit(1)
    return future.resultValue
Callable greenlet implementing controller logic .
794
8
242,842
def mode(self):
    """Mode of the log-normal distribution fitted to the stats data."""
    # mode of LogNormal(mu, sigma) = exp(mu - sigma**2)
    value = math.exp(self.mean() - self.std() ** 2)
    # presumably mean()/std() yield NaN when no data is available — map to inf
    return float("inf") if math.isnan(value) else value
Computes the mode of a log - normal distribution built with the stats data .
61
16
242,843
def median(self):
    """Median of the log-normal distribution fitted to the stats data."""
    # median of LogNormal(mu, sigma) = exp(mu)
    value = math.exp(self.mean())
    return float("inf") if math.isnan(value) else value
Computes the median of a log - normal distribution built with the stats data .
48
16
242,844
def _decode_string(buf, pos):
    """Decode a NUL-terminated string from *buf* starting at *pos*.

    Returns a tuple (decoded string, index of the next byte to read).
    Raises MinusconfError on invalid encoding or a missing terminator.
    """
    terminator = _compat_bytes('\x00')
    for end in range(pos, len(buf)):
        if buf[end:end + 1] != terminator:
            continue
        try:
            return (buf[pos:end].decode(_CHARSET), end + 1)
        except UnicodeDecodeError:
            raise MinusconfError('Not a valid ' + _CHARSET + ' string: ' + repr(buf[pos:end]))
    raise MinusconfError("Premature end of string (Forgot trailing \\0?), buf=" + repr(buf))
Decodes a string in the buffer buf starting at position pos . Returns a tuple of the read string and the next byte to read .
169
29
242,845
def _find_sock():
    """Create a UDP socket, preferring IPv6 when the platform supports it."""
    if socket.has_ipv6:
        try:
            return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        except socket.gaierror:
            # Platform lied about IPv6 support — fall back to IPv4
            pass
    return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Create a UDP socket
78
4
242,846
def _compat_inet_pton(family, addr):
    """socket.inet_pton replacement for platforms that don't have it.

    Supports AF_INET (strict dotted quad) and AF_INET6 (including the
    '::' compact form and an embedded IPv4 tail). Returns the packed
    binary address; raises ValueError on malformed input.
    """
    if family == socket.AF_INET:
        # inet_aton accepts some strange forms, so we use our own
        res = _compat_bytes('')
        parts = addr.split('.')
        if len(parts) != 4:
            raise ValueError('Expected 4 dot-separated numbers')
        for part in parts:
            intval = int(part, 10)
            if intval < 0 or intval > 0xff:
                raise ValueError("Invalid integer value in IPv4 address: " + str(intval))
            res = res + struct.pack('!B', intval)
        return res
    elif family == socket.AF_INET6:
        wordcount = 8
        res = _compat_bytes('')
        # IPv4 embedded? (e.g. '::ffff:1.2.3.4')
        dotpos = addr.find('.')
        if dotpos >= 0:
            v4start = addr.rfind(':', 0, dotpos)
            if v4start == -1:
                # BUG FIX: was `raise ValueException(...)` — an undefined
                # name that itself raised NameError instead of ValueError
                raise ValueError("Missing colons in an IPv6 address")
            wordcount = 6
            res = socket.inet_aton(addr[v4start + 1:])
            # We leave a marker that the address is not finished
            addr = addr[:v4start] + '!'
        # Compact version?
        compact_pos = addr.find('::')
        if compact_pos >= 0:
            if compact_pos == 0:
                addr = '0' + addr
                compact_pos += 1
            if compact_pos == len(addr) - len('::'):
                addr = addr + '0'
            addr = (addr[:compact_pos] + ':' +
                    ('0:' * (wordcount - (addr.count(':') - '::'.count(':')) - 2)) +
                    addr[compact_pos + len('::'):])
        # Remove any marker we left for the embedded IPv4 tail
        if addr.endswith('!'):
            addr = addr[:-len('!')]
        words = addr.split(':')
        if len(words) != wordcount:
            raise ValueError('Invalid number of IPv6 hextets, expected ' +
                             str(wordcount) + ', got ' + str(len(words)))
        for w in reversed(words):
            # 0x and negative is not valid here, but accepted by int(,16)
            if 'x' in w or '-' in w:
                raise ValueError("Invalid character in IPv6 address")
            intval = int(w, 16)
            if intval > 0xffff:
                raise ValueError("IPv6 address componenent too big")
            res = struct.pack('!H', intval) + res
        return res
    else:
        # BUG FIX: `family` is an int; the old str concatenation raised
        # TypeError instead of the intended ValueError
        raise ValueError("Unknown protocol family " + str(family))
socket . inet_pton for platforms that don t have it
605
13
242,847
def start_blocking(self):
    """Start the advertiser in the background and block until it is ready."""
    started = self._cav_started
    started.clear()
    self.start()
    started.wait()  # released when the background thread signals readiness
Start the advertiser in the background but wait until it is ready
35
13
242,848
def _send_queries(self):
    """Send queries to all resolved addresses; return the success count."""
    sent = 0
    resolved = _resolve_addrs(self.addresses, self.port,
                              self.ignore_senderrors, [self._sock.family])
    for entry in resolved:
        try:
            self._send_query(entry[1])
        # bare except kept on purpose: any send error is swallowed unless
        # the instance is configured to surface them
        except:
            if not self.ignore_senderrors:
                raise
        else:
            sent += 1
    return sent
Sends queries to multiple addresses . Returns the number of successful queries .
87
14
242,849
def clean(self):
    """Unconditionally reject the form — the submitted values are ignored."""
    # Always raise the default error message, whatever was entered.
    raise forms.ValidationError(
        self.error_messages['invalid_login'],
        params={'username': self.username_field.verbose_name},
        code='invalid_login',
    )
Always raise the default error message because we don t care what they entered here .
56
16
242,850
def types(**requirements):
    """Precondition factory: each named argument must match its given type(s)."""
    def predicate(args):
        for name, kind in sorted(requirements.items()):
            assert hasattr(args, name), "missing required argument `%s`" % name
            kinds = kind if isinstance(kind, tuple) else (kind,)
            if not any(isinstance(getattr(args, name), k) for k in kinds):
                return False
        return True
    return condition("the types of arguments must be valid", predicate, True)
Specify a precondition based on the types of the function s arguments .
108
16
242,851
def ensure(arg1, arg2=None):
    """Precondition described by a string and tested by a predicate.

    Accepts either (description, predicate) or just (predicate); in the
    latter case the predicate's source text is used as the description.
    """
    assert (isinstance(arg1, str) and isfunction(arg2)) or (isfunction(arg1) and arg2 is None)
    if isinstance(arg1, str):
        description, predicate = arg1, arg2
    else:
        description, predicate = get_function_source(arg1), arg1
    return condition(description, predicate, False, True)
Specify a precondition described by description and tested by predicate .
99
14
242,852
def invariant(arg1, arg2=None):
    """Class decorator factory enforcing an invariant on (most) methods.

    Accepts either (description, predicate) or just (predicate); in the
    latter case the predicate's source text serves as the description.
    """
    desc = ""
    predicate = lambda x: x
    if isinstance(arg1, str):
        desc = arg1
        predicate = arg2
    else:
        desc = get_function_source(arg1)
        predicate = arg1

    def invariant(c):
        # Decide whether the class attribute `name`/`func` gets wrapped.
        def check(name, func):
            exceptions = ("__getitem__", "__setitem__", "__lt__", "__le__",
                          "__eq__", "__ne__", "__gt__", "__ge__", "__init__")
            # skip dunders except the whitelisted operators and __init__
            if name.startswith("__") and name.endswith("__") and name not in exceptions:
                return False
            if not ismethod(func) and not isfunction(func):
                return False
            # skip callables bound to the class itself (classmethods)
            if getattr(func, "__self__", None) is c:
                return False
            return True

        # Subclass so the original class stays untouched.
        class InvariantContractor(c):
            pass

        for name, value in [(name, getattr(c, name)) for name in dir(c)]:
            if check(name, value):
                # __init__ is only checked *after* it runs (name != "__init__")
                setattr(InvariantContractor, name,
                        condition(desc, predicate, name != "__init__", True, True)(value))
        return InvariantContractor
    return invariant
Specify a class invariant described by description and tested by predicate .
286
14
242,853
def mkpassword(length=16, chars=None, punctuation=None):
    """Generate a random ascii string — useful to generate authinfos.

    length      -- total length of the generated string
    chars       -- population to draw from (default: letters + digits)
    punctuation -- if set, that many characters are replaced by random
                   punctuation marks and positions are shuffled
    """
    # Security fix: passwords are security-sensitive, so draw from the
    # cryptographically strong `secrets` module instead of `random`.
    import secrets

    if chars is None:
        chars = string.ascii_letters + string.digits
    # Generate string from population
    data = [secrets.choice(chars) for _ in range(length)]
    if punctuation:
        # - remove n chars from string
        # - add random punctuation
        # - shuffle chars :)
        data = data[:-punctuation]
        data.extend(secrets.choice(PUNCTUATION) for _ in range(punctuation))
        secrets.SystemRandom().shuffle(data)
    return ''.join(data)
Generates a random ascii string - useful to generate authinfos
132
15
242,854
def disk_check_size(ctx, param, value):
    """Click callback validating that a disk size is a multiple of 1024."""
    if not value:
        return value
    # the value may carry a unit prefix, packed as (prefix, size)
    size = value[1] if isinstance(value, tuple) else value
    if size % 1024:
        raise click.ClickException('Size must be a multiple of 1024.')
    return value
Validation callback for disk size parameter .
68
8
242,855
def create(cls, fqdn, flags, algorithm, public_key):
    """Create a dnssec key for *fqdn* through domain.dnssec.create."""
    return cls.call('domain.dnssec.create', fqdn.lower(), {
        'flags': flags,
        'algorithm': algorithm,
        'public_key': public_key,
    })
Create a dnssec key .
82
7
242,856
def from_name(cls, name):
    """Return the id of the snapshot profile named *name*, or None.

    Raises DuplicateResults when several profiles share the name.
    """
    matches = cls.list({'name': name})
    if not matches:
        return None
    if len(matches) == 1:
        return matches[0]['id']
    raise DuplicateResults('snapshot profile name %s is ambiguous.' % name)
Retrieve a snapshot profile associated with a name .
72
12
242,857
def list(cls, options=None, target=None):
    """List snapshot profiles for paas and/or vm targets, sorted by id."""
    options = options or {}
    profiles = []
    # (api method, target label) pairs to query
    sources = (('paas.snapshotprofile.list', 'paas'),
               ('hosting.snapshotprofile.list', 'vm'))
    for method, kind in sources:
        if target and target != kind:
            continue
        for profile in cls.safe_call(method, options):
            profile['target'] = kind
            profiles.append((profile['id'], profile))
    profiles.sort(key=lambda item: item[0])
    return [profile for id_, profile in profiles]
List all snapshot profiles .
172
5
242,858
def records(cls, fqdn, sort_by=None, text=False):
    """Fetch record information for *fqdn*; plain text when *text* is set."""
    url = cls.get_fqdn_info(fqdn)['domain_records_href']
    kwargs = {'headers': {'Accept': 'text/plain'}} if text else {}
    return cls.json_get(cls.get_sort_url(url, sort_by), **kwargs)
Display records information about a domain .
111
7
242,859
def add_record(cls, fqdn, name, type, value, ttl):
    """Create a record for *fqdn* (rrset name/type/values, optional ttl)."""
    payload = {'rrset_name': name, 'rrset_type': type, 'rrset_values': value}
    if ttl:
        payload['rrset_ttl'] = int(ttl)
    url = cls.get_fqdn_info(fqdn)['domain_records_href']
    return cls.json_post(url, data=json.dumps(payload))
Create record for a domain .
122
6
242,860
def update_record(cls, fqdn, name, type, value, ttl, content):
    """Update records for a domain.

    With *content*, the whole zone is replaced as plain text; otherwise the
    single rrset identified by name/type is updated.
    """
    payload = {'rrset_name': name, 'rrset_type': type, 'rrset_values': value}
    if ttl:
        payload['rrset_ttl'] = int(ttl)
    meta = cls.get_fqdn_info(fqdn)
    if content:
        # raw zone-file style update
        return cls.json_put(meta['domain_records_href'],
                            headers={'Content-Type': 'text/plain'},
                            data=content)
    url = '%s/domains/%s/records/%s/%s' % (cls.api_url, fqdn, name, type)
    return cls.json_put(url, data=json.dumps(payload))
Update all records for a domain .
210
7
242,861
def del_record(cls, fqdn, name, type):
    """Delete record(s) for *fqdn*, optionally narrowed by name and type."""
    url = cls.get_fqdn_info(fqdn)['domain_records_href']
    for part in (name, type):
        if part:
            url = '%s/%s' % (url, part)
    return cls.json_delete(url)
Delete record for a domain .
108
6
242,862
def keys(cls, fqdn, sort_by=None):
    """Fetch DNSSEC key information for *fqdn*."""
    url = cls.get_fqdn_info(fqdn)['domain_keys_href']
    return cls.json_get(cls.get_sort_url(url, sort_by))
Display keys information about a domain .
70
7
242,863
def keys_info(cls, fqdn, key):
    """Fetch information about a single DNSSEC key."""
    url = '%s/domains/%s/keys/%s' % (cls.api_url, fqdn, key)
    return cls.json_get(url)
Retrieve key information .
54
5
242,864
def keys_create(cls, fqdn, flag):
    """Create a new DNSSEC key entry for *fqdn* and return it."""
    url = cls.get_fqdn_info(fqdn)['domain_keys_href']
    _, headers = cls.json_post(url, data=json.dumps({'flags': flag}),
                               return_header=True)
    # the new key's URL is advertised through the Location header
    return cls.json_get(headers['location'])
Create new key entry for a domain .
97
8
242,865
def list(gandi, datacenter, id, subnet, gateway):
    """List vlans, one block per vlan separated by a line."""
    output_keys = ['name', 'state', 'dc']
    # optional columns toggled by CLI flags
    for flag, key in ((id, 'id'), (subnet, 'subnet'), (gateway, 'gateway')):
        if flag:
            output_keys.append(key)
    datacenters = gandi.datacenter.list()
    vlans = gandi.vlan.list(datacenter)
    for index, vlan in enumerate(vlans):
        if index:
            gandi.separator_line()
        output_vlan(gandi, vlan, datacenters, output_keys)
    return vlans
List vlans .
156
5
242,866
def info(gandi, resource, ip):
    """Display information about a vlan.

    When *ip* is falsy only the vlan summary is printed; otherwise every
    iface and its ips are listed, flagging which ip acts as the gateway.
    """
    output_keys = ['name', 'state', 'dc', 'subnet', 'gateway']
    datacenters = gandi.datacenter.list()
    vlan = gandi.vlan.info(resource)
    gateway = vlan['gateway']
    if not ip:
        output_vlan(gandi, vlan, datacenters, output_keys, justify=11)
        return vlan
    gateway_exists = False
    # index vms by id for output_iface below
    vms = dict([(vm_['id'], vm_) for vm_ in gandi.iaas.list()])
    ifaces = gandi.vlan.ifaces(resource)
    # NOTE(review): the loop variable shadows the `ip` parameter from here on
    for iface in ifaces:
        for ip in iface['ips']:
            if gateway == ip['ip']:
                gateway_exists = True
    if gateway_exists:
        # the gateway ip is shown in the iface listing instead
        vlan.pop('gateway')
    else:
        vlan['gateway'] = ("%s don't exists" % gateway if gateway else 'none')
    output_vlan(gandi, vlan, datacenters, output_keys, justify=11)
    output_keys = ['vm', 'bandwidth']
    for iface in ifaces:
        gandi.separator_line()
        output_iface(gandi, iface, datacenters, vms, output_keys, justify=11)
        for ip in iface['ips']:
            output_ip(gandi, ip, None, None, None, ['ip'])
            if gateway == ip['ip']:
                output_line(gandi, 'gateway', 'true', justify=11)
    return vlan
Display information about a vlan .
380
7
242,867
def create(gandi, name, datacenter, subnet, gateway, background):
    """Create a new vlan, warning when the datacenter is about to close."""
    try:
        gandi.datacenter.is_opened(datacenter, 'iaas')
    except DatacenterLimited as exc:
        gandi.echo('/!\\ Datacenter %s will be closed on %s, '
                   'please consider using another datacenter.' %
                   (datacenter, exc.date))
    result = gandi.vlan.create(name, datacenter, subnet, gateway, background)
    if background:
        gandi.pretty_echo(result)
    return result
Create a new vlan
128
5
242,868
def update(gandi, resource, name, gateway, create, bandwidth):
    """Update a vlan.

    *gateway* may be an ip address (used as-is) or a vm name/id; in the
    latter case the vm's ipv4 address inside the vlan is used, optionally
    creating one when *create* is set.
    """
    params = {}
    if name:
        params['name'] = name
    vlan_id = gandi.vlan.usable_id(resource)
    try:
        if gateway:
            # IP() raises ValueError when gateway is not a literal address
            IP(gateway)
            params['gateway'] = gateway
    except ValueError:
        # gateway is a vm identifier: locate its ipv4 inside this vlan
        vm = gandi.iaas.info(gateway)
        ips = [ip for sublist in
               [[ip['ip'] for ip in iface['ips'] if ip['version'] == 4]
                for iface in vm['ifaces']
                if iface['vlan'] and iface['vlan'].get('id') == vlan_id]
               for ip in sublist]
        if len(ips) > 1:
            gandi.echo("This vm has two ips in the vlan, don't know which one"
                       ' to choose (%s)' % (', '.join(ips)))
            return
        if not ips and not create:
            gandi.echo("Can't find '%s' in '%s' vlan" % (gateway, resource))
            return
        if not ips and create:
            # allocate a fresh ip for the vm inside the vlan
            gandi.echo('Will create a new ip in this vlan for vm %s' % gateway)
            oper = gandi.ip.create('4', vm['datacenter_id'], bandwidth,
                                   vm['hostname'], resource)
            iface_id = oper['iface_id']
            iface = gandi.iface.info(iface_id)
            ips = [ip['ip'] for ip in iface['ips'] if ip['version'] == 4]
        params['gateway'] = ips[0]
    result = gandi.vlan.update(resource, params)
    return result
Update a vlan
412
4
242,869
def list_migration_choice(cls, datacenter):
    """List the datacenters a vm in *datacenter* may migrate to."""
    datacenter_id = cls.usable_id(datacenter)
    dc_list = cls.list()
    current = [dc for dc in dc_list if dc['id'] == datacenter_id][0]
    allowed = current['can_migrate_to']
    return [dc for dc in dc_list if dc['id'] in allowed]
List available datacenters for migration from given datacenter .
108
13
242,870
def is_opened(cls, dc_code, type_):
    """Check that datacenter *dc_code* is open for *type_*.

    Raises DatacenterClosed when unknown or closed, DatacenterLimited
    (with the deactivation date, if any) when closed for new customers.
    """
    opened_key = '%s_opened' % type_
    datacenters = cls.safe_call('hosting.datacenter.list',
                                {'dc_code': dc_code, opened_key: True})
    if not datacenters:
        # fall back to matching on the ISO country code
        datacenters = cls.safe_call('hosting.datacenter.list',
                                    {'iso': dc_code, opened_key: True})
    if not datacenters:
        raise DatacenterClosed(r'/!\ Datacenter %s is closed, please '
                               'choose another datacenter.' % dc_code)
    datacenter = datacenters[0]
    if datacenter.get('%s_closed_for' % type_) == 'NEW':
        close_date = datacenter.get('deactivate_at', '')
        if close_date:
            close_date = close_date.strftime('%d/%m/%Y')
        raise DatacenterLimited(close_date)
List opened datacenters for given type .
263
9
242,871
def filtered_list(cls, name=None, obj=None):
    """List datacenters matching *name* and compatible with *obj*."""
    options = {}
    if name:
        options['id'] = cls.usable_id(name)
    # keep a datacenter when no obj is given or the obj lives in it
    return [dc for dc in cls.list(options)
            if not obj or obj['datacenter_id'] == dc['id']]
List datacenters matching name and compatible with obj
99
10
242,872
def from_iso(cls, iso):
    """Return the id of the first datacenter with ISO code *iso*."""
    first_by_iso = {}
    for dc in cls.list({'sort_by': 'id ASC'}):
        # setdefault keeps the first (lowest id) entry per ISO code
        first_by_iso.setdefault(dc['iso'], dc['id'])
    return first_by_iso.get(iso)
Retrieve the first datacenter id associated to an ISO .
86
13
242,873
def from_name(cls, name):
    """Return the datacenter id matching *name*, or None."""
    by_name = {dc['name']: dc['id'] for dc in cls.list()}
    return by_name.get(name)
Retrieve datacenter id associated to a name .
57
11
242,874
def from_country(cls, country):
    """Return the id of the first datacenter located in *country*."""
    first_by_country = {}
    for dc in cls.list({'sort_by': 'id ASC'}):
        # setdefault keeps the first (lowest id) entry per country
        first_by_country.setdefault(dc['country'], dc['id'])
    return first_by_country.get(country)
Retrieve the first datacenter id associated to a country .
86
13
242,875
def from_dc_code(cls, dc_code):
    """Return the datacenter id matching *dc_code*, or None."""
    codes = {}
    for dc in cls.list():
        code = dc.get('dc_code')
        if code:
            codes[code] = dc['id']
    return codes.get(dc_code)
Retrieve the datacenter id associated to a dc_code
77
13
242,876
def usable_id(cls, id):
    """Resolve *id* (dc_code, ISO code, country or numeric id) to a dc id.

    Tries each interpretation in turn; calls cls.error when nothing matches.
    """
    try:
        # id is maybe a dc_code
        qry_id = cls.from_dc_code(id)
        if not qry_id:
            # id is maybe a ISO
            qry_id = cls.from_iso(id)
            if qry_id:
                cls.deprecated('ISO code for datacenter filter use '
                               'dc_code instead')
        if not qry_id:
            # id is maybe a country
            qry_id = cls.from_country(id)
        if not qry_id:
            qry_id = int(id)
    # broad catch is deliberate: any lookup/conversion failure means the
    # identifier is simply unknown
    except Exception:
        qry_id = None
    if not qry_id:
        msg = 'unknown identifier %s' % id
        cls.error(msg)
    return qry_id
Retrieve id from input which can be ISO name country dc_code .
173
15
242,877
def find_port(addr, user):
    """Return the local port of an existing tunnel for user@addr, if any."""
    import pwd
    home = pwd.getpwuid(os.getuid()).pw_dir
    prefix = 'unixpipe_%s@%s_' % (user, addr,)
    # control socket names look like unixpipe_<user>@<addr>_<port>
    for entry in os.listdir('%s/.ssh/' % home):
        if entry.startswith(prefix):
            return int(entry.split('_')[2])
Find local port in existing tunnels
97
6
242,878
def new_port():
    """Find a free local TCP port in [12042, 16042) and return it."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                          socket.IPPROTO_TCP)
    for candidate in range(12042, 16042):
        try:
            probe.bind(('127.0.0.1', candidate))
        except socket.error:
            continue
        probe.close()
        return candidate
    raise Exception('No local port available')
Find a free local port and allocate it
94
8
242,879
def _ssh_master_cmd(addr, user, command, local_key=None):
    """Run an ssh control ('-O') command against the mux for user@addr."""
    port = find_port(addr, user)
    ssh_call = ['ssh',
                '-qNfL%d:127.0.0.1:12042' % port,
                '-o', 'ControlPath=~/.ssh/unixpipe_%%r@%%h_%d' % port,
                '-O', command,
                '%s@%s' % (user, addr,)]
    if local_key:
        # '-i <key>' must directly follow the ssh executable
        ssh_call.insert(1, local_key)
        ssh_call.insert(1, '-i')
    return subprocess.call(ssh_call)
Exit or check ssh mux
158
6
242,880
def setup(addr, user, remote_path, local_key=None):
    """Set up (or reuse) the ssh tunnel to user@addr; return its local port."""
    port = find_port(addr, user)
    if not port or not is_alive(addr, user):
        # no usable tunnel: allocate a port, upload the helper, start ssh
        port = new_port()
        scp(addr, user, __file__, '~/unixpipe', local_key)
        ssh_call = ['ssh',
                    '-fL%d:127.0.0.1:12042' % port,
                    '-o', 'ExitOnForwardFailure=yes',
                    '-o', 'ControlPath=~/.ssh/unixpipe_%%r@%%h_%d' % port,
                    '-o', 'ControlMaster=auto',
                    '%s@%s' % (user, addr,),
                    'python', '~/unixpipe', 'server', remote_path]
        if local_key:
            # '-i <key>' must directly follow the ssh executable
            ssh_call.insert(1, local_key)
            ssh_call.insert(1, '-i')
        subprocess.call(ssh_call)
        # XXX Sleep is a bad way to wait for the tunnel endpoint
        time.sleep(1)
    return port
Setup the tunnel
253
3
242,881
def list(gandi, limit, step):
    """List operations, newest last, separated by lines."""
    output_keys = ['id', 'type', 'step']
    options = {'step': step,
               'items_per_page': limit,
               'sort_by': 'date_created DESC'}
    operations = gandi.oper.list(options)
    for index, oper in enumerate(reversed(operations)):
        if index:
            gandi.separator_line()
        output_generic(gandi, oper, output_keys)
    return operations
List operations .
113
3
242,882
def info(gandi, id):
    """Display information about an operation."""
    keys = ['id', 'type', 'step', 'last_error']
    oper = gandi.oper.info(id)
    output_generic(gandi, oper, keys)
    return oper
Display information about an operation .
58
6
242,883
def create(gandi, resource, flags, algorithm, public_key):
    """Create a DNSSEC key through the CLI context."""
    return gandi.dnssec.create(resource, flags, algorithm, public_key)
Create DNSSEC key .
40
6
242,884
def list(gandi, resource):
    """List and pretty-print DNSSEC keys for *resource*."""
    keys = gandi.dnssec.list(resource)
    gandi.pretty_echo(keys)
    return keys
List DNSSEC keys .
33
6
242,885
def delete(gandi, resource):
    """Delete a DNSSEC key and confirm on success."""
    result = gandi.dnssec.delete(resource)
    gandi.echo('Delete successful.')
    return result
Delete DNSSEC key .
34
6
242,886
def load_config(cls):
    """Load global and local configuration files, updating global if needed."""
    config_file = os.path.expanduser(cls.home_config)
    global_conf = cls.load(config_file, 'global')
    cls.load(cls.local_config, 'local')
    # rewrite the global configuration when legacy entries were found
    cls.update_config(config_file, global_conf)
Load global and local configuration files and update if needed .
81
11
242,887
def update_config(cls, config_file, config):
    """Migrate legacy entries in *config*, saving to *config_file* if changed.

    - drops the obsolete 'api.env' key
    - folds the old single 'ssh_key' entry into the 'sshkey' list
    """
    need_save = False
    # delete old env key
    if 'api' in config and 'env' in config['api']:
        del config['api']['env']
        need_save = True
    # convert old ssh_key configuration entry
    ssh_key = config.get('ssh_key')
    sshkeys = config.get('sshkey')
    if ssh_key:
        if sshkeys:
            # BUG FIX: the old code stored list.append()'s return value
            # (None) under 'sshkey', wiping the existing key list
            sshkeys.append(ssh_key)
        else:
            config['sshkey'] = [ssh_key]
        # remove old value
        del config['ssh_key']
        need_save = True
    # save to disk only when something changed
    if need_save:
        cls.save(config_file, config)
Update configuration if needed .
203
5
242,888
def load(cls, filename, name=None):
    """Load and cache YAML configuration from *filename* under *name*."""
    if not os.path.exists(filename):
        return {}
    name = name or filename
    if name not in cls._conffiles:
        with open(filename) as fdesc:
            content = yaml.load(fdesc, YAMLLoader)
        # an empty file parses to None — normalise to an empty mapping
        cls._conffiles[name] = content if content is not None else {}
    return cls._conffiles[name]
Load yaml configuration from filename .
108
7
242,889
def save(cls, filename, config):
    """Write *config* as YAML to *filename* with 0600 permissions."""
    # O_TRUNC|O_CREAT plus an explicit 0o600 keeps credentials private
    flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
    with os.fdopen(os.open(filename, flags, 0o600), 'w') as handle:
        yaml.safe_dump(config, handle, indent=4, default_flow_style=False)
Save configuration to yaml file .
87
7
242,890
def get(cls, key, default=None, separator='.', global_=False):
    """Look up *key*: environment first (unless global), then config scopes."""
    if not global_:
        # FOO.BAR is overridable through the FOO_BAR environment variable
        env_val = os.environ.get(key.upper().replace('.', '_'))
        if env_val is not None:
            return env_val
    # local configuration wins over global unless global_ is forced
    scopes = ['global'] if global_ else ['local', 'global']
    for scope in scopes:
        found = cls._get(scope, key, default, separator)
        if found is not None and found != default:
            return found
    return default
Retrieve a key value from loaded configuration .
158
9
242,891
def configure(cls, global_, key, val):
    """Set *key* to *val* in the chosen scope and persist it to disk."""
    scope = 'global' if global_ else 'local'
    # make sure the scope cache exists before mutating it
    config = cls._conffiles.setdefault(scope, {})
    cls._set(scope, key, val)
    conf_file = cls.home_config if global_ else cls.local_config
    cls.save(os.path.expanduser(conf_file), config)
Update and save configuration value to file .
134
8
242,892
def info(gandi):
    """Display handle, credit and prepaid information for the account."""
    keys = ['handle', 'credit', 'prepaid']
    account = gandi.account.all()
    account['prepaid_info'] = gandi.contact.balance().get('prepaid', {})
    output_account(gandi, account, keys)
    return account
Display information about hosting account .
79
6
242,893
def create(cls, ip_version, datacenter, bandwidth, vm=None, vlan=None,
           ip=None, background=False):
    """Create a public ip (attaching it to *vm* when given) via Iface."""
    # note the argument order expected by Iface.create: vlan before vm
    return Iface.create(ip_version, datacenter, bandwidth, vlan, vm, ip,
                        background)
Create a public ip and attach it if vm is given .
59
12
242,894
def update(cls, resource, params, background=False):
    """Update an IP and show progress unless running in background."""
    cls.echo('Updating your IP')
    result = cls.call('hosting.ip.update', cls.usable_id(resource), params)
    if not background:
        cls.display_progress(result)
    return result
Update this IP
70
3
242,895
def delete(cls, resources, background=False, force=False):
    """Delete ip(s) by deleting the iface each one is attached to."""
    if not isinstance(resources, (list, tuple)):
        resources = [resources]
    iface_ids = []
    for resource in resources:
        try:
            ip_ = cls.info(resource)
        except UsageError:
            # cls.error presumably aborts here — TODO confirm; mirrors the
            # original control flow either way
            cls.error("Can't find this ip %s" % resource)
        iface = Iface.info(ip_['iface_id'])
        iface_ids.append(iface['id'])
    return Iface.delete(iface_ids, background)
Delete an ip by deleting the iface
122
8
242,896
def from_ip(cls, ip):
    """Return the id of the ip address *ip*, or None when unknown."""
    mapping = {entry['ip']: entry['id']
               for entry in cls.list({'items_per_page': 500})}
    return mapping.get(ip)
Retrieve ip id associated to an ip .
65
9
242,897
def list(cls, datacenter=None):
    """List vlans, optionally restricted to one datacenter."""
    options = {}
    if datacenter:
        options['datacenter_id'] = int(Datacenter.usable_id(datacenter))
    return cls.call('hosting.vlan.list', options)
List virtual machine vlan
77
5
242,898
def ifaces(cls, name):
    """Return detailed info for every iface attached to the vlan *name*."""
    summaries = Iface.list({'vlan_id': cls.usable_id(name)})
    return [Iface.info(summary['id']) for summary in summaries]
Get vlan attached ifaces .
68
7
242,899
def delete(cls, resources, background=False):
    """Delete vlan(s); show a progress bar unless backgrounded."""
    if not isinstance(resources, (list, tuple)):
        resources = [resources]
    opers = []
    for resource in resources:
        oper = cls.call('hosting.vlan.delete', cls.usable_id(resource))
        if not oper:
            continue
        # the API may return one operation or a list of them
        if isinstance(oper, list):
            opers.extend(oper)
        else:
            opers.append(oper)
    if background:
        return opers
    # interactive mode, run a progress bar
    cls.echo('Deleting your vlan.')
    if opers:
        cls.display_progress(opers)
Delete a vlan .
141
5