idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
32,500
def create_node(self, *args, **kwargs):
    """
    Creates a new IOU VM.

    Serialized through an asyncio lock so that two concurrent creations
    cannot pick the same application id.

    :returns: the newly created node
    """
    with (yield from self._iou_id_lock):
        # Reserve a free application id before delegating to the base class.
        new_id = get_next_application_id(self.nodes)
        created = yield from super().create_node(*args, application_id=new_id, **kwargs)
        return created
Creates a new IOU VM .
32,501
async def create_link ( project , nodes ) : node1 = random . choice ( list ( nodes . values ( ) ) ) for port in range ( 0 , 8 ) : node2 = random . choice ( list ( nodes . values ( ) ) ) if node1 == node2 : continue data = { "nodes" : [ { "adapter_number" : 0 , "node_id" : node1 [ "node_id" ] , "port_number" : port } , ...
Create all possible links of a node
32,502
def join ( self ) : pending = set ( ) exceptions = set ( ) while len ( self . _tasks ) > 0 or len ( pending ) > 0 : while len ( self . _tasks ) > 0 and len ( pending ) < self . _concurrency : task , args , kwargs = self . _tasks . pop ( 0 ) pending . add ( task ( * args , ** kwargs ) ) ( done , pending ) = yield from a...
Wait for all tasks to finish
32,503
def name(self, new_name):
    """
    Sets the name of this node.

    :param new_name: new node name
    """
    # Log while self.name still holds the old name, then apply the rename.
    message = "{module}: {name} [{id}] renamed to {new_name}".format(
        module=self.manager.module_name, name=self.name, id=self.id, new_name=new_name)
    log.info(message)
    self._name = new_name
Sets the name of this node .
32,504
def create(self):
    """
    Creates the node (this base implementation only records the event).
    """
    details = dict(module=self.manager.module_name, name=self.name, id=self.id)
    log.info("{module}: {name} [{id}] created".format(**details))
Creates the node .
32,505
def stop(self):
    """
    Stop the node process.

    Closes the telnet wrapper server (when one exists) and marks the
    node as stopped.
    """
    server = self._wrapper_telnet_server
    if server:
        server.close()
        yield from server.wait_closed()
    self.status = "stopped"
Stop the node process .
32,506
def close ( self ) : if self . _closed : return False log . info ( "{module}: '{name}' [{id}]: is closing" . format ( module = self . manager . module_name , name = self . name , id = self . id ) ) if self . _console : self . _manager . port_manager . release_tcp_port ( self . _console , self . _project ) self . _conso...
Close the node process .
32,507
def start_wrap_console ( self ) : if not self . _wrap_console or self . _console_type != "telnet" : return remaining_trial = 60 while True : try : ( reader , writer ) = yield from asyncio . open_connection ( host = "127.0.0.1" , port = self . _internal_console_port ) break except ( OSError , ConnectionRefusedError ) as...
Start a telnet proxy for the console, allowing multiple clients to be connected at the same time
32,508
def aux ( self , aux ) : if aux == self . _aux : return if self . _aux : self . _manager . port_manager . release_tcp_port ( self . _aux , self . _project ) self . _aux = None if aux is not None : self . _aux = self . _manager . port_manager . reserve_tcp_port ( aux , self . _project ) log . info ( "{module}: '{name}' ...
Changes the aux port
32,509
def console ( self , console ) : if console == self . _console : return if self . _console_type == "vnc" and console is not None and console < 5900 : raise NodeError ( "VNC console require a port superior or equal to 5900 currently it's {}" . format ( console ) ) if self . _console : self . _manager . port_manager . re...
Changes the console port
32,510
def console_type ( self , console_type ) : if console_type != self . _console_type : self . _manager . port_manager . release_tcp_port ( self . _console , self . _project ) if console_type == "vnc" : self . _console = self . _manager . port_manager . get_free_tcp_port ( self . _project , 5900 , 6000 ) else : self . _co...
Sets the console type for this node .
32,511
def ubridge(self):
    """
    Returns the uBridge hypervisor.

    A handle to a hypervisor that is no longer running is dropped so
    callers see a dead hypervisor as absent (None).
    """
    hypervisor = self._ubridge_hypervisor
    if hypervisor and not hypervisor.is_running():
        self._ubridge_hypervisor = None
        hypervisor = None
    return hypervisor
Returns the uBridge hypervisor .
32,512
def ubridge_path(self):
    """
    Returns the uBridge executable path.

    The configured name/path is resolved through PATH; returns None when
    the executable cannot be found.
    """
    configured = self._manager.config.get_section_config("Server").get("ubridge_path", "ubridge")
    return shutil.which(configured)
Returns the uBridge executable path .
32,513
def _ubridge_send ( self , command ) : if not self . _ubridge_hypervisor or not self . _ubridge_hypervisor . is_running ( ) : yield from self . _start_ubridge ( ) if not self . _ubridge_hypervisor or not self . _ubridge_hypervisor . is_running ( ) : raise NodeError ( "Cannot send command '{}': uBridge is not running" ....
Sends a command to uBridge hypervisor .
32,514
def _stop_ubridge ( self ) : if self . _ubridge_hypervisor and self . _ubridge_hypervisor . is_running ( ) : log . info ( "Stopping uBridge hypervisor {}:{}" . format ( self . _ubridge_hypervisor . host , self . _ubridge_hypervisor . port ) ) yield from self . _ubridge_hypervisor . stop ( ) self . _ubridge_hypervisor =...
Stops uBridge .
32,515
def add_ubridge_udp_connection ( self , bridge_name , source_nio , destination_nio ) : yield from self . _ubridge_send ( "bridge create {name}" . format ( name = bridge_name ) ) if not isinstance ( destination_nio , NIOUDP ) : raise NodeError ( "Destination NIO is not UDP" ) yield from self . _ubridge_send ( 'bridge ad...
Creates an UDP connection in uBridge .
32,516
def _ubridge_apply_filters ( self , bridge_name , filters ) : yield from self . _ubridge_send ( 'bridge reset_packet_filters ' + bridge_name ) for packet_filter in self . _build_filter_list ( filters ) : cmd = 'bridge add_packet_filter {} {}' . format ( bridge_name , packet_filter ) try : yield from self . _ubridge_sen...
Apply packet filters
32,517
def _add_ubridge_ethernet_connection ( self , bridge_name , ethernet_interface , block_host_traffic = False ) : if sys . platform . startswith ( "linux" ) and block_host_traffic is False : yield from self . _ubridge_send ( 'bridge add_nio_linux_raw {name} "{interface}"' . format ( name = bridge_name , interface = ether...
Creates a connection with an Ethernet interface in uBridge .
32,518
def check_available_ram ( self , requested_ram ) : available_ram = int ( psutil . virtual_memory ( ) . available / ( 1024 * 1024 ) ) percentage_left = psutil . virtual_memory ( ) . percent if requested_ram > available_ram : message = '"{}" requires {}MB of RAM to run but there is only {}MB - {}% of RAM left on "{}"' . ...
Sends a warning notification if there is not enough RAM on the system to allocate requested RAM .
32,519
def backing_file(self):
    """
    When using a linked clone this returns the path to the base image.

    :returns: decoded path string, or None when the stored path is empty
    """
    with open(self._path, 'rb') as img:
        img.seek(self.backing_file_offset)
        raw = img.read(self.backing_file_size)
    decoded = raw.decode()
    return decoded if decoded else None
When using linked clone this will return the path to the base image
32,520
def rebase ( self , qemu_img , base_image ) : if not os . path . exists ( base_image ) : raise FileNotFoundError ( base_image ) command = [ qemu_img , "rebase" , "-u" , "-b" , base_image , self . _path ] process = yield from asyncio . create_subprocess_exec ( * command ) retcode = yield from process . wait ( ) if retco...
Rebase a linked clone in order to use the correct disk
32,521
def qt_font_to_style ( font , color ) : if font is None : font = "TypeWriter,10,-1,5,75,0,0,0,0,0" font_info = font . split ( "," ) style = "font-family: {};font-size: {};" . format ( font_info [ 0 ] , font_info [ 1 ] ) if font_info [ 4 ] == "75" : style += "font-weight: bold;" if font_info [ 5 ] == "1" : style += "fon...
Convert a Qt font to CSS style
32,522
def list(self):
    """
    List all VMs.

    Built-in "local" and "vm" computes are excluded from the listing.
    """
    excluded = ("local", "vm")
    return [{"vmname": compute.name}
            for compute in self._controller.computes.values()
            if compute.id not in excluded]
List all VMs
32,523
def take_dynamips_id ( self , project_id , dynamips_id ) : self . _dynamips_ids . setdefault ( project_id , set ( ) ) if dynamips_id in self . _dynamips_ids [ project_id ] : raise DynamipsError ( "Dynamips identifier {} is already used by another router" . format ( dynamips_id ) ) self . _dynamips_ids [ project_id ] . ...
Reserve a dynamips id or raise an error
32,524
def release_dynamips_id(self, project_id, dynamips_id):
    """
    Releases a Dynamips id so it can be reused by another VM.

    :param project_id: project the id belongs to
    :param dynamips_id: id to release (silently ignored when not reserved)
    """
    reserved = self._dynamips_ids.setdefault(project_id, set())
    # discard() is the "remove if present" idiom — no error when absent.
    reserved.discard(dynamips_id)
A Dynamips id can be reused by another VM
32,525
def project_closing ( self , project ) : yield from super ( ) . project_closing ( project ) tasks = [ ] for device in self . _devices . values ( ) : if device . project . id == project . id : tasks . append ( asyncio . async ( device . delete ( ) ) ) if tasks : done , _ = yield from asyncio . wait ( tasks ) for future ...
Called when a project is about to be closed .
32,526
def start_new_hypervisor ( self , working_dir = None ) : if not self . _dynamips_path : self . find_dynamips ( ) if not working_dir : working_dir = tempfile . gettempdir ( ) server_config = self . config . get_section_config ( "Server" ) server_host = server_config . get ( "host" ) try : info = socket . getaddrinfo ( s...
Creates a new Dynamips process and start it .
32,527
def _set_ghost_ios ( self , vm ) : if not vm . mmap : raise DynamipsError ( "mmap support is required to enable ghost IOS support" ) if vm . platform == "c7200" and vm . npe == "npe-g2" : log . warning ( "Ghost IOS is not supported for c7200 with NPE-G2" ) return ghost_file = vm . formatted_ghost_file ( ) module_workdi...
Manages Ghost IOS support .
32,528
def set_vm_configs ( self , vm , settings ) : startup_config_content = settings . get ( "startup_config_content" ) if startup_config_content : self . _create_config ( vm , vm . startup_config_path , startup_config_content ) private_config_content = settings . get ( "private_config_content" ) if private_config_content :...
Set VM configs from pushed content or existing config files .
32,529
def auto_idlepc ( self , vm ) : yield from vm . set_idlepc ( "0x0" ) was_auto_started = False try : status = yield from vm . get_status ( ) if status != "running" : yield from vm . start ( ) was_auto_started = True yield from asyncio . sleep ( 20 ) validated_idlepc = None idlepcs = yield from vm . get_idle_pc_prop ( ) ...
Try to find the best possible idle - pc value .
32,530
def write_documentation ( self , doc_type ) : for handler_name in sorted ( self . _documentation ) : if "controller." in handler_name : server_type = "controller" elif "compute" in handler_name : server_type = "compute" else : server_type = "root" if doc_type != server_type : continue print ( "Build {}" . format ( hand...
Build all the doc page for handlers
32,531
def _create_handler_directory ( self , handler_name , api_version , server_type ) : directory = "{}/api/v{}/{}/{}" . format ( self . _directory , api_version , server_type , handler_name ) os . makedirs ( directory , exist_ok = True ) with open ( "{}/api/v{}/{}/{}.rst" . format ( self . _directory , api_version , serve...
Create a directory for the handler and add an index inside
32,532
def _include_query_example ( self , f , method , path , api_version , server_type ) : m = method [ "method" ] . lower ( ) query_path = "{}_{}_{}.txt" . format ( server_type , m , self . _file_path ( path ) ) if os . path . isfile ( os . path . join ( self . _directory , "api" , "examples" , query_path ) ) : f . write (...
If a sample session is available we include it in documentation
32,533
def _write_json_schema_object ( self , f , obj ) : for name in sorted ( obj . get ( "properties" , { } ) ) : prop = obj [ "properties" ] [ name ] mandatory = " " if name in obj . get ( "required" , [ ] ) : mandatory = "&#10004;" if "enum" in prop : field_type = "enum" prop [ 'description' ] = "Possible values: {}" . fo...
obj is current object in JSON schema schema is the whole schema including definitions
32,534
def _set_auth ( self , user , password ) : if user is None or len ( user . strip ( ) ) == 0 : self . _user = None self . _password = None self . _auth = None else : self . _user = user . strip ( ) if password : self . _password = password . strip ( ) try : self . _auth = aiohttp . BasicAuth ( self . _user , self . _pas...
Set authentication parameters
32,535
def interfaces(self):
    """
    Get the list of network interfaces on the compute.

    The result is cached after the first successful query.
    """
    if not self._interfaces_cache:
        reply = yield from self.get("/network/interfaces")
        self._interfaces_cache = reply.json
    return self._interfaces_cache
Get the list of network on compute
32,536
def stream_file ( self , project , path ) : class StreamResponse : def __init__ ( self , response ) : self . _response = response def __enter__ ( self ) : return self . _response . content def __exit__ ( self ) : self . _response . close ( ) url = self . _getUrl ( "/projects/{}/stream/{}" . format ( project . id , path...
Read file of a project and stream it
32,537
def connect ( self ) : if not self . _connected and not self . _closed : try : log . info ( "Connecting to compute '{}'" . format ( self . _id ) ) response = yield from self . _run_http_query ( "GET" , "/capabilities" ) except ComputeError as e : if not hasattr ( sys , "_called_from_test" ) or not sys . _called_from_te...
Check if remote server is accessible
32,538
def _connect_notification ( self ) : try : self . _ws = yield from self . _session ( ) . ws_connect ( self . _getUrl ( "/notifications/ws" ) , auth = self . _auth ) except ( aiohttp . WSServerHandshakeError , aiohttp . ClientResponseError ) : self . _ws = None while self . _ws is not None : try : response = yield from ...
Connect to the notification stream
32,539
def forward ( self , method , type , path , data = None ) : try : action = "/{}/{}" . format ( type , path ) res = yield from self . http_query ( method , action , data = data , timeout = None ) except aiohttp . ServerDisconnectedError : log . error ( "Connection lost to %s during %s %s" , self . _id , method , action ...
Forward a call to the emulator on compute
32,540
def images ( self , type ) : images = [ ] res = yield from self . http_query ( "GET" , "/{}/images" . format ( type ) , timeout = None ) images = res . json try : if type in [ "qemu" , "dynamips" , "iou" ] : for local_image in list_images ( type ) : if local_image [ 'filename' ] not in [ i [ 'filename' ] for i in image...
Return the list of images available for this type on controller and on the compute node .
32,541
def list_files(self, project):
    """
    List files in the project on the compute.

    :param project: project instance
    :returns: decoded JSON reply from the compute
    """
    endpoint = "/projects/{}/files".format(project.id)
    reply = yield from self.http_query("GET", endpoint, timeout=120)
    return reply.json
List files in the project on computes
32,542
def get_ip_on_same_subnet ( self , other_compute ) : if other_compute == self : return ( self . host_ip , self . host_ip ) if ( self . host_ip not in ( '0.0.0.0' , '127.0.0.1' ) and other_compute . host_ip not in ( '0.0.0.0' , '127.0.0.1' ) ) : return ( self . host_ip , other_compute . host_ip ) this_compute_interfaces...
Try to find the best IP for communication from one compute to another
32,543
def add_nio ( self , nio , port_number ) : if port_number in self . _nios : raise DynamipsError ( "Port {} isn't free" . format ( port_number ) ) log . info ( 'Frame Relay switch "{name}" [{id}]: NIO {nio} bound to port {port}' . format ( name = self . _name , id = self . _id , nio = nio , port = port_number ) ) self ....
Adds a NIO as new port on Frame Relay switch .
32,544
def remove_nio ( self , port_number ) : if port_number not in self . _nios : raise DynamipsError ( "Port {} is not allocated" . format ( port_number ) ) for source , destination in self . _active_mappings . copy ( ) . items ( ) : source_port , source_dlci = source destination_port , destination_dlci = destination if po...
Removes the specified NIO as member of this Frame Relay switch .
32,545
def main():
    """
    Entry point for GNS3 server.
    """
    # Daemonizing is POSIX-only, so it is skipped on Windows platforms.
    if not sys.platform.startswith("win") and "--daemon" in sys.argv:
        daemonize()
    from gns3server.run import run
    run()
Entry point for GNS3 server
32,546
def _get_free_display_port ( self ) : display = 100 if not os . path . exists ( "/tmp/.X11-unix/" ) : return display while True : if not os . path . exists ( "/tmp/.X11-unix/X{}" . format ( display ) ) : return display display += 1
Search a free display port
32,547
def create ( self ) : try : image_infos = yield from self . _get_image_information ( ) except DockerHttp404Error : log . info ( "Image %s is missing pulling it from docker hub" , self . _image ) yield from self . pull_image ( self . _image ) image_infos = yield from self . _get_image_information ( ) if image_infos is N...
Creates the Docker container .
32,548
def update(self):
    """
    Destroy and recreate the container with the new settings.

    Console/aux ports and the running state are preserved across the
    rebuild.
    """
    saved_console = self.console
    saved_aux = self.aux
    state = yield from self._get_container_state()
    yield from self.reset()
    yield from self.create()
    self.console = saved_console
    self.aux = saved_aux
    if state == "running":
        yield from self.start()
Destroy and recreate the container with the new settings
32,549
def start ( self ) : try : state = yield from self . _get_container_state ( ) except DockerHttp404Error : raise DockerError ( "Docker container '{name}' with ID {cid} does not exist or is not ready yet. Please try again in a few seconds." . format ( name = self . name , cid = self . _cid ) ) if state == "paused" : yiel...
Starts this Docker container .
32,550
def _start_aux ( self ) : process = yield from asyncio . subprocess . create_subprocess_exec ( "docker" , "exec" , "-i" , self . _cid , "/gns3/bin/busybox" , "script" , "-qfc" , "while true; do TERM=vt100 /gns3/bin/busybox sh; done" , "/dev/null" , stdout = asyncio . subprocess . PIPE , stderr = asyncio . subprocess . ...
Start an auxiliary console
32,551
def _fix_permissions ( self ) : state = yield from self . _get_container_state ( ) if state == "stopped" or state == "exited" : yield from self . manager . query ( "POST" , "containers/{}/start" . format ( self . _cid ) ) for volume in self . _volumes : log . debug ( "Docker container '{name}' [{image}] fix ownership o...
Because Docker runs as root we need to fix permissions and ownership to allow users to interact with the container from their filesystem and perform operations like file deletion
32,552
def _start_vnc ( self ) : self . _display = self . _get_free_display_port ( ) if shutil . which ( "Xvfb" ) is None or shutil . which ( "x11vnc" ) is None : raise DockerError ( "Please install Xvfb and x11vnc before using the VNC support" ) self . _xvfb_process = yield from asyncio . create_subprocess_exec ( "Xvfb" , "-...
Start a VNC server for this container
32,553
def _start_http ( self ) : log . debug ( "Forward HTTP for %s to %d" , self . name , self . _console_http_port ) command = [ "docker" , "exec" , "-i" , self . _cid , "/gns3/bin/busybox" , "nc" , "127.0.0.1" , str ( self . _console_http_port ) ] server = AsyncioRawCommandServer ( command , replaces = [ ( '://127.0.0.1' ...
Start an HTTP tunnel to the container's localhost. It's not perfect, but the only way we have to inject network packets is using nc.
32,554
def _start_console ( self ) : class InputStream : def __init__ ( self ) : self . _data = b"" def write ( self , data ) : self . _data += data @ asyncio . coroutine def drain ( self ) : if not self . ws . closed : self . ws . send_bytes ( self . _data ) self . _data = b"" output_stream = asyncio . StreamReader ( ) input...
Start streaming the console via telnet
32,555
def _read_console_output ( self , ws , out ) : while True : msg = yield from ws . receive ( ) if msg . tp == aiohttp . WSMsgType . text : out . feed_data ( msg . data . encode ( ) ) elif msg . tp == aiohttp . WSMsgType . BINARY : out . feed_data ( msg . data ) elif msg . tp == aiohttp . WSMsgType . ERROR : log . critic...
Read Websocket and forward it to the telnet
32,556
def is_running(self):
    """
    Checks if the container is running.

    :returns: True when Docker reports the container as running
    """
    state = yield from self._get_container_state()
    if state == "running":
        return True
    # Reconcile our bookkeeping when Docker says the container died.
    if self.status == "started":
        yield from self.stop()
    return False
Checks if the container is running .
32,557
def restart(self):
    """
    Restart this Docker container.
    """
    action = "containers/{}/restart".format(self._cid)
    yield from self.manager.query("POST", action)
    log.info("Docker container '{name}' [{image}] restarted".format(name=self._name, image=self._image))
Restart this Docker container .
32,558
def _clean_servers(self):
    """
    Clean the list of running console servers.

    Every telnet server is closed and awaited before the list is reset.
    """
    if len(self._telnet_servers) > 0:
        for server in self._telnet_servers:
            server.close()
            yield from server.wait_closed()
    self._telnet_servers = []
Clean the list of running console servers
32,559
def stop ( self ) : try : yield from self . _clean_servers ( ) yield from self . _stop_ubridge ( ) try : state = yield from self . _get_container_state ( ) except DockerHttp404Error : self . status = "stopped" return if state == "paused" : yield from self . unpause ( ) yield from self . _fix_permissions ( ) state = yie...
Stops this Docker container .
32,560
def pause(self):
    """
    Pauses this Docker container.
    """
    endpoint = "containers/{}/pause".format(self._cid)
    yield from self.manager.query("POST", endpoint)
    self.status = "suspended"
    log.info("Docker container '{name}' [{image}] paused".format(name=self._name, image=self._image))
Pauses this Docker container .
32,561
def _get_log(self):
    """
    Return the log from the container.

    :returns: raw reply of the Docker logs endpoint (stdout + stderr)
    """
    endpoint = "containers/{}/logs".format(self._cid)
    reply = yield from self.manager.query("GET", endpoint, params={"stderr": 1, "stdout": 1})
    return reply
Return the log from the container
32,562
def get_next_application_id(nodes):
    """
    Calculates a free application_id from the given nodes.

    :param nodes: iterable of nodes exposing an ``application_id`` attribute
    :returns: an unused id in the range 1-511
    :raises IOUError: when every id is already taken
    """
    allocated = {node.application_id for node in nodes}
    free_ids = set(range(1, 512)) - allocated
    if not free_ids:
        raise IOUError("Cannot create a new IOU VM (limit of 512 VMs on one host reached)")
    return free_ids.pop()
Calculates free application_id from given nodes
32,563
def clear(self):
    """
    Restart with a clean config: drop every override and re-read the
    configuration files.
    """
    self._override_config = {}
    self._config = configparser.RawConfigParser()
    self.read_config()
Restart with a clean config
32,564
def read_config ( self ) : try : parsed_files = self . _config . read ( self . _files , encoding = "utf-8" ) except configparser . Error as e : log . error ( "Can't parse configuration file: %s" , str ( e ) ) return if not parsed_files : log . warning ( "No configuration file could be found or read" ) else : for file i...
Read the configuration files .
32,565
def get_section_config(self, section):
    """
    Get a specific configuration section.

    Returns the DEFAULT section when the requested one does not exist.
    """
    lookup = section if section in self._config else "DEFAULT"
    return self._config[lookup]
Get a specific configuration section . Returns the default section if none can be found .
32,566
def set_section_config(self, section, content):
    """
    Set a specific configuration section. It is not written to disk.

    Boolean values are stored as the lowercase strings "true"/"false"
    because configparser only stores strings (content is mutated in
    place accordingly).
    """
    if not self._config.has_section(section):
        self._config.add_section(section)
    for key in content:
        value = content[key]
        if isinstance(value, bool):
            value = str(value).lower()
            content[key] = value
        self._config.set(section, key, value)
Set a specific configuration section. It is not written to the disk.
32,567
def set(self, section, key, value):
    """
    Set a config value. It is not written to disk.

    Booleans keep their Python repr ("True"/"False") as strings.
    """
    conf = self.get_section_config(section)
    conf[key] = str(value) if isinstance(value, bool) else value
    self.set_section_config(section, conf)
Set a config value. It is not written to the disk.
32,568
def instance(*args, **kwargs):
    """
    Singleton to return only one instance of Config.

    :returns: instance of Config
    """
    existing = getattr(Config, "_instance", None)
    if existing is None:
        existing = Config(*args, **kwargs)
        Config._instance = existing
    return existing
Singleton to return only one instance of Config .
32,569
def wait_run_in_executor(func, *args, **kwargs):
    """
    Run blocking code in a different thread and wait for the result.

    :param func: blocking callable
    :returns: the value returned by func
    """
    loop = asyncio.get_event_loop()
    task = loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
    yield from asyncio.wait([task])
    # result() re-raises any exception captured in the worker thread.
    return task.result()
Run blocking code in a different thread and wait for the result .
32,570
def subprocess_check_output ( * args , cwd = None , env = None , stderr = False ) : if stderr : proc = yield from asyncio . create_subprocess_exec ( * args , stderr = asyncio . subprocess . PIPE , cwd = cwd , env = env ) output = yield from proc . stderr . read ( ) else : proc = yield from asyncio . create_subprocess_e...
Run a command and capture output
32,571
def wait_for_process_termination ( process , timeout = 10 ) : if sys . version_info >= ( 3 , 5 ) : try : yield from asyncio . wait_for ( process . wait ( ) , timeout = timeout ) except ProcessLookupError : return else : while timeout > 0 : if process . returncode is not None : return yield from asyncio . sleep ( 0.1 ) ...
Wait for a process to terminate and raise asyncio.TimeoutError in case of timeout.
32,572
def locked_coroutine ( f ) : @ asyncio . coroutine def new_function ( * args , ** kwargs ) : lock_var_name = "__" + f . __name__ + "_lock" if not hasattr ( args [ 0 ] , lock_var_name ) : setattr ( args [ 0 ] , lock_var_name , asyncio . Lock ( ) ) with ( yield from getattr ( args [ 0 ] , lock_var_name ) ) : return ( yie...
Method decorator that replaces asyncio.coroutine and guarantees that this specific method of this class instance will not be executed twice at the same time
32,573
def set_name(self, new_name):
    """
    Renames this bridge.

    :param new_name: new bridge name
    """
    command = 'nio_bridge rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name)
    yield from self._hypervisor.send(command)
    self._name = new_name
Renames this bridge .
32,574
def delete(self):
    """
    Deletes this bridge.

    The hypervisor-side bridge is only removed once no device uses the
    hypervisor anymore.
    """
    hypervisor = self._hypervisor
    if hypervisor and self in hypervisor.devices:
        hypervisor.devices.remove(self)
    if hypervisor and not hypervisor.devices:
        yield from hypervisor.send('nio_bridge delete "{}"'.format(self._name))
Deletes this bridge .
32,575
def add_nio(self, nio):
    """
    Adds a NIO as new port on this bridge.

    :param nio: NIO instance to add
    """
    command = 'nio_bridge add_nio "{name}" {nio}'.format(name=self._name, nio=nio)
    yield from self._hypervisor.send(command)
    self._nios.append(nio)
Adds a NIO as new port on this bridge .
32,576
def remove_nio(self, nio):
    """
    Removes the specified NIO as member of this bridge.

    :param nio: NIO instance to remove
    """
    if self._hypervisor:
        command = 'nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio)
        yield from self._hypervisor.send(command)
    self._nios.remove(nio)
Removes the specified NIO as member of this bridge .
32,577
def _move_node_file ( path , old_id , new_id ) : root = os . path . join ( path , "project-files" ) if os . path . exists ( root ) : for dirname in os . listdir ( root ) : module_dir = os . path . join ( root , dirname ) if os . path . isdir ( module_dir ) : node_dir = os . path . join ( module_dir , old_id ) if os . p...
Move the files from a node when changing its id
32,578
def _move_files_to_compute ( compute , project_id , directory , files_path ) : location = os . path . join ( directory , files_path ) if os . path . exists ( location ) : for ( dirpath , dirnames , filenames ) in os . walk ( location ) : for filename in filenames : path = os . path . join ( dirpath , filename ) dst = o...
Move the files to a remote compute
32,579
def _upload_file(compute, project_id, file_path, path):
    """
    Upload a file to a remote project.

    :param compute: destination compute
    :param project_id: id of the remote project
    :param file_path: local file to send
    :param path: destination path relative to the project
    """
    remote_path = "/projects/{}/files/{}".format(project_id, path.replace("\\", "/"))
    with open(file_path, "rb") as source:
        yield from compute.http_query("POST", remote_path, source, timeout=None)
Upload a file to a remote project
32,580
def _import_images ( controller , path ) : image_dir = controller . images_path ( ) root = os . path . join ( path , "images" ) for ( dirpath , dirnames , filenames ) in os . walk ( root ) : for filename in filenames : path = os . path . join ( dirpath , filename ) dst = os . path . join ( image_dir , os . path . relpa...
Copy images to the images directory or delete them if they already exist.
32,581
def parse_add_loopback ( ) : class Add ( argparse . Action ) : def __call__ ( self , parser , args , values , option_string = None ) : try : ipaddress . IPv4Interface ( "{}/{}" . format ( values [ 1 ] , values [ 2 ] ) ) except ipaddress . AddressValueError as e : raise argparse . ArgumentTypeError ( "Invalid IP address...
Validate params when adding a loopback adapter
32,582
def main ( ) : parser = argparse . ArgumentParser ( description = '%(prog)s add/remove Windows loopback adapters' ) parser . add_argument ( '-a' , "--add" , nargs = 3 , action = parse_add_loopback ( ) , help = "add a Windows loopback adapter" ) parser . add_argument ( "-r" , "--remove" , action = "store" , help = "remo...
Entry point for the Windows loopback tool .
32,583
def _svg_convert_size ( size ) : conversion_table = { "pt" : 1.25 , "pc" : 15 , "mm" : 3.543307 , "cm" : 35.43307 , "in" : 90 , "px" : 1 } if len ( size ) > 3 : if size [ - 2 : ] in conversion_table : return round ( float ( size [ : - 2 ] ) * conversion_table [ size [ - 2 : ] ] ) return round ( float ( size ) )
Convert svg size to the px version
32,584
def list_images ( type ) : files = set ( ) images = [ ] server_config = Config . instance ( ) . get_section_config ( "Server" ) general_images_directory = os . path . expanduser ( server_config . get ( "images_path" , "~/GNS3/images" ) ) default_directory = default_images_directory ( type ) for directory in images_dire...
Scan directories for available images of a given type
32,585
def _os_walk ( directory , recurse = True , ** kwargs ) : if recurse : for root , dirs , files in os . walk ( directory , ** kwargs ) : yield root , dirs , files else : files = [ ] for filename in os . listdir ( directory ) : if os . path . isfile ( os . path . join ( directory , filename ) ) : files . append ( filenam...
Works like os.walk, but if recurse is False it just lists the current directory
32,586
def images_directories ( type ) : server_config = Config . instance ( ) . get_section_config ( "Server" ) paths = [ ] img_dir = os . path . expanduser ( server_config . get ( "images_path" , "~/GNS3/images" ) ) type_img_directory = default_images_directory ( type ) try : os . makedirs ( type_img_directory , exist_ok = ...
Return all directory where we will look for images by priority
32,587
def md5sum ( path ) : if path is None or len ( path ) == 0 or not os . path . exists ( path ) : return None try : with open ( path + '.md5sum' ) as f : md5 = f . read ( ) if len ( md5 ) == 32 : return md5 except ( OSError , UnicodeDecodeError ) : pass try : m = hashlib . md5 ( ) with open ( path , 'rb' ) as f : while T...
Return the md5sum of an image and cache it on disk
32,588
def remove_checksum(path):
    """
    Remove the cached checksum of an image if it exists.

    :param path: image path (without the .md5sum suffix)
    """
    checksum_file = '{}.md5sum'.format(path)
    if os.path.exists(checksum_file):
        os.remove(checksum_file)
Remove the checksum of an image from cache if exists
32,589
def record_tcp_port(self, port):
    """
    Associate a reserved TCP port number with this project.

    :param port: TCP port number
    """
    # set.add() is already a no-op for ports recorded earlier.
    self._used_tcp_ports.add(port)
Associate a reserved TCP port number with this project .
32,590
def record_udp_port(self, port):
    """
    Associate a reserved UDP port number with this project.

    :param port: UDP port number
    """
    # set.add() is already a no-op for ports recorded earlier.
    self._used_udp_ports.add(port)
Associate a reserved UDP port number with this project .
32,591
def remove_tcp_port(self, port):
    """
    Removes an associated TCP port number from this project.

    :param port: TCP port number (ignored when not recorded)
    """
    self._used_tcp_ports.discard(port)
Removes an associated TCP port number from this project .
32,592
def remove_udp_port(self, port):
    """
    Removes an associated UDP port number from this project.

    :param port: UDP port number (ignored when not recorded)
    """
    self._used_udp_ports.discard(port)
Removes an associated UDP port number from this project .
32,593
def module_working_directory ( self , module_name ) : workdir = self . module_working_path ( module_name ) if not self . _deleted : try : os . makedirs ( workdir , exist_ok = True ) except OSError as e : raise aiohttp . web . HTTPInternalServerError ( text = "Could not create module working directory: {}" . format ( e ...
Returns a working directory for the module. The directory is created if it doesn't exist.
32,594
def node_working_directory ( self , node ) : workdir = self . node_working_path ( node ) if not self . _deleted : try : os . makedirs ( workdir , exist_ok = True ) except OSError as e : raise aiohttp . web . HTTPInternalServerError ( text = "Could not create the node working directory: {}" . format ( e ) ) return workd...
Returns a working directory for a specific node. If the directory doesn't exist it is created.
32,595
def capture_working_directory ( self ) : workdir = os . path . join ( self . _path , "tmp" , "captures" ) if not self . _deleted : try : os . makedirs ( workdir , exist_ok = True ) except OSError as e : raise aiohttp . web . HTTPInternalServerError ( text = "Could not create the capture working directory: {}" . format ...
Returns a working directory where to temporary store packet capture files .
32,596
def remove_node(self, node):
    """
    Removes a node from the project.

    In theory this should be called by the node manager.
    """
    # Unknown nodes are silently ignored.
    if node not in self._nodes:
        return
    yield from node.delete()
    self._nodes.remove(node)
Removes a node from the project . In theory this should be called by the node manager .
32,597
def close ( self ) : project_nodes_id = set ( [ n . id for n in self . nodes ] ) for module in self . compute ( ) : module_nodes_id = set ( [ n . id for n in module . instance ( ) . nodes ] ) if len ( module_nodes_id & project_nodes_id ) : yield from module . instance ( ) . project_closing ( self ) yield from self . _c...
Closes the project but keep information on disk
32,598
def _close_and_clean ( self , cleanup ) : tasks = [ ] for node in self . _nodes : tasks . append ( asyncio . async ( node . manager . close_node ( node . id ) ) ) if tasks : done , _ = yield from asyncio . wait ( tasks ) for future in done : try : future . result ( ) except ( Exception , GeneratorExit ) as e : log . er...
Closes the project and cleanup the disk if cleanup is True
32,599
def delete(self):
    """
    Removes the project from disk.

    Every compute module is notified before (project_closing) and after
    (project_closed) the cleanup.
    """
    # Let every module flush/close its state before wiping the directory.
    for module in self.compute():
        yield from module.instance().project_closing(self)
    yield from self._close_and_clean(True)
    # Tell the modules the project is fully gone.
    for module in self.compute():
        yield from module.instance().project_closed(self)
Removes project from disk