idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
31,600
def rules ( self ) : list_of_rules = [ ] for main_row in self . dict_rules : if 'rules' in main_row : for rule_row in main_row [ 'rules' ] : if 'grants' in rule_row : for grant_row in rule_row [ 'grants' ] : if 'group_id' in grant_row : group_id = grant_row [ 'group_id' ] if 'name' in grant_row : row_name = grant_row [...
Returns a sorted list of firewall rules .
31,601
def csv ( self ) : output = StringIO . StringIO ( ) fieldnames = [ 'id' , 'name' , 'description' , 'rules_direction' , 'rules_ip_protocol' , 'rules_from_port' , 'rules_to_port' , 'rules_grants_group_id' , 'rules_grants_name' , 'rules_grants_cidr_ip' , 'rules_description' ] writer = csv . DictWriter ( output , fieldname...
Returns the security rules as a CSV .
31,602
def _get_rules_from_aws ( self ) : list_of_rules = list ( ) if self . profile : boto3 . setup_default_session ( profile_name = self . profile ) if self . region : ec2 = boto3 . client ( 'ec2' , region_name = self . region ) else : ec2 = boto3 . client ( 'ec2' ) security_groups = ec2 . describe_security_groups ( Filters...
Load the EC2 security rules off AWS into a list of dict .
31,603
def getattr_sdk(attr, name):
    """Filter SDK attributes.

    Only routines are considered: a routine tagged with ``_sdkmeta`` is
    returned as-is, an untagged routine raises AttributeError.  Non-routine
    attributes fall through and yield None.
    """
    if not inspect.isroutine(attr):
        return None
    if hasattr(attr, '_sdkmeta'):
        return attr
    raise AttributeError(name)
Filter SDK attributes
31,604
def setup_sdk_logging ( logfile = None , loglevel = logging . INFO ) : logging . root . setLevel ( logging . DEBUG ) logging . root . addHandler ( logging . NullHandler ( ) ) if logfile : fh = logging . FileHandler ( logfile ) fh . setLevel ( loglevel ) fh . setFormatter ( get_default_log_formatter ( ) ) logging . root...
Setup a NullHandler to the root logger . If logfile is passed additionally add a FileHandler in loglevel level .
31,605
def get_ssh_client ( ip_addr , ssh_key = None , host_name = None , ssh_tries = None , propagate_fail = True , username = 'root' , password = '123456' , ) : host_name = host_name or ip_addr with LogTask ( 'Get ssh client for %s' % host_name , level = 'debug' , propagate_fail = propagate_fail , ) : ssh_timeout = int ( co...
Get a connected SSH client
31,606
def sysprep ( disk , distro , loader = None , backend = 'direct' , ** kwargs ) : if loader is None : loader = PackageLoader ( 'lago' , 'templates' ) sysprep_file = _render_template ( distro , loader = loader , ** kwargs ) cmd = [ 'virt-sysprep' , '-a' , disk ] cmd . extend ( [ '--commands-from-file' , sysprep_file ] ) ...
Run virt-sysprep on the disk. Commands are built from the distro-specific template and arguments passed in kwargs. If no template is available it will default to sysprep-base.j2.
31,607
def get_domain_template ( distro , libvirt_ver , ** kwargs ) : env = Environment ( loader = PackageLoader ( 'lago' , 'providers/libvirt/templates' ) , trim_blocks = True , lstrip_blocks = True , ) template_name = 'dom_template-{0}.xml.j2' . format ( distro ) try : template = env . get_template ( template_name ) except ...
Get a rendered Jinja2 domain template
31,608
def dict_to_xml(spec, full_document=False):
    """Convert a dict to an XML element.

    The dict is serialized with xmltodict and re-parsed into an lxml
    element tree node.
    """
    serialized = xmltodict.unparse(spec, full_document=full_document, pretty=True)
    return lxml.etree.fromstring(serialized)
Convert dict to XML
31,609
def guestfs_conn_ro ( disk ) : disk_path = os . path . expandvars ( disk ) conn = guestfs . GuestFS ( python_return_dict = True ) conn . add_drive_ro ( disk_path ) conn . set_backend ( os . environ . get ( 'LIBGUESTFS_BACKEND' , 'direct' ) ) try : conn . launch ( ) except RuntimeError as err : LOGGER . debug ( err ) ra...
Open a GuestFS handle and add disk in read only mode .
31,610
def guestfs_conn_mount_ro ( disk_path , disk_root , retries = 5 , wait = 1 ) : for attempt in range ( retries ) : with guestfs_conn_ro ( disk_path ) as conn : rootfs = find_rootfs ( conn , disk_root ) try : conn . mount_ro ( rootfs , '/' ) except RuntimeError as err : LOGGER . debug ( err ) if attempt < retries - 1 : L...
Open a GuestFS handle with disk_path and try mounting the root filesystem . disk_root is a hint where it should be looked and will only be used if GuestFS will not be able to deduce it independently .
31,611
def find_rootfs ( conn , disk_root ) : rootfs = conn . inspect_os ( ) if not rootfs or len ( rootfs ) > 1 : filesystems = conn . list_filesystems ( ) if disk_root in filesystems : rootfs = [ disk_root ] else : rootfs = [ fs for fs in filesystems . keys ( ) if disk_root in fs ] if not rootfs : raise GuestFSError ( 'no r...
Find the image's root filesystem device and return its path.
31,612
def extract_paths ( disk_path , disk_root , paths , ignore_nopath ) : with guestfs_conn_mount_ro ( disk_path , disk_root ) as conn : for ( guest_path , host_path ) in paths : msg = ( 'Extracting guestfs://{0} to {1}' ) . format ( guest_path , host_path ) LOGGER . debug ( msg ) try : _copy_path ( conn , guest_path , hos...
Extract paths from a disk using guestfs
31,613
def cli_plugin_add_argument(*args, **kwargs):
    """Decorator generator that registers an argparse argument on the
    decorated CLI plugin function, wrapping it first if needed."""
    def decorator(func):
        wrapped = func
        if not isinstance(wrapped, CLIPluginFuncWrapper):
            wrapped = CLIPluginFuncWrapper(do_run=wrapped)
        wrapped.add_argument(*args, **kwargs)
        return wrapped
    return decorator
Decorator generator that adds an argument to the cli plugin based on the decorated function
31,614
def cli_plugin_add_help(help):
    """Decorator generator that sets the CLI help text on the decorated
    plugin function, wrapping it first if needed."""
    def decorator(func):
        plugin = func
        if not isinstance(plugin, CLIPluginFuncWrapper):
            plugin = CLIPluginFuncWrapper(do_run=plugin)
        plugin.set_help(help)
        return plugin
    return decorator
Decorator generator that adds the cli help to the cli plugin based on the decorated function
31,615
def _get_domain(self):
    """Return the libvirt domain object backing this provider VM.

    Raises:
        vm_plugin.LagoVMDoesNotExistError: when the domain is not defined.
    """
    try:
        return self.libvirt_con.lookupByName(self._libvirt_name())
    except libvirt.libvirtError as err:
        raise vm_plugin.LagoVMDoesNotExistError(str(err))
Return the object representation of this provider VM .
31,616
def raw_state(self):
    """Return the raw state of this domain as reported by libvirt.

    Raises:
        vm_plugin.LagoFailedToGetVMStateError: when libvirt fails.
    """
    try:
        domain = self._get_domain()
        return domain.state()
    except libvirt.libvirtError as err:
        raise vm_plugin.LagoFailedToGetVMStateError(str(err))
Return the state of the domain in Libvirt's terms.
31,617
def state(self):
    """Return a short human-readable description of the domain's status."""
    try:
        return libvirt_utils.Domain.resolve_state(self.raw_state())
    except vm_plugin.LagoVMDoesNotExistError:
        return 'down'
    except vm_plugin.LagoFailedToGetVMStateError:
        return 'failed to get state'
    except KeyError:
        # raw state not present in the resolution table
        return 'unknown state'
Return a small description of the current status of the domain
31,618
def extract_paths ( self , paths , ignore_nopath ) : try : super ( ) . extract_paths ( paths = paths , ignore_nopath = ignore_nopath , ) except ExtractPathError as err : LOGGER . debug ( '%s: failed extracting files: %s' , self . vm . name ( ) , err . message ) if self . _has_guestfs : self . extract_paths_dead ( paths...
Extract the given paths from the domain
31,619
def extract_paths_dead ( self , paths , ignore_nopath ) : if not self . _has_guestfs : raise LagoException ( ( 'guestfs module not available, cannot ' ) ( 'extract files with libguestfs' ) ) LOGGER . debug ( '%s: attempting to extract files with libguestfs' , self . vm . name ( ) ) guestfs_tools . extract_paths ( disk_...
Extract the given paths from the domain using guestfs . Using guestfs can have side - effects and should be used as a second option mainly when SSH is not available .
31,620
def export_disks ( self , standalone , dst_dir , compress , collect_only = False , with_threads = True , * args , ** kwargs ) : vm_export_mgr = export . VMExportManager ( disks = self . vm . disks , dst = dst_dir , compress = compress , with_threads = with_threads , standalone = standalone , * args , ** kwargs ) if col...
Export all the disks of self .
31,621
def interactive_console(self):
    """Open an interactive virsh console attached to this VM.

    Returns:
        the result of the interactive virsh run (see
        utils.run_interactive_command).

    Raises:
        RuntimeError: if the VM is not running.
    """
    if not self.running():
        # BUG FIX: the original referenced ``self._libvirt_.name``, which is
        # not an attribute of this provider; use the libvirt domain name.
        raise RuntimeError('VM %s is not running' % self._libvirt_name())
    virsh_command = [
        "virsh",
        "-c",
        config.get('libvirt_url'),
        "console",
        self._libvirt_name(),
    ]
    return utils.run_interactive_command(command=virsh_command, )
Opens an interactive console
31,622
def init ( config , workdir = None , logfile = None , loglevel = logging . INFO , ** kwargs ) : setup_sdk_logging ( logfile , loglevel ) defaults = lago_config . get_section ( 'init' ) if workdir is None : workdir = os . path . abspath ( '.lago' ) defaults [ 'workdir' ] = workdir defaults [ 'virt_config' ] = config def...
Initialize the Lago environment
31,623
def load_env(workdir, logfile=None, loglevel=logging.INFO):
    """Load an existing Lago environment from ``workdir``.

    Sets up SDK logging, loads the workdir and returns an SDK wrapper
    around its current prefix.
    """
    setup_sdk_logging(logfile, loglevel)
    abs_workdir = os.path.abspath(workdir)
    loaded_workdir = lago_workdir.Workdir(path=abs_workdir)
    current_prefix = loaded_workdir.get_prefix('current')
    return SDK(loaded_workdir, current_prefix)
Load an existing Lago environment
31,624
def ansible_inventory(self, keys=None):
    """Get an Ansible inventory as a string.

    Args:
        keys (list): keys on which to group the hosts by; any key defined
            in the LagoInitFile can be used.  Defaults to
            ``['vm-type', 'groups', 'vm-provider']``.

    Returns:
        str: the rendered inventory.
    """
    # BUG FIX: the original used a mutable list as the default argument
    # value; use the None-sentinel idiom instead.
    if keys is None:
        keys = ['vm-type', 'groups', 'vm-provider']
    lansible = LagoAnsible(self._prefix)
    return lansible.get_inventory_str(keys=keys)
Get an Ansible inventory as a string. keys should be a list of keys on which to group the hosts by. You can use any key defined in LagoInitFile.
31,625
def calc_sha(self, checksum):
    """Compute the ``checksum`` digest of the exported disk, persist it to
    ``<dst>.hash`` and record it in this manager's exported_metadata."""
    with LogTask('Calculating {}'.format(checksum)):
        with open(self.dst + '.hash', 'wt') as hash_file:
            digest = utils.get_hash(self.dst, checksum)
            hash_file.write(digest)
        self.exported_metadata[checksum] = digest
Calculate the checksum of the new exported disk, write it to a file, and update this manager's exported_metadata.
31,626
def compress(self):
    """Compress the exported image and remove the uncompressed copy.

    The 16 MiB block size was taken from the virt-builder page.
    """
    if not self.do_compress:
        return
    with LogTask('Compressing disk'):
        utils.compress(self.dst, 16777216)
        os.unlink(self.dst)
Compress the new exported image Block size was taken from virt - builder page
31,627
def rebase ( self ) : if self . standalone : rebase_msg = 'Merging layered image with base' else : rebase_msg = 'Rebase' with LogTask ( rebase_msg ) : if len ( self . src_qemu_info ) == 1 : return if self . standalone : utils . qemu_rebase ( target = self . dst , backing_file = "" ) else : if len ( self . src_qemu_info...
Change the backing - file entry of the exported disk . Please refer to qemu - img rebase manual for more info .
31,628
def export ( self ) : with LogTask ( 'Exporting disk {} to {}' . format ( self . name , self . dst ) ) : with utils . RollbackContext ( ) as rollback : rollback . prependDefer ( shutil . rmtree , self . dst , ignore_errors = True ) self . copy ( ) if not self . disk [ 'format' ] == 'iso' : self . sparse ( ) self . calc...
See DiskExportManager . export
31,629
def normalize_build_spec(self, build_spec):
    """Convert a build spec into Command tuples in ``self.build_cmds``.

    Args:
        build_spec (list): list of single-key dicts mapping a command name
            to its options; falsy entries are skipped.
    """
    for cmd in build_spec:
        if not cmd:
            continue
        # BUG FIX: ``dict.keys()[0]`` / ``dict.values()[0]`` are not
        # subscriptable on Python 3; take the single pair portably.
        cmd_name, cmd_options = next(iter(cmd.items()))
        cmd_handler = self.get_cmd_handler(cmd_name)
        self.build_cmds.append(cmd_handler(cmd_options))
Convert a build spec into a list of Command tuples . After running this command self . build_cmds should hold all the commands that should be run on the disk in self . disk_path .
31,630
def get_cmd_handler(self, cmd):
    """Return the handler method for ``cmd``.

    Dashes in the command name map to underscores in the handler name;
    handler and command share the same (normalized) name.

    Raises:
        BuildException: when no handler with that name exists.
    """
    handler_name = cmd.replace('-', '_')
    handler = getattr(self, handler_name, None)
    if not handler:
        raise BuildException(
            'Command {} is not supported as a '
            'build command'.format(handler_name)
        )
    return handler
Return an handler for cmd . The handler and the command should have the same name . See class description for more info about handlers .
31,631
def build ( self ) : if not self . build_cmds : LOGGER . debug ( 'No build commands were found, skipping build step' ) with LogTask ( 'Building {} disk {}' . format ( self . name , self . disk_path ) ) : for command in self . build_cmds : with LogTask ( 'Running command {}' . format ( command . name ) ) : LOGGER . debu...
Run all the commands in self . build_cmds
31,632
def download_image(self, handle, dest):
    """Copy the image identified by ``handle`` from this store to ``dest``."""
    source_path = self._prefixed(handle)
    shutil.copyfile(source_path, dest)
Copies over the handle to the destination.
31,633
def download_image(self, handle, dest):
    """Download the image at URL ``handle`` into ``dest`` and unpack the
    xz payload in place."""
    with log_utils.LogTask('Download image %s' % handle, logger=LOGGER):
        self.open_url(url=handle, dest=dest)
        self.extract_image_xz(dest)
Downloads the image from the http server
31,634
def get_by_name ( self , name ) : try : spec = self . _dom . get ( 'templates' , { } ) [ name ] except KeyError : raise LagoMissingTemplateError ( name , self . _path ) return Template ( name = name , versions = { ver_name : TemplateVersion ( name = '%s:%s:%s' % ( self . name , name , ver_name ) , source = self . _prov...
Retrieve a template by its name.
31,635
def get_hash(self):
    """Return the hash associated with this template version.

    The value is fetched from the source once, stripped and cached.
    """
    if self._hash is None:
        raw_hash = self._source.get_hash(self._handle)
        self._hash = raw_hash.strip()
    return self._hash
Returns the associated hash for this template version
31,636
def get_metadata(self):
    """Return the metadata info for this template version.

    Lazily loaded from the source and cached on first access.
    """
    if self._metadata is None:
        fetched = self._source.get_metadata(self._handle)
        self._metadata = fetched
    return self._metadata
Returns the associated metadata info for this template version
31,637
def get_path(self, temp_ver):
    """Return the path of ``temp_ver`` inside this store.

    Raises:
        RuntimeError: when the version is not present in the store.
    """
    if temp_ver in self:
        return self._prefixed(temp_ver.name)
    raise RuntimeError('Template: {} not present'.format(temp_ver.name))
Get the path of the given version in this store
31,638
def download ( self , temp_ver , store_metadata = True ) : dest = self . _prefixed ( temp_ver . name ) temp_dest = '%s.tmp' % dest with utils . LockFile ( dest + '.lock' ) : if os . path . exists ( dest ) : return temp_ver . download ( temp_dest ) if store_metadata : with open ( '%s.metadata' % dest , 'w' ) as f : util...
Retrieve the given template version
31,639
def get_stored_metadata(self, temp_ver):
    """Load and return the stored ``.metadata`` JSON for ``temp_ver``."""
    metadata_path = self._prefixed('%s.metadata' % temp_ver.name)
    with open(metadata_path) as metadata_file:
        return json.load(metadata_file)
Retrieves the metadata for the given template version from the store
31,640
def get_stored_hash(self, temp_ver):
    """Read and return the stripped ``.hash`` file content for ``temp_ver``."""
    hash_path = self._prefixed('%s.hash' % temp_ver.name)
    with open(hash_path) as hash_file:
        return hash_file.read().strip()
Retrieves the hash for the given template version from the store
31,641
def get_inventory ( self , keys = None ) : inventory = defaultdict ( list ) keys = keys or [ 'vm-type' , 'groups' , 'vm-provider' ] vms = self . prefix . get_vms ( ) . values ( ) for vm in vms : entry = self . _generate_entry ( vm ) vm_spec = vm . spec for key in keys : value = self . get_key ( key , vm_spec ) if value...
Create an Ansible inventory based on python dicts and lists . The returned value is a dict in which every key represents a group and every value is a list of entries for that group .
31,642
def get_key ( key , data_structure ) : if key == '/' : return data_structure path = key . split ( '/' ) path [ 0 ] or path . pop ( 0 ) current_value = data_structure while path : current_key = path . pop ( 0 ) try : current_key = int ( current_key ) except ValueError : pass try : current_value = current_value [ current...
Helper method for extracting values from a nested data structure .
31,643
def get_inventory_temp_file ( self , keys = None ) : temp_file = tempfile . NamedTemporaryFile ( mode = 'r+t' ) inventory = self . get_inventory_str ( keys ) LOGGER . debug ( 'Writing inventory to temp file {} \n{}' . format ( temp_file . name , inventory ) ) temp_file . write ( inventory ) temp_file . flush ( ) temp_f...
Context manager which returns the inventory written on a tempfile . The tempfile will be deleted as soon as this context manger ends .
31,644
def exit_handler(signum, frame):
    """Signal handler for SIGTERM/SIGHUP.

    Logs the signal and exits with the conventional ``128 + signum``
    status; the SystemExit raised by sys.exit triggers cleanup code in
    context managers and ``finally`` blocks.
    """
    LOGGER.debug('signal {} was caught'.format(signum))
    sys.exit(signum + 128)
Catch SIGTERM and SIGHUP and call sys . exit which raises SystemExit exception . This will trigger all the cleanup code defined in ContextManagers and finally statements .
31,645
def start ( self , attempts = 5 , timeout = 2 ) : if not self . alive ( ) : with LogTask ( 'Create network %s' % self . name ( ) ) : net = self . libvirt_con . networkCreateXML ( self . _libvirt_xml ( ) ) if net is None : raise RuntimeError ( 'failed to create network, XML: %s' % ( self . _libvirt_xml ( ) ) ) for _ in ...
Start the network will check if the network is active attempts times waiting timeout between each attempt .
31,646
def generate_cpu_xml ( self ) : if self . cpu_custom : return self . generate_custom ( cpu = self . cpu , vcpu_num = self . vcpu_num , fill_topology = self . vcpu_set ) elif self . cpu_model : return self . generate_exact ( self . cpu_model , vcpu_num = self . vcpu_num , host_cpu = self . host_cpu ) else : return self ...
Get CPU XML
31,647
def generate_host_passthrough(self, vcpu_num):
    """Build a ``<cpu mode='host-passthrough'>`` node with topology and,
    for multi-vCPU guests, a NUMA layout."""
    cpu_node = ET.Element('cpu', mode='host-passthrough')
    cpu_node.append(self.generate_topology(vcpu_num))
    if vcpu_num > 1:
        cpu_node.append(self.generate_numa(vcpu_num))
    return cpu_node
Generate host - passthrough XML cpu node
31,648
def generate_custom(self, cpu, vcpu_num, fill_topology):
    """Generate a custom CPU XML node from a dict spec.

    Args:
        cpu (dict): CPU spec, converted with utils.dict_to_xml (xmltodict).
        vcpu_num (int): number of vCPUs, used when filling topology.
        fill_topology (bool): append a <topology> node when the spec has none.

    Raises:
        LagoInitException: when the dict cannot be converted to XML.
    """
    try:
        cpu = utils.dict_to_xml({'cpu': cpu})
    # BUG FIX: the original used a bare ``except:``, which also swallows
    # SystemExit/KeyboardInterrupt; catch only real errors.
    except Exception:
        raise LagoInitException('conversion of \'cpu\' to XML failed')
    if not cpu.xpath('topology') and fill_topology:
        cpu.append(self.generate_topology(vcpu_num))
    return cpu
Generate custom CPU model . This method attempts to convert the dict to XML as defined by xmltodict . unparse method .
31,649
def generate_exact ( self , model , vcpu_num , host_cpu ) : nested = { 'Intel' : 'vmx' , 'AMD' : 'svm' } cpu = ET . Element ( 'cpu' , match = 'exact' ) ET . SubElement ( cpu , 'model' ) . text = model cpu . append ( self . generate_topology ( vcpu_num ) ) vendor = host_cpu . findtext ( 'vendor' ) if not nested . get ( ...
Generate exact CPU model with nested virtualization CPU feature .
31,650
def generate_feature(self, name, policy='require'):
    """Return a CPU ``<feature>`` element with the given name and policy."""
    feature = ET.Element('feature', policy=policy, name=name)
    return feature
Generate CPU feature element
31,651
def get_cpu_vendor(cls, family, arch='x86'):
    """Return the vendor of the given CPU family, or 'generic' when the
    vendor is not present in the CPU map."""
    props = cls.get_cpu_props(family, arch)
    vendors = props.xpath('vendor/@name')
    if vendors:
        return vendors[0]
    return 'generic'
Get CPU vendor if vendor is not available will return generic
31,652
def get_cpu_props(cls, family, arch='x86'):
    """Return the CPU-map XML node describing ``family`` for ``arch``.

    Raises:
        LagoException: when the family is unknown.
    """
    cpus = cls.get_cpus_by_arch(arch)
    matches = cpus.xpath('model[@name="{0}"]'.format(family))
    if not matches:
        raise LagoException('No such CPU family: {0}'.format(family))
    return matches[0]
Get CPU info XML
31,653
def get_cpus_by_arch(cls, arch):
    """Return the ``<arch>`` node for ``arch`` from libvirt's CPU map file.

    Raises:
        LagoException: when the arch is not present in the map.
    """
    with open('/usr/share/libvirt/cpu_map.xml', 'r') as cpu_map:
        cpu_xml = ET.parse(cpu_map)
    try:
        return cpu_xml.xpath('/cpus/arch[@name="{0}"]'.format(arch))[0]
    except IndexError:
        raise LagoException('No such arch: {0}'.format(arch))
Get all CPUs info by arch
31,654
def log_task ( task , logger = logging , level = 'info' , propagate_fail = True , uuid = None ) : def decorator ( func ) : @ wraps ( func ) def wrapper ( * args , ** kwargs ) : with LogTask ( task , logger = logger , level = level , propagate_fail = propagate_fail , uuid = uuid ) : return func ( * args , ** kwargs ) re...
Parameterized decorator to wrap a function in a log task
31,655
def start_log_task(task, logger=logging, level='info'):
    """Emit the start-of-task trigger message for ``task`` at ``level``."""
    log_func = getattr(logger, level)
    log_func(START_TASK_TRIGGER_MSG % task)
Starts a log task
31,656
def end_log_task(task, logger=logging, level='info'):
    """Emit the end-of-task trigger message for ``task`` at ``level``."""
    log_func = getattr(logger, level)
    log_func(END_TASK_TRIGGER_MSG % task)
Ends a log task
31,657
def hide_stevedore_logs():
    """Silence the 'stevedore.extension' logger.

    Kept for compatibility with older stevedore releases that log noisily.
    """
    stevedore_logger = logging.getLogger('stevedore.extension')
    stevedore_logger.addHandler(logging.NullHandler())
    stevedore_logger.setLevel(logging.ERROR)
    stevedore_logger.propagate = False
Hides the logs of stevedore. This function was added in order to support older versions of stevedore.
31,658
def colored(cls, color, message):
    """Wrap ``message`` in the class's escape code for ``color`` followed
    by the DEFAULT (reset) code."""
    prefix = getattr(cls, color.upper())
    suffix = cls.DEFAULT
    return prefix + message + suffix
Small function to wrap a string around a color
31,659
def format ( self , record ) : level = record . levelno if level >= logging . CRITICAL : color = self . CRITICAL elif level >= logging . ERROR : color = self . ERROR elif level >= logging . WARNING : color = self . WARNING elif level >= logging . INFO : color = self . INFO elif level >= logging . DEBUG : color = self ....
Adds colors to a log record and formats it with the default
31,660
def handle_new_task(self, task_name, record):
    """Register a newly started task and emit its header record when the
    current nesting depth is shown."""
    record.msg = ColorFormatter.colored('default', START_TASK_MSG)
    record.task = task_name
    self.tasks[task_name] = Task(name=task_name, maxlen=self.buffer_size)
    if self.should_show_by_depth():
        self.pretty_emit(record, is_header=True)
Do everything needed when a task is starting
31,661
def mark_parent_tasks_as_failed(self, task_name, flush_logs=False):
    """Flag every task opened before ``task_name`` as failed, optionally
    dropping their buffered log records, then mark the main tasks too."""
    for ancestor_name in self.tasks:
        if ancestor_name == task_name:
            break
        if flush_logs:
            self.tasks[ancestor_name].clear()
        self.tasks[ancestor_name].failed = True
    self.mark_main_tasks_as_failed()
Marks all the parent tasks as failed
31,662
def close_children_tasks(self, parent_task_name):
    """Close (remove) all tasks that were opened after ``parent_task_name``.

    Tasks live in an insertion-ordered mapping; children are popped from
    the end until the parent is reached.  A parent that is not currently
    open is a no-op.
    """
    if parent_task_name not in self.tasks:
        return
    while self.tasks:
        # BUG FIX: ``reversed(self.tasks.keys()).next()`` is Python-2-only
        # (.next() and list-returning keys()); use next()/reversed() over a
        # concrete list instead, which works on both versions.
        newest_task = next(reversed(list(self.tasks)))
        if newest_task == parent_task_name:
            break
        del self.tasks[newest_task]
Closes all the children tasks that were open
31,663
def handle_closed_task ( self , task_name , record ) : if task_name not in self . tasks : return if self . main_failed : self . mark_parent_tasks_as_failed ( self . cur_task ) if self . tasks [ task_name ] . failed : record . msg = ColorFormatter . colored ( 'red' , END_TASK_ON_ERROR_MSG ) else : record . msg = ColorFo...
Do everything needed when a task is closed
31,664
def handle_error ( self ) : if not self . tasks : return self . mark_parent_tasks_as_failed ( self . cur_task , flush_logs = True , ) for index , task in enumerate ( self . tasks . values ( ) ) : if self . should_show_by_depth ( index + 1 ) : continue start_task_header = logging . LogRecord ( '' , logging . INFO , '' ,...
Handles an error log record that should be shown
31,665
def emit ( self , record ) : record . task = self . cur_task if record . levelno >= self . dump_level and self . cur_task : self . tasks [ self . cur_task ] . failed = True self . tasks [ self . cur_task ] . force_show = True is_start = START_TASK_REG . match ( str ( record . msg ) ) if is_start : self . handle_new_tas...
Handle the given record this is the entry point from the python logging facility
31,666
def _validate_lease_dir(self):
    """Ensure the store directory exists, creating it when missing.

    Raises:
        LagoSubnetLeaseBadPermissionsException: when it cannot be created.
    """
    try:
        if not os.path.isdir(self.path):
            os.makedirs(self.path)
    except OSError as err:
        raise_from(
            LagoSubnetLeaseBadPermissionsException(self.path, err.strerror),
            err,
        )
Validate that the directory used by this store exists; otherwise create it.
31,667
def acquire ( self , uuid_path , subnet = None ) : try : with self . _create_lock ( ) : if subnet : LOGGER . debug ( 'Trying to acquire subnet {}' . format ( subnet ) ) acquired_subnet = self . _acquire_given_subnet ( uuid_path , subnet ) else : LOGGER . debug ( 'Trying to acquire a free subnet' ) acquired_subnet = sel...
Lease a free subnet for the given uuid path . If subnet is given try to lease that subnet otherwise try to lease a free subnet .
31,668
def _acquire ( self , uuid_path ) : for index in range ( self . _min_third_octet , self . _max_third_octet + 1 ) : lease = self . create_lease_object_from_idx ( index ) if self . _lease_valid ( lease ) : continue self . _take_lease ( lease , uuid_path , safe = False ) return lease . to_ip_network ( ) raise LagoSubnetLe...
Lease a free network for the given uuid path
31,669
def _acquire_given_subnet(self, uuid_path, subnet):
    """Try to lease exactly ``subnet`` for the prefix at ``uuid_path`` and
    return the leased IP network."""
    lease = self.create_lease_object_from_subnet(subnet)
    self._take_lease(lease, uuid_path)
    return lease.to_ip_network()
Try to create a lease for subnet
31,670
def _lease_valid ( self , lease ) : if not lease . exist : return None if lease . has_env : return lease . uuid_path else : self . _release ( lease ) return None
Check if the given lease exists and still has a prefix that owns it. If the lease exists but its prefix doesn't, remove the lease from this store.
31,671
def _take_lease ( self , lease , uuid_path , safe = True ) : if safe : lease_taken_by = self . _lease_valid ( lease ) if lease_taken_by and lease_taken_by != uuid_path : raise LagoSubnetLeaseTakenException ( lease . subnet , lease_taken_by ) with open ( uuid_path ) as f : uuid = f . read ( ) with open ( lease . path , ...
Persist the given lease to the store and make the prefix in uuid_path his owner
31,672
def list_leases ( self , uuid = None ) : try : lease_files = os . listdir ( self . path ) except OSError as e : raise_from ( LagoSubnetLeaseBadPermissionsException ( self . path , e . strerror ) , e ) leases = [ self . create_lease_object_from_idx ( lease_file . split ( '.' ) [ 0 ] ) for lease_file in lease_files if le...
List current subnet leases
31,673
def release ( self , subnets ) : if isinstance ( subnets , str ) or isinstance ( subnets , IPNetwork ) : subnets = [ subnets ] subnets_iter = ( str ( subnet ) if isinstance ( subnet , IPNetwork ) else subnet for subnet in subnets ) try : with self . _create_lock ( ) : for subnet in subnets_iter : self . _release ( self...
Free the lease of the given subnets
31,674
def _release(self, lease):
    """Free the given lease by removing its file, when it exists."""
    if not lease.exist:
        return
    os.unlink(lease.path)
    LOGGER.debug('Removed subnet lease {}'.format(lease.path))
Free the given lease
31,675
def _lease_owned ( self , lease , current_uuid_path ) : prev_uuid_path , prev_uuid = lease . metadata with open ( current_uuid_path ) as f : current_uuid = f . read ( ) return current_uuid_path == prev_uuid_path and prev_uuid == current_uuid
Checks if the given lease is owned by the prefix whose uuid is in the given path
31,676
def _run_command ( command , input_data = None , stdin = None , out_pipe = subprocess . PIPE , err_pipe = subprocess . PIPE , env = None , uuid = None , ** kwargs ) : if uuid is None : uuid = uuid_m . uuid4 ( ) if constants . LIBEXEC_DIR not in os . environ [ 'PATH' ] . split ( ':' ) : os . environ [ 'PATH' ] = '%s:%s'...
Runs a command
31,677
def run_command ( command , input_data = None , out_pipe = subprocess . PIPE , err_pipe = subprocess . PIPE , env = None , ** kwargs ) : if env is None : env = os . environ . copy ( ) with LogTask ( 'Run command: %s' % ' ' . join ( '"%s"' % arg for arg in command ) , logger = LOGGER , level = 'debug' , ) as task : comm...
Runs a command non - interactively
31,678
def run_interactive_command(command, env=None, **kwargs):
    """Run ``command`` interactively, reusing the current process's stdin,
    stdout and stderr, and return the command result."""
    return _run_command(
        command=command,
        out_pipe=sys.stdout,
        err_pipe=sys.stderr,
        stdin=sys.stdin,
        env=env,
        **kwargs
    )
Runs a command interactively reusing the current stdin stdout and stderr
31,679
def deepcopy(original_obj):
    """Return a deep copy of nested lists/dicts so no containers are shared.

    Useful after YAML loading, where anchors create cross-referenced dicts
    and lists.  Non-container values are returned as-is.
    """
    if isinstance(original_obj, dict):
        return {key: deepcopy(value) for key, value in original_obj.items()}
    if isinstance(original_obj, list):
        return [deepcopy(item) for item in original_obj]
    return original_obj
Creates a deep copy of an object with no crossed referenced lists or dicts useful when loading from yaml as anchors generate those cross - referenced dicts and lists
31,680
def load_virt_stream(virt_fd):
    """Load the given virt config stream into a dict.

    JSON is tried first; on failure the stream is rewound and parsed as
    YAML.  A deep copy is returned so YAML anchors do not produce shared
    containers.
    """
    try:
        virt_conf = json.load(virt_fd)
    except ValueError:
        virt_fd.seek(0)
        # SECURITY FIX: yaml.load without an explicit Loader can construct
        # arbitrary Python objects from the stream; safe_load only builds
        # plain data types.
        virt_conf = yaml.safe_load(virt_fd)
    return deepcopy(virt_conf)
Loads the given conf stream into a dict trying different formats if needed
31,681
def get_qemu_info ( path , backing_chain = False , fail_on_error = True ) : cmd = [ 'qemu-img' , 'info' , '--output=json' , path ] if backing_chain : cmd . insert ( - 1 , '--backing-chain' ) result = run_command_with_validation ( cmd , fail_on_error , msg = 'Failed to get info for {}' . format ( path ) ) return json . ...
Get info on a given qemu disk
31,682
def get_hash(file_path, checksum='sha1'):
    """Generate the hex digest of ``file_path``.

    Args:
        file_path (str): file to hash, read in 64 KiB chunks.
        checksum (str): any algorithm name exposed by hashlib.

    Returns:
        str: the hex digest.
    """
    digest = getattr(hashlib, checksum)()
    # BUG FIX: the file must be opened in binary mode — text mode breaks
    # hashing of binary images (and hashlib requires bytes on Python 3).
    with open(file_path, 'rb') as file_descriptor:
        while True:
            chunk = file_descriptor.read(65536)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
Generate a hash for the given file
31,683
def ver_cmp(ver1, ver2):
    """Three-way compare two Lago version strings.

    Returns a negative, zero or positive number (like the old ``cmp``),
    comparing via pkg_resources' version parsing.
    """
    parsed1 = pkg_resources.parse_version(ver1)
    parsed2 = pkg_resources.parse_version(ver2)
    # BUG FIX: the builtin ``cmp`` does not exist on Python 3; emulate its
    # contract with the standard (a > b) - (a < b) idiom.
    return (parsed1 > parsed2) - (parsed1 < parsed2)
Compare lago versions
31,684
def acquire(self):
    """Acquire the lock: open the lock file, relax its mode to 0o660 and
    take the configured flock operation on it."""
    lock_file = open(self._path, mode='w+')
    self._fd = lock_file
    os.chmod(self._path, 0o660)
    fcntl.flock(lock_file, self._op)
Acquire the lock
31,685
def format ( self , info_dict , delimiter = '/' ) : def dfs ( father , path , acc ) : if isinstance ( father , list ) : for child in father : dfs ( child , path , acc ) elif isinstance ( father , collections . Mapping ) : for child in sorted ( father . items ( ) , key = itemgetter ( 0 ) ) , : dfs ( child , path , acc )...
This formatter will take a data structure that represent a tree and will print all the paths from the root to the leaves
31,686
def workdir_loaded(func):
    """Decorator that makes sure the workdir is loaded before the wrapped
    function runs."""
    @wraps(func)
    def wrapper(workdir, *args, **kwargs):
        if not workdir.loaded:
            workdir.load()
        return func(workdir, *args, **kwargs)
    return wrapper
Decorator to make sure that the workdir is loaded when calling the decorated function
31,687
def initialize ( self , prefix_name = 'default' , * args , ** kwargs ) : if self . loaded : raise WorkdirError ( 'Workdir %s already initialized' % self . path ) if not os . path . exists ( self . path ) : LOGGER . debug ( 'Creating workdir %s' , self . path ) os . makedirs ( self . path ) self . prefixes [ prefix_name...
Initializes a workdir by adding a new prefix to the workdir .
31,688
def load ( self ) : if self . loaded : LOGGER . debug ( 'Already loaded' ) return try : basepath , dirs , _ = os . walk ( self . path ) . next ( ) except StopIteration : raise MalformedWorkdir ( 'Empty dir %s' % self . path ) full_path = partial ( os . path . join , basepath ) found_current = False for dirname in dirs ...
Loads the prefixes that are available in the workdir.
31,689
def _update_current ( self ) : if not self . current or self . current not in self . prefixes : if 'default' in self . prefixes : selected_current = 'default' elif self . prefixes : selected_current = sorted ( self . prefixes . keys ( ) ) . pop ( ) else : raise MalformedWorkdir ( 'No current link and no prefixes in wor...
Makes sure that a current is set
31,690
def _set_current ( self , new_current ) : new_cur_full_path = self . join ( new_current ) if not os . path . exists ( new_cur_full_path ) : raise PrefixNotFound ( 'Prefix "%s" does not exist in workdir %s' % ( new_current , self . path ) ) if os . path . lexists ( self . join ( 'current' ) ) : os . unlink ( self . join...
Change the current default prefix for internal usage
31,691
def add_prefix ( self , name , * args , ** kwargs ) : if os . path . exists ( self . join ( name ) ) : raise LagoPrefixAlreadyExistsError ( name , self . path ) self . prefixes [ name ] = self . prefix_class ( self . join ( name ) , * args , ** kwargs ) self . prefixes [ name ] . initialize ( ) if self . current is Non...
Adds a new prefix to the workdir .
31,692
def get_prefix(self, name):
    """Return the prefix called ``name``, resolving the special name
    'current' to the active prefix.

    Raises:
        KeyError: when no such prefix exists in this workdir.
    """
    resolved = self.current if name == 'current' else name
    try:
        return self.prefixes[resolved]
    except KeyError:
        raise KeyError(
            'Unable to find prefix "%s" in workdir %s' % (resolved, self.path)
        )
Retrieve a prefix resolving the current one if needed
31,693
def destroy ( self , prefix_names = None ) : if prefix_names is None : self . destroy ( prefix_names = self . prefixes . keys ( ) ) return for prefix_name in prefix_names : if prefix_name == 'current' and self . current in prefix_names : continue elif prefix_name == 'current' : prefix_name = self . current self . get_p...
Destroy all the given prefixes and remove any left files if no more prefixes are left
31,694
def is_workdir(cls, path):
    """Return True when ``path`` loads cleanly as a workdir."""
    try:
        candidate = cls(path=path)
        candidate.load()
    except MalformedWorkdir:
        return False
    return True
Check if the given path is a workdir
31,695
def cleanup ( self ) : current = self . join ( 'current' ) if not os . path . exists ( current ) : LOGGER . debug ( 'found broken current symlink, removing: %s' , current ) os . unlink ( self . join ( 'current' ) ) self . current = None try : self . _update_current ( ) except PrefixNotFound : if not os . listdir ( self...
Attempt to set a new current symlink if it is broken . If no other prefixes exist and the workdir is empty try to delete the entire workdir .
31,696
def _load_plugins ( namespace , instantiate = True ) : mgr = ExtensionManager ( namespace = namespace , on_load_failure_callback = ( lambda _ , ep , err : LOGGER . warning ( 'Could not load plugin {}: {}' . format ( ep . name , err ) ) ) ) if instantiate : plugins = dict ( ( ext . name , ext . plugin if isinstance ( ex...
Loads all the plugins for the given namespace
31,697
def metadata(self):
    """Return this prefix's metadata dict.

    Lazily loaded from the metadata file; a missing/unreadable file yields
    an empty dict.
    """
    if self._metadata is None:
        try:
            with open(self.paths.metadata()) as metadata_fd:
                self._metadata = json.load(metadata_fd)
        except IOError:
            self._metadata = {}
    return self._metadata
Retrieve the metadata info for this prefix
31,698
def _save_metadata(self):
    """Serialize this prefix's metadata dict to its metadata file."""
    with open(self.paths.metadata(), 'w') as metadata_fd:
        utils.json_dump(self.metadata, metadata_fd)
Write this prefix metadata to disk
31,699
def save(self):
    """Persist this prefix: ensure the virt directory exists, then write
    the metadata and the virt env."""
    virt_path = self.paths.virt()
    if not os.path.exists(virt_path):
        os.makedirs(virt_path)
    self._save_metadata()
    self.virt_env.save()
Save this prefix to persistent storage