idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
42,600
def get_collections(db, collection=None, prefix=None, suffix=None):
    """Return a sorted list of collection names found in ``db``.

    If ``collection`` is given it is returned as a one-element list.
    Otherwise all non-system collections are listed, optionally filtered
    by ``prefix`` and/or ``suffix``.
    """
    if collection is not None:
        return [collection]
    names = db.collection_names(include_system_collections=False)
    # Build the filter predicates up front, then apply them in one pass.
    checks = []
    if prefix is not None:
        checks.append(lambda n: n.startswith(prefix))
    if suffix is not None:
        checks.append(lambda n: n.endswith(suffix))
    return sorted(n for n in names if all(check(n) for check in checks))
Returns a sorted list of collection names found in db .
42,601
def rename_collection(db, collection, new_name):
    """Rename a MongoDB collection.

    ``new_name`` may be a string, or a callable mapping the current
    collection name to the new one.  If the callable returns an empty
    string the collection is left untouched.
    """
    # IDIOM FIX: callable() instead of hasattr(x, '__call__').
    if callable(new_name):
        _new = new_name(collection)
        # An empty result from the callable means "skip this collection".
        if _new == '':
            return
    else:
        _new = new_name
    db[collection].rename(_new)
Renames a MongoDB collection .
42,602
def update(field, value, db, collection, match=None):
    """Set ``field`` to ``value`` on all documents of ``collection`` that
    satisfy ``match`` (all documents when ``match`` is None)."""
    coll = db[collection]
    criteria = {} if match is None else match
    change = {'$set': {field: value}}
    # MongoDB 2.x drivers only support the legacy multi-document update API.
    if db.client.server_info()['version'].startswith('2'):
        coll.update(criteria, change, multi=True)
    else:
        coll.update_many(criteria, change)
Updates MongoDB documents .
42,603
def mongoimport(json, database, ip='localhost', port=27017, user=None, password=None,
                delim='_', delim1=None, delim2=None,
                delim_occurance=1, delim1_occurance=1, delim2_occurance=1):
    """Perform mongoimport on one or more JSON files.

    ``json`` may be a single file path, a directory (all files inside are
    used), or a list/tuple of paths; only '.json' files are imported.
    Collection names are derived from filenames via the delimiter options.
    """
    logger = log.get_logger('mongodb')
    _print_mongoimport_info(logger)
    if type(json) in (list, tuple):
        pass
    elif os.path.isdir(json):
        from abtools.utils.pipeline import list_files
        json = list_files(json)
    else:
        json = [json, ]
    jsons = sorted([os.path.expanduser(j) for j in json if j.endswith('.json')])
    collections = _get_import_collections(jsons, delim, delim_occurance,
                                          delim1, delim1_occurance,
                                          delim2, delim2_occurance)
    logger.info('Found {} files to import'.format(len(jsons)))
    logger.info('')
    for i, (json_file, collection) in enumerate(zip(jsons, collections)):
        # BUG FIX: the format string was unterminated and had fewer
        # placeholders than arguments.
        logger.info('[ {} ] {} --> {}'.format(i + 1, os.path.basename(json_file), collection))
        if all([user, password]):
            # BUG FIX: mongoimport long options take a double dash
            # (--username/--password), not -username/-password.
            host = '--host {} --port {} --username {} --password {}'.format(ip, port, user, password)
        else:
            host = '--host {} --port {}'.format(ip, port)
        mongo_cmd = "mongoimport {} --db {} --collection {} --file {}".format(
            host, database, collection, json_file)
        mongo = sp.Popen(mongo_cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
        stdout, stderr = mongo.communicate()
Performs mongoimport on one or more json files .
42,604
def get_settings_from_profile(profile, profile_dir=None):
    """Return the configuration-file path for ``profile``.

    ``profile_dir`` may be a directory path or a module (its containing
    directory is used); it defaults to the project ``settings`` module.
    """
    if profile_dir is None:
        import settings as profile_dir
    # A module was supplied (or defaulted): use its directory.
    if hasattr(profile_dir, '__file__'):
        profile_dir = os.path.dirname(profile_dir.__file__)
    return os.path.join(profile_dir, '{0}.cfg'.format(profile))
Returns the configuration file path for the given profile .
42,605
def _get_app_module ( self ) : def configure ( binder ) : binder . bind ( ServiceApplication , to = self , scope = singleton ) binder . bind ( Config , to = self . config , scope = singleton ) return configure
Returns a module which binds the current app and configuration .
42,606
def _configure_injector(self, modules):
    """Create the injector and install the given binding modules.

    Order matters: services are registered first, the injector is then
    created, core bindings installed, and finally the caller-supplied
    ``modules`` are bound.
    """
    self._register()
    self._create_injector()
    self._bind_core()
    self._bind_modules(modules)
    self.logger.debug("Injector configuration with modules {0}.".format(modules))
    # Mark dependency wiring as complete so dependents can rely on it.
    self._dependencies_initialized = True
Create the injector and install the modules .
42,607
def configure(self, binder):
    """Bind the Redis-backed cache module and id helper as the defaults.

    Each concrete object is exposed both under its own class key and
    under the generic interface key (CacheModule / IdHelper).
    """
    cache = RedisCacheModule()
    for key in (RedisCacheModule, CacheModule):
        binder.bind(key, to=cache, scope=singleton)
    helper = RedisIdHelper()
    for key in (RedisIdHelper, IdHelper):
        binder.bind(key, to=helper, scope=singleton)
    logging.debug("Created RedisCache binding.")
Initializer of the cache - creates the Redis cache module as the default cache infrastructure . The module is bound to RedisCacheModule and CacheModule keys . The initializer also creates RedisIdHelper and bounds it to RedisIdHelper and IdHelper keys .
42,608
def lazy_property(func):
    """Decorator turning a method into a lazily-computed, cached property.

    The wrapped function runs at most once per instance; its result is
    cached on the instance under a private attribute.  Assignment
    overrides the cached value and deletion clears the cache (so the
    next read recomputes).
    """
    cache_attr = '_lazy_' + func.__name__

    def getter(self):
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, func(self))
        return getattr(self, cache_attr)

    def setter(self, value):
        setattr(self, cache_attr, value)

    def deleter(self):
        if hasattr(self, cache_attr):
            delattr(self, cache_attr)

    return property(getter, setter, deleter)
Wraps a property to provide lazy evaluation . Eliminates boilerplate . Also provides for setting and deleting the property .
42,609
def region(self, start=0, end=None):
    """Return the slice [start:end] of ``self.sequence`` as a FASTA record.

    ``end`` defaults to the full sequence length.
    """
    stop = len(self.sequence) if end is None else end
    return '>{}\n{}'.format(self.id, self.sequence[start:stop])
Returns a region of Sequence . sequence in FASTA format .
42,610
def underscore_to_camelcase ( value , first_upper = True ) : value = str ( value ) camelized = "" . join ( x . title ( ) if x else '_' for x in value . split ( "_" ) ) if not first_upper : camelized = camelized [ 0 ] . lower ( ) + camelized [ 1 : ] return camelized
Transform string from underscore_string to camelCase .
42,611
def et_node_to_string(et_node, default=''):
    """Return the stripped text of an ElementTree node, or ``default``
    when the node is None or has no (truthy) text."""
    if et_node is not None and et_node.text:
        return str(et_node.text).strip()
    return default
Simple method to get stripped text from node or default string if None is given .
42,612
def addslashes(s, escaped_chars=None):
    r"""Backslash-escape each of ``escaped_chars`` in ``s``.

    Defaults to escaping the backslash and the single quote.
    """
    chars = ["\\", "'"] if escaped_chars is None else escaped_chars
    for ch in chars:
        # replace() is a no-op when ch is absent, so no membership test
        # is needed.
        s = s.replace(ch, '\\' + ch)
    return s
Add slashes for given characters. Default is for backslash (\) and single quote (').
42,613
def mafft(sequences=None, alignment_file=None, fasta=None, fmt='fasta',
          threads=-1, as_file=False, reorder=True,
          print_stdout=False, print_stderr=False, mafft_bin=None):
    """Perform multiple sequence alignment with MAFFT.

    Input is either ``sequences`` (converted to a temporary FASTA file)
    or an existing ``fasta`` file path.  Returns a Bio.AlignIO alignment,
    the alignment file path when ``as_file`` is True, or None when MAFFT
    produced no output.
    """
    if sequences:
        fasta_string = _get_fasta_string(sequences)
        fasta_file = tempfile.NamedTemporaryFile(delete=False)
        fasta_file.close()
        ffile = fasta_file.name
        with open(ffile, 'w') as f:
            f.write(fasta_string)
    elif fasta:
        ffile = fasta
    else:
        # BUG FIX: previously fell through with ffile unbound,
        # raising NameError below.
        raise ValueError('Either sequences or fasta must be provided.')
    if alignment_file is None:
        alignment_file = tempfile.NamedTemporaryFile(delete=False).name
    aln_format = ''
    if fmt.lower() == 'clustal':
        aln_format = '--clustalout '
    if fmt.lower() == 'phylip':
        aln_format = '--phylipout '
    if reorder:
        aln_format += '--reorder '
    if mafft_bin is None:
        mafft_bin = 'mafft'
    mafft_cline = '{} --thread {} {}{} > {}'.format(
        mafft_bin, threads, aln_format, ffile, alignment_file)
    mafft = sp.Popen(str(mafft_cline), stdout=sp.PIPE, stderr=sp.PIPE,
                     universal_newlines=True, shell=True)
    stdout, stderr = mafft.communicate()
    if print_stdout:
        print(mafft_cline)
        print(stdout)
    if print_stderr:
        print(stderr)
    os.unlink(ffile)
    if os.stat(alignment_file).st_size == 0:
        return None
    if as_file:
        return alignment_file
    # BUG FIX: close the alignment file handle instead of leaking it.
    with open(alignment_file) as aln_handle:
        aln = AlignIO.read(aln_handle, fmt)
    os.unlink(alignment_file)
    return aln
Performs multiple sequence alignment with MAFFT .
42,614
def muscle(sequences=None, alignment_file=None, fasta=None, fmt='fasta',
           as_file=False, maxiters=None, diags=False,
           gap_open=None, gap_extend=None, muscle_bin=None):
    """Perform multiple sequence alignment with MUSCLE.

    Input is either ``sequences`` or a ``fasta`` file path.  Returns a
    Bio.AlignIO alignment, or the alignment file path when ``as_file``
    is True.
    """
    if sequences:
        fasta_string = _get_fasta_string(sequences)
    elif fasta:
        # BUG FIX: close the input file instead of leaking the handle.
        with open(fasta, 'r') as f:
            fasta_string = f.read()
    else:
        # BUG FIX: previously fell through with fasta_string unbound,
        # raising NameError below.
        raise ValueError('Either sequences or fasta must be provided.')
    if muscle_bin is None:
        muscle_bin = os.path.join(BINARY_DIR,
                                  'muscle_{}'.format(platform.system().lower()))
    aln_format = ''
    if fmt == 'clustal':
        aln_format = ' -clwstrict'
    muscle_cline = '{}{} '.format(muscle_bin, aln_format)
    if maxiters is not None:
        muscle_cline += ' -maxiters {}'.format(maxiters)
    if diags:
        muscle_cline += ' -diags'
    if all([gap_open is not None, gap_extend is not None]):
        muscle_cline += ' -gapopen {} -gapextend {}'.format(gap_open, gap_extend)
    muscle = sp.Popen(str(muscle_cline), stdin=sp.PIPE, stdout=sp.PIPE,
                      stderr=sp.PIPE, universal_newlines=True, shell=True)
    if sys.version_info[0] > 2:
        alignment = muscle.communicate(input=fasta_string)[0]
    else:
        alignment = unicode(muscle.communicate(input=fasta_string)[0], 'utf-8')
    aln = AlignIO.read(StringIO(alignment), fmt)
    if as_file:
        if not alignment_file:
            alignment_file = tempfile.NamedTemporaryFile().name
        AlignIO.write(aln, alignment_file, fmt)
        return alignment_file
    return aln
Performs multiple sequence alignment with MUSCLE .
42,615
def local_alignment(query, target=None, targets=None, match=3, mismatch=-2,
                    gap_open=-5, gap_extend=-2, matrix=None, aa=False,
                    gap_open_penalty=None, gap_extend_penalty=None):
    """Striped Smith-Waterman local pairwise alignment.

    Aligns ``query`` against one ``target`` or a list of ``targets``.
    Returns a single SSWAlignment for one target, otherwise a list.
    The deprecated ``*_penalty`` arguments (positive magnitudes) override
    the signed ``gap_open``/``gap_extend`` values.
    """
    if aa and not matrix:
        raise RuntimeError('ERROR: You must supply a scoring matrix for amino acid alignments')
    if not target and not targets:
        raise RuntimeError('ERROR: You must supply a target sequence (or sequences).')
    if target:
        targets = [target]
    if gap_open_penalty is not None:
        gap_open = -1 * gap_open_penalty
    if gap_extend_penalty is not None:
        gap_extend = -1 * gap_extend_penalty
    alignments = []
    for t in targets:
        try:
            # SSWAlignment expects positive penalties, hence the sign flip.
            alignments.append(SSWAlignment(query=query, target=t,
                                           match=match, mismatch=mismatch,
                                           matrix=matrix,
                                           gap_open=-1 * gap_open,
                                           gap_extend=-1 * gap_extend,
                                           aa=aa))
        except IndexError:
            continue
    return alignments[0] if len(alignments) == 1 else alignments
Striped Smith - Waterman local pairwise alignment .
42,616
def global_alignment(query, target=None, targets=None, match=3, mismatch=-2,
                     gap_open=-5, gap_extend=-2,
                     score_match=None, score_mismatch=None,
                     score_gap_open=None, score_gap_extend=None,
                     matrix=None, aa=False):
    """Needleman-Wunsch global pairwise alignment.

    Aligns ``query`` against one ``target`` or an iterable of
    ``targets``; returns a single NWAlignment when ``target`` was given,
    otherwise a list.  Raises RuntimeError on missing or mistyped input.
    """
    if not target and not targets:
        raise RuntimeError('ERROR: You must supply a target sequence (or sequences).')
    if target:
        targets = [target, ]
    if type(targets) not in (list, tuple):
        err = 'ERROR: ::targets:: requires an iterable (list or tuple).'
        # BUG FIX: the two sentences were concatenated without a separator.
        err += ' For a single sequence, use ::target::'
        raise RuntimeError(err)
    alignments = []
    for t in targets:
        alignment = NWAlignment(query=query, target=t, match=match,
                                mismatch=mismatch, gap_open=gap_open,
                                gap_extend=gap_extend,
                                score_match=score_match,
                                score_mismatch=score_mismatch,
                                score_gap_open=score_gap_open,
                                score_gap_extend=score_gap_extend,
                                matrix=matrix, aa=aa)
        alignments.append(alignment)
    if target is not None:
        return alignments[0]
    return alignments
Needleman-Wunsch global pairwise alignment.
42,617
def fetch_class(full_class_name):
    """Import and return the object named by a dotted path, e.g.
    'package.module.ClassName'."""
    module_name, _, class_name = full_class_name.rpartition('.')
    module = importlib.import_module(module_name)
    return getattr(module, class_name)
Fetches the given class .
42,618
def has_chosen(state, correct, msgs):
    """Verify a MultipleChoiceExercise-style submission.

    Executes the student code to extract ``selected_option`` (1-based),
    then reports the matching message from ``msgs`` as failure feedback
    or as the success message.
    """
    namespace = {}
    exec(state.student_code, globals(), namespace)
    chosen = namespace["selected_option"]
    if chosen == correct:
        state.reporter.success_msg = msgs[correct - 1]
    else:
        state.report(Feedback(msgs[chosen - 1]))
    return state
Verify exercises of the type MultipleChoiceExercise
42,619
def check_or(state, *tests):
    """Pass if at least one of the given SCTs passes.

    Every test is executed (even after a success, so side effects on the
    state are preserved); if none passed, the feedback of the first
    failure is reported.
    """
    passed = False
    first_feedback = None
    for test in iter_tests(tests):
        try:
            multi(state, test)
            passed = True
        except TestFail as e:
            first_feedback = first_feedback or e.feedback
    if passed:
        return state
    state.report(first_feedback)
Test whether at least one SCT passes .
42,620
def check_correct(state, check, diagnose):
    """Only surface feedback from the ``diagnose`` SCTs when the ``check``
    SCT fails (or when force_diagnose is set on the state).
    """
    feedback = None
    try:
        multi(state, check)
    except TestFail as e:
        feedback = e.feedback
    # The diagnose SCTs always run, but their feedback only replaces the
    # check feedback when the check already failed (or diagnosis is forced).
    try:
        multi(state, diagnose)
    except TestFail as e:
        if feedback is not None or state.force_diagnose:
            feedback = e.feedback
    if feedback is not None:
        state.report(feedback)
    return state
Allows feedback from a diagnostic SCT only if a check SCT fails .
42,621
def fail(state, msg="fail"):
    """Always fail the SCT, reporting ``msg`` (rendered through the
    state's message builder)."""
    rendered = state.build_message(msg)
    state.report(Feedback(rendered, state))
    return state
Always fails the SCT with an optional msg .
42,622
def required_attributes(element, *attributes):
    """Check that ``element`` carries every attribute in ``attributes``.

    Raises NotValidXmlException listing the required attributes when any
    is missing.
    """
    # IDIOM FIX: all() with a generator replaces the reduce()/lambda fold.
    if not all(attr in element.attrib for attr in attributes):
        raise NotValidXmlException(msg_err_missing_attributes(element.tag, *attributes))
Check element for required attributes . Raise NotValidXmlException on error .
42,623
def required_items(element, children, attributes):
    """Validate that an XML element includes the given child elements and
    attributes.

    Delegates to required_elements / required_attributes, each of which
    raises on any missing item.
    """
    required_elements(element, *children)
    required_attributes(element, *attributes)
Check an xml element to include given attributes and children .
42,624
def attrib_to_dict(element, *args, **kwargs):
    """Extract attributes of an ElementTree element into a dict.

    Positional names select attributes verbatim; keyword arguments map
    new_key=old_key.  Missing attributes yield None.  With no arguments
    the element's raw attrib mapping is returned.
    """
    if args:
        return {name: element.get(name) for name in args}
    if kwargs:
        return {new: element.get(old) for new, old in viewitems(kwargs)}
    return element.attrib
For an ElementTree element extract specified attributes . If an attribute does not exists its value will be None .
42,625
def get_xml_root(xml_path):
    """Fetch the document at ``xml_path`` over HTTP and return the root
    of its parsed XML tree."""
    response = requests.get(xml_path)
    return ET.fromstring(response.content)
Load and parse an xml by given xml_path and return its root .
42,626
def element_to_int(element, attribute=None):
    """Convert an element's text (or, if given, one of its attributes)
    to int."""
    raw = element.text if attribute is None else element.get(attribute)
    return int(raw)
Convert element object to int . If attribute is not given convert element . text .
42,627
def modules(cls):
    """Collect all public class attributes (names not starting with '_'),
    excluding the ``modules`` routine itself."""
    def keep(attr):
        # Skip only the 'modules' routine; every other member qualifies.
        return not (inspect.isroutine(attr) and attr.__name__ == 'modules')
    members = inspect.getmembers(cls, keep)
    return [value for name, value in members if not name.startswith('_')]
Collect all the public class attributes .
42,628
def get_pairs(db, collection, experiment=None, subject=None, group=None,
              name='seq_id', delim=None, delim_occurance=1, pairs_only=False,
              h_selection_func=None, l_selection_func=None):
    """Query ``collection`` and group the returned sequences into mAb
    pairs based on the sequence name.

    ``subject``, ``group`` and ``experiment`` may each be a single value
    or a list/tuple (translated to a Mongo ``$in`` query).
    """
    match = {}
    for field, value in (('subject', subject), ('group', group),
                         ('experiment', experiment)):
        if value is None:
            continue
        if type(value) in (list, tuple):
            match[field] = {'$in': value}
        elif type(value) in STR_TYPES:
            match[field] = value
    seqs = list(db[collection].find(match))
    return assign_pairs(seqs, name=name, delim=delim,
                        delim_occurance=delim_occurance,
                        pairs_only=pairs_only,
                        h_selection_func=h_selection_func,
                        l_selection_func=l_selection_func)
Gets sequences and assigns them to the appropriate mAb pair based on the sequence name .
42,629
def assign_pairs(seqs, name='seq_id', delim=None, delim_occurance=1,
                 pairs_only=False, h_selection_func=None, l_selection_func=None):
    """Group sequence dicts by (possibly truncated) name and build one
    Pair object per group.

    When ``delim`` is given, only the first ``delim_occurance`` delimited
    chunks of the name are used as the pair label.  With ``pairs_only``
    only groups containing a true pair are returned.
    """
    grouped = {}
    for seq in seqs:
        label = seq[name]
        if delim is not None:
            label = delim.join(label.split(delim)[:delim_occurance])
        grouped.setdefault(label, []).append(seq)
    pairs = [Pair(members, name=label,
                  h_selection_func=h_selection_func,
                  l_selection_func=l_selection_func)
             for label, members in grouped.items()]
    if pairs_only:
        pairs = [p for p in pairs if p.is_pair]
    return pairs
Assigns sequences to the appropriate mAb pair based on the sequence name .
42,630
def deduplicate(pairs, aa=False, ignore_primer_regions=False):
    """Remove duplicate sequences from a list of Pair objects.

    Two pairs are duplicates when their heavy and light VDJ sequences
    (nucleotide by default, amino acid with ``aa=True``) are identical.
    With ``ignore_primer_regions`` a fixed-length stretch is trimmed from
    both ends before comparison.  Returns the non-redundant pairs.
    """
    nr_pairs = []
    # Process complete pairs first so that, of two identical entries,
    # the paired copy is the one that survives.
    just_pairs = [p for p in pairs if p.is_pair]
    single_chains = [p for p in pairs if not p.is_pair]
    _pairs = just_pairs + single_chains
    for p in _pairs:
        duplicates = []
        for nr in nr_pairs:
            identical = True
            vdj = 'vdj_aa' if aa else 'vdj_nt'
            # trim 4 residues / 12 nucleotides from each end when primer
            # regions should be ignored
            offset = 4 if aa else 12
            if p.heavy is not None:
                if nr.heavy is None:
                    identical = False
                else:
                    heavy = p.heavy[vdj][offset:-offset] if ignore_primer_regions else p.heavy[vdj]
                    nr_heavy = nr.heavy[vdj][offset:-offset] if ignore_primer_regions else nr.heavy[vdj]
                    if heavy != nr_heavy:
                        identical = False
            if p.light is not None:
                if nr.light is None:
                    identical = False
                else:
                    light = p.light[vdj][offset:-offset] if ignore_primer_regions else p.light[vdj]
                    nr_light = nr.light[vdj][offset:-offset] if ignore_primer_regions else nr.light[vdj]
                    if light != nr_light:
                        identical = False
            duplicates.append(identical)
        # keep p only if it matched none of the already-kept pairs
        if any(duplicates):
            continue
        else:
            nr_pairs.append(p)
    return nr_pairs
Removes duplicate sequences from a list of Pair objects .
42,631
def fasta(self, key='vdj_nt', append_chain=True):
    """Return the pair's chains as a FASTA-formatted string.

    Includes whichever of heavy/light is present; with ``append_chain``
    the chain name is appended to each sequence id.
    """
    records = []
    for seq, chain in ((self.heavy, 'heavy'), (self.light, 'light')):
        if seq is None:
            continue
        suffix = '_{}'.format(chain) if append_chain else ''
        records.append('>{}{}\n{}'.format(seq['seq_id'], suffix, seq[key]))
    return '\n'.join(records)
Returns the sequence pair as a fasta string . If the Pair object contains both heavy and light chain sequences both will be returned as a single string .
42,632
def cmap_from_color(color, dark=False):
    """Generate a matplotlib colormap from a single color, fading from
    dark (``dark=True``) or light toward ``color``."""
    palette = sns.dark_palette if dark else sns.light_palette
    return palette(color, as_cmap=True)
Generates a matplotlib colormap from a single color .
42,633
def has_code(state, text,
             incorrect_msg="Check the {ast_path}. The checker expected to find {text}.",
             fixed=False):
    """Test whether the student code contains ``text``.

    ``text`` is treated as a regex unless ``fixed`` is True (literal
    substring).  Reports ``incorrect_msg`` on failure and returns the
    state for chaining.
    """
    ParseError = state.ast_dispatcher.ParseError

    def extract_text(ast, code):
        # Fall back to the raw submission when the AST is unusable.
        if isinstance(ast, ParseError):
            return code
        try:
            return ast.get_text(code)
        except:
            return code

    student_text = extract_text(state.student_ast, state.student_code)
    _msg = incorrect_msg.format(ast_path=state.get_ast_path() or "highlighted code",
                                text=text)
    if fixed:
        found = text in student_text
    else:
        found = re.search(text, student_text)
    if not found:
        state.report(Feedback(_msg))
    return state
Test whether the student code contains text .
42,634
def has_equal_ast(state,
                  incorrect_msg="Check the {ast_path}. {extra}",
                  sql=None,
                  start=["expression", "subquery", "sql_script"][0],
                  exact=None):
    """Test whether the student and solution code have identical AST
    representations.

    When ``sql`` is given it is parsed (from the ``start`` grammar rule)
    and used as the solution instead of the state's solution AST.
    ``exact`` chooses equality vs. containment of the repr strings; it
    defaults to exact only when no explicit ``sql`` was supplied.
    """
    ast = state.ast_dispatcher.ast_mod
    sol_ast = state.solution_ast if sql is None else ast.parse(sql, start)
    if exact is None:
        exact = sql is None
    # ASTs are compared via their repr strings.
    stu_rep = repr(state.student_ast)
    sol_rep = repr(sol_ast)

    def get_str(ast, code, sql):
        # Best-effort extraction of the solution text for the message.
        if sql:
            return sql
        if isinstance(ast, str):
            return ast
        try:
            return ast.get_text(code)
        except:
            return None

    sol_str = get_str(state.solution_ast, state.solution_code, sql)
    _msg = incorrect_msg.format(
        ast_path=state.get_ast_path() or "highlighted code",
        extra="The checker expected to find `{}` in there.".format(sol_str) if sol_str else "Something is missing.")
    if (exact and (sol_rep != stu_rep)) or (not exact and (sol_rep not in stu_rep)):
        state.report(Feedback(_msg))
    return state
Test whether the student and solution code have identical AST representations
42,635
def cache_model(key_params, timeout='default'):
    """Caching decorator for app models used in task.perform.

    Delegates the actual wrapping to CacheModelDecorator.
    """
    def wrap(fn):
        return CacheModelDecorator().decorate(key_params, timeout, fn)
    return wrap
Caching decorator for app models in task . perform
42,636
def omnihash(obj):
    """Recursively hash objects, including normally-unhashable containers.

    Sets and dicts hash order-independently (via frozenset); lists hash
    like equivalent tuples; everything else uses the built-in hash.
    """
    if isinstance(obj, dict):
        return hash(frozenset((key, omnihash(val)) for key, val in obj.items()))
    if isinstance(obj, set):
        return hash(frozenset(omnihash(item) for item in obj))
    if isinstance(obj, (tuple, list)):
        return hash(tuple(omnihash(item) for item in obj))
    return hash(obj)
recursively hash unhashable objects
42,637
def items_differ(jsonitems, dbitems, subfield_dict):
    """Check whether jsonitems and dbitems differ.

    Returns True as soon as a difference is found.  ``subfield_dict``
    maps related-field names to tuples whose third element is the nested
    subfield_dict used for the recursive comparison of sub-items.
    """
    # both empty: trivially equal; different lengths: trivially different
    if len(jsonitems) == len(dbitems) == 0:
        return False
    elif len(jsonitems) != len(dbitems):
        return True
    original_jsonitems = jsonitems
    # work on a copy so matched items can be popped without mutating input
    jsonitems = copy.deepcopy(jsonitems)
    keys = jsonitems[0].keys()
    for dbitem in dbitems:
        order = getattr(dbitem, 'order', None)
        match = None
        for i, jsonitem in enumerate(jsonitems):
            # compare top-level (non-related) fields first
            for k in keys:
                if k not in subfield_dict and getattr(dbitem, k) != jsonitem.get(k, None):
                    break
            else:
                # top-level fields matched; recurse into related sub-items
                for k in subfield_dict:
                    jsonsubitems = jsonitem[k]
                    dbsubitems = list(getattr(dbitem, k).all())
                    if items_differ(jsonsubitems, dbsubitems, subfield_dict[k][2]):
                        break
                else:
                    # a db 'order' disagreeing with the json position counts
                    # as a difference
                    if order is not None and int(order) != original_jsonitems.index(jsonitem):
                        break
                    match = i
                    break
        if match is not None:
            # consume the matched json item so it can't match twice
            jsonitems.pop(match)
        else:
            return True
    if jsonitems:
        return True
    return False
check whether or not jsonitems and dbitems differ
42,638
def resolve_json_id(self, json_id, allow_no_match=False):
    """Given an id found in scraped JSON, return a DB id for the object.

    Pseudo ids (prefixed with '~') are resolved by querying the model
    class and cached; exact-duplicate ids are redirected through
    ``self.duplicates``.  Raises UnresolvedIdError when resolution fails
    (unless ``allow_no_match`` permits a None result for pseudo ids).
    """
    if not json_id:
        return None
    if json_id.startswith('~'):
        # pseudo id: resolve via a DB query, memoized in pseudo_id_cache
        if json_id not in self.pseudo_id_cache:
            spec = get_pseudo_id(json_id)
            spec = self.limit_spec(spec)
            if isinstance(spec, Q):
                objects = self.model_class.objects.filter(spec)
            else:
                objects = self.model_class.objects.filter(**spec)
            ids = {each.id for each in objects}
            if len(ids) == 1:
                self.pseudo_id_cache[json_id] = ids.pop()
                errmsg = None
            elif not ids:
                errmsg = 'cannot resolve pseudo id to {}: {}'.format(
                    self.model_class.__name__, json_id)
            else:
                errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
                    self.model_class.__name__, json_id, ids)
            if errmsg:
                if not allow_no_match:
                    raise UnresolvedIdError(errmsg)
                else:
                    # tolerated failure: log it and cache a None result
                    self.error(errmsg)
                    self.pseudo_id_cache[json_id] = None
        return self.pseudo_id_cache[json_id]
    # plain id: follow duplicate redirects, then look up the mapping
    json_id = self.duplicates.get(json_id, json_id)
    try:
        return self.json_to_db_id[json_id]
    except KeyError:
        raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
Given an id found in scraped JSON return a DB id for the object .
42,639
def import_directory(self, datadir):
    """Import every '<type>_*.json' file found in ``datadir``.

    Files are streamed lazily into ``self.import_data``.
    """
    pattern = os.path.join(datadir, self._type + '_*.json')

    def stream():
        for path in glob.glob(pattern):
            with open(path) as fh:
                yield json.load(fh)

    return self.import_data(stream())
import a JSON directory into the database
42,640
def _prepare_imports(self, dicts):
    """Yield (json_id, data) pairs from ``dicts``, dropping exact
    duplicates.

    A duplicate's json id is recorded in ``self.duplicates`` so later
    id resolution can redirect it to the surviving original.
    """
    seen = {}
    for data in dicts:
        json_id = data.pop('_id')
        digest = omnihash(data)
        if digest in seen:
            # remember which original this duplicate collapses into
            self.duplicates[json_id] = seen[digest]
        else:
            seen[digest] = json_id
            yield json_id, data
filters the import stream to remove duplicates
42,641
def import_data(self, data_items):
    """Import a batch of dicts; return per-type counts, ids and timing.

    Each item is deduplicated, imported, and tallied under its outcome
    ('insert', 'update' or 'noop').
    """
    record = {
        'insert': 0,
        'update': 0,
        'noop': 0,
        'start': utcnow(),
        'records': {'insert': [], 'update': [], 'noop': []},
    }
    for json_id, data in self._prepare_imports(data_items):
        obj_id, what = self.import_item(data)
        self.json_to_db_id[json_id] = obj_id
        record['records'][what].append(obj_id)
        record[what] += 1
    self.postimport()
    record['end'] = utcnow()
    return {self._type: record}
import a bunch of dicts together
42,642
def import_item(self, data):
    """Import a single dict; return (db_id, outcome) where outcome is
    'insert', 'update' or 'noop'.

    Used by import_data.  Related objects and pupa_id are split off and
    handled separately from plain fields.
    """
    what = 'noop'
    data.pop('_id', None)
    data = self.apply_transformers(data)
    data = self.prepare_for_db(data)
    try:
        obj = self.get_object(data)
    except self.model_class.DoesNotExist:
        obj = None
    pupa_id = data.pop('pupa_id', None)
    # related objects are created/updated separately, not via setattr
    related = {}
    for field in self.related_models:
        related[field] = data.pop(field)
    if obj:
        # the same db object matching two different json items means the
        # incoming data is self-contradictory
        if obj.id in self.json_to_db_id.values():
            raise DuplicateItemError(data, obj, related.get('sources', []))
        for key, value in data.items():
            # locked fields are never overwritten by imports
            if getattr(obj, key) != value and key not in obj.locked_fields:
                setattr(obj, key, value)
                what = 'update'
        updated = self._update_related(obj, related, self.related_models)
        if updated:
            what = 'update'
        if what == 'update':
            obj.save()
    else:
        what = 'insert'
        try:
            obj = self.model_class.objects.create(**data)
        except Exception as e:
            raise DataImportError('{} while importing {} as {}'.format(
                e, data, self.model_class))
        self._create_related(obj, related, self.related_models)
    if pupa_id:
        Identifier.objects.get_or_create(identifier=pupa_id,
                                         jurisdiction_id=self.jurisdiction_id,
                                         defaults={'content_object': obj})
    return obj.id, what
function used by import_data
42,643
def leaf_nodes(self):
    """Return the set of nodes that no edge points at.

    Useful for finding all nodes without dependencies.
    """
    targets = set()
    for deps in self.edges.values():
        targets.update(deps)
    return self.nodes - targets
Return an iterable of nodes with no edges pointing at them. This is helpful to find all nodes without dependencies.
42,644
def sort(self):
    """Yield nodes in topological order, pruning each as it is emitted.

    Raises CyclicGraphError when no progress can be made (a cycle).
    """
    while self.nodes:
        progressed = False
        for node in self.leaf_nodes():
            progressed = True
            self.prune_node(node)
            yield node
        if not progressed:
            raise CyclicGraphError("Sorting has found a cyclic graph.")
Return an iterable of nodes topologically sorted to correctly import dependencies before leaf nodes.
42,645
def cycles(self):
    """Detect cycles in the graph (fairly expensive).

    Returns the set of shortest unique cycles found, each represented as
    a tuple of nodes.
    """
    def walk_node(node, seen):
        # revisiting a node closes a cycle
        if node in seen:
            yield (node, )
            return
        seen.add(node)
        for edge in self.edges[node]:
            # pass a copy of 'seen' so sibling branches don't interfere
            for cycle in walk_node(edge, set(seen)):
                yield (node, ) + cycle
    cycles = chain.from_iterable((walk_node(node, set()) for node in self.nodes))
    shortest = set()
    # keep a cycle only if no already-kept cycle is a subset of it
    for cycle in sorted(cycles, key=len):
        for el in shortest:
            if set(el).issubset(set(cycle)):
                break
        else:
            shortest.add(cycle)
    return shortest
Fairly expensive cycle detection algorithm . This method will return the shortest unique cycles that were detected .
42,646
def pseudo_organization(organization, classification, default=None):
    """Return an appropriate id (real or pseudo) for an organization.

    Exactly one of ``organization``/``classification`` may be given;
    ``default`` supplies a fallback classification.  Returns None when
    nothing applies.
    """
    if organization and classification:
        raise ScrapeValueError('cannot specify both classification and organization')
    if classification:
        return _make_pseudo_id(classification=classification)
    if organization:
        if isinstance(organization, Organization):
            return organization._id
        if isinstance(organization, str):
            return organization
        # assume a dict of pseudo-id fields
        return _make_pseudo_id(**organization)
    if default is not None:
        return _make_pseudo_id(classification=default)
    return None
helper for setting an appropriate ID for organizations
42,647
def add_membership(self, name_or_org, role='member', **kwargs):
    """Add a membership in an organization and return the membership
    object so further details can be added.

    ``name_or_org`` is either an Organization (its id is used directly)
    or an organization name (turned into a pseudo id).
    """
    if isinstance(name_or_org, Organization):
        org_id = name_or_org._id
    else:
        org_id = _make_pseudo_id(name=name_or_org)
    membership = Membership(person_id=self._id, person_name=self.name,
                            organization_id=org_id, role=role, **kwargs)
    self._related.append(membership)
    return membership
add a membership in an organization and return the membership object in case there are more details to add
42,648
def save_object(self, obj):
    """Serialize ``obj`` (and, recursively, its related objects) to JSON
    on disk.

    Validation failures raise under strict validation and are only
    logged otherwise.
    """
    obj.pre_save(self.jurisdiction.jurisdiction_id)
    filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-')
    self.info('save %s %s as %s', obj._type, obj, filename)
    self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())),
                          cls=utils.JSONEncoderPlus, indent=4,
                          separators=(',', ': ')))
    self.output_names[obj._type].add(filename)
    with open(os.path.join(self.datadir, filename), 'w') as f:
        json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus)
    try:
        obj.validate()
    except ValueError as ve:
        if self.strict_validation:
            raise ve
        self.warning(ve)
    # related objects are saved as separate files
    for child in obj._related:
        self.save_object(child)
Save object to disk as JSON .
42,649
def validate(self, schema=None):
    """Validate ``as_dict()`` against a JSON schema (defaults to
    ``self._schema``); raise ScrapeValueError listing all errors.

    The Draft3 validator is extended with a 'datetime' type accepting
    date and datetime instances.
    """
    if schema is None:
        schema = self._schema
    type_checker = Draft3Validator.TYPE_CHECKER.redefine(
        "datetime",
        lambda c, d: isinstance(d, (datetime.date, datetime.datetime)))
    ValidatorCls = jsonschema.validators.extend(Draft3Validator,
                                                type_checker=type_checker)
    validator = ValidatorCls(schema, format_checker=FormatChecker())
    errors = [str(error) for error in validator.iter_errors(self.as_dict())]
    if errors:
        raise ScrapeValueError('validation of {} {} failed: {}'.format(
            self.__class__.__name__, self._id,
            '\n\t' + '\n\t'.join(errors)))
Validate that we have a valid object .
42,650
def add_source(self, url, *, note=''):
    """Record a source URL from which data was collected."""
    self.sources.append({'url': url, 'note': note})
Add a source URL from which data was collected
42,651
def evolve_genomes(rng, pop, params, recorder=None):
    """Evolve a population without tree-sequence recording.

    Complete genomes are simulated and tracked.  ``params`` is validated
    first (with warnings suppressed); ``recorder`` defaults to a no-op
    RecordNothing instance.
    """
    import warnings
    with warnings.catch_warnings():
        # params.validate() may emit warnings we deliberately ignore here
        warnings.simplefilter("ignore")
        params.validate()
    from ._fwdpy11 import MutationRegions
    from ._fwdpy11 import evolve_without_tree_sequences
    from ._fwdpy11 import dispatch_create_GeneticMap
    # fraction of new mutations that are neutral
    pneutral = params.mutrate_n / (params.mutrate_n + params.mutrate_s)
    mm = MutationRegions.create(pneutral, params.nregions, params.sregions)
    rm = dispatch_create_GeneticMap(params.recrate, params.recregions)
    if recorder is None:
        from ._fwdpy11 import RecordNothing
        recorder = RecordNothing()
    evolve_without_tree_sequences(rng, pop, params.demography,
                                  params.mutrate_n, params.mutrate_s,
                                  params.recrate, mm, rm, params.gvalue,
                                  recorder, params.pself,
                                  params.prune_selected)
Evolve a population without tree sequence recordings . In other words complete genomes must be simulated and tracked .
42,652
def _initializeIndividualTable(pop, tc):
    """Populate tc.individuals from pop and return a node-ID ->
    individual-ID map.

    The 2N alive nodes map pairwise onto the N alive individuals;
    ancient-sample individuals are appended after them.
    """
    individal_nodes = {}
    # alive individual i owns nodes 2i and 2i+1
    for i in range(pop.N):
        individal_nodes[2 * i] = i
        individal_nodes[2 * i + 1] = i
    metadata_strings = _generate_individual_metadata(pop.diploid_metadata, tc)
    # ancient samples get individual ids starting after the alive ones
    num_ind_nodes = pop.N
    for i in pop.ancient_sample_metadata:
        assert i not in individal_nodes, "indivudal record error"
        individal_nodes[i.nodes[0]] = num_ind_nodes
        individal_nodes[i.nodes[1]] = num_ind_nodes
        num_ind_nodes += 1
    metadata_strings.extend(_generate_individual_metadata(pop.ancient_sample_metadata, tc))
    md, mdo = tskit.pack_bytes(metadata_strings)
    # no individual flags are set
    flags = [0 for i in range(pop.N + len(pop.ancient_sample_metadata))]
    tc.individuals.set_columns(flags=flags, metadata=md, metadata_offset=mdo)
    return individal_nodes
Returns a node ID -> individual ID map.
42,653
def dump_tables_to_tskit(pop):
    """Convert a fwdpy11.TableCollection to a tskit.TreeSequence."""
    node_view = np.array(pop.tables.nodes, copy=True)
    # Convert forward-time to tskit's backwards-time convention:
    # shift so the newest time is 0, then flip the sign of the rest.
    node_view['time'] -= node_view['time'].max()
    node_view['time'][np.where(node_view['time'] != 0.0)[0]] *= -1.0
    edge_view = np.array(pop.tables.edges, copy=False)
    mut_view = np.array(pop.tables.mutations, copy=False)
    tc = tskit.TableCollection(pop.tables.genome_length)
    _initializePopulationTable(node_view, tc)
    node_to_individual = _initializeIndividualTable(pop, tc)
    # -1 marks nodes with no associated individual.
    individual = [-1 for i in range(len(node_view))]
    for k, v in node_to_individual.items():
        individual[k] = v
    # First 2N nodes are the alive individuals' genomes (sample flag = 1);
    # preserved (ancient sample) nodes are also flagged as samples.
    flags = [1] * 2 * pop.N + [0] * (len(node_view) - 2 * pop.N)
    for i in pop.tables.preserved_nodes:
        flags[i] = 1
    tc.nodes.set_columns(flags=flags, time=node_view['time'],
                         population=node_view['population'],
                         individual=individual)
    tc.edges.set_columns(left=edge_view['left'], right=edge_view['right'],
                         parent=edge_view['parent'], child=edge_view['child'])
    mpos = np.array([pop.mutations[i].pos for i in mut_view['key']])
    # All sites use '0' as ancestral state; offsets are one byte per site.
    ancestral_state = np.zeros(len(mut_view), dtype=np.int8) + ord('0')
    ancestral_state_offset = np.arange(len(mut_view) + 1, dtype=np.uint32)
    tc.sites.set_columns(position=mpos, ancestral_state=ancestral_state,
                         ancestral_state_offset=ancestral_state_offset)
    derived_state = np.zeros(len(mut_view), dtype=np.int8) + ord('1')
    md, mdo = _generate_mutation_metadata(pop)
    # The ancestral offsets are reused for derived states: both are
    # single-byte states, so the offset arrays are identical.
    tc.mutations.set_columns(site=np.arange(len(mpos), dtype=np.int32),
                             node=mut_view['node'],
                             derived_state=derived_state,
                             derived_state_offset=ancestral_state_offset,
                             metadata=md, metadata_offset=mdo)
    return tc.tree_sequence()
Converts a fwdpy11 . TableCollection to a tskit . TreeSequence
42,654
def mslike(pop, **kwargs):
    """Establish default parameters for a single-locus simulation for
    standard pop-gen modeling scenarios.

    Recognized keyword arguments (unknown keys are silently ignored):
    simlen, beg, end, theta, pneutral, rho, dfe.

    :param pop: a fwdpy11.DiploidPopulation
    :return: dict of model parameters
    :raises ValueError: if *pop* is not a DiploidPopulation
    """
    import fwdpy11
    if isinstance(pop, fwdpy11.DiploidPopulation) is False:
        raise ValueError("incorrect pop type: " + str(type(pop)))
    defaults = {'simlen': 10 * pop.N,
                'beg': 0.0,
                'end': 1.0,
                'theta': 100.0,
                'pneutral': 1.0,
                'rho': 100.0,
                'dfe': None}
    # Override defaults with any recognized keyword arguments.
    for key, value in kwargs.items():
        if key in defaults:
            defaults[key] = value
    import numpy as np
    # theta and rho are scaled by 4N, so divide to get per-gen rates.
    params = {'demography': np.array([pop.N] * defaults['simlen'],
                                     dtype=np.uint32),
              'nregions': [fwdpy11.Region(defaults['beg'],
                                          defaults['end'], 1.0)],
              'recregions': [fwdpy11.Region(defaults['beg'],
                                            defaults['end'], 1.0)],
              'rates': ((defaults['pneutral'] * defaults['theta']) /
                        (4.0 * pop.N),
                        ((1.0 - defaults['pneutral']) * defaults['theta']) /
                        (4.0 * pop.N),
                        defaults['rho'] / (4.0 * float(pop.N))),
              'gvalue': fwdpy11.Multiplicative(2.0)}
    if defaults['dfe'] is None:
        params['sregions'] = []
    else:
        params['sregions'] = [defaults['dfe']]
    return params
Function to establish default parameters for a single - locus simulation for standard pop - gen modeling scenarios .
42,655
def limit_spec(self, spec):
    """Scope a pseudo-ID database lookup to this jurisdiction.

    Resolution is limited via memberships -> organization ->
    jurisdiction so lookups never cross jurisdictions. A name-only
    spec additionally matches against other_names.
    """
    if list(spec.keys()) == ['name']:
        # Name-only lookup: match primary or alternate names, scoped
        # to this jurisdiction via memberships.
        return ((Q(name=spec['name']) |
                 Q(other_names__name=spec['name'])) &
                Q(memberships__organization__jurisdiction_id=self.jurisdiction_id))
    spec['memberships__organization__jurisdiction_id'] = self.jurisdiction_id
    return spec
Whenever we do a Pseudo ID lookup from the database we need to limit based on the memberships - > organization - > jurisdiction so we scope the resolution .
42,656
def validate(self):
    """Error check model params.

    :raises TypeError: if any required parameter is None
    """
    required = (
        (self.nregions, "neutral regions cannot be None"),
        (self.sregions, "selected regions cannot be None"),
        (self.recregions, "recombination regions cannot be None"),
        (self.demography, "demography cannot be None"),
        (self.prune_selected, "prune_selected cannot be None"),
        (self.gvalue, "gvalue cannot be None"),
        (self.rates, "rates cannot be None"),
    )
    for value, message in required:
        if value is None:
            raise TypeError(message)
Error check model params .
42,657
def _prepare_imports(self, dicts):
    """Override of _prepare_imports that orders imports so parents are
    imported before children (topological sort on parent_id).

    Pseudo parent ids (starting with '~') are first resolved against
    the prepared data.

    :return: list of (json_id, data) pairs in dependency order
    :raises UnresolvedIdError: if a pseudo id matches multiple records
    :raises PupaInternalError: if the sort drops any record
    """
    pseudo_ids = set()
    pseudo_matches = {}
    prepared = dict(super(OrganizationImporter, self)._prepare_imports(dicts))
    # Collect pseudo parent ids ('~{...}') that need resolution.
    for _, data in prepared.items():
        parent_id = data.get('parent_id', None) or ''
        if parent_id.startswith('~'):
            pseudo_ids.add(parent_id)
    pseudo_ids = [(ppid, get_pseudo_id(ppid)) for ppid in pseudo_ids]
    # Resolve each pseudo id to exactly one prepared record.
    for json_id, data in prepared.items():
        for ppid, spec in pseudo_ids:
            match = True
            for k, v in spec.items():
                if data[k] != v:
                    match = False
                    break
            if match:
                if ppid in pseudo_matches:
                    raise UnresolvedIdError('multiple matches for pseudo id: ' + ppid)
                pseudo_matches[ppid] = json_id
    # Build the parent -> child dependency graph and sort it.
    network = Network()
    in_network = set()
    import_order = []
    for json_id, data in prepared.items():
        parent_id = data.get('parent_id', None)
        if parent_id in pseudo_matches:
            parent_id = pseudo_matches[parent_id]
        network.add_node(json_id)
        if parent_id:
            network.add_edge(parent_id, json_id)
    for jid in network.sort():
        import_order.append((jid, prepared[jid]))
        in_network.add(jid)
    # Sanity check: the sorted network must cover every prepared record.
    if in_network != set(prepared.keys()):
        raise PupaInternalError("import is missing nodes in network set")
    return import_order
an override for prepare imports that sorts the imports by parent_id dependencies
42,658
def evolvets(rng, pop, params, simplification_interval, recorder=None,
             suppress_table_indexing=False, record_gvalue_matrix=False,
             stopping_criterion=None, track_mutation_counts=False,
             remove_extinct_variants=True):
    """Evolve a population with tree sequence recording.

    :param rng: random number generator
    :param pop: population to evolve (modified in place)
    :param params: model parameters; neutral regions are not allowed
    :param simplification_interval: generations between simplifications
    :param recorder: optional ancient-sample recorder; defaults to
        NoAncientSamples
    :param stopping_criterion: optional callable that can stop the
        simulation early; defaults to never stopping
    :raises ValueError: if params.nregions is non-empty (neutral
        mutations on tree sequences are not supported yet)
    """
    import warnings
    if len(params.nregions) != 0:
        raise ValueError(
            "Simulation of neutral mutations on tree sequences not supported (yet).")
    with warnings.catch_warnings():
        # Suppress any warnings raised during parameter validation.
        warnings.simplefilter("ignore")
        params.validate()
    if recorder is None:
        from ._fwdpy11 import NoAncientSamples
        recorder = NoAncientSamples()
    if stopping_criterion is None:
        from ._fwdpy11 import _no_stopping
        stopping_criterion = _no_stopping
    from ._fwdpy11 import MutationRegions
    from ._fwdpy11 import dispatch_create_GeneticMap
    from ._fwdpy11 import evolve_with_tree_sequences
    # No neutral mutations are simulated when recording tree sequences.
    pneutral = 0
    mm = MutationRegions.create(pneutral, params.nregions, params.sregions)
    rm = dispatch_create_GeneticMap(params.recrate, params.recregions)
    from ._fwdpy11 import SampleRecorder
    sr = SampleRecorder()
    evolve_with_tree_sequences(rng, pop, sr, simplification_interval,
                               params.demography, params.mutrate_s,
                               mm, rm, params.gvalue, recorder,
                               stopping_criterion, params.pself,
                               params.prune_selected is False,
                               suppress_table_indexing,
                               record_gvalue_matrix,
                               track_mutation_counts,
                               remove_extinct_variants)
Evolve a population with tree sequence recording
42,659
def exponential_size_change(Nstart, Nstop, time):
    """Generate a list of population sizes following an exponential
    size-change model.

    :param Nstart: initial population size (>= 1)
    :param Nstop: final population size (>= 1)
    :param time: number of generations to model (>= 1)
    :return: list of ``time`` integer population sizes
    :raises RuntimeError: if any argument is out of range
    """
    if time < 1:
        raise RuntimeError("time must be >= 1")
    if Nstart < 1 or Nstop < 1:
        raise RuntimeError("Nstart and Nstop must both be >= 1")
    # Per-generation growth rate implied by the start/stop sizes.
    growth = math.exp((math.log(Nstop) - math.log(Nstart)) / time)
    return [round(Nstart * pow(growth, gen)) for gen in range(1, time + 1)]
Generate a list of population sizes according to exponential size_change model
42,660
def check_file(filename=None, show_filename=False, add_semicolon=False):
    """Check whether an input file is valid PostgreSQL.

    If *filename* is None, STDIN is checked instead.

    :param filename: path of the file to check, or None for STDIN
    :param show_filename: prefix any error message with the filename
    :param add_semicolon: forwarded to check_string
    :return: 0 if the SQL is valid, 1 otherwise (exit-code style)
    """
    if filename is not None:
        with open(filename, "r") as filelike:
            sql_string = filelike.read()
    else:
        # NOTE: entering stdin as a context manager closes it afterwards;
        # acceptable here because no further input is read.
        with sys.stdin as filelike:
            # Fix: read from the context variable instead of reaching
            # back to sys.stdin (the old binding was a dead variable).
            sql_string = filelike.read()
    success, msg = check_string(sql_string, add_semicolon=add_semicolon)
    result = 0
    if not success:
        prefix = ""
        if show_filename and filename is not None:
            prefix = filename + ": "
        print(prefix + msg)
        result = 1
    return result
Check whether an input file is valid PostgreSQL . If no filename is passed STDIN is checked .
42,661
def check_string(sql_string, add_semicolon=False):
    """Check whether a string is valid PostgreSQL.

    :param sql_string: SQL text to validate
    :param add_semicolon: have sqlprep append a terminating semicolon
    :return: (success, msg) — *msg* is '' when valid, otherwise the
        problem description reported by ecpg
    """
    prepped_sql = sqlprep.prepare_sql(sql_string, add_semicolon=add_semicolon)
    success, msg = ecpg.check_syntax(prepped_sql)
    return success, msg
Check whether a string is valid PostgreSQL . Returns a boolean indicating validity and a message from ecpg which will be an empty string if the input was valid or a description of the problem otherwise .
42,662
def check_syntax(string):
    """Check the syntax of a string of PostgreSQL-dialect SQL via ecpg.

    :param string: SQL text fed to ecpg on stdin
    :return: (True, '') when valid, else (False, parsed error message)
    :raises OSError: if the ecpg executable cannot be run
    """
    # '-o -' sends ecpg's generated output to stdout (discarded);
    # the final '-' reads the program from stdin.
    args = ["ecpg", "-o", "-", "-"]
    with open(os.devnull, "w") as devnull:
        try:
            proc = subprocess.Popen(args, shell=False, stdout=devnull,
                                    stdin=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    universal_newlines=True)
            _, err = proc.communicate(string)
        except OSError:
            # Fix: removed stray trailing apostrophe from the message.
            msg = "Unable to execute 'ecpg', you likely need to install it."
            raise OSError(msg)
    if proc.returncode == 0:
        return (True, "")
    else:
        return (False, parse_error(err))
Check syntax of a string of PostgreSQL - dialect SQL
42,663
def find_datacenter_by_name(self, si, path, name):
    """Find a datacenter in the vCenter by name, or return None.

    :param si: pyvmomi ServiceInstance
    :param path: folder path to search under
    :param name: datacenter name
    """
    return self.find_obj_by_path(si, path, name, self.Datacenter)
Finds datacenter in the vCenter or returns None
42,664
def find_network_by_name(self, si, path, name):
    """Find a network in the vCenter by name, or return None.

    :param si: pyvmomi ServiceInstance
    :param path: folder path to search under
    :param name: network name
    """
    return self.find_obj_by_path(si, path, name, self.Network)
Finds network in the vCenter or returns None
42,665
def find_vm_by_name(self, si, path, name):
    """Find a VM in the vCenter by name, or return None.

    :param si: pyvmomi ServiceInstance
    :param path: folder path to search under
    :param name: virtual machine name
    """
    return self.find_obj_by_path(si, path, name, self.VM)
Finds vm in the vCenter or returns None
42,666
def find_obj_by_path(self, si, path, name, type_name):
    """Find a managed object under a folder path, or return None.

    :param si: pyvmomi ServiceInstance
    :param path: folder path containing the object
    :param name: object name to find
    :param type_name: attribute name of the managed-object collection
        to search within the folder
    :return: the child found by the search index (None if absent)
    :raises ValueError: if the folder path cannot be resolved
    """
    folder = self.get_folder(si, path)
    if folder is None:
        raise ValueError('vmomi managed object not found at: {0}'.format(path))
    look_in = None
    if hasattr(folder, type_name):
        look_in = getattr(folder, type_name)
    if hasattr(folder, self.ChildEntity):
        look_in = folder
    if look_in is None:
        raise ValueError('vmomi managed object not found at: {0}'.format(path))
    search_index = si.content.searchIndex
    # Search for the specific child in the folder.
    # (Fix: this note was previously a bare string literal, i.e. a
    # no-op expression statement, rather than a comment.)
    return search_index.FindChild(look_in, name)
Finds object in the vCenter or returns None
42,667
def get_folder(self, si, path, root=None):
    """Recursively resolve *path* to a folder/managed entity, or None.

    Each path component is looked up via the vSphere search index,
    starting from *root* (or the content root folder). When a direct
    child lookup fails, the well-known sub-folders (vmFolder,
    datastoreFolder, networkFolder, hostFolder, datacenterFolder,
    resourcePool) are tried in turn.

    :param si: pyvmomi ServiceInstance
    :param path: '/'-separated path; empty path returns the start folder
    :param root: optional starting folder (defaults to rootFolder)
    """
    search_index = si.content.searchIndex
    sub_folder = root if root else si.content.rootFolder
    if not path:
        return sub_folder
    # Split and drop empty components (leading/trailing slashes).
    paths = [p for p in path.split("/") if p]
    child = None
    try:
        new_root = search_index.FindChild(sub_folder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)
    except:
        # NOTE(review): bare except silently hides lookup errors so the
        # fallbacks below get a chance; consider narrowing it.
        child = None
    # Fallbacks: retry the lookup against each known container type.
    if child is None and hasattr(sub_folder, self.ChildEntity):
        new_root = search_index.FindChild(sub_folder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)
    if child is None and hasattr(sub_folder, self.VM):
        new_root = search_index.FindChild(sub_folder.vmFolder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)
    if child is None and hasattr(sub_folder, self.Datastore):
        new_root = search_index.FindChild(sub_folder.datastoreFolder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)
    if child is None and hasattr(sub_folder, self.Network):
        new_root = search_index.FindChild(sub_folder.networkFolder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)
    if child is None and hasattr(sub_folder, self.Host):
        new_root = search_index.FindChild(sub_folder.hostFolder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)
    if child is None and hasattr(sub_folder, self.Datacenter):
        new_root = search_index.FindChild(sub_folder.datacenterFolder, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)
    if child is None and hasattr(sub_folder, 'resourcePool'):
        new_root = search_index.FindChild(sub_folder.resourcePool, paths[0])
        if new_root:
            child = self.get_folder(si, '/'.join(paths[1:]), new_root)
    return child
Finds folder in the vCenter or returns None
42,668
def get_obj(self, content, vimtype, name):
    """Return an object of the given type by name.

    If *name* is falsy, the first object found is returned. Returns
    None when nothing matches.
    """
    container = self._get_all_objects_by_type(content, vimtype)
    for candidate in container.view:
        if not name or candidate.name == name:
            return candidate
    return None
Return an object by name for a specific type ; if name is None the first object found is returned
42,669
def save_app(self, si, logger, vcenter_data_model, reservation_id,
             save_app_actions, cancellation_context):
    """Create artifacts of apps so they can later be restored.

    Actions are grouped by save deployment model, dispatched to the
    matching ArtifactHandler, and executed concurrently via a pool.

    :return: list of save action results
    :raises Exception: if *save_app_actions* is empty
    """
    results = []
    logger.info('Save Sandbox command starting on ' + vcenter_data_model.default_datacenter)
    if not save_app_actions:
        raise Exception('Failed to save app, missing data in request.')
    actions_grouped_by_save_types = groupby(save_app_actions,
                                            lambda x: x.actionParams.saveDeploymentModel)
    # One ArtifactHandler per distinct save deployment model.
    artifactSaversToActions = {ArtifactHandler.factory(k,
                                                       self.pyvmomi_service,
                                                       vcenter_data_model,
                                                       si,
                                                       logger,
                                                       self.deployer,
                                                       reservation_id,
                                                       self.resource_model_parser,
                                                       self.snapshot_saver,
                                                       self.task_waiter,
                                                       self.folder_manager,
                                                       self.port_group_configurer,
                                                       self.cs): list(g)
                               for k, g in actions_grouped_by_save_types}
    # Appends failure results for any unsupported save type.
    self.validate_requested_save_types_supported(artifactSaversToActions,
                                                 logger,
                                                 results)
    error_results = [r for r in results if not r.success]
    if not error_results:
        logger.info('Handling Save App requests')
        results = self._execute_save_actions_using_pool(artifactSaversToActions,
                                                        cancellation_context,
                                                        logger,
                                                        results)
        logger.info('Completed Save Sandbox command')
    else:
        logger.error('Some save app requests were not valid, Save Sandbox command failed.')
    return results
Creates an artifact of an app that can later be restored
42,670
def formula_1980(household, period, parameters):
    """Housing allowance: a fixed fraction of the household's rent.

    The rent value must be provided for the same month;
    housing_occupancy_status is not needed.
    """
    rent = household('rent', period)
    allowance_rate = parameters(period).benefits.housing_allowance
    return rent * allowance_rate
To compute this allowance the rent value must be provided for the same month but housing_occupancy_status is not necessary .
42,671
def deploy_from_template(self, context, deploy_action, cancellation_context):
    """Deploy a VM from a vCenter template.

    :param context: resource command context with connection details
    :param deploy_action: deploy app action carrying deployment attributes
    :param cancellation_context: allows cancelling the operation
    :return: deploy result action tagged with the request's actionId
    """
    deploy_from_template_model = self.resource_model_parser.convert_to_resource_model(
        attributes=deploy_action.actionParams.deployment.attributes,
        resource_model_type=vCenterVMFromTemplateResourceModel)
    data_holder = DeployFromTemplateDetails(deploy_from_template_model,
                                            deploy_action.actionParams.appName)
    deploy_result_action = self.command_wrapper.execute_command_with_connection(
        context,
        self.deploy_command.execute_deploy_from_template,
        data_holder,
        cancellation_context,
        self.folder_manager)
    # Echo back the originating action id so the caller can correlate.
    deploy_result_action.actionId = deploy_action.actionId
    return deploy_result_action
Deploy From Template Command will deploy vm from template
42,672
def deploy_from_image(self, context, deploy_action, cancellation_context):
    """Deploy a VM from an OVF image.

    :param context: resource command context with connection details
    :param deploy_action: deploy app action carrying deployment attributes
    :param cancellation_context: allows cancelling the operation
    :return: deploy result action tagged with the request's actionId
    """
    # The image deployment model must know which vCenter resource it
    # targets, so inject the resource name into the attributes.
    deploy_action.actionParams.deployment.attributes['vCenter Name'] = context.resource.name
    deploy_from_image_model = self.resource_model_parser.convert_to_resource_model(
        attributes=deploy_action.actionParams.deployment.attributes,
        resource_model_type=vCenterVMFromImageResourceModel)
    data_holder = DeployFromImageDetails(deploy_from_image_model,
                                         deploy_action.actionParams.appName)
    deploy_result_action = self.command_wrapper.execute_command_with_connection(
        context,
        self.deploy_command.execute_deploy_from_image,
        data_holder,
        context.resource,
        cancellation_context,
        self.folder_manager)
    # Echo back the originating action id so the caller can correlate.
    deploy_result_action.actionId = deploy_action.actionId
    return deploy_result_action
Deploy From Image Command will deploy vm from ovf image
42,673
def disconnect_all(self, context, ports):
    """Reassign all vNICs on the VM to the default network, which is
    designated as disconnected.

    :param context: remote command context identifying the VM
    :param ports: unused here; present for command-signature parity
    :return: serialized command result
    """
    resource_details = self._parse_remote_model(context)
    res = self.command_wrapper.execute_command_with_connection(
        context,
        self.virtual_switch_disconnect_command.disconnect_all,
        resource_details.vm_uuid)
    return set_command_result(result=res, unpicklable=False)
Disconnect All Command will assign all the vNICs on the VM to the default network , which is designated as disconnected
42,674
def DeleteInstance(self, context, ports):
    """Destroy the VM only; the CloudShell resource is not removed.

    :param context: remote command context identifying the VM
    :param ports: unused here; present for command-signature parity
    :return: serialized command result
    """
    resource_details = self._parse_remote_model(context)
    res = self.command_wrapper.execute_command_with_connection(
        context,
        self.destroy_virtual_machine_command.DeleteInstance,
        resource_details.vm_uuid,
        resource_details.fullname)
    return set_command_result(result=res, unpicklable=False)
Destroy Vm Command will only destroy the vm and will not remove the resource
42,675
def delete_sandbox(self, si, logger, vcenter_data_model,
                   delete_sandbox_actions, cancellation_context):
    """Delete a saved sandbox's artifacts.

    Actions are grouped by save deployment model and dispatched to the
    matching ArtifactHandler; invalid models short-circuit execution.

    :return: list of delete action results
    :raises Exception: if *delete_sandbox_actions* is empty
    """
    results = []
    logger.info('Deleting saved sandbox command starting on ' + vcenter_data_model.default_datacenter)
    if not delete_sandbox_actions:
        raise Exception('Failed to delete saved sandbox, missing data in request.')
    actions_grouped_by_save_types = groupby(delete_sandbox_actions,
                                            lambda x: x.actionParams.saveDeploymentModel)
    # One ArtifactHandler per distinct save deployment model.
    # (reservation_id is None for delete operations.)
    artifactHandlersToActions = {ArtifactHandler.factory(k,
                                                         self.pyvmomi_service,
                                                         vcenter_data_model,
                                                         si,
                                                         logger,
                                                         self.deployer,
                                                         None,
                                                         self.resource_model_parser,
                                                         self.snapshot_saver,
                                                         self.task_waiter,
                                                         self.folder_manager,
                                                         self.pg,
                                                         self.cs): list(g)
                                 for k, g in actions_grouped_by_save_types}
    # Appends failure results for any unsupported deployment model.
    self._validate_save_deployment_models(artifactHandlersToActions,
                                          delete_sandbox_actions,
                                          results)
    error_results = [r for r in results if not r.success]
    if not error_results:
        results = self._execute_delete_saved_sandbox(artifactHandlersToActions,
                                                     cancellation_context,
                                                     logger,
                                                     results)
    return results
Deletes a saved sandbox's artifacts
42,676
def save_snapshot(self, si, logger, vm_uuid, snapshot_name, save_memory):
    """Create a snapshot of the current state of a virtual machine.

    :param si: pyvmomi ServiceInstance
    :param logger: logger instance
    :param vm_uuid: UUID of the VM to snapshot
    :param snapshot_name: name for the new snapshot
    :param save_memory: whether to include the VM's memory in the
        snapshot (assumes a yes/no style flag — TODO confirm format)
    :return: full path of the snapshot that was created
    """
    vm = self.pyvmomi_service.find_by_uuid(si, vm_uuid)
    snapshot_path_to_be_created = SaveSnapshotCommand._get_snapshot_name_to_be_created(snapshot_name, vm)
    save_vm_memory_to_snapshot = SaveSnapshotCommand._get_save_vm_memory_to_snapshot(save_memory)
    # Fails fast if a snapshot with the same path already exists.
    SaveSnapshotCommand._verify_snapshot_uniquness(snapshot_path_to_be_created, vm)
    task = self._create_snapshot(logger, snapshot_name, vm, save_vm_memory_to_snapshot)
    self.task_waiter.wait_for_task(task=task, logger=logger, action_name='Create Snapshot')
    return snapshot_path_to_be_created
Creates a snapshot of the current state of the virtual machine
42,677
def generate_unique_name(name_prefix, reservation_id=None):
    """Generate a unique name.

    Appends the first 8 characters of a fresh GUID to *name_prefix*.
    When a usable *reservation_id* (a string of at least 4 characters)
    is supplied, the suffix is instead the first 4 GUID characters plus
    the last 4 characters of the reservation id, joined by a dash.
    """
    guid = str(uuid.uuid4())
    if reservation_id and isinstance(reservation_id, str) and len(reservation_id) >= 4:
        suffix = "{0}-{1}".format(guid[:4], reservation_id[-4:])
    else:
        suffix = guid[:8]
    return "{0}_{1}".format(name_prefix, suffix)
Generate a unique name . The method generates a guid and appends its first 8 characters to name_prefix . If a reservation id is provided , the first 4 characters of the generated guid and the last 4 of the reservation id are used instead
42,678
def remove_interfaces_from_vm_task(self, virtual_machine, filter_function=None):
    """Build and run a reconfigure task removing network interfaces.

    :param virtual_machine: pyvmomi VM whose vNICs should be removed
    :param filter_function: optional predicate selecting which
        VirtualEthernetCard devices to remove; None removes all
    :return: the reconfigure task, or None when nothing matched
    """
    device_change = []
    for device in virtual_machine.config.hardware.device:
        # Only Ethernet cards that pass the (optional) filter.
        if isinstance(device, vim.vm.device.VirtualEthernetCard) and (filter_function is None or filter_function(device)):
            nicspec = vim.vm.device.VirtualDeviceSpec()
            nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
            nicspec.device = device
            device_change.append(nicspec)
    if len(device_change) > 0:
        return self.pyvmomi_service.vm_reconfig_task(virtual_machine, device_change)
    return None
Remove interface from VM
42,679
def refresh_ip(self, si, logger, session, vcenter_data_model, resource_model,
               cancellation_context, app_request_json):
    """Refresh the VM's IP address and update the resource Address.

    Waits up to a configurable timeout for the VM to report an IP on a
    network other than the holding network, optionally matching a
    regex from the VM custom params.

    :return: the discovered IP address
    :raises ValueError: if no IP could be obtained before the timeout
    """
    # Static VMs manage their own address; refuse to overwrite it.
    self._do_not_run_on_static_vm(app_request_json=app_request_json)
    default_network = VMLocation.combine(
        [vcenter_data_model.default_datacenter,
         vcenter_data_model.holding_network])
    match_function = self.ip_manager.get_ip_match_function(
        self._get_ip_refresh_ip_regex(resource_model.vm_custom_params))
    timeout = self._get_ip_refresh_timeout(resource_model.vm_custom_params)
    vm = self.pyvmomi_service.find_by_uuid(si, resource_model.vm_uuid)
    ip_res = self.ip_manager.get_ip(vm, default_network, match_function,
                                    cancellation_context, timeout, logger)
    if ip_res.reason == IpReason.Timeout:
        raise ValueError('IP address of VM \'{0}\' could not be obtained during {1} seconds'.format(resource_model.fullname, timeout))
    if ip_res.reason == IpReason.Success:
        self._update_resource_address_with_retry(session=session,
                                                 resource_name=resource_model.fullname,
                                                 ip_address=ip_res.ip_address)
    return ip_res.ip_address
Refreshes IP address of virtual machine and updates Address property on the resource
42,680
def get_connection_details(session, vcenter_resource_model, resource_context):
    """Retrieve vCenter connection details from the resource model.

    :param session: CloudShell API session used to decrypt the password
    :param vcenter_resource_model: resource model holding user/password
    :param resource_context: context whose address is the vCenter URL
    :return: a VCenterConnectionDetails instance
    """
    # Fix: removed the no-op self-assignments 'session = session' and
    # 'resource_context = resource_context'.
    user = vcenter_resource_model.user
    vcenter_url = resource_context.address
    password = session.DecryptPassword(vcenter_resource_model.password).Value
    return VCenterConnectionDetails(vcenter_url, user, password)
Method retrieves the connection details from the vCenter resource model attributes .
42,681
def deploy_from_linked_clone(self, si, logger, data_holder, vcenter_data_model,
                             reservation_id, cancellation_context):
    """Deploy a linked-clone VM from a snapshot of an existing VM.

    :param data_holder: holds the app name and template resource model,
        including the source VM and its snapshot name
    :return: result of the clone deployment
    """
    template_resource_model = data_holder.template_resource_model
    return self._deploy_a_clone(si=si,
                                logger=logger,
                                app_name=data_holder.app_name,
                                template_name=template_resource_model.vcenter_vm,
                                other_params=template_resource_model,
                                vcenter_data_model=vcenter_data_model,
                                reservation_id=reservation_id,
                                cancellation_context=cancellation_context,
                                snapshot=template_resource_model.vcenter_vm_snapshot)
deploy Cloned VM From VM Command will deploy vm from a snapshot
42,682
def deploy_clone_from_vm(self, si, logger, data_holder, vcenter_data_model,
                         reservation_id, cancellation_context):
    """Deploy a VM as a full clone of another VM (no snapshot).

    :param data_holder: holds the app name and template resource model
        naming the source VM
    :return: result of the clone deployment
    """
    template_resource_model = data_holder.template_resource_model
    return self._deploy_a_clone(si, logger, data_holder.app_name,
                                template_resource_model.vcenter_vm,
                                template_resource_model,
                                vcenter_data_model,
                                reservation_id,
                                cancellation_context)
deploy Cloned VM From VM Command will deploy vm from another vm
42,683
def crc32File(filename, skip=0):
    """Compute the CRC-32 of the contents of *filename*, optionally
    skipping a number of bytes at the beginning of the file.

    :param filename: path of the file to checksum
    :param skip: number of leading bytes to exclude from the checksum
    :return: unsigned 32-bit CRC value
    """
    with open(filename, 'rb') as stream:
        # Fix: seek past the header instead of reading the skipped
        # bytes into a dead variable.
        stream.seek(skip)
        # Mask to keep the result an unsigned 32-bit value.
        return zlib.crc32(stream.read()) & 0xffffffff
Computes the CRC - 32 of the contents of filename optionally skipping a certain number of bytes at the beginning of the file .
42,684
def getDefaultDict(modname, config_key, loader, reload=False, filename=None):
    """Return the default AIT dictionary for *modname*.

    The loaded dictionary is memoized on the module as ``DefaultDict``
    and only rebuilt when *reload* is True or not yet cached.

    :param modname: name of the module to attach the cache to
    :param config_key: AIT config key prefix ('<key>.filename' holds
        the dictionary filename)
    :param loader: callable that builds the dictionary
    :param reload: force reloading even if a cached copy exists
    :param filename: explicit dictionary filename (overrides config)
    """
    module = sys.modules[modname]
    default = getattr(module, 'DefaultDict', None)
    if filename is None:
        filename = ait.config.get('%s.filename' % config_key, None)
    if filename is not None and (default is None or reload is True):
        try:
            default = ObjectCache(filename, loader).load()
            setattr(module, 'DefaultDict', default)
        except IOError as e:
            # Fix: 'except IOError, e' is Python-2-only syntax; the
            # 'as' form is valid on Python 2.6+ and Python 3.
            msg = 'Could not load default %s "%s": %s'
            log.error(msg, config_key, filename, str(e))
    # Fall back to building a fresh dictionary when nothing was cached.
    return default or loader()
Returns the default AIT dictionary for modname
42,685
def toBCD(n):
    """Convert the non-negative integer *n* into Binary Coded Decimal.

    Each decimal digit of *n* occupies one 4-bit nibble of the result,
    e.g. ``toBCD(25) == 0x25``.
    """
    bcd = 0
    bits = 0
    while True:
        n, r = divmod(n, 10)
        bcd |= (r << bits)
        # Fix: use equality, not identity — 'n is 0' relied on CPython
        # small-int caching and raises SyntaxWarning on modern Pythons.
        if n == 0:
            break
        bits += 4
    return bcd
Converts the number n into Binary Coded Decimal .
42,686
def listAllFiles(directory, suffix=None, abspath=False):
    """Return the list of all files within *directory* and all of its
    subdirectories.

    :param directory: root directory to walk (expanded via expandPath)
    :param suffix: if given, only filenames ending with it are kept
    :param abspath: return paths as-walked instead of relative to
        *directory*
    """
    directory = expandPath(directory)
    collected = []
    for dirpath, dirnames, filenames in os.walk(directory, followlinks=True):
        for filename in filenames:
            if suffix and not filename.endswith(suffix):
                continue
            filepath = os.path.join(dirpath, filename)
            if not abspath:
                filepath = os.path.relpath(filepath, start=directory)
            collected.append(filepath)
    return collected
Returns the list of all files within the input directory and all subdirectories .
42,687
def dirty(self):
    """True if the cache needs to be updated, False otherwise.

    The cache is dirty when the cache file is missing or older than
    the source file.
    """
    if not os.path.exists(self.cachename):
        return True
    return os.path.getmtime(self.filename) > os.path.getmtime(self.cachename)
True if the cache needs to be updated False otherwise
42,688
def load(self):
    """Load and return the cached Python object.

    On first call, either rebuilds the object via the loader (and
    refreshes the on-disk cache) when the cache is stale, or unpickles
    the cached copy. Subsequent calls return the memoized object.
    """
    if self._dict is None:
        if self.dirty:
            # Stale or missing cache: rebuild and re-cache.
            self._dict = self._loader(self.filename)
            self.cache()
        else:
            with open(self.cachename, 'rb') as stream:
                self._dict = cPickle.load(stream)
    return self._dict
Loads the Python object
42,689
def process(self, input_data, topic=None, **kwargs):
    """Decode a packet tuple from PacketHandler and archive it.

    *input_data* is expected to look like "(uid,packet-bytes)"; the
    surrounding delimiters are stripped and the payload split into the
    packet UID and raw packet message. The decoded packet is inserted
    into the database backend. Any exception is logged and swallowed
    so archival failures do not stop the stream.
    """
    try:
        # Strip the outer delimiters, then split only on the first comma
        # so commas inside the packet body survive.
        split = input_data[1:-1].split(',', 1)
        uid, pkt = int(split[0]), split[1]
        defn = self.packet_dict[uid]
        decoded = tlm.Packet(defn, data=bytearray(pkt))
        self.dbconn.insert(decoded, **kwargs)
    except Exception as e:
        log.error('Data archival failed with error: {}.'.format(e))
Splits tuple received from PacketHandler into packet UID and packet message . Decodes packet and inserts into database backend . Logs any exceptions raised .
42,690
def getUTCDatetimeDOY(days=0, hours=0, minutes=0, seconds=0):
    """Return the current UTC time, plus an optional offset, formatted
    as a day-of-year (DOY) timestamp string.

    :param days: days to add to the current time
    :param hours: hours to add to the current time
    :param minutes: minutes to add to the current time
    :param seconds: seconds to add to the current time
    """
    offset = datetime.timedelta(days=days, hours=hours,
                                minutes=minutes, seconds=seconds)
    return (datetime.datetime.utcnow() + offset).strftime(DOY_Format)
getUTCDatetimeDOY - > datetime
42,691
def _update_leap_second_data(self):
    """Fetch the latest leap second list from the IETF and cache it.

    Downloads leap-seconds.list, parses the valid-until timestamp and
    the leap second entries at/after the GPS epoch, stores the result
    on ``self._data``, and pickles it to the configured leapseconds
    file.

    :raises ValueError: if the IETF server cannot be reached
    """
    log.info('Attempting to acquire latest leapsecond data')
    ls_file = ait.config.get('leapseconds.filename',
                             os.path.join(ait.config._directory, _DEFAULT_FILE_NAME))
    url = 'https://www.ietf.org/timezones/data/leap-seconds.list'
    r = requests.get(url)
    if r.status_code != 200:
        msg = 'Unable to locate latest timezone data. Connection to IETF failed'
        log.error(msg)
        raise ValueError(msg)
    text = r.text.split('\n')
    # Keep the '#@' expiry line plus all non-comment data lines.
    lines = [l for l in text if l.startswith('#@') or not l.startswith('#')]
    data = {'valid': None, 'leapseconds': []}
    # Timestamps in the file are seconds since 1900-01-01 (NTP epoch).
    data['valid'] = datetime.datetime(1900, 1, 1) + datetime.timedelta(seconds=int(lines[0].split('\t')[1]))
    leap = 1
    for l in lines[1:-1]:
        t = datetime.datetime(1900, 1, 1) + datetime.timedelta(seconds=int(l.split('\t')[0]))
        # Only leap seconds at or after the GPS epoch are relevant.
        if t < GPS_Epoch:
            continue
        data['leapseconds'].append((t, leap))
        leap += 1
    self._data = data
    # NOTE(review): text-mode 'w' with pickle works on Python 2 only;
    # Python 3 requires binary mode — confirm target interpreter.
    with open(ls_file, 'w') as outfile:
        pickle.dump(data, outfile)
Updates the systems leap second information
42,692
def wait(self):
    """Start all greenlets for concurrent processing and block.

    Servers are started but not joined, so they run for the lifetime
    of the process while the joined (non-server) greenlets drive
    completion.
    """
    for greenlet in (self.greenlets + self.servers):
        log.info("Starting {} greenlet...".format(greenlet))
        greenlet.start()
    # Only join the non-server greenlets.
    gevent.joinall(self.greenlets)
Starts all greenlets for concurrent processing . Joins over all greenlets that are not servers .
42,693
def _load_streams(self):
    """Read, parse, and create the streams specified in config.yaml.

    Inbound PortInputStreams are tracked as servers; other inbound
    streams and all outbound streams go to their respective lists.
    Invalid stream configs are logged and skipped.
    """
    common_err_msg = 'No valid {} stream configurations found. '
    specific_err_msg = {'inbound': 'No data will be received (or displayed).',
                        'outbound': 'No data will be published.'}
    err_msgs = {}
    for stream_type in ['inbound', 'outbound']:
        err_msgs[stream_type] = common_err_msg.format(stream_type) + specific_err_msg[stream_type]
        streams = ait.config.get('server.{}-streams'.format(stream_type))
        if streams is None:
            log.warn(err_msgs[stream_type])
        else:
            for index, s in enumerate(streams):
                try:
                    if stream_type == 'inbound':
                        strm = self._create_inbound_stream(s['stream'])
                        # Port-based inputs own a socket, so they are
                        # treated as servers rather than plain streams.
                        if type(strm) == PortInputStream:
                            self.servers.append(strm)
                        else:
                            self.inbound_streams.append(strm)
                    elif stream_type == 'outbound':
                        strm = self._create_outbound_stream(s['stream'])
                        self.outbound_streams.append(strm)
                    log.info('Added {} stream {}'.format(stream_type, strm))
                except Exception:
                    # A bad stream config should not abort the others.
                    exc_type, value, tb = sys.exc_info()
                    log.error('{} creating {} stream {}: {}'.format(exc_type, stream_type, index, value))
    if not self.inbound_streams and not self.servers:
        log.warn(err_msgs['inbound'])
    if not self.outbound_streams:
        log.warn(err_msgs['outbound'])
Reads parses and creates streams specified in config . yaml .
42,694
def _create_inbound_stream(self, config=None):
    """Create an inbound stream from its config.

    A numeric first input element denotes a network port, producing a
    PortInputStream; otherwise a ZMQStream subscribed to named inputs.

    :raises ValueError: when *config* is None
    :raises cfg.AitConfigMissing: when the config lacks an 'input'
    """
    if config is None:
        raise ValueError('No stream config to create stream from.')
    name = self._get_stream_name(config)
    stream_handlers = self._get_stream_handlers(config, name)
    stream_input = config.get('input', None)
    if stream_input is None:
        raise (cfg.AitConfigMissing('inbound stream {}\'s input'.format(name)))
    if type(stream_input[0]) is int:
        return PortInputStream(name,
                               stream_input,
                               stream_handlers,
                               zmq_args={'zmq_context': self.broker.context,
                                         'zmq_proxy_xsub_url': self.broker.XSUB_URL,
                                         'zmq_proxy_xpub_url': self.broker.XPUB_URL})
    else:
        return ZMQStream(name,
                         stream_input,
                         stream_handlers,
                         zmq_args={'zmq_context': self.broker.context,
                                   'zmq_proxy_xsub_url': self.broker.XSUB_URL,
                                   'zmq_proxy_xpub_url': self.broker.XPUB_URL})
Creates an inbound stream from its config .
42,695
def _create_outbound_stream(self, config=None):
    """Create an outbound stream from its config.

    An integer 'output' produces a PortOutputStream bound to that
    port; anything else falls back to a ZMQStream (non-integer
    outputs are warned about and ignored).

    :raises ValueError: when *config* is None
    """
    if config is None:
        raise ValueError('No stream config to create stream from.')
    name = self._get_stream_name(config)
    stream_handlers = self._get_stream_handlers(config, name)
    stream_input = config.get('input', None)
    stream_output = config.get('output', None)
    if type(stream_output) is int:
        return PortOutputStream(name,
                                stream_input,
                                stream_output,
                                stream_handlers,
                                zmq_args={'zmq_context': self.broker.context,
                                          'zmq_proxy_xsub_url': self.broker.XSUB_URL,
                                          'zmq_proxy_xpub_url': self.broker.XPUB_URL})
    else:
        if stream_output is not None:
            log.warn("Output of stream {} is not an integer port. "
                     "Stream outputs can only be ports.".format(name))
        return ZMQStream(name,
                         stream_input,
                         stream_handlers,
                         zmq_args={'zmq_context': self.broker.context,
                                   'zmq_proxy_xsub_url': self.broker.XSUB_URL,
                                   'zmq_proxy_xpub_url': self.broker.XPUB_URL})
Creates an outbound stream from its config .
42,696
def _create_handler ( self , config ) : if config is None : raise ValueError ( 'No handler config to create handler from.' ) if 'name' not in config : raise ValueError ( 'Handler name is required.' ) handler_name = config [ 'name' ] module_name = handler_name . rsplit ( '.' , 1 ) [ 0 ] class_name = handler_name . rsplit ( '.' , 1 ) [ - 1 ] module = import_module ( module_name ) handler_class = getattr ( module , class_name ) instance = handler_class ( ** config ) return instance
Creates a handler from its config .
42,697
def _load_plugins(self):
    """Read, parse, and create the plugins specified in config.yaml.

    Invalid plugin configs are logged and skipped; a warning is issued
    when no plugin could be created at all.
    """
    plugins = ait.config.get('server.plugins')
    if plugins is None:
        log.warn('No plugins specified in config.')
    else:
        for index, p in enumerate(plugins):
            try:
                plugin = self._create_plugin(p['plugin'])
                self.plugins.append(plugin)
                log.info('Added plugin {}'.format(plugin))
            except Exception:
                # A bad plugin config should not abort the others.
                exc_type, value, tb = sys.exc_info()
                log.error('{} creating plugin {}: {}'.format(exc_type, index, value))
        if not self.plugins:
            log.warn('No valid plugin configurations found. No plugins will be added.')
Reads parses and creates plugins specified in config . yaml .
42,698
def _create_plugin(self, config):
    """Create a plugin instance from its config dict.

    Pops 'name', 'inputs', and 'outputs' from the config and passes
    the remaining keys through to the plugin constructor.

    :raises ValueError: when *config* is None, or when a plugin of the
        same class name is already loaded
    :raises cfg.AitConfigMissing: when no plugin name is given
    """
    if config is None:
        raise ValueError('No plugin config to create plugin from.')
    name = config.pop('name', None)
    if name is None:
        raise (cfg.AitConfigMissing('plugin name'))
    module_name = name.rsplit('.', 1)[0]
    class_name = name.rsplit('.', 1)[-1]
    # Plugin class names must be unique across streams, servers, plugins.
    if class_name in [x.name for x in (self.outbound_streams +
                                       self.inbound_streams +
                                       self.servers +
                                       self.plugins)]:
        raise ValueError(
            'Plugin "{}" already loaded. Only one plugin of a given name is allowed'.format(class_name))
    plugin_inputs = config.pop('inputs', None)
    if plugin_inputs is None:
        log.warn('No plugin inputs specified for {}'.format(name))
        plugin_inputs = []
    subscribers = config.pop('outputs', None)
    if subscribers is None:
        log.warn('No plugin outputs specified for {}'.format(name))
        subscribers = []
    module = import_module(module_name)
    plugin_class = getattr(module, class_name)
    # Remaining config keys become constructor keyword arguments.
    instance = plugin_class(plugin_inputs,
                            subscribers,
                            zmq_args={'zmq_context': self.broker.context,
                                      'zmq_proxy_xsub_url': self.broker.XSUB_URL,
                                      'zmq_proxy_xpub_url': self.broker.XPUB_URL},
                            **config)
    return instance
Creates a plugin from its config .
42,699
def createDirStruct(paths, verbose=True):
    """Create directories for each path in *paths* (e.g.
    ait.config._datapaths from AIT_CONFIG).

    :param paths: mapping of label -> path or list of paths
    :param verbose: log each directory as it is created
    :return: True on success
    :raises OSError: if a path cannot be created for a reason other
        than already existing as a directory
    """
    for k, path in paths.items():
        pathlist = path if type(path) is list else [path]
        for p in pathlist:
            # Fix: the try/except now wraps each individual makedirs
            # (the old code wrapped the whole list, so one existing
            # directory silently skipped creating the remaining paths).
            # Also replaced the Python-2-only 'except OSError, e'.
            try:
                os.makedirs(p)
                if verbose:
                    log.info('Creating directory: ' + p)
            except OSError as e:
                if e.errno == errno.EEXIST and os.path.isdir(p):
                    pass
                else:
                    raise
    return True
Loops ait . config . _datapaths from AIT_CONFIG and creates a directory .