idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
237,100
def user_model_attributes(user, service):
    """Return all fields on the user object that are not in the ignore list.

    ``id`` and ``password`` are never exposed.  The ``service`` argument is
    accepted for interface compatibility but is not consulted here.
    """
    ignore_fields = ['id', 'password']
    return {
        field.name: getattr(user, field.name)
        for field in user._meta.fields
        if field.name not in ignore_fields
    }
Return all fields on the user object that are not in the list of fields to ignore .
68
18
237,101
def add_query_params(url, params):
    """Inject additional query parameters into an existing URL.

    If parameters already exist with the same name they will be
    overwritten.  Parameters with empty values are ignored.  Return the
    modified URL as a string.
    """
    def encode(s):
        return force_bytes(s, settings.DEFAULT_CHARSET)

    # Drop empty values, then encode keys and values to bytes.
    encoded = {encode(k): encode(v) for k, v in params.items() if v}
    parts = list(urlparse(url))
    query = dict(parse_qsl(parts[4]))
    query.update(encoded)
    parts[4] = urlencode(query)
    return urlunparse(parts)
Inject additional query parameters into an existing URL . If parameters already exist with the same name they will be overwritten . Parameters with empty values are ignored . Return the modified URL as a string .
113
39
237,102
def match_service(service1, service2):
    """Compare two service URLs.

    Return ``True`` if the scheme, hostname, optional port and path match;
    the query string and fragment are deliberately ignored.
    """
    parsed1, parsed2 = urlparse(service1), urlparse(service2)
    try:
        return ((parsed1.scheme, parsed1.netloc, parsed1.path) ==
                (parsed2.scheme, parsed2.netloc, parsed2.path))
    except ValueError:
        return False
Compare two service URLs . Return True if the scheme hostname optional port and path match .
75
18
237,103
def redirect(to, *args, **kwargs):
    """Similar to the Django ``redirect`` shortcut but with altered functionality.

    If an optional ``params`` argument is provided, the dictionary items
    will be injected as query parameters on the redirection URL.
    """
    params = kwargs.pop('params', {})
    try:
        to = reverse(to, args=args, kwargs=kwargs)
    except NoReverseMatch:
        # Not a named URL pattern: treat a bare word as the login view,
        # otherwise require the URL to be an allowed service.
        if '/' not in to and '.' not in to:
            to = reverse('cas_login')
        elif not service_allowed(to):
            raise PermissionDenied()
    if params:
        to = add_query_params(to, params)
    logger.debug("Redirecting to %s" % to)
    return HttpResponseRedirect(to)
Similar to the Django redirect shortcut but with altered functionality . If an optional params argument is provided the dictionary items will be injected as query parameters on the redirection URL .
136
33
237,104
def get_config(self, service, setting):
    """Access the configuration for a given service and setting.

    If the setting is not present for the service, fall back to the
    ``<setting>_DEFAULT`` attribute.
    """
    service_config = self.get_service(service)
    try:
        return service_config[setting]
    except KeyError:
        return getattr(self, setting + '_DEFAULT')
Access the configuration for a given service and setting . If the service is not found return a default value .
44
21
237,105
def get_status(self, status_value, message=None):
    """Build a Status XML block for a SAML 1.1 Response.

    ``status_value`` is prefixed with ``samlp:``; an optional ``message``
    adds a StatusMessage child element.
    """
    status = etree.Element('Status')
    code = etree.SubElement(status, 'StatusCode')
    code.set('Value', 'samlp:' + status_value)
    if message:
        etree.SubElement(status, 'StatusMessage').text = message
    return status
Build a Status XML block for a SAML 1 . 1 Response .
92
14
237,106
def get_assertion(self, ticket, attributes):
    """Build a SAML 1.1 Assertion XML block."""
    assertion = etree.Element('Assertion')
    assertion.set('xmlns', 'urn:oasis:names:tc:SAML:1.0:assertion')
    assertion.set('AssertionID', self.generate_id())
    assertion.set('IssueInstant', self.instant())
    assertion.set('Issuer', 'localhost')
    assertion.set('MajorVersion', '1')
    assertion.set('MinorVersion', '1')
    assertion.append(self.get_conditions(ticket.service))
    subject = self.get_subject(ticket.user.get_username())
    # The AttributeStatement is only emitted when attributes are present.
    if attributes:
        assertion.append(self.get_attribute_statement(subject, attributes))
    assertion.append(self.get_authentication_statement(subject, ticket))
    return assertion
Build a SAML 1 . 1 Assertion XML block .
203
13
237,107
def get_conditions(self, service_id):
    """Build a Conditions XML block for a SAML 1.1 Assertion.

    The validity window runs from now until 30 units later (as produced by
    ``self.instant``), restricted to the given audience.
    """
    conditions = etree.Element('Conditions')
    conditions.set('NotBefore', self.instant())
    conditions.set('NotOnOrAfter', self.instant(offset=30))
    restriction = etree.SubElement(conditions, 'AudienceRestrictionCondition')
    etree.SubElement(restriction, 'Audience').text = service_id
    return conditions
Build a Conditions XML block for a SAML 1 . 1 Assertion .
102
16
237,108
def get_attribute_statement(self, subject, attributes):
    """Build an AttributeStatement XML block for a SAML 1.1 Assertion.

    Each attribute value (or each item of a list value) becomes one
    AttributeValue element.
    """
    attribute_statement = etree.Element('AttributeStatement')
    attribute_statement.append(subject)
    for name, value in attributes.items():
        attribute = etree.SubElement(attribute_statement, 'Attribute')
        attribute.set('AttributeName', name)
        attribute.set('AttributeNamespace', self.namespace)
        # Normalize scalars to a single-item list so both cases share code.
        values = value if isinstance(value, list) else [value]
        for v in values:
            attribute_value = etree.SubElement(attribute, 'AttributeValue')
            attribute_value.text = force_text(v)
    return attribute_statement
Build an AttributeStatement XML block for a SAML 1 . 1 Assertion .
166
18
237,109
def get_authentication_statement(self, subject, ticket):
    """Build an AuthenticationStatement XML block for a SAML 1.1 Assertion."""
    statement = etree.Element('AuthenticationStatement')
    statement.set('AuthenticationInstant', self.instant(instant=ticket.consumed))
    statement.set('AuthenticationMethod', self.authn_method_password)
    statement.append(subject)
    return statement
Build an AuthenticationStatement XML block for a SAML 1 . 1 Assertion .
86
17
237,110
def get_subject(self, identifier):
    """Build a Subject XML block for a SAML 1.1 AuthenticationStatement
    or AttributeStatement."""
    subject = etree.Element('Subject')
    etree.SubElement(subject, 'NameIdentifier').text = identifier
    confirmation = etree.SubElement(subject, 'SubjectConfirmation')
    etree.SubElement(confirmation, 'ConfirmationMethod').text = \
        self.confirmation_method
    return subject
Build a Subject XML block for a SAML 1 . 1 AuthenticationStatement or AttributeStatement .
91
19
237,111
def is_bytes(string):
    """Check if a string is a bytes instance (Python 2 and 3 compatible)."""
    if six.PY3 and isinstance(string, (bytes, memoryview, bytearray)):  # noqa
        return True
    if six.PY2 and isinstance(string, (buffer, bytearray)):  # noqa
        return True
    return False
Check if a string is a bytes instance
68
8
237,112
def partialclass(cls, *args, **kwargs):
    """Returns a partially instantiated class.

    The result is a subclass of ``cls`` whose ``__init__`` is pre-bound
    with the given positional and keyword arguments.
    """
    # Prefer __name__, then __qualname__; fall back to str(cls).
    candidates = [
        n for n in (getattr(cls, attr, str(cls))
                    for attr in ("__name__", "__qualname__"))
        if n is not None
    ]
    new_name = candidates[0]
    partial_cls = type(
        new_name,
        (cls,),
        {"__init__": partialmethod(cls.__init__, *args, **kwargs)},
    )
    # Swiped from attrs.make_class: make the class appear to belong to
    # the caller's module.
    try:
        partial_cls.__module__ = sys._getframe(1).f_globals.get(
            "__name__", "__main__")
    except (AttributeError, ValueError):  # pragma: no cover
        pass  # pragma: no cover
    return partial_cls
Returns a partially instantiated class
196
6
237,113
def replace_with_text_stream(stream_name):
    """Given a stream name, replace the target stream with a
    text-converted equivalent.

    Unknown stream names are silently ignored.
    """
    factory = TEXT_STREAMS.get(stream_name)
    if factory is not None:
        setattr(sys, stream_name, factory())
    return None
Given a stream name replace the target stream with a text - converted equivalent
62
14
237,114
def _sanitize_params(prefix, suffix, dir):
    """Common parameter processing for most APIs in this module.

    Fills in ``None`` arguments with defaults matching the inferred
    return type (``str`` or ``bytes``) and returns
    ``(prefix, suffix, dir, output_type)``.
    """
    output_type = _infer_return_type(prefix, suffix, dir)
    if suffix is None:
        suffix = output_type()
    if prefix is None:
        prefix = "tmp" if output_type is str else os.fsencode("tmp")
    if dir is None:
        dir = gettempdir() if output_type is str else fs_encode(gettempdir())
    return prefix, suffix, dir, output_type
Common parameter processing for most APIs in this module .
120
10
237,115
def fromdict(model, data, exclude=None, exclude_underscore=None,
             allow_pk=None, follow=None, include=None, only=None):
    """Update a model from a dict.

    Only columns, synonyms and explicitly included keys are writable;
    primary keys are rejected unless ``allow_pk`` is enabled.  Relations
    listed in ``follow`` are updated recursively via their own
    ``fromdict``.  Returns the (mutated) model.
    """
    follow = arg_to_dict(follow)
    info = inspect(model)
    columns = [c.key for c in info.mapper.column_attrs]
    synonyms = [c.key for c in info.mapper.synonyms]
    relations = [c.key for c in info.mapper.relationships]
    primary_keys = [c.key for c in info.mapper.primary_key]
    if allow_pk is None:
        allow_pk = getattr(model, 'dictalchemy_fromdict_allow_pk',
                           constants.default_fromdict_allow_pk)
    if only:
        valid_keys = only
    else:
        exclude = exclude or []
        exclude += getattr(model, 'dictalchemy_exclude',
                           constants.default_exclude) or []
        if exclude_underscore is None:
            exclude_underscore = getattr(
                model, 'dictalchemy_exclude_underscore',
                constants.default_exclude_underscore)
        if exclude_underscore:
            # Exclude all properties starting with underscore
            exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_']
        include = (include or []) + (
            getattr(model, 'dictalchemy_fromdict_include',
                    getattr(model, 'dictalchemy_include', None)) or [])
        valid_keys = [k for k in columns + synonyms
                      if k not in exclude] + include
    # Keys that will be updated
    update_keys = set(valid_keys) & set(data.keys())
    # Check for primary keys
    data_primary_key = update_keys & set(primary_keys)
    if len(data_primary_key) and not allow_pk:
        msg = ("Primary keys({0}) cannot be updated by fromdict."
               "Set 'dictalchemy_fromdict_allow_pk' to True in your Model"
               " or pass 'allow_pk=True'.").format(','.join(data_primary_key))
        raise errors.DictalchemyError(msg)
    # Update columns and synonyms
    for k in update_keys:
        setattr(model, k, data[k])
    # Update simple relations
    for (k, args) in follow.iteritems():
        if k not in data:
            continue
        if k not in relations:
            raise errors.MissingRelationError(k)
        rel = getattr(model, k)
        if hasattr(rel, 'fromdict'):
            rel.fromdict(data[k], **args)
    return model
Update a model from a dict
603
6
237,116
def make_class_dictable(cls, exclude=constants.default_exclude,
                        exclude_underscore=constants.default_exclude_underscore,
                        fromdict_allow_pk=constants.default_fromdict_allow_pk,
                        include=None, asdict_include=None,
                        fromdict_include=None):
    """Make a class dictable.

    Installs the dictalchemy configuration attributes plus ``asdict``,
    ``fromdict`` and ``__iter__`` on *cls*, and returns it.
    """
    cls.dictalchemy_exclude = exclude
    cls.dictalchemy_exclude_underscore = exclude_underscore
    cls.dictalchemy_fromdict_allow_pk = fromdict_allow_pk
    cls.asdict = asdict
    cls.fromdict = fromdict
    cls.__iter__ = iter
    cls.dictalchemy_include = include
    cls.dictalchemy_asdict_include = asdict_include
    cls.dictalchemy_fromdict_include = fromdict_include
    return cls
Make a class dictable
247
5
237,117
def parse_GFF_attribute_string(attrStr, extra_return_first_value=False):
    """Parses a GFF attribute string and returns it as a dictionary.

    If ``extra_return_first_value`` is true, a pair ``(dict, first_value)``
    is returned, where ``first_value`` is the value of the first attribute.
    """
    if attrStr.endswith("\n"):
        attrStr = attrStr[:-1]
    d = {}
    first_val = "_unnamed_"
    for (i, attr) in itertools.izip(itertools.count(),
                                    _HTSeq.quotesafe_split(attrStr)):
        if _re_attr_empty.match(attr):
            continue
        if attr.count('"') not in (0, 2):
            raise ValueError(
                "The attribute string seems to contain mismatched quotes.")
        mo = _re_attr_main.match(attr)
        if not mo:
            raise ValueError("Failure parsing GFF attribute line")
        val = mo.group(2)
        if val.startswith('"') and val.endswith('"'):
            val = val[1:-1]
        # intern() keeps repeated attribute names/values memory-cheap.
        d[intern(mo.group(1))] = intern(val)
        if extra_return_first_value and i == 0:
            first_val = val
    if extra_return_first_value:
        return (d, first_val)
    return d
Parses a GFF attribute string and returns it as a dictionary .
277
15
237,118
def pair_SAM_alignments(alignments, bundle=False, primary_only=False):
    """Iterate over SAM alignments, name-sorted paired-end."""
    mate_missing_count = [0]

    def process_list(almnt_list):
        '''Transform a list of alignment with the same read name into pairs

        Args:
            almnt_list (list): alignments to process

        Yields:
            each pair of alignments.

        This function is needed because each line of a BAM file is not a
        read but an alignment. For uniquely mapped and unmapped reads,
        those two are the same. For multimapped reads, however, there can
        be more than one alignment for each read. Also, it is normal for
        a mapper to uniquely map one read and multimap its mate.

        This function goes down the list of alignments for a given read
        name and tries to find the first mate. So if read 1 is uniquely
        mapped but read 2 is mapped 4 times, only (read 1, read 2 - first
        occurrence) will yield; the other 3 alignments of read 2 are
        ignored.
        '''
        while len(almnt_list) > 0:
            a1 = almnt_list.pop(0)
            # Find its mate
            for a2 in almnt_list:
                if a1.pe_which == a2.pe_which:
                    continue
                if a1.aligned != a2.mate_aligned or \
                        a1.mate_aligned != a2.aligned:
                    continue
                if not (a1.aligned and a2.aligned):
                    break
                if a1.iv.chrom == a2.mate_start.chrom and \
                        a1.iv.start == a2.mate_start.pos and \
                        a2.iv.chrom == a1.mate_start.chrom and \
                        a2.iv.start == a1.mate_start.pos:
                    break
            else:
                if a1.mate_aligned:
                    mate_missing_count[0] += 1
                    if mate_missing_count[0] == 1:
                        warnings.warn(
                            "Read " + a1.read.name +
                            " claims to have an aligned mate " +
                            "which could not be found in an adjacent line.")
                a2 = None
            if a2 is not None:
                almnt_list.remove(a2)
            if a1.pe_which == "first":
                yield (a1, a2)
            else:
                assert a1.pe_which == "second"
                yield (a2, a1)

    almnt_list = []
    current_name = None
    for almnt in alignments:
        if not almnt.paired_end:
            raise ValueError(
                "'pair_alignments' needs a sequence of paired-end alignments")
        if almnt.pe_which == "unknown":
            raise ValueError(
                "Paired-end read found with 'unknown' 'pe_which' status.")
        # FIXME: almnt.not_primary_alignment currently means secondary
        if primary_only and (almnt.not_primary_alignment or
                             almnt.supplementary):
            continue
        if almnt.read.name == current_name:
            almnt_list.append(almnt)
        else:
            if bundle:
                yield list(process_list(almnt_list))
            else:
                for p in process_list(almnt_list):
                    yield p
            current_name = almnt.read.name
            almnt_list = [almnt]
    # Flush the final bundle of alignments.
    if bundle:
        yield list(process_list(almnt_list))
    else:
        for p in process_list(almnt_list):
            yield p
    if mate_missing_count[0] > 1:
        warnings.warn("%d reads with missing mate encountered." %
                      mate_missing_count[0])
Iterate over SAM alignments name - sorted paired - end
794
13
237,119
def pair_SAM_alignments_with_buffer(alignments, max_buffer_size=30000000,
                                    primary_only=False):
    """Iterate over SAM alignments with buffer, position-sorted paired-end."""
    almnt_buffer = {}
    ambiguous_pairing_counter = 0
    for almnt in alignments:
        if not almnt.paired_end:
            raise ValueError(
                "Sequence of paired-end alignments expected, but got "
                "single-end alignment.")
        if almnt.pe_which == "unknown":
            raise ValueError(
                "Cannot process paired-end alignment found with "
                "'unknown' 'pe_which' status.")
        # FIXME: almnt.not_primary_alignment currently means secondary
        if primary_only and (almnt.not_primary_alignment or
                             almnt.supplementary):
            continue
        # Key under which this alignment's mate would have been buffered.
        matekey = (
            almnt.read.name,
            "second" if almnt.pe_which == "first" else "first",
            almnt.mate_start.chrom if almnt.mate_aligned else None,
            almnt.mate_start.pos if almnt.mate_aligned else None,
            almnt.iv.chrom if almnt.aligned else None,
            almnt.iv.start if almnt.aligned else None,
            -almnt.inferred_insert_size
            if almnt.aligned and almnt.mate_aligned else None)
        if matekey in almnt_buffer:
            if len(almnt_buffer[matekey]) == 1:
                mate = almnt_buffer[matekey][0]
                del almnt_buffer[matekey]
            else:
                mate = almnt_buffer[matekey].pop(0)
                if ambiguous_pairing_counter == 0:
                    ambiguous_pairing_first_occurance = matekey
                ambiguous_pairing_counter += 1
            if almnt.pe_which == "first":
                yield (almnt, mate)
            else:
                yield (mate, almnt)
        else:
            # No mate buffered yet; buffer this alignment under its own key.
            almntkey = (
                almnt.read.name, almnt.pe_which,
                almnt.iv.chrom if almnt.aligned else None,
                almnt.iv.start if almnt.aligned else None,
                almnt.mate_start.chrom if almnt.mate_aligned else None,
                almnt.mate_start.pos if almnt.mate_aligned else None,
                almnt.inferred_insert_size
                if almnt.aligned and almnt.mate_aligned else None)
            if almntkey not in almnt_buffer:
                almnt_buffer[almntkey] = [almnt]
            else:
                almnt_buffer[almntkey].append(almnt)
            if len(almnt_buffer) > max_buffer_size:
                raise ValueError(
                    "Maximum alignment buffer size exceeded while pairing "
                    "SAM alignments.")
    # Anything left over never found a mate; yield half-pairs.
    if len(almnt_buffer) > 0:
        warnings.warn(
            "Mate records missing for %d records; first such record: %s." %
            (len(almnt_buffer), str(list(almnt_buffer.values())[0][0])))
        for almnt_list in list(almnt_buffer.values()):
            for almnt in almnt_list:
                if almnt.pe_which == "first":
                    yield (almnt, None)
                else:
                    yield (None, almnt)
    if ambiguous_pairing_counter > 0:
        warnings.warn(
            "Mate pairing was ambiguous for %d records; mate key for first "
            "such record: %s." %
            (ambiguous_pairing_counter,
             str(ambiguous_pairing_first_occurance)))
Iterate over SAM alignments with buffer position - sorted paired - end
820
15
237,120
def ensure_v8_src():
    """Ensure that v8 sources are present and up-to-date."""
    path = local_path('v8')
    if os.path.isdir(path):
        update_v8(path)
    else:
        fetch_v8(path)
    checkout_v8_version(local_path("v8/v8"), V8_VERSION)
    dependencies_sync(path)
Ensure that v8 src are present and up - to - date
80
14
237,121
def get_filenames(directory):
    """Get all the files to copy: manylinux1 wheel names found in *directory*."""
    wheel_pattern = re.compile(r"cp\d{2}mu?-manylinux1_\S+\.whl")
    for filename in os.listdir(directory):
        if wheel_pattern.search(filename):
            yield filename
Get all the files to copy
56
6
237,122
def copy_file(filename):
    """Copy the file and put the correct tag.

    Strips the ``m`` ABI marker from the wheel's platform tag, rewrites the
    WHEEL metadata accordingly and writes the renamed wheel alongside it.
    """
    print("Updating file %s" % filename)
    out_dir = os.path.abspath(DIRECTORY)
    tags = filename[:-4].split("-")
    tags[-2] = tags[-2].replace("m", "")
    new_name = "-".join(tags) + ".whl"
    wheel_flag = "-".join(tags[2:])
    with InWheelCtx(os.path.join(DIRECTORY, filename)) as ctx:
        info_fname = os.path.join(_dist_info_dir(ctx.path), 'WHEEL')
        infos = pkginfo.read_pkg_info(info_fname)
        print("Changing Tag %s to %s" % (infos["Tag"], wheel_flag))
        del infos['Tag']
        infos.add_header('Tag', wheel_flag)
        pkginfo.write_pkg_info(info_fname, infos)
        ctx.out_wheel = os.path.join(out_dir, new_name)
        print("Saving new wheel into %s" % ctx.out_wheel)
Copy the file and put the correct tag
278
8
237,123
def is_unicode(value):
    """Check if a value is a valid unicode string, compatible with
    Python 2 and Python 3."""
    major = sys.version_info[0]
    if major == 2:
        return isinstance(value, unicode)  # noqa: F821 - Python 2 only
    if major == 3:
        return isinstance(value, str)
    raise NotImplementedError()
Check if a value is a valid unicode string compatible with python 2 and python 3
63
17
237,124
def execute(self, js_str, timeout=0, max_memory=0):
    """Exec the given JS value.

    The snippet is wrapped in an IIFE so the value of a bare expression
    is returned from ``eval``.
    """
    wrapped = "(function(){return (%s)})()" % js_str
    return self.eval(wrapped, timeout, max_memory)
Exec the given JS value
49
5
237,125
def eval(self, js_str, timeout=0, max_memory=0):
    """Eval the JavaScript string and return the converted result.

    The source is encoded to UTF-8 bytes if needed, the native evaluation
    runs under the context lock, and the native result structure is freed
    after conversion (or on error).
    """
    bytes_val = js_str.encode("utf8") if is_unicode(js_str) else js_str
    res = None
    self.lock.acquire()
    try:
        res = self.ext.mr_eval_context(
            self.ctx,
            bytes_val,
            len(bytes_val),
            ctypes.c_ulong(timeout),
            ctypes.c_size_t(max_memory))
        # A NULL result means the value could not be converted.
        if bool(res) is False:
            raise JSConversionException()
        return res.contents.to_python()
    finally:
        self.lock.release()
        if res is not None:
            self.free(res)
Eval the JavaScript string
174
5
237,126
def call(self, identifier, *args, **kwargs):
    """Call the named function with provided arguments.

    You can pass a custom JSON encoder by passing it in the ``encoder``
    keyword-only argument.
    """
    encoder = kwargs.get('encoder', None)
    timeout = kwargs.get('timeout', 0)
    max_memory = kwargs.get('max_memory', 0)
    json_args = json.dumps(args, separators=(',', ':'), cls=encoder)
    js = "{identifier}.apply(this, {json_args})"
    return self.eval(
        js.format(identifier=identifier, json_args=json_args),
        timeout, max_memory)
Call the named function with provided arguments You can pass a custom JSON encoder by passing it in the encoder keyword only argument .
136
26
237,127
def heap_stats(self):
    """Return heap statistics.

    The context lock is held only for the native call and is always
    released, even if the call raises; the native result structure is
    always freed, even if the conversion to Python raises.  (The previous
    version leaked the lock and the native memory on those error paths.)
    """
    self.lock.acquire()
    try:
        res = self.ext.mr_heap_stats(self.ctx)
    finally:
        self.lock.release()
    try:
        return res.contents.to_python()
    finally:
        self.free(res)
Return heap statistics
64
3
237,128
def heap_snapshot(self):
    """Return heap snapshot.

    The context lock is held only for the native call and is always
    released, even if the call raises; the native result structure is
    always freed, even if the conversion to Python raises.  (The previous
    version leaked the lock and the native memory on those error paths.)
    """
    self.lock.acquire()
    try:
        res = self.ext.mr_heap_snapshot(self.ctx)
    finally:
        self.lock.release()
    try:
        return res.contents.to_python()
    finally:
        self.free(res)
Return heap snapshot
66
3
237,129
def to_python(self):
    """Return an object as native Python.

    Converts the native tagged value into the matching Python object, or
    raises the matching JS exception for error types.
    """
    result = None
    if self.type == PythonTypes.null:
        result = None
    elif self.type == PythonTypes.bool:
        result = self.value == 1
    elif self.type == PythonTypes.integer:
        result = 0 if self.value is None else ctypes.c_int32(self.value).value
    elif self.type == PythonTypes.double:
        result = self._double_value()
    elif self.type == PythonTypes.str_utf8:
        buf = ctypes.c_char_p(self.value)
        ptr = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char))
        result = ptr[0:self.len].decode("utf8")
    elif self.type == PythonTypes.array:
        if self.len == 0:
            return []
        ary = []
        ary_addr = ctypes.c_void_p.from_address(self.value)
        ptr_to_ary = ctypes.pointer(ary_addr)
        for i in range(self.len):
            pval = PythonValue.from_address(ptr_to_ary[i])
            ary.append(pval.to_python())
        result = ary
    elif self.type == PythonTypes.hash:
        if self.len == 0:
            return {}
        res = {}
        hash_ary_addr = ctypes.c_void_p.from_address(self.value)
        ptr_to_hash = ctypes.pointer(hash_ary_addr)
        # Keys and values are interleaved in the native array.
        for i in range(self.len):
            pkey = PythonValue.from_address(ptr_to_hash[i * 2])
            pval = PythonValue.from_address(ptr_to_hash[i * 2 + 1])
            res[pkey.to_python()] = pval.to_python()
        result = res
    elif self.type == PythonTypes.function:
        result = JSFunction()
    elif self.type == PythonTypes.parse_exception:
        msg = ctypes.c_char_p(self.value).value
        raise JSParseException(msg)
    elif self.type == PythonTypes.execute_exception:
        msg = ctypes.c_char_p(self.value).value
        raise JSEvalException(msg.decode('utf-8', errors='replace'))
    elif self.type == PythonTypes.oom_exception:
        msg = ctypes.c_char_p(self.value).value
        raise JSOOMException(msg)
    elif self.type == PythonTypes.timeout_exception:
        msg = ctypes.c_char_p(self.value).value
        raise JSTimeoutException(msg)
    elif self.type == PythonTypes.date:
        timestamp = self._double_value()
        # JS timestamp are milliseconds, in python we are in seconds
        result = datetime.datetime.utcfromtimestamp(timestamp / 1000.)
    else:
        raise WrongReturnTypeException("unknown type %d" % self.type)
    return result
Return an object as native Python
689
6
237,130
def libv8_object(object_name):
    """Return a path for object_name which is OS independent.

    Tries the release build directory first, then the vendored build, and
    falls back to the release path.
    """
    release_path = join(V8_LIB_DIRECTORY,
                        'out.gn/x64.release/obj/{}'.format(object_name))
    filename = release_path
    if not isfile(filename):
        filename = join(local_path(
            'vendor/v8/out.gn/libv8/obj/{}'.format(object_name)))
    if not isfile(filename):
        filename = join(V8_LIB_DIRECTORY,
                        'out.gn/x64.release/obj/{}'.format(object_name))
    return filename
Return a path for object_name which is OS independent
139
11
237,131
def get_static_lib_paths():
    """Return the required static libraries path.

    On Linux the library list is wrapped in ``--start-group`` /
    ``--end-group`` so the linker resolves circular dependencies.
    """
    is_linux = sys.platform.startswith('linux')
    libs = []
    if is_linux:
        libs.append('-Wl,--start-group')
    libs.extend(get_raw_static_lib_path())
    if is_linux:
        libs.append('-Wl,--end-group')
    return libs
Return the required static libraries path
90
6
237,132
def get_dag_params(self) -> Dict[str, Any]:
    """Merges default config with dag config, sets dag_id, and converts
    the configured start_date into a timezone-aware datetime."""
    try:
        dag_params: Dict[str, Any] = utils.merge_configs(
            self.dag_config, self.default_config)
    except Exception as e:
        raise Exception(f"Failed to merge config with default config, err: {e}")
    dag_params["dag_id"] = self.dag_name
    try:
        # ensure that default_args dictionary contains key "start_date"
        # with "datetime" value in specified timezone
        dag_params["default_args"]["start_date"] = utils.get_start_date(
            date_value=dag_params["default_args"]["start_date"],
            timezone=dag_params["default_args"].get("timezone", "UTC"),
        )
    except KeyError as e:
        raise Exception(f"{self.dag_name} config is missing start_date, err: {e}")
    return dag_params
Merges default config with dag config sets dag_id and extrapolates dag_start_date
238
21
237,133
def make_task(operator: str, task_params: Dict[str, Any]) -> BaseOperator:
    """Takes an operator and params and creates an instance of that operator."""
    try:
        # class is a Callable https://stackoverflow.com/a/34578836/3679900
        operator_obj: Callable[..., BaseOperator] = import_string(operator)
    except Exception as e:
        raise Exception(f"Failed to import operator: {operator}. err: {e}")
    try:
        task: BaseOperator = operator_obj(**task_params)
    except Exception as e:
        raise Exception(f"Failed to create {operator_obj} task. err: {e}")
    return task
Takes an operator and params and creates an instance of that operator .
144
14
237,134
def build(self) -> Dict[str, Union[str, DAG]]:
    """Generates a DAG from the DAG parameters.

    Returns a dict with the ``dag_id`` and the constructed ``DAG`` object.
    """
    dag_params: Dict[str, Any] = self.get_dag_params()
    dag: DAG = DAG(
        dag_id=dag_params["dag_id"],
        schedule_interval=dag_params["schedule_interval"],
        description=dag_params.get("description", ""),
        max_active_runs=dag_params.get(
            "max_active_runs",
            configuration.conf.getint("core", "max_active_runs_per_dag"),
        ),
        default_args=dag_params.get("default_args", {}),
    )
    tasks: Dict[str, Dict[str, Any]] = dag_params["tasks"]
    # create dictionary to track tasks and set dependencies
    tasks_dict: Dict[str, BaseOperator] = {}
    for task_name, task_conf in tasks.items():
        task_conf["task_id"] = task_name
        operator: str = task_conf["operator"]
        task_conf["dag"] = dag
        # Everything except the bookkeeping keys is forwarded to the operator.
        params: Dict[str, Any] = {
            k: v for k, v in task_conf.items() if k not in SYSTEM_PARAMS
        }
        task: BaseOperator = DagBuilder.make_task(
            operator=operator, task_params=params)
        tasks_dict[task.task_id] = task
    # set task dependencies after creating tasks
    for task_name, task_conf in tasks.items():
        if task_conf.get("dependencies"):
            source_task: BaseOperator = tasks_dict[task_name]
            for dep in task_conf["dependencies"]:
                dep_task: BaseOperator = tasks_dict[dep]
                source_task.set_upstream(dep_task)
    return {"dag_id": dag_params["dag_id"], "dag": dag}
Generates a DAG from the DAG parameters .
467
11
237,135
def merge_configs(config: Dict[str, Any],
                  default_config: Dict[str, Any]) -> Dict[str, Any]:
    """Merges a default config with DAG config.

    Used to set default values for a group of DAGs: keys missing from
    *config* are copied from *default_config*, and nested dicts present
    in both are merged recursively.  Returns the (mutated) *config*.
    """
    for key in default_config:
        if key in config:
            # Recurse only when both sides hold dicts; otherwise the
            # explicit config value wins.
            if isinstance(config[key], dict) and \
                    isinstance(default_config[key], dict):
                merge_configs(config[key], default_config[key])
        else:
            config[key] = default_config[key]
    return config
Merges a default config with DAG config . Used to set default values for a group of DAGs .
108
23
237,136
def _load_config(config_filepath: str) -> Dict[str, Any]:
    """Loads YAML config file to dictionary.

    :param config_filepath: path to the YAML file
    :raises Exception: if the file cannot be read or parsed
    """
    try:
        # Use a context manager so the file handle is closed even when
        # parsing fails (the previous version leaked the handle).
        with open(config_filepath, "r") as config_file:
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary constructors on untrusted input; consider
            # yaml.safe_load if the config source is not fully trusted.
            config: Dict[str, Any] = yaml.load(stream=config_file)
    except Exception as e:
        raise Exception(f"Invalid DAG Factory config file; err: {e}")
    return config
Loads YAML config file to dictionary
78
9
237,137
def get_dag_configs(self) -> Dict[str, Dict[str, Any]]:
    """Returns configuration for each DAG in the factory.

    The ``"default"`` entry holds shared defaults and is excluded.
    """
    return {
        dag_name: dag_conf
        for dag_name, dag_conf in self.config.items()
        if dag_name != "default"
    }
Returns configuration for each the DAG in factory
53
9
237,138
def generate_dags(self, globals: Dict[str, Any]) -> None:
    """Generates DAGs from YAML config and registers them in *globals*
    so Airflow can discover them."""
    dag_configs: Dict[str, Dict[str, Any]] = self.get_dag_configs()
    default_config: Dict[str, Any] = self.get_default_config()
    for dag_name, dag_config in dag_configs.items():
        dag_builder: DagBuilder = DagBuilder(
            dag_name=dag_name,
            dag_config=dag_config,
            default_config=default_config)
        try:
            dag: Dict[str, Union[str, DAG]] = dag_builder.build()
        except Exception as e:
            raise Exception(
                f"Failed to generate dag {dag_name}. make sure config is properly populated. err:{e}")
        globals[dag["dag_id"]] = dag["dag"]
Generates DAGs from YAML config
207
10
237,139
def attributes(self):
    """Return sync attributes as a dictionary."""
    return {
        'name': self.name,
        'id': self.sync_id,
        'network_id': self.network_id,
        'serial': self.serial,
        'status': self.status,
        'region': self.region,
        'region_id': self.region_id,
    }
Return sync attributes .
80
4
237,140
def arm(self, value):
    """Arm or disarm system."""
    request = api.request_system_arm if value else api.request_system_disarm
    return request(self.blink, self.network_id)
Arm or disarm system .
50
5
237,141
def start(self):
    """Initialize the system.

    Fetches sync module info, network status and per-camera info, and
    creates the camera instances.  Returns ``False`` on unrecoverable
    errors, ``True`` otherwise.
    """
    response = api.request_syncmodule(self.blink, self.network_id, force=True)
    try:
        self.summary = response['syncmodule']
        self.network_id = self.summary['network_id']
    except (TypeError, KeyError):
        _LOGGER.error(("Could not retrieve sync module information "
                       "with response: %s"), response, exc_info=True)
        return False
    try:
        self.sync_id = self.summary['id']
        self.serial = self.summary['serial']
        self.status = self.summary['status']
    except KeyError:
        # Missing details are logged but not fatal.
        _LOGGER.error("Could not extract some sync module info: %s",
                      response, exc_info=True)
    self.network_info = api.request_network_status(self.blink,
                                                   self.network_id)
    self.check_new_videos()
    try:
        for camera_config in self.camera_list:
            if 'name' not in camera_config:
                break
            name = camera_config['name']
            self.cameras[name] = BlinkCamera(self)
            self.motion[name] = False
            camera_info = self.get_camera_info(camera_config['id'])
            self.cameras[name].update(camera_info,
                                      force_cache=True, force=True)
    except KeyError:
        _LOGGER.error("Could not create cameras instances for %s",
                      self.name, exc_info=True)
        return False
    return True
Initialize the system .
338
5
237,142
def get_events(self, **kwargs):
    """Retrieve events from server.

    Returns the event list, or ``False`` if it cannot be extracted from
    the response.
    """
    force = kwargs.pop('force', False)
    response = api.request_sync_events(self.blink, self.network_id,
                                       force=force)
    try:
        return response['event']
    except (TypeError, KeyError):
        _LOGGER.error("Could not extract events: %s",
                      response, exc_info=True)
        return False
Retrieve events from server .
96
6
237,143
def get_camera_info(self, camera_id):
    """Retrieve camera information.

    Returns the first camera record, or an empty list if the response
    cannot be parsed.
    """
    response = api.request_camera_info(self.blink, self.network_id,
                                       camera_id)
    try:
        return response['camera'][0]
    except (TypeError, KeyError):
        _LOGGER.error("Could not extract camera info: %s",
                      response, exc_info=True)
        return []
Retrieve camera information .
87
5
237,144
def refresh(self, force_cache=False):
    """Get all blink cameras and pulls their most recent status."""
    self.network_info = api.request_network_status(self.blink,
                                                   self.network_id)
    self.check_new_videos()
    for camera in self.cameras.values():
        camera_info = self.get_camera_info(camera.camera_id)
        camera.update(camera_info, force_cache=force_cache)
Get all blink cameras and pulls their most recent status .
113
11
237,145
def check_new_videos(self):
    """Check if new videos since last refresh.

    Resets motion flags, then marks motion and records the latest clip
    for every camera that has a new video.  Returns ``False`` only when
    the video list cannot be read from the response.
    """
    resp = api.request_videos(self.blink,
                              time=self.blink.last_refresh, page=0)
    for camera in self.cameras.keys():
        self.motion[camera] = False
    try:
        info = resp['videos']
    except (KeyError, TypeError):
        _LOGGER.warning("Could not check for motion. Response: %s", resp)
        return False
    for entry in info:
        try:
            name = entry['camera_name']
            clip = entry['address']
            timestamp = entry['created_at']
            self.motion[name] = True
            self.last_record[name] = {'clip': clip, 'time': timestamp}
        except KeyError:
            _LOGGER.debug("No new videos since last refresh.")
    return True
Check if new videos since last refresh .
184
8
237,146
def attributes ( self ) : attributes = { 'name' : self . name , 'camera_id' : self . camera_id , 'serial' : self . serial , 'temperature' : self . temperature , 'temperature_c' : self . temperature_c , 'temperature_calibrated' : self . temperature_calibrated , 'battery' : self . battery , 'thumbnail' : self . thumbnail , 'video' : self . clip , 'motion_enabled' : self . motion_enabled , 'motion_detected' : self . motion_detected , 'wifi_strength' : self . wifi_strength , 'network_id' : self . sync . network_id , 'sync_module' : self . sync . name , 'last_record' : self . last_record } return attributes
Return dictionary of all camera attributes .
181
7
237,147
def snap_picture ( self ) : return api . request_new_image ( self . sync . blink , self . network_id , self . camera_id )
Take a picture with camera to create a new thumbnail .
35
11
237,148
def set_motion_detect ( self , enable ) : if enable : return api . request_motion_detection_enable ( self . sync . blink , self . network_id , self . camera_id ) return api . request_motion_detection_disable ( self . sync . blink , self . network_id , self . camera_id )
Set motion detection .
76
4
237,149
def update ( self , config , force_cache = False , * * kwargs ) : # force = kwargs.pop('force', False) self . name = config [ 'name' ] self . camera_id = str ( config [ 'id' ] ) self . network_id = str ( config [ 'network_id' ] ) self . serial = config [ 'serial' ] self . motion_enabled = config [ 'enabled' ] self . battery_voltage = config [ 'battery_voltage' ] self . battery_state = config [ 'battery_state' ] self . temperature = config [ 'temperature' ] self . wifi_strength = config [ 'wifi_strength' ] # Retrieve calibrated temperature from special endpoint resp = api . request_camera_sensors ( self . sync . blink , self . network_id , self . camera_id ) try : self . temperature_calibrated = resp [ 'temp' ] except KeyError : self . temperature_calibrated = self . temperature _LOGGER . warning ( "Could not retrieve calibrated temperature." ) # Check if thumbnail exists in config, if not try to # get it from the homescreen info in teh sync module # otherwise set it to None and log an error new_thumbnail = None if config [ 'thumbnail' ] : thumb_addr = config [ 'thumbnail' ] else : thumb_addr = self . get_thumb_from_homescreen ( ) if thumb_addr is not None : new_thumbnail = "{}{}.jpg" . format ( self . sync . urls . base_url , thumb_addr ) try : self . motion_detected = self . sync . motion [ self . name ] except KeyError : self . motion_detected = False clip_addr = None if self . name in self . sync . last_record : clip_addr = self . sync . last_record [ self . name ] [ 'clip' ] self . last_record = self . sync . last_record [ self . name ] [ 'time' ] self . clip = "{}{}" . format ( self . sync . urls . base_url , clip_addr ) # If the thumbnail or clip have changed, update the cache update_cached_image = False if new_thumbnail != self . thumbnail or self . _cached_image is None : update_cached_image = True self . thumbnail = new_thumbnail update_cached_video = False if self . _cached_video is None or self . 
motion_detected : update_cached_video = True if new_thumbnail is not None and ( update_cached_image or force_cache ) : self . _cached_image = api . http_get ( self . sync . blink , url = self . thumbnail , stream = True , json = False ) if clip_addr is not None and ( update_cached_video or force_cache ) : self . _cached_video = api . http_get ( self . sync . blink , url = self . clip , stream = True , json = False )
Update camera info .
674
4
237,150
def image_to_file ( self , path ) : _LOGGER . debug ( "Writing image from %s to %s" , self . name , path ) response = self . _cached_image if response . status_code == 200 : with open ( path , 'wb' ) as imgfile : copyfileobj ( response . raw , imgfile ) else : _LOGGER . error ( "Cannot write image to file, response %s" , response . status_code , exc_info = True )
Write image to file .
110
5
237,151
def video_to_file ( self , path ) : _LOGGER . debug ( "Writing video from %s to %s" , self . name , path ) response = self . _cached_video if response is None : _LOGGER . error ( "No saved video exist for %s." , self . name , exc_info = True ) return with open ( path , 'wb' ) as vidfile : copyfileobj ( response . raw , vidfile )
Write video to file .
102
5
237,152
def get_thumb_from_homescreen ( self ) : for device in self . sync . homescreen [ 'devices' ] : try : device_type = device [ 'device_type' ] device_name = device [ 'name' ] device_thumb = device [ 'thumbnail' ] if device_type == 'camera' and device_name == self . name : return device_thumb except KeyError : pass _LOGGER . error ( "Could not find thumbnail for camera %s" , self . name , exc_info = True ) return None
Retrieve thumbnail from homescreen .
122
7
237,153
def get_time ( time_to_convert = None ) : if time_to_convert is None : time_to_convert = time . time ( ) return time . strftime ( TIMESTAMP_FORMAT , time . localtime ( time_to_convert ) )
Create blink - compatible timestamp .
63
6
237,154
def merge_dicts ( dict_a , dict_b ) : duplicates = [ val for val in dict_a if val in dict_b ] if duplicates : _LOGGER . warning ( ( "Duplicates found during merge: %s. " "Renaming is recommended." ) , duplicates ) return { * * dict_a , * * dict_b }
Merge two dictionaries into one .
81
8
237,155
def attempt_reauthorization ( blink ) : _LOGGER . info ( "Auth token expired, attempting reauthorization." ) headers = blink . get_auth_token ( is_retry = True ) return headers
Attempt to refresh auth token and links .
46
8
237,156
def http_req ( blink , url = 'http://example.com' , data = None , headers = None , reqtype = 'get' , stream = False , json_resp = True , is_retry = False ) : if reqtype == 'post' : req = Request ( 'POST' , url , headers = headers , data = data ) elif reqtype == 'get' : req = Request ( 'GET' , url , headers = headers ) else : _LOGGER . error ( "Invalid request type: %s" , reqtype ) raise BlinkException ( ERROR . REQUEST ) prepped = req . prepare ( ) try : response = blink . session . send ( prepped , stream = stream , timeout = 10 ) if json_resp and 'code' in response . json ( ) : if is_retry : _LOGGER . error ( "Cannot obtain new token for server auth." ) return None else : headers = attempt_reauthorization ( blink ) if not headers : raise exceptions . ConnectionError return http_req ( blink , url = url , data = data , headers = headers , reqtype = reqtype , stream = stream , json_resp = json_resp , is_retry = True ) except ( exceptions . ConnectionError , exceptions . Timeout ) : _LOGGER . info ( "Cannot connect to server with url %s." , url ) if not is_retry : headers = attempt_reauthorization ( blink ) return http_req ( blink , url = url , data = data , headers = headers , reqtype = reqtype , stream = stream , json_resp = json_resp , is_retry = True ) _LOGGER . error ( "Endpoint %s failed. Possible issue with Blink servers." , url ) return None if json_resp : return response . json ( ) return response
Perform server requests and check if reauthorization neccessary .
393
14
237,157
def start ( self ) : if self . _username is None or self . _password is None : if not self . login ( ) : return elif not self . get_auth_token ( ) : return camera_list = self . get_cameras ( ) networks = self . get_ids ( ) for network_name , network_id in networks . items ( ) : if network_id not in camera_list . keys ( ) : camera_list [ network_id ] = { } _LOGGER . warning ( "No cameras found for %s" , network_name ) sync_module = BlinkSyncModule ( self , network_name , network_id , camera_list [ network_id ] ) sync_module . start ( ) self . sync [ network_name ] = sync_module self . cameras = self . merge_cameras ( )
Perform full system setup .
184
6
237,158
def login ( self ) : self . _username = input ( "Username:" ) self . _password = getpass . getpass ( "Password:" ) if self . get_auth_token ( ) : _LOGGER . debug ( "Login successful!" ) return True _LOGGER . warning ( "Unable to login with %s." , self . _username ) return False
Prompt user for username and password .
80
8
237,159
def get_auth_token ( self , is_retry = False ) : if not isinstance ( self . _username , str ) : raise BlinkAuthenticationException ( ERROR . USERNAME ) if not isinstance ( self . _password , str ) : raise BlinkAuthenticationException ( ERROR . PASSWORD ) login_urls = [ LOGIN_URL , OLD_LOGIN_URL , LOGIN_BACKUP_URL ] response = self . login_request ( login_urls , is_retry = is_retry ) if not response : return False self . _host = "{}.{}" . format ( self . region_id , BLINK_URL ) self . _token = response [ 'authtoken' ] [ 'authtoken' ] self . networks = response [ 'networks' ] self . _auth_header = { 'Host' : self . _host , 'TOKEN_AUTH' : self . _token } self . urls = BlinkURLHandler ( self . region_id ) return self . _auth_header
Retrieve the authentication token from Blink .
227
8
237,160
def login_request ( self , login_urls , is_retry = False ) : try : login_url = login_urls . pop ( 0 ) except IndexError : _LOGGER . error ( "Could not login to blink servers." ) return False _LOGGER . info ( "Attempting login with %s" , login_url ) response = api . request_login ( self , login_url , self . _username , self . _password , is_retry = is_retry ) try : if response . status_code != 200 : response = self . login_request ( login_urls ) response = response . json ( ) ( self . region_id , self . region ) , = response [ 'region' ] . items ( ) except AttributeError : _LOGGER . error ( "Login API endpoint failed with response %s" , response , exc_info = True ) return False except KeyError : _LOGGER . warning ( "Could not extract region info." ) self . region_id = 'piri' self . region = 'UNKNOWN' self . _login_url = login_url return response
Make a login request .
243
5
237,161
def get_ids ( self ) : response = api . request_networks ( self ) all_networks = [ ] network_dict = { } for network , status in self . networks . items ( ) : if status [ 'onboarded' ] : all_networks . append ( '{}' . format ( network ) ) network_dict [ status [ 'name' ] ] = network # For the first onboarded network we find, grab the account id for resp in response [ 'networks' ] : if str ( resp [ 'id' ] ) in all_networks : self . account_id = resp [ 'account_id' ] break self . network_ids = all_networks return network_dict
Set the network ID and Account ID .
155
8
237,162
def get_cameras ( self ) : response = api . request_homescreen ( self ) try : all_cameras = { } for camera in response [ 'cameras' ] : camera_network = str ( camera [ 'network_id' ] ) camera_name = camera [ 'name' ] camera_id = camera [ 'id' ] camera_info = { 'name' : camera_name , 'id' : camera_id } if camera_network not in all_cameras : all_cameras [ camera_network ] = [ ] all_cameras [ camera_network ] . append ( camera_info ) return all_cameras except KeyError : _LOGGER . error ( "Initialization failue. Could not retrieve cameras." ) return { }
Retrieve a camera list for each onboarded network .
173
11
237,163
def refresh ( self , force_cache = False ) : if self . check_if_ok_to_update ( ) or force_cache : for sync_name , sync_module in self . sync . items ( ) : _LOGGER . debug ( "Attempting refresh of sync %s" , sync_name ) sync_module . refresh ( force_cache = force_cache ) if not force_cache : # Prevents rapid clearing of motion detect property self . last_refresh = int ( time . time ( ) ) return True return False
Perform a system refresh .
116
6
237,164
def check_if_ok_to_update ( self ) : current_time = int ( time . time ( ) ) last_refresh = self . last_refresh if last_refresh is None : last_refresh = 0 if current_time >= ( last_refresh + self . refresh_rate ) : return True return False
Check if it is ok to perform an http request .
73
11
237,165
def merge_cameras ( self ) : combined = CaseInsensitiveDict ( { } ) for sync in self . sync : combined = merge_dicts ( combined , self . sync [ sync ] . cameras ) return combined
Merge all sync camera dicts into one .
48
10
237,166
def download_videos ( self , path , since = None , camera = 'all' , stop = 10 ) : if since is None : since_epochs = self . last_refresh else : parsed_datetime = parse ( since , fuzzy = True ) since_epochs = parsed_datetime . timestamp ( ) formatted_date = get_time ( time_to_convert = since_epochs ) _LOGGER . info ( "Retrieving videos since %s" , formatted_date ) if not isinstance ( camera , list ) : camera = [ camera ] for page in range ( 1 , stop ) : response = api . request_videos ( self , time = since_epochs , page = page ) _LOGGER . debug ( "Processing page %s" , page ) try : result = response [ 'videos' ] if not result : raise IndexError except ( KeyError , IndexError ) : _LOGGER . info ( "No videos found on page %s. Exiting." , page ) break self . _parse_downloaded_items ( result , camera , path )
Download all videos from server since specified time .
237
9
237,167
def _parse_downloaded_items ( self , result , camera , path ) : for item in result : try : created_at = item [ 'created_at' ] camera_name = item [ 'camera_name' ] is_deleted = item [ 'deleted' ] address = item [ 'address' ] except KeyError : _LOGGER . info ( "Missing clip information, skipping..." ) continue if camera_name not in camera and 'all' not in camera : _LOGGER . debug ( "Skipping videos for %s." , camera_name ) continue if is_deleted : _LOGGER . debug ( "%s: %s is marked as deleted." , camera_name , address ) continue clip_address = "{}{}" . format ( self . urls . base_url , address ) filename = "{}_{}.mp4" . format ( camera_name , created_at ) filename = os . path . join ( path , filename ) if os . path . isfile ( filename ) : _LOGGER . info ( "%s already exists, skipping..." , filename ) continue response = api . http_get ( self , url = clip_address , stream = True , json = False ) with open ( filename , 'wb' ) as vidfile : copyfileobj ( response . raw , vidfile ) _LOGGER . info ( "Downloaded video to %s" , filename )
Parse downloaded videos .
302
5
237,168
def request_login ( blink , url , username , password , is_retry = False ) : headers = { 'Host' : DEFAULT_URL , 'Content-Type' : 'application/json' } data = dumps ( { 'email' : username , 'password' : password , 'client_specifier' : 'iPhone 9.2 | 2.2 | 222' } ) return http_req ( blink , url = url , headers = headers , data = data , json_resp = False , reqtype = 'post' , is_retry = is_retry )
Login request .
125
3
237,169
def request_networks ( blink ) : url = "{}/networks" . format ( blink . urls . base_url ) return http_get ( blink , url )
Request all networks information .
38
5
237,170
def request_network_status ( blink , network ) : url = "{}/network/{}" . format ( blink . urls . base_url , network ) return http_get ( blink , url )
Request network information .
44
4
237,171
def request_syncmodule ( blink , network ) : url = "{}/network/{}/syncmodules" . format ( blink . urls . base_url , network ) return http_get ( blink , url )
Request sync module info .
47
5
237,172
def request_system_arm ( blink , network ) : url = "{}/network/{}/arm" . format ( blink . urls . base_url , network ) return http_post ( blink , url )
Arm system .
47
3
237,173
def request_system_disarm ( blink , network ) : url = "{}/network/{}/disarm" . format ( blink . urls . base_url , network ) return http_post ( blink , url )
Disarm system .
49
4
237,174
def request_command_status ( blink , network , command_id ) : url = "{}/network/{}/command/{}" . format ( blink . urls . base_url , network , command_id ) return http_get ( blink , url )
Request command status .
57
4
237,175
def request_homescreen ( blink ) : url = "{}/api/v3/accounts/{}/homescreen" . format ( blink . urls . base_url , blink . account_id ) return http_get ( blink , url )
Request homescreen info .
57
5
237,176
def request_sync_events ( blink , network ) : url = "{}/events/network/{}" . format ( blink . urls . base_url , network ) return http_get ( blink , url )
Request events from sync module .
46
6
237,177
def request_video_count ( blink ) : url = "{}/api/v2/videos/count" . format ( blink . urls . base_url ) return http_get ( blink , url )
Request total video count .
45
5
237,178
def request_videos ( blink , time = None , page = 0 ) : timestamp = get_time ( time ) url = "{}/api/v2/videos/changed?since={}&page={}" . format ( blink . urls . base_url , timestamp , page ) return http_get ( blink , url )
Perform a request for videos .
70
7
237,179
def request_cameras ( blink , network ) : url = "{}/network/{}/cameras" . format ( blink . urls . base_url , network ) return http_get ( blink , url )
Request all camera information .
49
5
237,180
def request_camera_sensors ( blink , network , camera_id ) : url = "{}/network/{}/camera/{}/signals" . format ( blink . urls . base_url , network , camera_id ) return http_get ( blink , url )
Request camera sensor info for one camera .
63
8
237,181
def request_motion_detection_enable ( blink , network , camera_id ) : url = "{}/network/{}/camera/{}/enable" . format ( blink . urls . base_url , network , camera_id ) return http_post ( blink , url )
Enable motion detection for a camera .
63
7
237,182
def http_get ( blink , url , stream = False , json = True , is_retry = False ) : if blink . auth_header is None : raise BlinkException ( ERROR . AUTH_TOKEN ) _LOGGER . debug ( "Making GET request to %s" , url ) return http_req ( blink , url = url , headers = blink . auth_header , reqtype = 'get' , stream = stream , json_resp = json , is_retry = is_retry )
Perform an http get request .
108
7
237,183
def http_post ( blink , url , is_retry = False ) : if blink . auth_header is None : raise BlinkException ( ERROR . AUTH_TOKEN ) _LOGGER . debug ( "Making POST request to %s" , url ) return http_req ( blink , url = url , headers = blink . auth_header , reqtype = 'post' , is_retry = is_retry )
Perform an http post request .
90
7
237,184
def sls ( minuend , subtrahend , metric = "ssd" , noise = "global" , signed = True , sn_size = None , sn_footprint = None , sn_mode = "reflect" , sn_cval = 0.0 , pn_size = None , pn_footprint = None , pn_mode = "reflect" , pn_cval = 0.0 ) : minuend = numpy . asarray ( minuend ) subtrahend = numpy . asarray ( subtrahend ) if numpy . iscomplexobj ( minuend ) : raise TypeError ( 'complex type not supported' ) if numpy . iscomplexobj ( subtrahend ) : raise TypeError ( 'complex type not supported' ) mshape = [ ii for ii in minuend . shape if ii > 0 ] sshape = [ ii for ii in subtrahend . shape if ii > 0 ] if not len ( mshape ) == len ( sshape ) : raise RuntimeError ( "minuend and subtrahend must be of same shape" ) if not numpy . all ( [ sm == ss for sm , ss in zip ( mshape , sshape ) ] ) : raise RuntimeError ( "minuend and subtrahend must be of same shape" ) sn_footprint = __make_footprint ( minuend , sn_size , sn_footprint ) sn_fshape = [ ii for ii in sn_footprint . shape if ii > 0 ] if len ( sn_fshape ) != minuend . ndim : raise RuntimeError ( 'search neighbourhood footprint array has incorrect shape.' ) #!TODO: Is this required? if not sn_footprint . flags . contiguous : sn_footprint = sn_footprint . copy ( ) # created a padded copy of the subtrahend, whereas the padding mode is always 'reflect' subtrahend = pad ( subtrahend , footprint = sn_footprint , mode = sn_mode , cval = sn_cval ) # compute slicers for position where the search neighbourhood sn_footprint is TRUE slicers = [ [ slice ( x , ( x + 1 ) - d if 0 != ( x + 1 ) - d else None ) for x in range ( d ) ] for d in sn_fshape ] slicers = [ sl for sl , tv in zip ( itertools . product ( * slicers ) , sn_footprint . 
flat ) if tv ] # compute difference images and sign images for search neighbourhood elements ssds = [ ssd ( minuend , subtrahend [ slicer ] , normalized = True , signed = signed , size = pn_size , footprint = pn_footprint , mode = pn_mode , cval = pn_cval ) for slicer in slicers ] distance = [ x [ 0 ] for x in ssds ] distance_sign = [ x [ 1 ] for x in ssds ] # compute local variance, which constitutes an approximation of local noise, out of patch-distances over the neighbourhood structure variance = numpy . average ( distance , 0 ) variance = gaussian_filter ( variance , sigma = 3 ) #!TODO: Figure out if a fixed sigma is desirable here... I think that yes if 'global' == noise : variance = variance . sum ( ) / float ( numpy . product ( variance . shape ) ) # variance[variance < variance_global / 10.] = variance_global / 10. #!TODO: Should I keep this i.e. regularizing the variance to be at least 10% of the global one? # compute sls sls = [ dist_sign * numpy . exp ( - 1 * ( dist / variance ) ) for dist_sign , dist in zip ( distance_sign , distance ) ] # convert into sls image, swapping dimensions to have varying patches in the last dimension return numpy . rollaxis ( numpy . asarray ( sls ) , 0 , minuend . ndim + 1 )
r Computes the signed local similarity between two images .
874
11
237,185
def average_filter ( input , size = None , footprint = None , output = None , mode = "reflect" , cval = 0.0 , origin = 0 ) : footprint = __make_footprint ( input , size , footprint ) filter_size = footprint . sum ( ) output = _get_output ( output , input ) sum_filter ( input , footprint = footprint , output = output , mode = mode , cval = cval , origin = origin ) output /= filter_size return output
r Calculates a multi - dimensional average filter .
107
10
237,186
def sum_filter ( input , size = None , footprint = None , output = None , mode = "reflect" , cval = 0.0 , origin = 0 ) : footprint = __make_footprint ( input , size , footprint ) slicer = [ slice ( None , None , - 1 ) ] * footprint . ndim return convolve ( input , footprint [ slicer ] , output , mode , cval , origin )
r Calculates a multi - dimensional sum filter .
91
10
237,187
def _gaussian_membership_sigma ( smoothness , eps = 0.0005 ) : # 275us @ smothness=10 error = 0 deltas = [ 0.1 , 0.01 , 0.001 , 0.0001 ] sigma = smoothness * 0.3 point = - 1. * ( smoothness + 0.5 ) for delta in deltas : while error < eps : sigma += delta error = scipy . stats . norm . cdf ( 0.5 , point , sigma ) - scipy . stats . norm . cdf ( - 0.5 , point , sigma ) # x, mu, sigma sigma -= delta return sigma
r Compute the sigma required for a gaussian such that in a neighbourhood of smoothness the maximum error is eps . The error is here the difference between the clipped integral and one .
154
39
237,188
def out_of_date ( original , derived ) : return ( not os . path . exists ( derived ) or os . stat ( derived ) . st_mtime < os . stat ( original ) . st_mtime )
Returns True if derivative is out - of - date wrt original both of which are full file paths .
48
21
237,189
def local_maxima ( vector , min_distance = 4 , brd_mode = "wrap" ) : fits = gaussian_filter ( numpy . asarray ( vector , dtype = numpy . float32 ) , 1. , mode = brd_mode ) for ii in range ( len ( fits ) ) : if fits [ ii ] == fits [ ii - 1 ] : fits [ ii - 1 ] = 0.0 maxfits = maximum_filter ( fits , size = min_distance , mode = brd_mode ) maxima_mask = fits == maxfits maximum = numpy . transpose ( maxima_mask . nonzero ( ) ) return numpy . asarray ( maximum )
Internal finder for local maxima . Returns UNSORTED indices of maxima in input vector .
151
21
237,190
def local_minima ( vector , min_distance = 4 , brd_mode = "wrap" ) : fits = gaussian_filter ( numpy . asarray ( vector , dtype = numpy . float32 ) , 1. , mode = brd_mode ) for ii in range ( len ( fits ) ) : if fits [ ii ] == fits [ ii - 1 ] : fits [ ii - 1 ] = numpy . pi / 2.0 minfits = minimum_filter ( fits , size = min_distance , mode = brd_mode ) minima_mask = fits == minfits minima = numpy . transpose ( minima_mask . nonzero ( ) ) return numpy . asarray ( minima )
Internal finder for local minima . Returns UNSORTED indices of minima in input vector .
158
21
237,191
def find_valley_range ( vector , min_distance = 4 ) : # http://users.monash.edu.au/~dengs/resource/papers/icme08.pdf # find min and max with mode = wrap mode = "wrap" minima = local_minima ( vector , min_distance , mode ) maxima = local_maxima ( vector , min_distance , mode ) if len ( maxima ) > len ( minima ) : if vector [ maxima [ 0 ] ] >= vector [ maxima [ - 1 ] ] : maxima = maxima [ 1 : ] else : maxima = maxima [ : - 1 ] if len ( maxima ) == len ( minima ) : valley_range = numpy . asarray ( [ minima [ ii + 1 ] - minima [ ii ] for ii in range ( len ( minima ) - 1 ) ] + [ len ( vector ) - minima [ - 1 ] + minima [ 0 ] ] ) if minima [ 0 ] < maxima [ 0 ] : minima = numpy . asarray ( list ( minima ) + [ minima [ 0 ] ] ) else : minima = numpy . asarray ( list ( minima ) + [ minima [ - 1 ] ] ) else : valley_range = numpy . asarray ( [ minima [ ii + 1 ] - minima [ ii ] for ii in range ( len ( maxima ) ) ] ) return maxima , minima , valley_range
Internal finder peaks and valley ranges . Returns UNSORTED indices of maxima in input vector . Returns range of valleys before and after maximum
326
29
237,192
def gauss_xminus1d ( img , sigma , dim = 2 ) : img = numpy . array ( img , copy = False ) return xminus1d ( img , gaussian_filter , dim , sigma = sigma )
r Applies a X - 1D gauss to a copy of a XD image slicing it along dim .
53
22
237,193
def anisotropic_diffusion ( img , niter = 1 , kappa = 50 , gamma = 0.1 , voxelspacing = None , option = 1 ) : # define conduction gradients functions if option == 1 : def condgradient ( delta , spacing ) : return numpy . exp ( - ( delta / kappa ) ** 2. ) / float ( spacing ) elif option == 2 : def condgradient ( delta , spacing ) : return 1. / ( 1. + ( delta / kappa ) ** 2. ) / float ( spacing ) elif option == 3 : kappa_s = kappa * ( 2 ** 0.5 ) def condgradient ( delta , spacing ) : top = 0.5 * ( ( 1. - ( delta / kappa_s ) ** 2. ) ** 2. ) / float ( spacing ) return numpy . where ( numpy . abs ( delta ) <= kappa_s , top , 0 ) # initialize output array out = numpy . array ( img , dtype = numpy . float32 , copy = True ) # set default voxel spacing if not supplied if voxelspacing is None : voxelspacing = tuple ( [ 1. ] * img . ndim ) # initialize some internal variables deltas = [ numpy . zeros_like ( out ) for _ in range ( out . ndim ) ] for _ in range ( niter ) : # calculate the diffs for i in range ( out . ndim ) : slicer = [ slice ( None , - 1 ) if j == i else slice ( None ) for j in range ( out . ndim ) ] deltas [ i ] [ slicer ] = numpy . diff ( out , axis = i ) # update matrices matrices = [ condgradient ( delta , spacing ) * delta for delta , spacing in zip ( deltas , voxelspacing ) ] # subtract a copy that has been shifted ('Up/North/West' in 3D case) by one # pixel. Don't as questions. just do it. trust me. for i in range ( out . ndim ) : slicer = [ slice ( 1 , None ) if j == i else slice ( None ) for j in range ( out . ndim ) ] matrices [ i ] [ slicer ] = numpy . diff ( matrices [ i ] , axis = i ) # update the image out += gamma * ( numpy . sum ( matrices , axis = 0 ) ) return out
r Edge - preserving XD Anisotropic diffusion .
539
10
237,194
def __voxel_4conectedness ( shape ) : shape = list ( shape ) while 1 in shape : shape . remove ( 1 ) # empty resp. 1-sized dimensions have to be removed (equal to scipy.squeeze on the array) return int ( round ( sum ( [ ( dim - 1 ) / float ( dim ) for dim in shape ] ) * scipy . prod ( shape ) ) )
Returns the number of edges for the supplied image shape assuming 4 - connectedness . The name of the function has historical reasons . Essentially it returns the number of edges assuming 4 - connectedness only for 2D . For 3D it assumes 6 - connectedness etc .
92
53
237,195
def __skeleton_base ( graph , image , boundary_term , neighbourhood_function , spacing ) : image = scipy . asarray ( image ) image = image . astype ( scipy . float_ ) # iterate over the image dimensions and for each create the appropriate edges and compute the associated weights for dim in range ( image . ndim ) : # construct slice-objects for the current dimension slices_exclude_last = [ slice ( None ) ] * image . ndim slices_exclude_last [ dim ] = slice ( - 1 ) slices_exclude_first = [ slice ( None ) ] * image . ndim slices_exclude_first [ dim ] = slice ( 1 , None ) # compute difference between all layers in the current dimensions direction neighbourhood_intensity_term = neighbourhood_function ( image [ slices_exclude_last ] , image [ slices_exclude_first ] ) # apply boundary term neighbourhood_intensity_term = boundary_term ( neighbourhood_intensity_term ) # compute key offset for relative key difference offset_key = [ 1 if i == dim else 0 for i in range ( image . ndim ) ] offset = __flatten_index ( offset_key , image . shape ) # generate index offset function for index dependent offset idx_offset_divider = ( image . shape [ dim ] - 1 ) * offset idx_offset = lambda x : int ( x / idx_offset_divider ) * offset # weight the computed distanced in dimension dim by the corresponding slice spacing provided if spacing : neighbourhood_intensity_term /= spacing [ dim ] for key , value in enumerate ( neighbourhood_intensity_term . ravel ( ) ) : # apply index dependent offset key += idx_offset ( key ) # add edges and set the weight graph . set_nweight ( key , key + offset , value , value )
Base of the skeleton for voxel based boundary term calculation . This function holds the low level procedures shared by nearly all boundary terms .
399
27
237,196
def __range ( a , bins ) : a = numpy . asarray ( a ) a_max = a . max ( ) a_min = a . min ( ) s = 0.5 * ( a_max - a_min ) / float ( bins - 1 ) return ( a_min - s , a_max + s )
Compute the histogram range of the values in the array a according to scipy . stats . histogram .
73
24
237,197
def centerdistance ( image , voxelspacing = None , mask = slice ( None ) ) : if type ( image ) == tuple or type ( image ) == list : image = image [ 0 ] return _extract_feature ( _extract_centerdistance , image , mask , voxelspacing = voxelspacing )
r Takes a simple or multi - spectral image and returns its voxel - wise center distance in mm . A multi - spectral image must be supplied as a list or tuple of its spectra . Optionally a binary mask can be supplied to select the voxels for which the feature should be extracted . The center distance is the exact euclidean distance in mm of each voxels center to the central point of the overal image volume . Note that this feature is independent of the actual image content but depends solely on its shape . Therefore always a one - dimensional feature is returned even if a multi - spectral image has been supplied .
73
130
237,198
def mask_distance ( image , voxelspacing = None , mask = slice ( None ) ) : if type ( image ) == tuple or type ( image ) == list : image = image [ 0 ] return _extract_mask_distance ( image , mask = mask , voxelspacing = voxelspacing )
r Computes the distance of each point under the mask to the mask border taking the voxel - spacing into account . Note that this feature is independent of the actual image content but depends solely the mask image . Therefore always a one - dimensional feature is returned even if a multi - spectral image has been supplied . If no mask has been supplied the distances to the image borders are returned .
71
78
237,199
def _extract_local_histogram ( image , mask = slice ( None ) , bins = 19 , rang = "image" , cutoffp = ( 0.0 , 100.0 ) , size = None , footprint = None , output = None , mode = "ignore" , origin = 0 ) : if "constant" == mode : raise RuntimeError ( 'boundary mode not supported' ) elif "ignore" == mode : mode = "constant" if 'image' == rang : rang = tuple ( numpy . percentile ( image [ mask ] , cutoffp ) ) elif not 2 == len ( rang ) : raise RuntimeError ( 'the rang must contain exactly two elements or the string "image"' ) _ , bin_edges = numpy . histogram ( [ ] , bins = bins , range = rang ) output = _get_output ( numpy . float if None == output else output , image , shape = [ bins ] + list ( image . shape ) ) # threshold the image into the histogram bins represented by the output images first dimension, treat last bin separately, since upper border is inclusive for i in range ( bins - 1 ) : output [ i ] = ( image >= bin_edges [ i ] ) & ( image < bin_edges [ i + 1 ] ) output [ - 1 ] = ( image >= bin_edges [ - 2 ] ) & ( image <= bin_edges [ - 1 ] ) # apply the sum filter to each dimension, then normalize by dividing through the sum of elements in the bins of each histogram for i in range ( bins ) : output [ i ] = sum_filter ( output [ i ] , size = size , footprint = footprint , output = None , mode = mode , cval = 0.0 , origin = origin ) divident = numpy . sum ( output , 0 ) divident [ 0 == divident ] = 1 output /= divident # Notes on modes: # mode=constant with a cval outside histogram range for the histogram equals a mode=constant with a cval = 0 for the sum_filter # mode=constant with a cval inside histogram range for the histogram has no equal for the sum_filter (and does not make much sense) # mode=X for the histogram equals mode=X for the sum_filter # treat as multi-spectral image which intensities to extracted return _extract_feature ( _extract_intensities , [ h for h in output ] , mask )
Internal single - image version of
537
6