idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
46,700
def prepare_env(app, env, docname):
    """Prepare the Sphinx environment to store sphinx-needs internal data."""
    # Make sure the internal containers exist on the environment.
    for attr in ('needs_all_needs', 'needs_functions'):
        if not hasattr(env, attr):
            setattr(env, attr, {})

    configured_funcs = app.needs_functions
    if configured_funcs is None:
        configured_funcs = []
    if not isinstance(configured_funcs, list):
        raise SphinxError('Config parameter needs_functions must be a list!')

    # Register built-in functions first, then the user-configured ones.
    for builtin_func in needs_common_functions:
        register_func(env, builtin_func)
    for user_func in configured_funcs:
        register_func(env, user_func)

    # 'hidden' is always available as an option but never rendered.
    app.config.needs_hide_options += ['hidden']
    app.config.needs_extra_options['hidden'] = directives.unchanged

    if not hasattr(env, 'needs_workflow'):
        env.needs_workflow = {
            'backlink_creation': False,
            'dynamic_values_resolved': False,
        }
Prepares the sphinx environment to store sphinx - needs internal data .
46,701
def make_entity_name(name):
    """Create a valid PlantUML entity name from the given value.

    Every character PlantUML cannot digest is replaced by an underscore.
    """
    translation = str.maketrans({char: "_" for char in "-=!#$%^&*[](){}/~'`<>:;"})
    return name.translate(translation)
Creates a valid PlantUML entity name from the given value .
46,702
def copy(app, need, needs, option, need_id=None):
    """Copy the value of one need option to another.

    If *need_id* is given, the value is read from that need instead of
    the current one.
    """
    source = need if need_id is None else needs[need_id]
    return source[option]
Copies the value of one need option to another
46,703
def check_linked_values(app, need, needs, result, search_option, search_value, filter_string=None, one_hit=False):
    """Return *result* if linked needs carry the given option value.

    Default mode requires ALL links to match (any miss returns None);
    with one_hit=True a SINGLE matching link is enough.
    """
    links = need["links"]
    # Normalize to a list so membership tests below always work.
    if not isinstance(search_value, list):
        search_value = [search_value]
    for link in links:
        if filter_string is not None:
            try:
                # Links excluded by the filter are skipped entirely.
                if not filter_single_need(needs[link], filter_string):
                    continue
            except Exception as e:
                # NOTE(review): on a broken filter the link is still checked
                # below (no continue) - confirm that is intended.
                logger.warning("CheckLinkedValues: Filter {0} not valid: Error: {1}".format(filter_string, e))
        if not one_hit and not needs[link][search_option] in search_value:
            # All-links mode: a single mismatch fails the whole check.
            return None
        elif one_hit and needs[link][search_option] in search_value:
            # Any-link mode: first match wins.
            return result
    return result
Returns a specific value if , for all linked needs , a given option has a given value .
46,704
def calc_sum(app, need, needs, option, filter=None, links_only=False):
    """Sum the numeric values of *option* over needs into a single number.

    With *links_only* set, only the needs linked from the current need are
    considered; otherwise all needs are. Values that cannot be cast to
    float are skipped silently.
    """
    if links_only:
        candidates = [needs[link] for link in need["links"]]
    else:
        candidates = needs.values()

    total = 0
    for candidate in candidates:
        if filter is not None:
            try:
                if not filter_single_need(candidate, filter):
                    continue
            except ValueError:
                pass
            except NeedInvalidFilter as ex:
                logger.warning('Given filter is not valid. Error: {}'.format(ex))
        try:
            total += float(candidate[option])
        except ValueError:
            continue
    return total
Sums the values of a given option in filtered needs up to single number .
46,705
def noaa_prompt_1():
    """Ask the user for the project name and version needed to build the WDS URL.

    :return tuple: (project, version) as typed by the user
    """
    print("Enter the project information below. We'll use this to create the WDS URL")
    answers = []
    for question in ("What is the project name?", "What is the project version?"):
        print(question)
        answers.append(input(">"))
    return tuple(answers)
For converting LiPD files to NOAA we need a couple more pieces of information to create the WDS links
46,706
def lpd_to_noaa(D, wds_url, lpd_url, version, path=""):
    """Convert a LiPD record to NOAA format and write the NOAA text files.

    Returns the updated LiPD data; on any failure the original data is
    returned unchanged.
    """
    logger_noaa.info("enter process_lpd")
    result = D
    try:
        # Dataset name and version may only keep letters, digits, '-' and '.'.
        dsn = re.sub(r'[^A-Za-z-.0-9]', '', get_dsn(D))
        clean_version = re.sub(r'[^A-Za-z-.0-9]', '', version)
        converter = LPD_NOAA(D, dsn, wds_url, lpd_url, clean_version, path)
        converter.main()
        result = converter.get_master()
        __write_noaas(converter.get_noaa_texts(), path)
        result = __rm_wdc_url(result)
    except Exception as e:
        logger_noaa.error("lpd_to_noaa: {}".format(e))
        print("Error: lpd_to_noaa: {}".format(e))
    return result
Convert a LiPD format to NOAA format
46,707
def __write_noaas(dat, path):
    """Write each filename/text pair in *dat* as a NOAA text file under *path*."""
    for filename, text in dat.items():
        target = os.path.join(path, filename)
        try:
            with open(target, "w+") as f:
                f.write(text)
        except Exception as e:
            # Best effort: report and keep writing the remaining files.
            print("write_noaas: There was a problem writing the NOAA text file: {}: {}".format(filename, e))
    return
Use the filename - text data pairs to write the data as NOAA text files
46,708
def _parse_java_version(line: str) -> tuple:
    """Return the version number found in the first line of ``java -version``.

    Legacy strings like ``1.8.0_144`` are returned as a
    (minor, patch, update) tuple; modern strings (``9.0.1``) are handed to
    ``parse_version`` directly. Falls back to ``0.0.0`` when nothing matches.
    """
    m = VERSION_RE.search(line)
    version_str = m.group(0).replace('"', '') if m else '0.0.0'
    if '_' in version_str:
        # Fixed: maxsplit must be 1 - with the previous maxsplit=2 a string
        # containing two underscores produced three parts and broke the
        # two-name unpacking below.
        fst, snd = version_str.split('_', maxsplit=1)
        # assumes parse_version returns a (major, minor, patch)-style
        # tuple - TODO confirm
        version = parse_version(fst)
        return (version[1], version[2], int(snd))
    else:
        return parse_version(version_str)
Return the version number found in the first line of java - version
46,709
def find_java_home(cratedb_version: tuple) -> str:
    """Return a path to a JAVA_HOME suited to the given CrateDB version."""
    if MIN_VERSION_FOR_JVM11 <= cratedb_version < (4, 0):
        return os.environ.get('JAVA_HOME', '')
    if cratedb_version < MIN_VERSION_FOR_JVM11:
        # Older CrateDB requires exactly Java 8.
        matcher = lambda ver: ver[0] == 8
    else:
        # Newer CrateDB works with Java 11 or later.
        matcher = lambda ver: ver[0] >= 11
    return _find_matching_java_home(matcher)
Return a path to a JAVA_HOME suited to the given CrateDB version
46,710
def percentile(sorted_values, p):
    """Calculate the p-th percentile using the nearest-rank method.

    :param sorted_values: values sorted in ascending order
    :param p: percentile (0-100)
    :raises ValueError: if there are too few data points for the request
    """
    size = len(sorted_values)
    idx = (p / 100.0) * size - 0.5
    # Fixed: the guard was "idx > size", which let idx == size through and
    # raised IndexError on the lookup below instead of the intended ValueError.
    if idx < 0 or idx >= size:
        raise ValueError('Too few data points ({}) for {}th percentile'.format(size, p))
    return sorted_values[int(idx)]
Calculate the percentile using the nearest rank method .
46,711
def get_sampler(sample_mode: str):
    """Return a sampler constructor for the given sample-mode string.

    Accepted values: ``'all'``, ``'reservoir'`` and ``'reservoir:<size>'``.
    """
    if sample_mode == 'all':
        return All
    parts = sample_mode.split(':')
    if parts[0] != 'reservoir':
        raise TypeError(f'Invalid sample_mode: {sample_mode}')
    if len(parts) == 2:
        # An explicit reservoir size was requested.
        return partial(UniformReservoir, size=int(parts[1]))
    return UniformReservoir
Return a sampler constructor
46,712
def _get_sheet_metadata(workbook, name):
    """Get worksheet metadata.

    The sheet names tell us what type of table it is and where in the
    LiPD structure the data should be placed.
    """
    ct_paleo = 1
    ct_chron = 1
    metadata_str = ""
    sheets = []
    # Sheets whose names contain any of these words are ignored entirely.
    skip_sheets = ["example", "sample", "lists", "guidelines"]
    for sheet in workbook.sheet_names():
        # Normalized name: lowercase with all whitespace removed.
        old = "".join(sheet.lower().strip().split())
        if not any(word in sheet.lower() for word in skip_sheets):
            if 'metadata' in sheet.lower():
                metadata_str = sheet
            elif "about" not in sheet.lower() and "proxy" not in sheet.lower():
                logger_excel.info("creating sheets metadata")
                m = re.match(re_sheet, sheet.lower())
                if m:
                    # Sheet name follows the standard naming pattern.
                    # NOTE(review): the returned paleo_ct/chron_ct are never
                    # fed back into ct_paleo/ct_chron - confirm intended.
                    sheets, paleo_ct, chron_ct = _sheet_meta_from_regex(m, sheets, sheet, name, ct_paleo, ct_chron)
                elif old == "data" or "data(qc)" in old or "data(original)" in old:
                    # Legacy "data" sheets become paleo measurement tables.
                    sheets.append({"paleo_chron": "paleo", "idx_pc": ct_paleo, "idx_model": None,
                                   "table_type": "measurement", "idx_table": 1, "old_name": sheet,
                                   "new_name": sheet,
                                   "filename": "paleo{}measurementTable1.csv".format(ct_paleo),
                                   "table_name": "paleo{}measurementTable1".format(ct_paleo),
                                   "data": ""})
                    ct_paleo += 1
                elif old == "chronology":
                    sheets.append({"paleo_chron": "chron", "idx_pc": ct_chron, "idx_model": None,
                                   "table_type": "measurement", "idx_table": 1, "old_name": sheet,
                                   "new_name": sheet,
                                   "filename": "chron{}measurementTable1.csv".format(ct_chron),
                                   "table_name": "chron{}measurementTable1".format(ct_chron),
                                   "data": ""})
                    ct_chron += 1
                else:
                    # Non-standard name: fall back to prompting the user.
                    print("This sheet name does not conform to naming standard: {}".format(sheet))
                    sheets, paleo_ct, chron_ct = _sheet_meta_from_prompts(sheets, sheet, name, ct_paleo, ct_chron)
    return sheets, ct_paleo, ct_chron, metadata_str
Get worksheet metadata . The sheet names tell us what type of table it is and where in the LiPD structure the data should be placed .
46,713
def idx_num_to_name(L):
    """Switch a LiPD record from index-by-number to index-by-name.

    Converts the paleoData and chronData sections and returns the record.
    """
    logger_jsons.info("enter idx_num_to_name")
    try:
        if "paleoData" in L:
            L["paleoData"] = _import_data(L["paleoData"], "paleo")
        if "chronData" in L:
            L["chronData"] = _import_data(L["chronData"], "chron")
    except Exception as e:
        logger_jsons.error("idx_num_to_name: {}".format(e))
        # Fixed: the error message previously named the wrong function
        # ("idx_name_to_num").
        print("Error: idx_num_to_name: {}".format(e))
    logger_jsons.info("exit idx_num_to_name")
    return L
Switch from index - by - number to index - by - name .
46,714
def _import_data(sections, crumbs):
    """Convert a list of section metadata to an index-by-name OrderedDict."""
    logger_jsons.info("enter import_data: {}".format(crumbs))
    out = OrderedDict()
    try:
        for idx, section in enumerate(sections):
            entry = OrderedDict()
            if "measurementTable" in section:
                entry["measurementTable"] = _idx_table_by_name(
                    section["measurementTable"], "{}{}{}".format(crumbs, idx, "measurement"))
            if "model" in section:
                entry["model"] = _import_model(
                    section["model"], "{}{}{}".format(crumbs, idx, "model"))
            key = "{}{}".format(crumbs, idx)
            if key in out:
                # Avoid clobbering an existing section with the same name.
                key = "{}_{}".format(key, idx)
            out[key] = entry
    except Exception as e:
        logger_jsons.error("import_data: Exception: {}".format(e))
        print("Error: import_data: {}".format(e))
    logger_jsons.info("exit import_data: {}".format(crumbs))
    return out
Import the section metadata and change it to index - by - name .
46,715
def _import_model(models, crumbs):
    """Convert a list of model metadata to an index-by-name OrderedDict.

    The nested summary/ensemble/distribution tables are re-indexed by name.
    """
    # Fixed: the crumbs argument was silently dropped because the log
    # message had no '{}' placeholder.
    logger_jsons.info("enter import_model: {}".format(crumbs))
    _models = OrderedDict()
    try:
        for _idx, model in enumerate(models):
            for key, suffix in (("summaryTable", "summary"),
                                ("ensembleTable", "ensemble"),
                                ("distributionTable", "distribution")):
                if key in model:
                    model[key] = _idx_table_by_name(model[key], "{}{}{}".format(crumbs, _idx, suffix))
            _models["{}{}".format(crumbs, _idx)] = model
    except Exception as e:
        logger_jsons.error("import_model: {}".format(e))
        print("Error: import_model: {}".format(e))
    logger_jsons.info("exit import_model: {}".format(crumbs))
    return _models
Change the nested items of the paleoModel data . Overwrite the data in - place .
46,716
def _idx_table_by_name(tables, crumbs):
    """Re-index a list of summary/ensemble/distribution tables by generated name."""
    out = OrderedDict()
    try:
        for pos, table in enumerate(tables):
            key = "{}{}".format(crumbs, pos)
            data = _idx_col_by_name(table)
            if key in out:
                # Duplicate name: disambiguate with the position suffix.
                key = "{}_{}".format(key, pos)
            data["tableName"] = key
            out[key] = data
    except Exception as e:
        logger_jsons.error("idx_table_by_name: {}".format(e))
        print("Error: idx_table_by_name: {}".format(e))
    return out
Import summary ensemble or distribution data .
46,717
def _idx_col_by_name(table):
    """Turn the table's indexed-by-number column list into an
    indexed-by-name dict keyed by variable name. Mutates *table* in place.
    """
    named = OrderedDict()
    try:
        for col in table["columns"]:
            try:
                key = col["variableName"]
                if key in named:
                    # Duplicate variable names get an appended suffix.
                    key = get_appended_name(key, named)
                named[key] = col
            except Exception as e:
                print("Error: idx_col_by_name: inner: {}".format(e))
                logger_jsons.info("idx_col_by_name: inner: {}".format(e))
        table["columns"] = named
    except Exception as e:
        print("Error: idx_col_by_name: {}".format(e))
        logger_jsons.error("idx_col_by_name: {}".format(e))
    return table
Iter over columns list . Turn indexed - by - num list into an indexed - by - name dict . Keys are the variable names .
46,718
def _export_model(models):
    """Switch model tables back to index-by-number, returning a list of models."""
    logger_jsons.info("enter export_model")
    exported = []
    try:
        for _name, model in models.items():
            for key in ("summaryTable", "ensembleTable", "distributionTable"):
                if key in model:
                    model[key] = _idx_table_by_num(model[key])
            exported.append(model)
    except Exception as e:
        logger_jsons.error("export_model: {}".format(e))
        print("Error: export_model: {}".format(e))
    logger_jsons.info("exit export_model")
    return exported
Switch model tables to index - by - number
46,719
def _idx_table_by_num(tables):
    """Convert an index-by-name dict of tables to an index-by-number list."""
    logger_jsons.info("enter idx_table_by_num")
    out = []
    for _name, table in tables.items():
        try:
            out.append(_idx_col_by_num(table))
        except Exception as e:
            logger_jsons.error("idx_table_by_num: {}".format(e))
    logger_jsons.info("exit idx_table_by_num")
    return out
Switch tables to index - by - number
46,720
def _idx_col_by_num(table):
    """Index columns by number instead of by name.

    Uses each column's "number" key to restore the original column order.
    Mutates *table* in place and returns it.
    """
    _columns = []
    try:
        # Pre-size so each column can be placed at slot number-1.
        _columns = [None for i in range(0, len(table["columns"]))]
        for _name, _dat in table["columns"].items():
            try:
                if isinstance(_dat["number"], list):
                    # A list-valued "number" references several CSV columns;
                    # such columns are appended after the pre-sized slots.
                    # NOTE(review): this leaves None placeholders in the
                    # slots they would have filled - confirm downstream
                    # consumers tolerate that.
                    _columns.append(_dat)
                else:
                    n = int(_dat["number"])
                    _columns[n - 1] = _dat
            except KeyError as ke:
                print("Error: idx_col_by_num: {}".format(ke))
                logger_jsons.error("idx_col_by_num: KeyError: missing number key: {}, {}".format(_name, ke))
            except Exception as e:
                print("Error: idx_col_by_num: {}".format(e))
                logger_jsons.error("idx_col_by_num: Exception: {}".format(e))
        table["columns"] = _columns
    except Exception as e:
        logger_jsons.error("idx_col_by_num: {}".format(e))
        print("Error: idx_col_by_num: {}".format(e))
    return table
Index columns by number instead of by name . Use number key in column to maintain order
46,721
def _parse_tags(tag_file):
    """Parse a tag file according to RFC 2822.

    This includes line folding, permitting extra-long field values.
    Yields (tag_name, tag_value) pairs.
    """
    tag_name = None
    tag_value = None
    for num, line in enumerate(tag_file):
        # Strip a byte-order mark from the very first line only.
        if num == 0:
            if line.startswith(BOM):
                line = line.lstrip(BOM)
        if len(line) == 0 or line.isspace():
            continue
        elif line[0].isspace() and tag_value is not None:
            # Folded (continuation) line: belongs to the current value.
            tag_value += line
        else:
            # A new tag starts; emit the previous one first.
            if tag_name:
                yield (tag_name, tag_value.strip())
            if ':' not in line:
                raise BagValidationError("invalid line '%s' in %s" % (line.strip(), os.path.basename(tag_file.name)))
            parts = line.strip().split(':', 1)
            tag_name = parts[0].strip()
            tag_value = parts[1]
    # Emit the final tag, if any.
    if tag_name:
        yield (tag_name, tag_value.strip())
Parses a tag file according to RFC 2822 . This includes line folding , permitting extra-long field values .
46,722
def compare_fetch_with_fs(self):
    """Compare the fetch entries with the files actually in the payload.

    :return: list of files listed for fetching that are not yet on disk
    """
    on_disk = set(self.payload_files())
    listed = set(self.files_to_be_fetched())
    return list(listed - on_disk)
Compares the fetch entries with the files actually in the payload and returns a list of all the files that still need to be fetched .
46,723
def _validate_bagittxt(self):
    """Verify that bagit.txt conforms to the specification (must not start with a BOM)."""
    bagit_file_path = os.path.join(self.path, "bagit.txt")
    with open(bagit_file_path, 'r') as bagit_file:
        # Only the first line needs checking for a byte-order mark.
        if bagit_file.readline().startswith(BOM):
            raise BagValidationError("bagit.txt must not contain a byte-order mark")
Verify that bagit . txt conforms to specification
46,724
def resolve_dynamic_values(env):
    """Resolve dynamic values ("[[func(...)]]" markers) inside need data.

    Runs at most once per build, guarded by the 'dynamic_values_resolved'
    workflow flag on the environment.
    """
    # Nothing to do if a previous call already resolved everything.
    if env.needs_workflow['dynamic_values_resolved']:
        return
    needs = env.needs_all_needs
    for key, need in needs.items():
        for need_option in need:
            # Internal bookkeeping options never contain dynamic functions.
            if need_option in ['docname', 'lineno', 'target_node', 'content']:
                continue
            if not isinstance(need[need_option], (list, set)):
                # Scalar option: substitute function calls until none remain.
                func_call = True
                while func_call:
                    try:
                        func_call, func_return = _detect_and_execute(need[need_option], need, env)
                    except FunctionParsingException:
                        raise SphinxError("Function definition of {option} in file {file}:{line} has "
                                          "unsupported parameters. "
                                          "supported are str, int/float, list".format(option=need_option,
                                                                                      file=need['docname'],
                                                                                      line=need['lineno']))
                    if func_call is None:
                        # No (further) function call found in this value.
                        continue
                    # Replace the marker with the result; a None result
                    # removes the marker entirely.
                    if func_return is None:
                        need[need_option] = need[need_option].replace('[[{}]]'.format(func_call), '')
                    else:
                        need[need_option] = need[need_option].replace('[[{}]]'.format(func_call), str(func_return))
                    if need[need_option] == '':
                        need[need_option] = None
            else:
                # List/set option: resolve each element separately.
                new_values = []
                for element in need[need_option]:
                    try:
                        func_call, func_return = _detect_and_execute(element, need, env)
                    except FunctionParsingException:
                        raise SphinxError("Function definition of {option} in file {file}:{line} has "
                                          "unsupported parameters. "
                                          "supported are str, int/float, list".format(option=need_option,
                                                                                      file=need['docname'],
                                                                                      line=need['lineno']))
                    if func_call is None:
                        # Element contains no function call; keep it as is.
                        new_values.append(element)
                    else:
                        if isinstance(need[need_option], (str, int, float)):
                            new_values.append(element.replace('[[{}]]'.format(func_call), str(func_return)))
                        else:
                            if isinstance(need[need_option], (list, set)):
                                # Function results extend the list directly.
                                new_values += func_return
                need[need_option] = new_values
    # Remember that dynamic values got resolved for this build.
    env.needs_workflow['dynamic_values_resolved'] = True
Resolve dynamic values inside need data .
46,725
def run():
    """Initialize module-level state; called automatically on package import."""
    global cwd, files, logger_start, logger_benchmark, settings, _timeseries_data
    _timeseries_data = {}
    settings = {"note_update": True, "note_validate": True, "verbose": True}
    cwd = os.getcwd()
    logger_start = create_logger("start")
    logger_benchmark = create_benchmark("benchmarks", "benchmark.log")
    # One bucket of discovered files per supported extension.
    files = {ext: [] for ext in (".txt", ".lpd", ".xls")}
    return
Initialize and start objects . This is called automatically when importing the package .
46,726
def excel():
    """Convert the registered Excel files to LiPD files and return the LiPD data."""
    global files, cwd, settings
    results = {}
    # Excel conversion does its own reporting; silence the reader.
    settings["verbose"] = False
    print("Found " + str(len(files[".xls"])) + " Excel files")
    logger_start.info("found excel files: {}".format(len(files[".xls"])))
    start = clock()
    for file in files[".xls"]:
        dsn = excel_main(file)
        try:
            results[dsn] = readLipd(os.path.join(file["dir"], dsn + ".lpd"))
            writeLipd(results[dsn], cwd)
        except Exception as e:
            logger_start.error("excel: Unable to read new LiPD file, {}".format(e))
            print("Error: Unable to read new LiPD file: {}, {}".format(dsn, e))
    end = clock()
    logger_benchmark.info(log_benchmark("excel", start, end))
    settings["verbose"] = True
    return results
Convert Excel files to LiPD files . LiPD data is returned directly from this function .
46,727
def noaa(D="", path="", wds_url="", lpd_url="", version=""):
    """Convert between NOAA and LiPD files.

    Mode "1" (chosen at the prompt) converts LiPD -> NOAA; mode "2"
    converts NOAA -> LiPD.
    """
    global files, cwd
    _mode = noaa_prompt()
    start = clock()
    if _mode == "1":
        # LiPD -> NOAA needs the WDS parameters and LiPD data.
        if not version or not lpd_url:
            print("Missing parameters: Please try again and provide all parameters.")
            return
        if not D:
            print("Error: LiPD data must be provided for LiPD -> NOAA conversions")
        else:
            if "paleoData" in D:
                # A single dataset was passed in directly.
                _d = copy.deepcopy(D)
                D = lpd_to_noaa(_d, wds_url, lpd_url, version, path)
            else:
                # A library of datasets keyed by dataset name.
                for dsn, dat in D.items():
                    _d = copy.deepcopy(dat)
                    _d = lpd_to_noaa(_d, wds_url, lpd_url, version, path)
                    D[dsn] = _d
            if not wds_url:
                # Without a WDS URL the placeholder links are removed.
                D = rm_wds_url(D)
            if (path):
                writeLipd(D, path)
            else:
                print("Path not provided. Writing to CWD...")
                writeLipd(D, cwd)
    elif _mode == "2":
        noaa_to_lpd(files)
    else:
        print("Invalid input. Try again.")
    end = clock()
    logger_benchmark.info(log_benchmark("noaa", start, end))
    return
Convert between NOAA and LiPD files
46,728
def collapseTs(ts=None):
    """Collapse a time series back into LiPD record form."""
    global _timeseries_data
    result = {}
    if not ts:
        print("Error: Time series data not provided. Pass time series into the function.")
        return result
    try:
        # The raw record the time series was originally created from.
        raw = _timeseries_data[ts[0]["time_id"]]
        print(mode_ts("collapse", mode="", ts=ts))
        result = collapse(ts, raw)
        result = rm_empty_fields(result)
    except Exception as e:
        print("Error: Unable to collapse the time series: {}".format(e))
        logger_start.error("collapseTs: unable to collapse the time series: {}".format(e))
    return result
Collapse a time series back into LiPD record form .
46,729
def filterTs(ts, expressions):
    """Create a new time series that only contains entries matching the
    given expression (a string or a list of expression strings).
    """
    matched = ts[:]
    if isinstance(expressions, str):
        queries = [expressions]
    elif isinstance(expressions, list):
        queries = expressions
    else:
        queries = []
    for query in queries:
        translated = translate_expression(query)
        if translated:
            # Each expression narrows down the previous matches.
            matched, _idx = get_matches(translated, matched)
    return matched
Create a new time series that only contains entries that match the given expression .
46,730
def queryTs(ts, expression):
    """Find the indices of the time series entries that match the given
    expression (a string or a list of expression strings).

    :return list: matching indices (empty when nothing matches)
    """
    _idx = []
    # Fixed: the body referenced the undefined names 'expressions' and
    # 'new_ts', so any call raised NameError.
    new_ts = ts[:]
    if isinstance(expression, str):
        expr_lst = translate_expression(expression)
        if expr_lst:
            new_ts, _idx = get_matches(expr_lst, new_ts)
    elif isinstance(expression, list):
        for expr in expression:
            expr_lst = translate_expression(expr)
            if expr_lst:
                new_ts, _idx = get_matches(expr_lst, new_ts)
    return _idx
Find the indices of the time series entries that match the given expression .
46,731
def viewTs(ts):
    """View the contents of one time series entry in a nicely formatted way."""
    _ts = ts
    if isinstance(ts, list):
        # A full time series was passed: only show its first entry.
        _ts = ts[0]
        print("It looks like you input a full time series. It's best to view one entry at a time.\n"
              "I'll show you the first entry...")
    # Display buckets, printed in this order.
    _tmp_sort = OrderedDict()
    _tmp_sort["ROOT"] = {}
    _tmp_sort["PUBLICATION"] = {}
    _tmp_sort["GEO"] = {}
    _tmp_sort["OTHERS"] = {}
    _tmp_sort["DATA"] = {}
    # Sort each key of the entry into its display bucket.
    for k, v in _ts.items():
        if not any(i == k for i in ["paleoData", "chronData", "mode", "@context"]):
            if k in ["archiveType", "dataSetName", "googleSpreadSheetKey", "metadataMD5",
                     "tagMD5", "googleMetadataWorksheet", "lipdVersion"]:
                _tmp_sort["ROOT"][k] = v
            elif "pub" in k:
                _tmp_sort["PUBLICATION"][k] = v
            elif "geo" in k:
                _tmp_sort["GEO"][k] = v
            elif "paleoData_" in k or "chronData_" in k:
                # Long value lists are abbreviated to their first three items.
                if isinstance(v, list) and len(v) > 2:
                    _tmp_sort["DATA"][k] = "[{}, {}, {}, ...]".format(v[0], v[1], v[2])
                else:
                    _tmp_sort["DATA"][k] = v
            else:
                if isinstance(v, list) and len(v) > 2:
                    _tmp_sort["OTHERS"][k] = "[{}, {}, {}, ...]".format(v[0], v[1], v[2])
                else:
                    _tmp_sort["OTHERS"][k] = v
    # Print each bucket with its entries.
    for k1, v1 in _tmp_sort.items():
        print("\n{}\n===============".format(k1))
        for k2, v2 in v1.items():
            print("{} : {}".format(k2, v2))
    return
View the contents of one time series entry in a nicely formatted way
46,732
def showLipds(D=None):
    """Display the dataset names of the given LiPD data.

    :param dict D: LiPD data keyed by dataset name
    """
    if not D:
        print("Error: LiPD data not provided. Pass LiPD data into the function.")
    else:
        # Fixed: a dict-keys view is not JSON serializable, so json.dumps
        # raised TypeError; convert to a list first.
        print(json.dumps(list(D.keys()), indent=2))
    return
Display the dataset names of a given LiPD data
46,733
def showMetadata(dat):
    """Pretty-print the metadata of the given LiPD data (values removed)."""
    # Deep copy so stripping the values never touches the caller's data.
    metadata = rm_values_fields(copy.deepcopy(dat))
    print(json.dumps(metadata, indent=2))
    return
Display the metadata specified LiPD in pretty print
46,734
def showDfs(d):
    """Display the available data frame names in a data frame collection."""
    if "metadata" in d:
        print("metadata")
    for section in ("paleoData", "chronData"):
        if section in d:
            try:
                for frame_name in d[section].keys():
                    print(frame_name)
            except (KeyError, AttributeError):
                # Tolerate sections that are missing or not dict-like.
                pass
    return
Display the available data frame names in a given data frame collection
46,735
def getLipdNames(D=None):
    """Get a list of all LiPD dataset names in the library.

    :return list: dataset names; empty when no data is provided
    """
    try:
        if not D:
            print("Error: LiPD data not provided. Pass LiPD data into the function.")
            return []
        # Fixed: previously returned a dict-keys view even though the
        # function initializes and documents a list.
        return list(D.keys())
    except Exception:
        return []
Get a list of all LiPD names in the library
46,736
def getMetadata(L):
    """Get the metadata (values stripped) from a LiPD dataset in memory."""
    result = {}
    try:
        # Work on a deep copy so the caller's data stays intact.
        result = copy.deepcopy(L)
        result = rm_values_fields(result)
    except Exception as e:
        print("Error: Unable to get data. Please check that input is LiPD data: {}".format(e))
    return result
Get metadata from a LiPD data in memory
46,737
def getCsv(L=None):
    """Get the CSV data from LiPD metadata."""
    csv_data = {}
    try:
        if not L:
            print("Error: LiPD data not provided. Pass LiPD data into the function.")
        else:
            _j, csv_data = get_csv_from_metadata(L["dataSetName"], L)
    except KeyError as ke:
        print("Error: Unable to get data. Please check that input is one LiPD dataset: {}".format(ke))
    except Exception as e:
        print("Error: Unable to get data. Something went wrong: {}".format(e))
        logger_start.warn("getCsv: Exception: Unable to process lipd data: {}".format(e))
    return csv_data
Get CSV from LiPD metadata
46,738
def __universal_read(file_path, file_type):
    """Create file metadata for *file_path* and register it by file type."""
    global files, cwd, settings
    correct_ext = load_fn_matches_ext(file_path, file_type)
    valid_path = path_type(file_path, "file")
    if not (valid_path and correct_ext):
        return
    file_meta = collect_metadata_file(file_path)
    if file_type == ".lpd":
        files[".lpd"].append(file_meta)
    elif file_type in [".xls", ".xlsx"]:
        print("reading: {}".format(print_filename(file_meta["full_path"])))
        files[".xls"].append(file_meta)
    elif file_type == ".txt":
        print("reading: {}".format(print_filename(file_meta["full_path"])))
        files[".txt"].append(file_meta)
    # Remember the file's directory as the new working directory.
    cwd = file_meta["dir"]
    if cwd:
        os.chdir(cwd)
    return
Use a file path to create file metadata and load a file in the appropriate way according to the provided file type .
46,739
def __read_lipd_contents():
    """Read the contents of the registered LiPD files into a dataset library."""
    global files, settings
    library = {}
    try:
        lpd_files = files[".lpd"]
        if len(lpd_files) == 1:
            # A single file is returned directly, not keyed by name.
            library = lipd_read(lpd_files[0]["full_path"])
            if settings["verbose"]:
                print("Finished read: 1 record")
        else:
            for entry in lpd_files:
                library[entry["filename_no_ext"]] = lipd_read(entry["full_path"])
            if settings["verbose"]:
                print("Finished read: {} records".format(len(library)))
    except Exception as e:
        print("Error: read_lipd_contents: {}".format(e))
    return library
Use the file metadata to read in the LiPD file contents as a dataset library
46,740
def __read_file(usr_path, file_type):
    """Read a single file of the given type, prompting for a path when none is given."""
    global files
    if usr_path:
        __universal_read(usr_path, file_type)
        return
    # No path given: ask the user which file(s) to read.
    src_dir, src_files = get_src_or_dst("read", "file")
    if src_files:
        for file_path in src_files:
            __universal_read(file_path, file_type)
    else:
        print("No file(s) chosen")
    return
Universal read file . Given a path and a type it will do the appropriate read actions
46,741
def __read_directory(usr_path, file_type):
    """Read all files of the given type from a directory, prompting when needed."""
    if not usr_path:
        usr_path, src_files = get_src_or_dst("read", "directory")
    if not path_type(usr_path, "directory"):
        print("Directory path is not valid: {}".format(usr_path))
        return
    found = []
    if file_type == ".xls":
        # Excel covers both the .xls and .xlsx extensions.
        found += list_files(".xlsx", usr_path)
    found += list_files(file_type, usr_path)
    print("Found: {} {} file(s)".format(len(found), FILE_TYPE_MAP[file_type]["file_type"]))
    for file_path in found:
        __read_file(file_path, file_type)
    return
Universal read directory . Given a path and a type it will do the appropriate read actions
46,742
def __write_lipd(dat, usr_path):
    """Write LiPD data (one dataset or a library of datasets) to *usr_path*."""
    global settings
    if not usr_path:
        usr_path, _ignore = get_src_or_dst("write", "directory")
    if not path_type(usr_path, "directory"):
        return
    if "paleoData" in dat:
        # A single dataset was passed in directly.
        try:
            if settings["verbose"]:
                print("writing: {}".format(dat["dataSetName"]))
            lipd_write(dat, usr_path)
        except KeyError as ke:
            print("Error: Unable to write file: unknown, {}".format(ke))
        except Exception as e:
            print("Error: Unable to write file: {}, {}".format(dat["dataSetName"], e))
    elif dat:
        # A library of datasets keyed by dataset name.
        for name, lipd_dat in dat.items():
            try:
                if settings["verbose"]:
                    print("writing: {}".format(name))
                lipd_write(lipd_dat, usr_path)
            except Exception as e:
                print("Error: Unable to write file: {}, {}".format(name, e))
    return
Write LiPD data to file provided an output directory and dataset name .
46,743
def __disclaimer(opt=""):
    """Print each disclaimer once; skip ones that were already shown.

    :param str opt: which disclaimer to show: "update" or "validate"
    """
    global settings
    # Fixed: strings were compared with 'is' (identity), which only works by
    # accident of CPython interning; '==' is the correct comparison.
    if opt == "update":
        print("Disclaimer: LiPD files may be updated and modified to adhere to standards\n")
        settings["note_update"] = False
    if opt == "validate":
        print("Note: Use lipd.validate() or www.LiPD.net/create "
              "to ensure that your new LiPD file(s) are valid")
        settings["note_validate"] = False
    return
Print the disclaimers once . If they have already been shown , skip over .
46,744
def get_shreds(self, feature_extractors, sheet_name):
    """Detect shreds in the current sheet and construct Shred instances.

    The result is cached on the instance after the first call.
    """
    if self._shreds is None:
        shreds = []
        # NOTE(review): the three-value findContours return is the
        # OpenCV 3.x API - confirm the pinned OpenCV version.
        _, contours, _ = cv2.findContours(self._foreground_mask,
                                          cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)
        for i, contour in enumerate(contours):
            shred = self._make_shred(contour, i, feature_extractors, sheet_name)
            # A None result means the contour was rejected as a shred.
            if shred is not None:
                shreds.append(shred)
        self._shreds = shreds
    return self._shreds
Detects shreds in the current sheet and constructs Shred instances .
46,745
def run_track(track,
              result_hosts=None,
              crate_root=None,
              output_fmt=None,
              logfile_info=None,
              logfile_result=None,
              failfast=False,
              sample_mode='reservoir'):
    """Execute a track file; exits the process with status 1 on failure."""
    with Logger(output_fmt=output_fmt,
                logfile_info=logfile_info,
                logfile_result=logfile_result) as log:
        runner = Executor(
            track_dir=os.path.dirname(track),
            log=log,
            result_hosts=result_hosts,
            crate_root=crate_root,
            fail_fast=failfast,
            sample_mode=sample_mode,
        )
        if runner.execute(toml.load(track)):
            sys.exit(1)
Execute a track file
46,746
def update_lipd_version(L):
    """Update a LiPD record to v1.3, applying each version migration in turn.

    Metadata is indexed by number at this step.
    """
    L, version = get_lipd_version(L)
    # Each migration step: (recognized versions, migration func, new version).
    migrations = (
        ((1.0, "1.0"), update_lipd_v1_1, 1.1),
        ((1.1, "1.1"), update_lipd_v1_2, 1.2),
        ((1.2, "1.2"), update_lipd_v1_3, 1.3),
    )
    for recognized, migrate, next_version in migrations:
        if version in recognized:
            L = migrate(L)
            version = next_version
    L = fix_doi(L)
    L["lipdVersion"] = 1.3
    return L
Metadata is indexed by number at this step .
46,747
def update_lipd_v1_1(d):
    """Update LiPD v1.0 to v1.1.

    - chronData entry becomes a list that allows multiple tables
    - chronData now allows measurement, model, summary, ensemble and
      distribution tables
    - adds the lipdVersion key
    """
    logger_versions.info("enter update_lipd_v1_1")
    tmp_all = []
    try:
        if "chronData" in d:
            # Wrap each chron table into the new chronMeasurementTable layout.
            for table in d["chronData"]:
                if "chronMeasurementTable" not in table:
                    tmp_all.append({"chronMeasurementTable": [table]})
                elif "chronMeasurementTable" in table:
                    if isinstance(table["chronMeasurementTable"], dict):
                        tmp_all.append({"chronMeasurementTable": [table["chronMeasurementTable"]]})
                    # NOTE(review): a table whose chronMeasurementTable is
                    # already a list is silently dropped here - confirm such
                    # data cannot occur in v1.0 files.
            if tmp_all:
                d["chronData"] = tmp_all
        d["lipdVersion"] = 1.1
    except Exception as e:
        logger_versions.error("update_lipd_v1_1: Exception: {}".format(e))
    logger_versions.info("exit update_lipd_v1_1")
    return d
Update LiPD v1 . 0 to v1 . 1 - chronData entry is a list that allows multiple tables - paleoData entry is a list that allows multiple tables - chronData now allows measurement model summary modelTable ensemble calibratedAges tables - Added lipdVersion key
46,748
def merge_csv_metadata(d, csvs):
    """Insert CSV values into their respective metadata columns.

    Checks for both paleoData and chronData tables.
    """
    logger_csvs.info("enter merge_csv_metadata")
    for section, pc in (("paleoData", "paleo"), ("chronData", "chron")):
        if section in d:
            d[section] = _merge_csv_section(d[section], pc, csvs)
    logger_csvs.info("exit merge_csv_metadata")
    return d
Using the given metadata dictionary retrieve CSV data from CSV files and insert the CSV values into their respective metadata columns . Checks for both paleoData and chronData tables .
46,749
def _merge_csv_section(sections, pc, csvs):
    """Merge CSV data into every table of every section."""
    logger_csvs.info("enter merge_csv_section")
    try:
        for section_name, section in sections.items():
            if "measurementTable" in section:
                sections[section_name]["measurementTable"] = _merge_csv_table(
                    section["measurementTable"], pc, csvs)
            if "model" in section:
                sections[section_name]["model"] = _merge_csv_model(
                    section["model"], pc, csvs)
    except Exception as e:
        print("Error: There was an error merging CSV data into the metadata ")
        logger_csvs.error("merge_csv_section: {}".format(e))
    logger_csvs.info("exit merge_csv_section")
    return sections
Add csv data to all paleo data tables
46,750
def _merge_csv_model(models, pc, csvs):
    """Merge CSV values into the summary/ensemble/distribution tables of each model.

    :param dict models: model entries of one section
    :param str pc: "paleo" or "chron"
    :param dict csvs: CSV data keyed by filename
    :return dict: models with CSV values merged in
    """
    logger_csvs.info("enter merge_csv_model")
    try:
        for _name, _model in models.items():
            # Each model may carry up to three table types; merge whichever exist
            # (same order as the original explicit checks).
            for _table_key in ("summaryTable", "ensembleTable", "distributionTable"):
                if _table_key in _model:
                    models[_name][_table_key] = _merge_csv_table(_model[_table_key], pc, csvs)
    except Exception as e:
        # BUG FIX: original used '"...{}", format(e)' (comma), which called the
        # builtin format() and never substituted the placeholder.
        logger_csvs.error("merge_csv_model: {}".format(e))
    logger_csvs.info("exit merge_csv_model")
    return models
Merge CSV data into the summary, ensemble, and distribution tables of each model entry.
46,751
def _merge_csv_column ( table , csvs ) :
    """Attach CSV values to each column of one table.

    For ensemble tables a single column may hold *all* CSV columns (1-column
    case) or a depth column plus a multi-column block (2-column case).
    For ordinary tables each column takes the CSV column addressed by its
    1-based "number" field.

    :param dict table: table metadata with a "columns" dict
    :param list csvs: CSV data for this table, one list per column
    :return tuple: (table with values merged, bool ensemble flag)
    """
    try :
        ensemble = is_ensemble ( table [ "columns" ] )
        if ensemble :
            if len ( table [ "columns" ] ) == 1 :
                # Single ensemble column receives every CSV column.
                for _name , _column in table [ "columns" ] . items ( ) :
                    _column [ "values" ] = csvs
            elif len ( table [ "columns" ] ) == 2 :
                _multi_column = False
                for _name , _column in table [ "columns" ] . items ( ) :
                    if isinstance ( _column [ "number" ] , ( int , float ) ) :
                        # Scalar "number": this is the depth-style column.
                        col_num = cast_int ( _column [ "number" ] )
                        _column [ 'values' ] = csvs [ col_num - 1 ]
                    elif isinstance ( _column [ "number" ] , list ) :
                        # List "number": the multi-column ensemble block; only one allowed.
                        if _multi_column :
                            raise Exception ( "Error: merge_csv_column: This jsonld metadata looks wrong!\n" "\tAn ensemble table depth should not reference multiple columns of CSV data.\n" "\tPlease manually fix the ensemble columns in 'metadata.jsonld' inside of your LiPD file." )
                        else :
                            _multi_column = True
                            _column [ "values" ] = csvs [ 2 : ]
        else :
            # Normal table: map each column via its 1-based index.
            for _name , _column in table [ 'columns' ] . items ( ) :
                col_num = cast_int ( _column [ "number" ] )
                _column [ 'values' ] = csvs [ col_num - 1 ]
    except IndexError :
        logger_csvs . warning ( "merge_csv_column: IndexError: index out of range of csv_data list" )
    except KeyError :
        logger_csvs . error ( "merge_csv_column: KeyError: missing columns key" )
    except Exception as e :
        # Unknown errors are treated as fatal for the whole merge.
        logger_csvs . error ( "merge_csv_column: Unknown Error: {}" . format ( e ) )
        print ( "Quitting..." )
        exit ( 1 )
    return table , ensemble
Add csv data to each column in a list of columns
46,752
def read_csv_from_file(filename):
    """Open a CSV file and return its contents as a list of columns.

    :param str filename: path of the CSV file to read
    :return list: one list of (cast) values per CSV column; empty on missing file
    """
    logger_csvs.info("enter read_csv_from_file")
    d = {}
    columns = []
    try:
        logger_csvs.info("open file: {}".format(filename))
        with open(filename, 'r') as f:
            r = csv.reader(f, delimiter=',')
            # Seed one list per column from the first row, then fill from the rest.
            for idx, col in enumerate(next(r)):
                d[idx] = []
                d = cast_values_csvs(d, idx, col)
            for row in r:
                for idx, col in enumerate(row):
                    d = cast_values_csvs(d, idx, col)
        for idx, col in d.items():
            columns.append(col)
    except FileNotFoundError as e:
        print('CSV FileNotFound: ' + filename)
        # FIX: Logger.warn is a deprecated alias of Logger.warning, and the
        # message previously named the wrong function (read_csv_to_columns).
        logger_csvs.warning("read_csv_from_file: FileNotFound: {}, {}".format(filename, e))
    logger_csvs.info("exit read_csv_from_file")
    return columns
Opens the target CSV file and creates a dictionary with one list for each CSV column .
46,753
def write_csv_to_file ( d ) :
    """Write columns of data out to their target CSV files.

    :param dict d: mapping of filename -> column data for that file
    :return None:
    """
    logger_csvs . info ( "enter write_csv_to_file" )
    try :
        for filename , data in d . items ( ) :
            try :
                # Order columns by their "number" field, then transpose to rows.
                l_columns = _reorder_csv ( data , filename )
                rows = zip ( * l_columns )
                with open ( filename , 'w+' ) as f :
                    w = csv . writer ( f )
                    for row in rows :
                        row2 = decimal_precision ( row )
                        w . writerow ( row2 )
            except TypeError as e :
                print ( "Error: Unable to write values to CSV file, {}:\n" "(1) The data table may have 2 or more identical variables. Please correct the LiPD file manually\n" "(2) There may have been an error trying to prep the values for file write. The 'number' field in the data columns may be a 'string' instead of an 'integer' data type" . format ( filename ) )
                print ( e )
            except Exception as e :
                print ( "Error: CSV file not written, {}, {}:\n" "The data table may have 2 or more identical variables. Please correct the LiPD file manually" . format ( filename , e ) )
    except AttributeError as e :
        logger_csvs . error ( "write_csv_to_file: Unable to write CSV File: {}" . format ( e , exc_info = True ) )
    logger_csvs . info ( "exit write_csv_to_file" )
    return
Writes columns of data to a target CSV file .
46,754
def get_csv_from_metadata ( dsn , d ) :
    """Pull all CSV values out of the metadata and generate matching filenames.

    :param str dsn: dataset name, used as the filename prefix
    :param dict d: LiPD metadata (not modified; a deep copy is used)
    :return tuple: (metadata with generated filenames, OrderedDict of CSV data)
    """
    logger_csvs . info ( "enter get_csv_from_metadata" )
    _csvs = OrderedDict ( )
    _d = copy . deepcopy ( d )
    try :
        if "paleoData" in _d :
            _d [ "paleoData" ] , _csvs = _get_csv_from_section ( _d [ "paleoData" ] , "{}.paleo" . format ( dsn ) , _csvs )
        if "chronData" in _d :
            _d [ "chronData" ] , _csvs = _get_csv_from_section ( _d [ "chronData" ] , "{}.chron" . format ( dsn ) , _csvs )
    except Exception as e :
        print ( "Error: get_csv_from_metadata: {}, {}" . format ( dsn , e ) )
        logger_csvs . error ( "get_csv_from_metadata: {}, {}" . format ( dsn , e ) )
    logger_csvs . info ( "exit get_csv_from_metadata" )
    return _d , _csvs
Two goals . Get all csv from metadata and return new metadata with generated filenames to match files .
46,755
def _get_csv_from_section ( sections , crumbs , csvs ) :
    """Collect CSV data from every table of a paleo/chron section.

    :param dict sections: paleoData or chronData sections
    :param str crumbs: filename breadcrumb built so far (e.g. "dsn.paleo")
    :param OrderedDict csvs: accumulator of collected CSV data
    :return tuple: (sections with filenames set, updated csvs)
    """
    logger_csvs . info ( "enter get_csv_from_section: {}" . format ( crumbs ) )
    _idx = 0
    try :
        for _name , _section in sections . items ( ) :
            if "measurementTable" in _section :
                sections [ _name ] [ "measurementTable" ] , csvs = _get_csv_from_table ( _section [ "measurementTable" ] , "{}{}{}" . format ( crumbs , _idx , "measurement" ) , csvs )
            if "model" in _section :
                sections [ _name ] [ "model" ] , csvs = _get_csv_from_model ( _section [ "model" ] , "{}{}{}" . format ( crumbs , _idx , "model" ) , csvs )
            # Section counter feeds into the generated filenames.
            _idx += 1
    except Exception as e :
        logger_csvs . error ( "get_csv_from_section: {}, {}" . format ( crumbs , e ) )
        print ( "Error: get_csv_from_section: {}, {}" . format ( crumbs , e ) )
    logger_csvs . info ( "exit get_csv_from_section: {}" . format ( crumbs ) )
    return sections , csvs
Get the table name, variable name, and column values from paleo metadata.
46,756
def _get_csv_from_model(models, crumbs, csvs):
    """Collect CSV data from the distribution/summary/ensemble tables of each model.

    :param dict models: model entries of one section
    :param str crumbs: filename breadcrumb built so far
    :param OrderedDict csvs: accumulator of collected CSV data
    :return tuple: (models with filenames set, updated csvs)
    """
    logger_csvs.info("enter get_csv_from_model: {}".format(crumbs))
    _idx = 0
    try:
        for _name, _model in models.items():
            if "distributionTable" in _model:
                models[_name]["distributionTable"], csvs = _get_csv_from_table(_model["distributionTable"], "{}{}{}".format(crumbs, _idx, "distribution"), csvs)
            if "summaryTable" in _model:
                models[_name]["summaryTable"], csvs = _get_csv_from_table(_model["summaryTable"], "{}{}{}".format(crumbs, _idx, "summary"), csvs)
            if "ensembleTable" in _model:
                models[_name]["ensembleTable"], csvs = _get_csv_from_table(_model["ensembleTable"], "{}{}{}".format(crumbs, _idx, "ensemble"), csvs)
            # Model counter feeds into the generated filenames.
            _idx += 1
    except Exception as e:
        print("Error: get_csv_from_model: {}, {}".format(crumbs, e))
        logger_csvs.error("Error: get_csv_from_model: {}, {}".format(crumbs, e))
    # Consistency fix: siblings (_get_csv_from_section etc.) log their exit point.
    logger_csvs.info("exit get_csv_from_model: {}".format(crumbs))
    return models, csvs
Get csv from model data
46,757
def get_validator_format ( L ) :
    """Format LiPD data into the file-list layout that the lipd.net validator accepts.

    :param dict L: LiPD metadata including "dataSetName"
    :return list: one entry per (virtual) file: bagit text files, the jsonld
        metadata, and per-CSV column/row counts
    """
    _api_data = [ ]
    _j , _csvs = get_csv_from_metadata ( L [ "dataSetName" ] , L )
    # NOTE(review): _j from the call above is immediately discarded and rebuilt
    # from a fresh deep copy — presumably only _csvs is needed from that call;
    # confirm before simplifying.
    _j = rm_values_fields ( copy . deepcopy ( L ) )
    _j = idx_name_to_num ( _j )
    _filenames = [ "metadata.jsonld" , "bagit.txt" , "bag-info.txt" , "manifest-md5.txt" , "tagmanifest-md5.txt" ] + [ k for k , v in _csvs . items ( ) ]
    for filename in _filenames :
        _file = { "type" : "" , "filenameFull" : "" , "filenameShort" : "" , "data" : "" , "pretty" : "" }
        _short = filename
        if filename . endswith ( ".txt" ) :
            _file = { "type" : "bagit" , "filenameFull" : filename , "filenameShort" : _short }
        elif filename . endswith ( ".jsonld" ) :
            _file = { "type" : "json" , "filenameFull" : filename , "filenameShort" : _short , "data" : _j }
        elif filename . endswith ( ".csv" ) :
            # The validator only needs the CSV dimensions, not the values.
            _cols_rows = { "cols" : 0 , "rows" : 0 }
            ensemble = is_ensemble ( _csvs [ _short ] )
            if ensemble :
                _cols_rows = get_ensemble_counts ( _csvs [ _short ] )
            else :
                _cols_rows [ "cols" ] = len ( _csvs [ _short ] )
                # All columns share a row count, so the first one is enough.
                for k , v in _csvs [ _short ] . items ( ) :
                    _cols_rows [ "rows" ] = len ( v [ "values" ] )
                    break
            _file = { "type" : "csv" , "filenameFull" : filename , "filenameShort" : _short , "data" : _cols_rows }
        _api_data . append ( _file )
    return _api_data
Format the LIPD data in the layout that the Lipd . net validator accepts .
46,758
def create_detailed_results(result):
    """Build a printable multi-line summary of one validator result.

    :param dict result: validator response with "status" and "feedback"
    :return str: formatted status, warning, and error lines
    """
    lines = ["STATUS: {}\n".format(result["status"])]
    feedback = result["feedback"]
    if feedback:
        lines.append("WARNINGS: {}\n".format(len(feedback["wrnMsgs"])))
        lines.extend("- {}\n".format(msg) for msg in feedback["wrnMsgs"])
        lines.append("ERRORS: {}\n".format(len(feedback["errMsgs"])))
        lines.extend("- {}\n".format(msg) for msg in feedback["errMsgs"])
    return "".join(lines)
Use the result from the API call to create an organized single string output for printing to the console .
46,759
def display_results(results, detailed=False):
    """Print validator results, either one summary line per file or full detail.

    :param list results: validator result dicts
    :param bool detailed: True for full warning/error listings
    :return None:
    """
    if not detailed:
        print('FILENAME......................................... STATUS..........')
    for entry in results:
        try:
            if detailed:
                print("\n{}".format(entry["filename"]))
                print(create_detailed_results(entry))
                continue
            print("{:<50}{}".format(entry["filename"], entry["status"]))
        except Exception as err:
            logger_validator_api.debug("display_results: Exception: {}".format(err))
            print("Error: display_results: {}".format(err))
    return
Display the results from the validator in a brief or detailed output
46,760
def call_validator_api ( dsn , api_data ) :
    """Make a single call to the lipd.net validator API.

    :param str dsn: dataset name; the reported filename is "<dsn>.lpd"
    :param list api_data: payload in the validator's file-list format
    :return dict: parsed validator response, or a stub result describing the failure
    """
    _filename = dsn + ".lpd"
    try :
        api_data = json . dumps ( api_data )
        payload = { 'json_payload' : api_data , 'apikey' : 'lipd_linked' }
        response = requests . post ( 'http://www.lipd.net/api/validator' , data = payload )
        # Map known HTTP failures to stub results; anything else is assumed JSON.
        if response . status_code == 413 :
            result = { "dat" : { } , "feedback" : { } , "filename" : _filename , "status" : "HTTP 413: Request Entity Too Large" }
        elif response . status_code == 404 :
            result = { "dat" : { } , "feedback" : { } , "filename" : _filename , "status" : "HTTP 404: Not Found" }
        elif response . status_code == 400 :
            result = { "dat" : { } , "feedback" : { } , "filename" : _filename , "status" : response . text }
        else :
            result = json . loads ( response . text )
    except TypeError as e :
        logger_validator_api . warning ( "get_validator_results: TypeError: {}" . format ( e ) )
        result = { "dat" : { } , "feedback" : { } , "filename" : _filename , "status" : "JSON DECODE ERROR" }
    except requests . exceptions . ConnectionError as e :
        logger_validator_api . warning ( "get_validator_results: ConnectionError: {}" . format ( e ) )
        result = { "dat" : { } , "feedback" : { } , "filename" : _filename , "status" : "UNABLE TO REACH SERVER" }
    except Exception as e :
        logger_validator_api . debug ( "get_validator_results: Exception: {}" . format ( e ) )
        result = { "dat" : { } , "feedback" : { } , "filename" : _filename , "status" : "ERROR BEFORE VALIDATION, {}" . format ( e ) }
    if not result :
        result = { "dat" : { } , "feedback" : { } , "filename" : _filename , "status" : "EMPTY RESPONSE" }
    # Always stamp the filename so callers can match results to inputs.
    result [ "filename" ] = _filename
    return result
Single call to the lipd . net validator API
46,761
def procces_filters ( all_needs , current_needlist ) :
    """Filter all needs with the given needlist configuration.

    Applies optional sorting, then the status/tags/types option filters, then
    the free-text filter string; the two result sets are reconciled by
    check_need_list().

    :param list all_needs: all collected needs
    :param dict current_needlist: needlist options (sort_by, status, tags,
        types, filter)
    :return list: needs passing all filters
    """
    if current_needlist [ "sort_by" ] is not None :
        if current_needlist [ "sort_by" ] == "id" :
            all_needs = sorted ( all_needs , key = lambda node : node [ "id" ] )
        elif current_needlist [ "sort_by" ] == "status" :
            all_needs = sorted ( all_needs , key = status_sorter )
    found_needs_by_options = [ ]
    all_needs_incl_parts = prepare_need_list ( all_needs )
    for need_info in all_needs_incl_parts :
        # Each option filter passes when unset/empty or when the need matches.
        status_filter_passed = False
        if current_needlist [ "status" ] is None or len ( current_needlist [ "status" ] ) == 0 :
            status_filter_passed = True
        elif need_info [ "status" ] is not None and need_info [ "status" ] in current_needlist [ "status" ] :
            status_filter_passed = True
        tags_filter_passed = False
        if len ( set ( need_info [ "tags" ] ) & set ( current_needlist [ "tags" ] ) ) > 0 or len ( current_needlist [ "tags" ] ) == 0 :
            tags_filter_passed = True
        type_filter_passed = False
        if need_info [ "type" ] in current_needlist [ "types" ] or need_info [ "type_name" ] in current_needlist [ "types" ] or len ( current_needlist [ "types" ] ) == 0 :
            type_filter_passed = True
        if status_filter_passed and tags_filter_passed and type_filter_passed :
            found_needs_by_options . append ( need_info )
    found_needs_by_string = filter_needs ( all_needs_incl_parts , current_needlist [ "filter" ] )
    found_needs = check_need_list ( found_needs_by_options , found_needs_by_string )
    return found_needs
Filters all needs with given configuration
46,762
def filter_needs(needs, filter_string="", filter_parts=True, merge_part_with_parent=True):
    """Return every need that passes the given filter string.

    An empty or None filter string passes all needs through unchanged.
    Needs for which the filter raises are logged and skipped.

    :param list needs: needs to filter
    :param str filter_string: filter expression evaluated per need
    :param bool filter_parts: kept for interface compatibility (unused here)
    :param bool merge_part_with_parent: kept for interface compatibility (unused here)
    :return list: needs matching the filter
    """
    if filter_string == "" or filter_string is None:
        return needs
    matches = []
    for candidate in needs:
        try:
            if filter_single_need(candidate, filter_string):
                matches.append(candidate)
        except Exception as err:
            logger.warning("Filter {0} not valid: Error: {1}".format(filter_string, err))
    return matches
Filters given needs based on a given filter string . Returns all needs which pass the given filter .
46,763
def get_logging_level(debug):
    """Return logging.DEBUG when *debug* is truthy, logging.INFO otherwise.

    :param bool debug: enable debug-level logging
    :return int: a logging level constant
    """
    return logging.DEBUG if debug else logging.INFO
Returns logging level based on boolean
46,764
def run_spec(spec,
             benchmark_hosts,
             result_hosts=None,
             output_fmt=None,
             logfile_info=None,
             logfile_result=None,
             action=None,
             fail_if=None,
             sample_mode='reservoir'):
    """Run a spec file, executing its statements against *benchmark_hosts*.

    Sets up a Logger context for the duration of the run and delegates the
    actual execution to do_run_spec().
    """
    logger_cfg = dict(output_fmt=output_fmt,
                      logfile_info=logfile_info,
                      logfile_result=logfile_result)
    with Logger(**logger_cfg) as log:
        do_run_spec(spec=spec,
                    benchmark_hosts=benchmark_hosts,
                    log=log,
                    result_hosts=result_hosts,
                    action=action,
                    fail_if=fail_if,
                    sample_mode=sample_mode)
Run a spec file executing the statements on the benchmark_hosts .
46,765
def get_sections ( need_info ) :
    """Collect the hierarchy of section titles enclosing a need.

    Walks from the need's target node up through its parents, recording the
    title of every section node on the way (innermost first).

    :param dict need_info: need data containing 'target_node'
    :return list: section titles from the need's section up to the document root
    """
    sections = [ ]
    current_node = need_info [ 'target_node' ]
    while current_node :
        if isinstance ( current_node , nodes . section ) :
            title = current_node . children [ 0 ] . astext ( )
            # Normalise non-breaking spaces that docutils may put in titles.
            title = NON_BREAKING_SPACE . sub ( ' ' , title )
            sections . append ( title )
        current_node = getattr ( current_node , 'parent' , None )
    return sections
Gets the hierarchy of the section nodes as a list starting at the section of the current need and then its parent sections
46,766
def add_sections(app, doctree, fromdocname):
    """Attach section titles to every collected need.

    Stores the full section hierarchy under 'sections' and the innermost
    title under 'section_name' so both can be used in tables and filters.
    """
    all_needs = getattr(app.builder.env, 'needs_all_needs', {})
    for need_info in all_needs.values():
        section_titles = get_sections(need_info)
        need_info['sections'] = section_titles
        need_info['section_name'] = section_titles[0] if section_titles else ""
Add section titles to the needs as additional attributes that can be used in tables and filters
46,767
def create_back_links(env):
    """Create back-links ("links_back") for all collected needs.

    Runs only once per build: a flag in env.needs_workflow guards against
    repeated execution, since all needs are already collected when this is
    called. A link of the form "<id>.<part>" also back-links the need part.

    :param env: Sphinx build environment carrying needs_all_needs/needs_workflow
    :return None:
    """
    if env.needs_workflow['backlink_creation']:
        return
    needs = env.needs_all_needs
    for key, need in needs.items():
        for link in need["links"]:
            # A link may address a need part: "<need_id>.<part_id>".
            link_main = link.split('.')[0]
            try:
                link_part = link.split('.')[1]
            except IndexError:
                link_part = None
            if link_main in needs:
                if key not in needs[link_main]["links_back"]:
                    needs[link_main]["links_back"].append(key)
                if link_part is not None and link_part in needs[link_main]['parts']:
                    part = needs[link_main]['parts'][link_part]
                    if 'links_back' not in part:
                        part['links_back'] = []
                    # BUG FIX: part back-links were appended unconditionally,
                    # unlike main-need back-links, allowing duplicates.
                    if key not in part['links_back']:
                        part['links_back'].append(key)
    env.needs_workflow['backlink_creation'] = True
Create back - links in all found needs . But do this only once as all needs are already collected and this sorting is for all needs and not only for the ones of the current document .
46,768
def _fix_list_dyn_func ( list ) : open_func_string = False new_list = [ ] for element in list : if '[[' in element : open_func_string = True new_link = [ element ] elif ']]' in element : new_link . append ( element ) open_func_string = False element = "," . join ( new_link ) new_list . append ( element ) elif open_func_string : new_link . append ( element ) else : new_list . append ( element ) return new_list
This searches a list for dynamic function fragments which may have been cut by generic searches for | ; .
46,769
def merge_extra_options(self, needs_info):
    """Add any extra options introduced via options_ext to *needs_info*.

    Options present in self.options but missing from needs_info are copied
    over; every declared option in self.option_spec is then guaranteed to
    exist, defaulting to an empty string.

    :param dict needs_info: need data to enrich in place
    :return set: the option keys copied from self.options
    """
    extra_keys = set(self.options.keys()) - set(needs_info.keys())
    for extra in extra_keys:
        needs_info[extra] = self.options[extra]
    for declared in self.option_spec:
        needs_info.setdefault(declared, "")
    return extra_keys
Add any extra options introduced via options_ext to needs_info
46,770
def merge_global_options(self, needs_info):
    """Add all globally defined options to *needs_info*.

    Values already present in needs_info win over the global defaults.

    :param dict needs_info: need data to enrich in place
    :return None:
    """
    global_options = getattr(self.env.app.config, 'needs_global_options', None)
    if global_options is None:
        return
    for option, default in global_options.items():
        if option not in needs_info:
            needs_info[option] = default
Add all global defined options to needs_info
46,771
def process_needlist ( app , doctree , fromdocname ) :
    """Replace every Needlist placeholder node with the rendered list of needs.

    Each matching need becomes one line, optionally showing status/tags, with a
    back-reference link to its original location unless the need is hidden.

    :param app: Sphinx application
    :param doctree: document tree being resolved
    :param str fromdocname: docname used for relative link generation
    """
    env = app . builder . env
    for node in doctree . traverse ( Needlist ) :
        if not app . config . needs_include_needs :
            # Needs output disabled: strip identifying attributes and drop the node.
            for att in ( 'ids' , 'names' , 'classes' , 'dupnames' ) :
                node [ att ] = [ ]
            node . replace_self ( [ ] )
            continue
        id = node . attributes [ "ids" ] [ 0 ]
        current_needfilter = env . need_all_needlists [ id ]
        all_needs = env . needs_all_needs
        content = [ ]
        all_needs = list ( all_needs . values ( ) )
        if current_needfilter [ "sort_by" ] is not None :
            if current_needfilter [ "sort_by" ] == "id" :
                all_needs = sorted ( all_needs , key = lambda node : node [ "id" ] )
            elif current_needfilter [ "sort_by" ] == "status" :
                all_needs = sorted ( all_needs , key = status_sorter )
        found_needs = procces_filters ( all_needs , current_needfilter )
        line_block = nodes . line_block ( )
        for need_info in found_needs :
            para = nodes . line ( )
            description = "%s: %s" % ( need_info [ "id" ] , need_info [ "title" ] )
            if current_needfilter [ "show_status" ] and need_info [ "status" ] is not None :
                description += " (%s)" % need_info [ "status" ]
            if current_needfilter [ "show_tags" ] and need_info [ "tags" ] is not None :
                description += " [%s]" % "; " . join ( need_info [ "tags" ] )
            title = nodes . Text ( description , description )
            if not need_info [ "hide" ] :
                # Visible needs get a clickable back-reference to their location.
                ref = nodes . reference ( '' , '' )
                ref [ 'refdocname' ] = need_info [ 'docname' ]
                ref [ 'refuri' ] = app . builder . get_relative_uri ( fromdocname , need_info [ 'docname' ] )
                ref [ 'refuri' ] += '#' + need_info [ 'target_node' ] [ 'refid' ]
                ref . append ( title )
                para += ref
            else :
                para += title
            line_block . append ( para )
        content . append ( line_block )
        if len ( content ) == 0 :
            content . append ( no_needs_found_paragraph ( ) )
        if current_needfilter [ "show_filters" ] :
            content . append ( used_filter_paragraph ( current_needfilter ) )
        node . replace_self ( content )
Replace all needlist nodes with a list of the collected needs . Augment each need with a backlink to the original location .
46,772
def get_lines(filename: str) -> Iterator[str]:
    """Yield the lines of a UTF-8 encoded file.

    Files ending in '.gz' are transparently decompressed.

    :param filename: path of the (optionally gzipped) text file
    :return: iterator over the file's lines (newline characters kept)
    """
    if not filename.endswith('.gz'):
        with open(filename, 'r', encoding='utf-8') as fh:
            yield from fh
    else:
        # gzip yields bytes; decode each line explicitly.
        with gzip.open(filename, 'r') as fh:
            for raw in fh:
                yield raw.decode('utf-8')
Create an iterator that returns the lines of a utf - 8 encoded file .
46,773
def as_statements(lines: Iterator[str]) -> Iterator[str]:
    """Combine text lines into ';'-terminated SQL statements.

    Blank lines and '--' comment lines are skipped; lines are joined with a
    single space and trailing semicolons removed. A trailing statement
    without a ';' is still yielded.

    :param lines: iterator of raw text lines
    :return: iterator of assembled SQL statements
    """
    buffered = []
    for raw in lines:
        if not raw:
            continue
        line = raw.strip()
        if not line or line.startswith('--'):
            continue
        buffered.append(line.rstrip(';'))
        if line.endswith(';'):
            yield ' '.join(buffered)
            buffered = []
    if buffered:
        yield ' '.join(buffered)
Create an iterator that transforms lines into sql statements .
46,774
def break_iterable(iterable, pred):
    """Split *iterable* into lists at every item matching *pred*.

    Separator items themselves are dropped; a trailing (possibly empty)
    sublist is always yielded.

    :param iterable: items to split
    :param pred: callable deciding whether an item is a separator
    :return: generator of lists
    """
    current = []
    for item in iterable:
        if not pred(item):
            current.append(item)
        else:
            yield current
            current = []
    yield current
Break a iterable on the item that matches the predicate into lists .
46,775
def regenerate ( location = 'http://www.iana.org/assignments/language-subtag-registry' , filename = None , default_encoding = 'utf-8' ) :
    """Generate the languages Python module from the IANA subtag registry.

    Downloads the registry, parses all records of Type "language", and writes
    a module of ``("subtag", _(u"description"))`` tuples via TEMPLATE.
    (Python 2 module: uses urllib2.)

    :param str location: URL of the IANA language-subtag registry
    :param str filename: output path; defaults to languages.py next to this file
    :param str default_encoding: used when the HTTP response declares no charset
    """
    # Parenthesised remarks in descriptions are stripped out.
    paren = re . compile ( '\([^)]*\)' )
    data = urllib2 . urlopen ( location )
    if ( 'content-type' in data . headers and 'charset=' in data . headers [ 'content-type' ] ) :
        encoding = data . headers [ 'content-type' ] . split ( 'charset=' ) [ - 1 ]
    else :
        encoding = default_encoding
    content = data . read ( ) . decode ( encoding )
    languages = [ ]
    info = { }
    p = None
    for line in content . splitlines ( ) :
        if line == '%%' :
            # Record separator: keep the record only if it describes a language.
            if 'Type' in info and info [ 'Type' ] == 'language' :
                languages . append ( info )
            info = { }
        elif ':' not in line and p :
            # Continuation line: append to the previously parsed field.
            info [ p [ 0 ] ] = paren . sub ( '' , p [ 2 ] + line ) . strip ( )
        else :
            p = line . partition ( ':' )
            # Only the first occurrence of a field is kept.
            if not p [ 0 ] in info :
                info [ p [ 0 ] ] = paren . sub ( '' , p [ 2 ] ) . strip ( )
    languages_lines = map ( lambda x : '("%s", _(u"%s")),' % ( x [ 'Subtag' ] , x [ 'Description' ] ) , languages )
    if not filename :
        filename = os . path . join ( os . path . dirname ( os . path . realpath ( __file__ ) ) , 'languages.py' )
    f = codecs . open ( filename , 'w' , 'utf-8' )
    f . write ( TEMPLATE % { 'languages' : '\n ' . join ( languages_lines ) , } )
    f . close ( )
Generate the languages Python module .
46,776
def newton(power_sum, elementary_symmetric_polynomial, order):
    r"""Fill out the shorter of two polynomial-coefficient lists.

    Given the power sums of a polynomial and the elementary symmetric
    polynomials of its roots (Viete's formulas), use Newton's identities to
    derive the shorter list from the longer one, in place. When both lists
    have the same length nothing is done.

    :param list power_sum: power sums of the polynomial
    :param list elementary_symmetric_polynomial: Viete-form coefficients
    :param int order: polynomial order passed through to the update helpers
    """
    n_ps = len(power_sum)
    n_esp = len(elementary_symmetric_polynomial)
    if n_ps > n_esp:
        _update_elementary_symmetric_polynomial(power_sum, elementary_symmetric_polynomial, order)
    elif n_esp > n_ps:
        _update_power_sum(power_sum, elementary_symmetric_polynomial, order)
Given two lists of values — the first being the power sums of a polynomial, and the second being expressions of the roots of the polynomial as found by Viete's formulas — use information from the longer list to fill out the shorter list using Newton's identities.
46,777
def max_variants(composition):
    """Calculate the maximum number of isotopic variants for *composition*.

    Each element contributes count * max_neutron_shift(); the "H+" charge
    carrier and elements unknown to the periodic table are ignored.

    :param composition: mapping of element symbol -> atom count
    :return int: upper bound on the number of isotopic variants
    """
    total = 0
    for element, count in composition.items():
        if element == "H+":
            # The charge carrier contributes no isotopic variation.
            continue
        try:
            total += count * periodic_table[element].max_neutron_shift()
        except KeyError:
            # Unknown element symbols are silently skipped.
            pass
    return total
Calculates the maximum number of isotopic variants that could be produced by a composition .
46,778
def isotopic_variants(composition, npeaks=None, charge=0, charge_carrier=PROTON):
    """Compute a peak list for the theoretical isotopic cluster of *composition*.

    :param composition: mapping of element symbol -> atom count
    :param npeaks: number of peaks to include; when None a heuristic based on
        the maximum variant count is used (at least 3)
    :param charge: charge state of the cluster
    :param charge_carrier: mass of the charge carrier (defaults to a proton)
    :return: aggregated isotopic variant peak list
    """
    if npeaks is None:
        # Heuristic default: sqrt of the variant count minus two, floored at 3.
        npeaks = max(int(sqrt(max_variants(composition)) - 2), 3)
    else:
        # The distribution is sized by index, hence the off-by-one adjustment.
        npeaks -= 1
    return IsotopicDistribution(composition, npeaks).aggregated_isotopic_variants(
        charge, charge_carrier=charge_carrier)
Compute a peak list representing the theoretical isotopic cluster for composition .
46,779
def getnii_descr(fim):
    """Extract the custom 'descrip' NIfTI header field into a dictionary.

    The field is expected to hold ';'-separated 'key=value' pairs.

    :param fim: path of the NIfTI image
    :return dict: parsed key/value pairs; empty when the field is blank
    """
    hdr = nib.load(fim).header
    entries = hdr['descrip'].item().split(';')
    parsed = {}
    if entries[0] == '':
        return parsed
    for entry in entries:
        pair = entry.split('=')
        parsed[pair[0]] = pair[1]
    return parsed
Extracts the custom description header field to dictionary
46,780
def orientnii ( imfile ) :
    """Get the image orientation from the NIfTI sform matrix.

    Not fully functional yet: results are only printed, nothing is returned.
    (Python 2 module: print statement kept.)

    :param str imfile: path of the NIfTI image
    """
    strorient = [ 'L-R' , 'S-I' , 'A-P' ]
    niiorient = [ ]
    niixyz = np . zeros ( 3 , dtype = np . int8 )
    if os . path . isfile ( imfile ) :
        nim = nib . load ( imfile )
        pct = nim . get_data ( )
        A = nim . get_sform ( )
        for i in range ( 3 ) :
            # Dominant axis of each sform row decides the orientation label.
            niixyz [ i ] = np . argmax ( abs ( A [ i , : - 1 ] ) )
            niiorient . append ( strorient [ niixyz [ i ] ] )
        print niiorient
Get the orientation from NIfTI sform . Not fully functional yet .
46,781
def pick_t1w ( mri ) : if isinstance ( mri , dict ) : if 'T1N4' in mri and os . path . isfile ( mri [ 'T1N4' ] ) : ft1w = mri [ 'T1N4' ] elif 'T1bc' in mri and os . path . isfile ( mri [ 'T1bc' ] ) : ft1w = mri [ 'T1bc' ] elif 'T1nii' in mri and os . path . isfile ( mri [ 'T1nii' ] ) : ft1w = mri [ 'T1nii' ] elif 'T1DCM' in mri and os . path . exists ( mri [ 'MRT1W' ] ) : fnii = 'converted' call ( [ rs . DCM2NIIX , '-f' , fnii , mri [ 'T1nii' ] ] ) ft1nii = glob . glob ( os . path . join ( mri [ 'T1nii' ] , '*converted*.nii*' ) ) ft1w = ft1nii [ 0 ] else : print 'e> disaster: could not find a T1w image!' return None else : ( 'e> no correct input found for the T1w image' ) return None return ft1w
Pick the MR T1w from the dictionary for MR - > PET registration .
46,782
def list_dcm_datain(datain):
    """List all DICOM file paths referenced by the *datain* dictionary.

    Folder entries are scanned for files with a recognised DICOM extension;
    single-file entries are appended as-is. Order matches the original
    hand-written sequence of checks.

    :param dict datain: input-data dictionary
    :return list: DICOM file paths
    :raises ValueError: when datain is not a dictionary
    """
    if not isinstance(datain, dict):
        raise ValueError('The input is not a dictionary!')
    dcmlst = []
    # Folder entries: every file ending with the DICOM extension is collected.
    # (Refactored from five copy-pasted blocks into one data-driven loop.)
    for dirkey in ('mumapDCM', 'T1DCM', 'T2DCM', 'UTE1', 'UTE2'):
        if dirkey in datain:
            dcmdir = datain[dirkey]
            dcmlst += [os.path.join(dcmdir, d) for d in os.listdir(dcmdir) if d.endswith(dcmext)]
    # Single-file entries are appended directly.
    for filekey in ('lm_dcm', 'lm_ima', 'nrm_dcm', 'nrm_ima'):
        if filekey in datain:
            dcmlst += [datain[filekey]]
    return dcmlst
List all DICOM file paths in the datain dictionary of input data .
46,783
def find_cuda ( ) :
    """Locate the CUDA installation via the nvcc compiler on PATH.

    (Python 2 module: print statements kept.)

    :return: (cuda_path, lcuda_path) tuple, or None when nvcc is not found
    """
    # NOTE(review): with an empty PATH, cuda_path would be unbound below —
    # presumably PATH is never empty in practice; confirm.
    for fldr in os . environ [ 'PATH' ] . split ( os . pathsep ) :
        cuda_path = join ( fldr , 'nvcc' )
        if os . path . exists ( cuda_path ) :
            # nvcc lives in <cuda>/bin, so the root is two levels up.
            cuda_path = os . path . dirname ( os . path . dirname ( cuda_path ) )
            break
        cuda_path = None
    if cuda_path is None :
        print 'w> nvcc compiler could not be found from the PATH!'
        return None
    # Check the 64-bit CUDA library folder.
    lcuda_path = os . path . join ( cuda_path , 'lib64' )
    if 'LD_LIBRARY_PATH' in os . environ . keys ( ) :
        if lcuda_path in os . environ [ 'LD_LIBRARY_PATH' ] . split ( os . pathsep ) :
            print 'i> found CUDA lib64 in LD_LIBRARY_PATH: ' , lcuda_path
    elif os . path . isdir ( lcuda_path ) :
        print 'i> found CUDA lib64 in : ' , lcuda_path
    else :
        print 'w> folder for CUDA library (64-bit) could not be found!'
    return cuda_path , lcuda_path
Locate the CUDA environment on the system .
46,784
def resources_setup ( ) :
    """Install resources.py into the local NiftyPET folder and set up devices.

    Copies the bundled resources.py into the user's NiftyPET directory when
    it is missing, makes it importable, then runs the CUDA device setup.
    (Python 2 module: print statements kept.)

    :return: GPU architecture string from dev_setup()
    :raises IOError: when the bundled resources.py cannot be found
    """
    print 'i> installing file <resources.py> into home directory if it does not exist.'
    path_current = os . path . dirname ( os . path . realpath ( __file__ ) )
    path_install = os . path . join ( path_current , 'resources' )
    path_resources = path_niftypet_local ( )
    print path_current
    flg_resources = False
    if not os . path . exists ( path_resources ) :
        os . makedirs ( path_resources )
    if not os . path . isfile ( os . path . join ( path_resources , 'resources.py' ) ) :
        if os . path . isfile ( os . path . join ( path_install , 'resources.py' ) ) :
            shutil . copyfile ( os . path . join ( path_install , 'resources.py' ) , os . path . join ( path_resources , 'resources.py' ) )
        else :
            print 'e> could not fine file <resources.py> to be installed!'
            raise IOError ( 'could not find <resources.py' )
    else :
        print 'i> <resources.py> should be already in the local NiftyPET folder.' , path_resources
        flg_resources = True
    # Make the installed resources.py importable.
    sys . path . append ( path_resources )
    try :
        import resources
    except ImportError as ie :
        print '----------------------------'
        print 'e> Import Error: NiftyPET' 's resources file <resources.py> could not be imported. It should be in ' '~/.niftypet/resources.py' ' but likely it does not exists.'
        print '----------------------------'
    gpuarch = dev_setup ( )
    return gpuarch
This function checks CUDA devices selects some and installs resources . py
46,785
def coroutine ( f ) :
    """Decorator implementing a promise-driven coroutine.

    The wrapped function returns a Promise. If *f* returns a plain value the
    promise resolves immediately; if it returns a generator, each yielded
    promise is awaited and its result/rejection is fed back into the
    generator until it finishes.

    :param f: function (possibly a generator function) to wrap
    :return: function returning a Promise
    """
    @ functools . wraps ( f )
    def _coroutine ( * args , ** kwargs ) :
        def _resolver ( resolve , reject ) :
            try :
                generator = f ( * args , ** kwargs )
            except BaseException as e :
                reject ( e )
            else :
                if not isinstance ( generator , types . GeneratorType ) :
                    # Plain return value: resolve right away.
                    resolve ( generator )
                else :
                    def _step ( previous , previous_type ) :
                        # previous_type: None = first step, True = success value,
                        # False = rejection reason.
                        element = None
                        try :
                            if previous_type == None :
                                element = next ( generator )
                            elif previous_type :
                                element = generator . send ( previous )
                            else :
                                # Rejections are thrown into the generator,
                                # wrapped if they are not already exceptions.
                                if not isinstance ( previous , BaseException ) :
                                    previous = RejectedException ( previous )
                                element = generator . throw ( previous )
                        except StopIteration as e :
                            resolve ( getattr ( e , "value" , None ) )
                        except ReturnValueException as e :
                            resolve ( e . value )
                        except BaseException as e :
                            reject ( e )
                        else :
                            # Yielded element must be promise-like (have .then).
                            try :
                                element . then ( lambda value : _step ( value , True ) , lambda reason : _step ( reason , False ) )
                            except AttributeError :
                                reject ( InvalidCoroutineException ( element ) )
                    _step ( None , None )
        return Promise ( _resolver )
    return _coroutine
Implementation of a coroutine .
46,786
def no_coroutine ( f ) :
    """Decorator running a (possibly generator) function synchronously.

    Unlike coroutine(), yielded values are fed straight back into the
    generator without awaiting anything, so *f* runs to completion inline.

    :param f: function (possibly a generator function) to wrap
    :return: function returning f's final value
    """
    @ functools . wraps ( f )
    def _no_coroutine ( * args , ** kwargs ) :
        generator = f ( * args , ** kwargs )
        if not isinstance ( generator , types . GeneratorType ) :
            return generator
        previous = None
        first = True
        while True :
            element = None
            try :
                if first :
                    element = next ( generator )
                else :
                    element = generator . send ( previous )
            except StopIteration as e :
                # Generator finished: its return value is the result.
                return getattr ( e , "value" , None )
            except ReturnValueException as e :
                return e . value
            else :
                # Echo each yielded value back into the generator.
                previous = element
                first = False
    return _no_coroutine
This is not a coroutine ; )
46,787
def maybe_coroutine(decide):
    """Decorator factory: run as a coroutine or synchronously, per call.

    *decide* is invoked with the call's arguments; a truthy result selects
    the coroutine() wrapper, a falsy one the no_coroutine() wrapper.

    :param decide: callable(*args, **kwargs) -> bool
    :return: decorator applying the chosen wrapper on each call
    """
    def _maybe_coroutine(f):
        @functools.wraps(f)
        def __maybe_coroutine(*args, **kwargs):
            wrapper = coroutine(f) if decide(*args, **kwargs) else no_coroutine(f)
            return wrapper(*args, **kwargs)
        return __maybe_coroutine
    return _maybe_coroutine
Either be a coroutine or not .
46,788
def makeCallbackPromise(function, *args, **kwargs):
    """Wrap a callback-style *function* into a Promise.

    *function* must accept a callback(success, result) as its first argument;
    the returned Promise resolves with result on success and rejects with it
    otherwise.

    :param function: callable taking (callback, *args, **kwargs)
    :return: Promise settled by the callback
    """
    def _resolver(resolve, reject):
        def _callback(success, result):
            if success:
                resolve(result)
            else:
                reject(result)
        function(_callback, *args, **kwargs)
    return Promise(_resolver)
Take a function that reports its result using a callback and return a Promise that listenes for this callback .
46,789
def calculate_mass(composition, mass_data=None):
    """Calculate the monoisotopic mass of *composition*.

    Element keys of the form "X[n]" address the specific isotope *n* of
    element X instead of the monoisotopic default.

    :param composition: mapping of element (or "X[n]") -> atom count
    :param mass_data: isotope mass table; defaults to nist_mass
    :return float: monoisotopic mass
    :raises KeyError: for element keys absent from mass_data
    """
    if mass_data is None:
        mass_data = nist_mass
    total = 0.0
    for element, count in composition.items():
        try:
            total += count * mass_data[element][0][0]
        except KeyError:
            # Try the "X[n]" isotope notation before giving up.
            match = re.search(r"(\S+)\[(\d+)\]", element)
            if not match:
                raise
            total += count * mass_data[match.group(1)][int(match.group(2))][0]
    return total
Calculates the monoisotopic mass of a composition
46,790
def login ( self , username = None , password = None ) :
    """Execute a Skybell login and cache the resulting access token.

    :param str username: overrides the stored username when given
    :param str password: overrides the stored password when given
    :return bool: True on success
    :raises SkybellAuthenticationException: for missing credentials or a
        failed login request
    """
    if username is not None :
        self . _username = username
    if password is not None :
        self . _password = password
    if self . _username is None or not isinstance ( self . _username , str ) :
        raise SkybellAuthenticationException ( ERROR . USERNAME )
    if self . _password is None or not isinstance ( self . _password , str ) :
        raise SkybellAuthenticationException ( ERROR . PASSWORD )
    # Drop any stale token before attempting a fresh login.
    self . update_cache ( { CONST . ACCESS_TOKEN : None } )
    login_data = { 'username' : self . _username , 'password' : self . _password , 'appId' : self . cache ( CONST . APP_ID ) , CONST . TOKEN : self . cache ( CONST . TOKEN ) }
    try :
        # retry=False: avoid send_request re-entering login() on failure.
        response = self . send_request ( 'post' , CONST . LOGIN_URL , json_data = login_data , retry = False )
    except Exception as exc :
        raise SkybellAuthenticationException ( ERROR . LOGIN_FAILED , exc )
    _LOGGER . debug ( "Login Response: %s" , response . text )
    response_object = json . loads ( response . text )
    self . update_cache ( { CONST . ACCESS_TOKEN : response_object [ CONST . ACCESS_TOKEN ] } )
    _LOGGER . info ( "Login successful" )
    return True
Execute Skybell login .
46,791
def logout(self):
    """Explicitly log out of Skybell.

    Discards the HTTP session, the cached device list and the cached access
    token. Always returns ``True``, even when no token was cached.
    """
    if not self.cache(CONST.ACCESS_TOKEN):
        return True
    # Fresh session so no stale cookies/connections survive the logout.
    self._session = requests.session()
    self._devices = None
    self.update_cache({CONST.ACCESS_TOKEN: None})
    return True
Explicit Skybell logout .
46,792
def send_request(self, method, url, headers=None, json_data=None,
                 retry=True):
    """Send an authenticated HTTP request to Skybell.

    Logs in first when no access token is cached (unless this *is* the
    login request). On a failed or >=400 response, retries once after
    re-authenticating when *retry* is true; otherwise raises
    SkybellException.
    """
    if not self.cache(CONST.ACCESS_TOKEN) and url != CONST.LOGIN_URL:
        # No token yet and this is not the login call itself.
        self.login()

    headers = headers if headers else {}

    token = self.cache(CONST.ACCESS_TOKEN)
    if token:
        headers['Authorization'] = 'Bearer ' + token

    headers.update({
        'user-agent':
            ('SkyBell/3.4.1 (iPhone9,2; iOS 11.0; loc=en_US; lang=en-US) '
             'com.skybell.doorbell/1'),
        'content-type': 'application/json',
        'accepts': '*/*',
        'x-skybell-app-id': self.cache(CONST.APP_ID),
        'x-skybell-client-id': self.cache(CONST.CLIENT_ID),
    })

    _LOGGER.debug("HTTP %s %s Request with headers: %s",
                  method, url, headers)

    try:
        request_method = getattr(self._session, method)
        response = request_method(url, headers=headers, json=json_data)
        _LOGGER.debug("%s %s", response, response.text)
        if response and response.status_code < 400:
            return response
    except RequestException as exc:
        _LOGGER.warning("Skybell request exception: %s", exc)

    if retry:
        # One re-authentication attempt, then a single non-retrying resend.
        self.login()
        return self.send_request(method, url, headers, json_data, False)

    raise SkybellException(ERROR.REQUEST, "Retry failed")
Send requests to Skybell .
46,793
def update_cache(self, data):
    """Merge *data* into the in-memory cache and persist the result.

    # NOTE(review): UTILS.update presumably performs a recursive merge
    # rather than a flat dict.update — confirm against UTILS.
    """
    UTILS.update(self._cache, data)
    # Write the updated cache back to its backing store.
    self._save_cache()
Update a cached value .
46,794
def dev_cache(self, device, key=None):
    """Return the cached record for *device*, optionally a single *key*.

    Returns ``None`` when the device has no cache entry. When *key* is
    given and an entry exists, returns ``entry.get(key)``; otherwise the
    whole entry.
    """
    devices_cache = self._cache.get(CONST.DEVICES, {})
    entry = devices_cache.get(device.device_id)
    if not entry or not key:
        # Nothing cached, or the caller wants the full record.
        return entry
    return entry.get(key)
Get a cached value for a device .
46,795
def update_dev_cache(self, device, data):
    """Merge *data* into the cached entry for *device* and persist it."""
    # Nest the data under CONST.DEVICES keyed by the device id;
    # update_cache handles the merge and the save step.
    self.update_cache({CONST.DEVICES: {device.device_id: data}})
Update cached values for a device .
46,796
def deleteInactiveDevicesByQuota(self, per_jid_max=15, global_max=0):
    """Delete inactive devices on an LRU basis once a quota is exceeded.

    per_jid_max caps the number of inactive devices kept per bare JID;
    global_max caps the total number kept across all JIDs. A value below 1
    disables that quota; if both are below 1 this is a no-op. Deleting a
    device also deletes its session (see __deleteInactiveDevices), so a
    returning device's messages can no longer be decrypted.

    # NOTE(review): written as a yield-based coroutine (Twisted
    # inlineCallbacks style, presumably) — every storage call is yielded.
    """
    if per_jid_max < 1 and global_max < 1:
        return

    # Normalize "disabled" quotas to None so the checks below are explicit.
    if per_jid_max < 1:
        per_jid_max = None
    if global_max < 1:
        global_max = None

    bare_jids = yield self._storage.listJIDs()

    if not per_jid_max == None:
        # Per-JID quota: trim each JID's inactive devices independently.
        for bare_jid in bare_jids:
            devices = yield self.__loadInactiveDevices(bare_jid)

            if len(devices) > per_jid_max:
                # Sort by inactivity timestamp (oldest first) and keep only
                # the per_jid_max most recent; everything before them goes.
                devices = sorted(devices.items(),
                                 key=lambda device: device[1])
                devices = devices[:-per_jid_max]
                # Reduce the (device, timestamp) pairs to device ids.
                devices = list(map(lambda device: device[0], devices))

                yield self.__deleteInactiveDevices(bare_jid, devices)

    if not global_max == None:
        # Global quota: collect (jid, device, timestamp) across all JIDs.
        all_inactive_devices = []

        for bare_jid in bare_jids:
            devices = yield self.__loadInactiveDevices(bare_jid)
            all_inactive_devices.extend(map(
                lambda device: (bare_jid, device[0], device[1]),
                devices.items()))

        if len(all_inactive_devices) > global_max:
            # Oldest first; drop everything except the global_max newest.
            devices = sorted(all_inactive_devices,
                             key=lambda device: device[2])
            devices = devices[:-global_max]

            # Group the victims by JID so deletion happens per JID.
            delete_devices = {}
            for device in devices:
                bare_jid = device[0]
                device_id = device[1]

                delete_devices[bare_jid] = delete_devices.get(bare_jid, [])
                delete_devices[bare_jid].append(device_id)

            for bare_jid, devices in delete_devices.items():
                yield self.__deleteInactiveDevices(bare_jid, devices)
Delete inactive devices by setting a quota. With per_jid_max you can define the number of inactive devices that are kept for each jid; with global_max you can define a global maximum for inactive devices. If any of the quotas is reached, inactive devices are deleted on an LRU basis. This also deletes the corresponding sessions, so if a device becomes active again and tries to send you an encrypted message, you will not be able to decrypt it.
46,797
def deleteInactiveDevicesByAge(self, age_days):
    """Delete devices that have been inactive for at least *age_days* days.

    Values of *age_days* below 1 are ignored (no-op), so devices inactive
    for less than a day can never be deleted. Deleting a device also
    deletes its session, so a returning device's encrypted messages can no
    longer be decrypted.
    """
    if age_days < 1:
        return

    reference_time = time.time()
    bare_jids = yield self._storage.listJIDs()

    for bare_jid in bare_jids:
        inactive = yield self.__loadInactiveDevices(bare_jid)

        # Collect every device whose inactivity span meets the age limit.
        expired = [
            device
            for device, timestamp in list(inactive.items())
            if ((((reference_time - timestamp) / 60) / 60) / 24) >= age_days
        ]

        if len(expired) > 0:
            yield self.__deleteInactiveDevices(bare_jid, expired)
Delete all inactive devices from the device list storage and cache that are older than a given number of days. This also deletes the corresponding sessions, so if a device becomes active again and tries to send you an encrypted message, you will not be able to decrypt it. You are not allowed to delete inactive devices that were inactive for less than a day; thus the minimum value for age_days is 1.
46,798
def runInactiveDeviceCleanup(self):
    """Run both quota-based and age-based inactive-device cleanup.

    Uses the limits configured at construction time
    (__inactive_per_jid_max, __inactive_global_max, __inactive_max_age).
    """
    yield self.deleteInactiveDevicesByQuota(
        self.__inactive_per_jid_max,
        self.__inactive_global_max)
    yield self.deleteInactiveDevicesByAge(self.__inactive_max_age)
Runs both the deleteInactiveDevicesByAge and the deleteInactiveDevicesByQuota methods with the configuration that was set when calling create .
46,799
def get_setup(Cnt=None):
    """Return a dictionary of GPU, mu-map hardware and third-party set-up.

    Parameters
    ----------
    Cnt : dict, optional
        Existing constants dictionary to extend; a fresh one is created
        when omitted.

    Returns
    -------
    dict
        *Cnt* populated with tool paths, the hardware mu-map list and GPU
        constants from this module's globals.
    """
    # BUG FIX: the original signature was `Cnt={}` — a mutable default
    # argument that is shared and mutated across calls, so every
    # no-argument call would see (and extend) the same dict. A None
    # sentinel keeps the interface backward-compatible without the
    # shared-state hazard.
    if Cnt is None:
        Cnt = {}

    Cnt['DIRTOOLS'] = DIRTOOLS
    Cnt['CMAKE_TLS_PAR'] = CMAKE_TLS_PAR
    Cnt['HMULIST'] = hrdwr_mu
    Cnt['MSVC_VRSN'] = MSVC_VRSN
    Cnt = get_gpu_constants(Cnt)

    # Optional path settings: copied only when the module-level variable
    # exists and is non-empty (same guard the original repeated six times).
    for name in ('PATHTOOLS', 'RESPATH', 'REGPATH', 'DCM2NIIX', 'HMUDIR',
                 'VINCIPATH'):
        if name in globals() and globals()[name] != '':
            Cnt[name] = globals()[name]

    Cnt['ENBLXNAT'] = ENBLXNAT
    Cnt['ENBLAGG'] = ENBLAGG
    Cnt['CMPL_DCM2NIIX'] = CMPL_DCM2NIIX

    return Cnt
Return a dictionary of GPU, mu-map hardware and third-party set-up.