idx (int64, 0–63k) | question (string, lengths 53–5.28k) | target (string, lengths 5–805) |
|---|---|---|
41,100 | def points_awarded ( target = None , source = None , since = None ) : lookup_params = { } if target is not None : if isinstance ( target , get_user_model ( ) ) : lookup_params [ "target_user" ] = target else : lookup_params . update ( { "target_content_type" : ContentType . objects . get_for_model ( target ) , "target_object_id" : target . pk , } ) if source is not None : if isinstance ( source , get_user_model ( ) ) : lookup_params [ "source_user" ] = source else : lookup_params . update ( { "source_content_type" : ContentType . objects . get_for_model ( source ) , "source_object_id" : source . pk , } ) if since is None : if target is not None and source is None : try : return TargetStat . objects . get ( ** lookup_params ) . points except TargetStat . DoesNotExist : return 0 else : return AwardedPointValue . points_awarded ( ** lookup_params ) else : lookup_params [ "timestamp__gte" ] = since return AwardedPointValue . points_awarded ( ** lookup_params ) | Determine how many points the given target has received . |
41,101 | def __validate_dates ( start_date , end_date ) : try : start_date = datetime . datetime . strptime ( start_date , '%Y-%m-%d' ) end_date = datetime . datetime . strptime ( end_date , '%Y-%m-%d' ) except ValueError : raise ValueError ( "Incorrect data format, should be yyyy-mm-dd" ) if ( end_date - start_date ) . days > 366 : raise ValueError ( "The difference between start and end date " + "should be less than or equal to 366 days." ) if ( end_date - start_date ) . days < 0 : raise ValueError ( "End date cannot be before start date." ) | Validate start and end date strings . |
41,102 | def __yahoo_request ( query ) : query = quote ( query ) url = 'https://query.yahooapis.com/v1/public/yql?q=' + query + '&format=json&env=store://datatables.org/alltableswithkeys' response = urlopen ( url ) . read ( ) return json . loads ( response . decode ( 'utf-8' ) ) [ 'query' ] [ 'results' ] | Request Yahoo Finance information . |
41,103 | def request_quotes ( tickers_list , selected_columns = [ '*' ] ) : __validate_list ( tickers_list ) __validate_list ( selected_columns ) query = 'select {cols} from yahoo.finance.quotes where symbol in ({vals})' query = query . format ( cols = ', ' . join ( selected_columns ) , vals = ', ' . join ( '"{0}"' . format ( s ) for s in tickers_list ) ) response = __yahoo_request ( query ) if not response : raise RequestError ( 'Unable to process the request. Check if the ' + 'columns selected are valid.' ) if not type ( response [ 'quote' ] ) is list : return [ response [ 'quote' ] ] return response [ 'quote' ] | Request Yahoo Finance recent quotes . |
41,104 | def request_historical ( ticker , start_date , end_date ) : __validate_dates ( start_date , end_date ) cols = [ 'Date' , 'Open' , 'High' , 'Low' , 'Close' , 'Volume' , 'Adj_Close' ] query = 'select {cols} from yahoo.finance.historicaldata ' + 'where symbol in ("{ticker}") and startDate = "{start_date}" ' + 'and endDate = "{end_date}"' query = query . format ( cols = ', ' . join ( cols ) , ticker = ticker , start_date = start_date , end_date = end_date ) response = __yahoo_request ( query ) if not response : raise RequestError ( 'Unable to process the request. Check if the ' + 'stock ticker used is a valid one.' ) if not type ( response [ 'quote' ] ) is list : return [ response [ 'quote' ] ] return response [ 'quote' ] | Get stock s daily historical information . |
41,105 | def download_historical ( tickers_list , output_folder ) : __validate_list ( tickers_list ) for ticker in tickers_list : file_name = os . path . join ( output_folder , ticker + '.csv' ) with open ( file_name , 'wb' ) as f : base_url = 'http://real-chart.finance.yahoo.com/table.csv?s=' try : urlopen ( base_url + ticker ) urlretrieve ( base_url + ticker , f . name ) except : os . remove ( file_name ) raise RequestError ( 'Unable to process the request. Check if ' + ticker + ' is a valid stock ticker' ) | Download historical data from Yahoo Finance . |
41,106 | def load ( self , file_name ) : new_rundata = self . loader ( file_name ) new_rundata = self . inspect ( new_rundata ) return new_rundata | Load a raw data - file |
41,107 | def datetime2ole ( dt ) : delta = dt - OLE_TIME_ZERO delta_float = delta / datetime . timedelta ( days = 1 ) return delta_float | converts from datetime object to ole datetime float |
41,108 | def loader ( self , file_name , bad_steps = None , ** kwargs ) : new_tests = [ ] if not os . path . isfile ( file_name ) : self . logger . info ( "Missing file_\n %s" % file_name ) return None filesize = os . path . getsize ( file_name ) hfilesize = humanize_bytes ( filesize ) txt = "Filesize: %i (%s)" % ( filesize , hfilesize ) self . logger . debug ( txt ) temp_dir = tempfile . gettempdir ( ) temp_filename = os . path . join ( temp_dir , os . path . basename ( file_name ) ) shutil . copy2 ( file_name , temp_dir ) self . logger . debug ( "tmp file: %s" % temp_filename ) self . logger . debug ( "HERE WE LOAD THE DATA" ) data = DataSet ( ) fid = FileID ( file_name ) test_no = 1 data . test_no = test_no data . loaded_from = file_name data . channel_index = None data . channel_number = None data . creator = None data . item_ID = None data . schedule_file_name = None data . start_datetime = None data . test_ID = None data . test_name = None data . raw_data_files . append ( fid ) self . logger . debug ( "reading raw-data" ) self . mpr_data = None self . mpr_log = None self . mpr_settings = None self . _load_mpr_data ( temp_filename , bad_steps ) length_of_test = self . mpr_data . shape [ 0 ] self . logger . debug ( f"length of test: {length_of_test}" ) self . logger . debug ( "renaming columns" ) self . _rename_headers ( ) summary_df = self . _create_summary_data ( ) if summary_df . empty : txt = "\nCould not find any summary (stats-file)!" txt += " (summary_df.empty = True)" txt += "\n -> issue make_summary(use_cellpy_stat_file=False)" warnings . warn ( txt ) data . dfsummary = summary_df data . dfdata = self . mpr_data data . raw_data_files_length . append ( length_of_test ) new_tests . append ( data ) self . _clean_up ( temp_filename ) return new_tests | Loads data from biologics . mpr files . |
41,109 | def csv_dumper ( ** kwargs ) : logging . info ( "dumping to csv" ) barn = kwargs [ "barn" ] farms = kwargs [ "farms" ] experiments = kwargs [ "experiments" ] for experiment , farm in zip ( experiments , farms ) : name = experiment . journal . name project = experiment . journal . project project_dir , batch_dir , raw_dir = experiment . journal . paginate ( ) if batch_dir is None : logging . info ( "have to generate folder-name on the fly" ) out_data_dir , project_dir , batch_dir , raw_dir = generate_folder_names ( name , project ) if barn == "batch_dir" : out_dir = batch_dir elif barn == "project_dir" : out_dir = project_dir elif barn == "raw_dir" : out_dir = raw_dir else : out_dir = barn for animal in farm : file_name = os . path . join ( out_dir , "summary_%s_%s.csv" % ( animal . name , name ) ) logging . info ( f"> {file_name}" ) animal . to_csv ( file_name , sep = prms . Reader . sep ) | dump data to csv |
41,110 | def ram_dumper ( ** kwargs ) : logging . debug ( "trying to save stuff in memory" ) farms = kwargs [ "farms" ] experiments = kwargs [ "experiments" ] engine = kwargs [ "engine" ] try : engine_name = engine . __name__ except AttributeError : engine_name = engine . __dict__ . __name__ accepted_engines = [ "summary_engine" , ] if engine_name in accepted_engines : logging . debug ( "found the engine that I will try to dump from: " f"{engine_name}" ) for experiment , farm in zip ( experiments , farms ) : name = experiment . journal . name project = experiment . journal . project experiment . memory_dumped [ engine_name ] = farm logging . debug ( f"farm put into memory_dumped ({project}::{name})" ) | Dump data to memory for later usage . |
41,111 | def screen_dumper ( ** kwargs ) : farms = kwargs [ "farms" ] engine = kwargs [ "engine" ] logging . info ( "dumping to screen" ) print ( f"\n[Screen dumper] ({engine})" ) try : if len ( farms ) == 1 : print ( f"You have one farm with little pandas." ) else : print ( f"You have {len(farms)} farms with little pandas." ) except TypeError : print ( " - your farm has burned to the ground." ) else : for number , farm in enumerate ( farms ) : print ( f"[#{number+1}]You have {len(farm)} " f"little pandas in this farm." ) for animal in farm : print ( 80 * "=" ) try : print ( animal . name ) except AttributeError : print ( "no-name" ) print ( 80 * "-" ) print ( animal . head ( 5 ) ) print ( ) | Dump data to screen . |
41,112 | def create_legend ( info , c , option = "clean" , use_index = False ) : logging . debug ( " - creating legends" ) mass , loading , label = info . loc [ c , [ "masses" , "loadings" , "labels" ] ] if use_index or not label : label = c . split ( "_" ) label = "_" . join ( label [ 1 : ] ) if option == "clean" : return label if option == "mass" : label = f"{label} ({mass:.2f} mg)" elif option == "loading" : label = f"{label} ({loading:.2f} mg/cm2)" elif option == "all" : label = f"{label} ({mass:.2f} mg) ({loading:.2f} mg/cm2)" return label | creating more informative legends |
41,113 | def create_plot_option_dicts ( info , marker_types = None , colors = None , line_dash = None , size = None ) : logging . debug ( " - creating plot-options-dict (for bokeh)" ) if marker_types is None : marker_types = [ "circle" , "square" , "triangle" , "invertedtriangle" , "diamond" , "cross" , "asterix" ] if line_dash is None : line_dash = [ 0 , 0 ] if size is None : size = 10 groups = info . groups . unique ( ) number_of_groups = len ( groups ) if colors is None : if number_of_groups < 4 : colors = bokeh . palettes . brewer [ 'YlGnBu' ] [ 3 ] else : colors = bokeh . palettes . brewer [ 'YlGnBu' ] [ min ( 9 , number_of_groups ) ] sub_groups = info . sub_groups . unique ( ) marker_it = itertools . cycle ( marker_types ) colors_it = itertools . cycle ( colors ) group_styles = dict ( ) sub_group_styles = dict ( ) for j in groups : color = next ( colors_it ) marker_options = { "line_color" : color , "fill_color" : color , } line_options = { "line_color" : color , } group_styles [ j ] = { "marker" : marker_options , "line" : line_options , } for j in sub_groups : marker_type = next ( marker_it ) marker_options = { "marker" : marker_type , "size" : size , } line_options = { "line_dash" : line_dash , } sub_group_styles [ j ] = { "marker" : marker_options , "line" : line_options , } return group_styles , sub_group_styles | Create two dictionaries with plot - options . |
41,114 | def summary_plotting_engine ( ** kwargs ) : logging . debug ( f"Using {prms.Batch.backend} for plotting" ) experiments = kwargs [ "experiments" ] farms = kwargs [ "farms" ] barn = None logging . debug ( " - summary_plot_engine" ) farms = _preparing_data_and_plotting ( experiments = experiments , farms = farms ) return farms , barn | creates plots of summary data . |
41,115 | def _read ( name ) : logging . debug ( "Reading config-file: %s" % name ) try : with open ( name , "r" ) as config_file : prm_dict = yaml . load ( config_file ) except yaml . YAMLError : raise yaml . YAMLErrorr else : return prm_dict | read the yml file |
41,116 | def cycles_engine ( ** kwargs ) : logging . info ( "cycles_engine:" ) logging . info ( "Not ready for production" ) experiments = kwargs [ "experiments" ] farms = [ ] barn = "raw_dir" for experiment in experiments : farms . append ( [ ] ) if experiment . all_in_memory : logging . debug ( "all in memory" ) for key in experiment . cell_data_frames : logging . debug ( f"extracting cycles from {key}" ) else : logging . debug ( "dont have it in memory - need to lookup in the files" ) for key in experiment . cell_data_frames : logging . debug ( f"looking up cellpyfile for {key}" ) return farms , barn | engine to extract cycles |
41,117 | def raw_data_engine ( ** kwargs ) : logger . debug ( "cycles_engine" ) raise NotImplementedError experiments = kwargs [ "experiments" ] farms = [ ] barn = "raw_dir" for experiment in experiments : farms . append ( [ ] ) return farms , barn | engine to extract raw data |
41,118 | def summary_engine ( ** kwargs ) : logger . debug ( "summary_engine" ) farms = [ ] experiments = kwargs [ "experiments" ] for experiment in experiments : if experiment . selected_summaries is None : selected_summaries = [ "discharge_capacity" , "charge_capacity" , "coulombic_efficiency" , "cumulated_coulombic_efficiency" , "ir_discharge" , "ir_charge" , "end_voltage_discharge" , "end_voltage_charge" , ] else : selected_summaries = experiment . selected_summaries farm = helper . join_summaries ( experiment . summary_frames , selected_summaries ) farms . append ( farm ) barn = "batch_dir" return farms , barn | engine to extract summary data |
41,119 | def simple_db_engine ( reader = None , srnos = None ) : if reader is None : reader = dbreader . Reader ( ) logger . debug ( "No reader provided. Creating one myself." ) info_dict = dict ( ) info_dict [ "filenames" ] = [ reader . get_cell_name ( srno ) for srno in srnos ] info_dict [ "masses" ] = [ reader . get_mass ( srno ) for srno in srnos ] info_dict [ "total_masses" ] = [ reader . get_total_mass ( srno ) for srno in srnos ] info_dict [ "loadings" ] = [ reader . get_loading ( srno ) for srno in srnos ] info_dict [ "fixed" ] = [ reader . inspect_hd5f_fixed ( srno ) for srno in srnos ] info_dict [ "labels" ] = [ reader . get_label ( srno ) for srno in srnos ] info_dict [ "cell_type" ] = [ reader . get_cell_type ( srno ) for srno in srnos ] info_dict [ "raw_file_names" ] = [ ] info_dict [ "cellpy_file_names" ] = [ ] logger . debug ( "created info-dict" ) for key in list ( info_dict . keys ( ) ) : logger . debug ( "%s: %s" % ( key , str ( info_dict [ key ] ) ) ) _groups = [ reader . get_group ( srno ) for srno in srnos ] logger . debug ( ">\ngroups: %s" % str ( _groups ) ) groups = helper . fix_groups ( _groups ) info_dict [ "groups" ] = groups my_timer_start = time . time ( ) filename_cache = [ ] info_dict = helper . find_files ( info_dict , filename_cache ) my_timer_end = time . time ( ) if ( my_timer_end - my_timer_start ) > 5.0 : logger . info ( "The function _find_files was very slow. " "Save your info_df so you don't have to run it again!" ) info_df = pd . DataFrame ( info_dict ) info_df = info_df . sort_values ( [ "groups" , "filenames" ] ) info_df = helper . make_unique_groups ( info_df ) info_df [ "labels" ] = info_df [ "filenames" ] . apply ( helper . create_labels ) info_df . set_index ( "filenames" , inplace = True ) return info_df | engine that gets values from the simple excel db |
41,120 | def orify ( event , changed_callback ) : event . changed = changed_callback if not hasattr ( event , '_set' ) : event . _set = event . set event . _clear = event . clear event . set = lambda : or_set ( event ) event . clear = lambda : or_clear ( event ) | Override set and clear methods on event to call specified callback function after performing default behaviour . |
41,121 | def request ( device , response_queue , payload , timeout_s = None , poll = POLL_QUEUES ) : device . write ( payload ) if poll : start = dt . datetime . now ( ) while not response_queue . qsize ( ) : if ( dt . datetime . now ( ) - start ) . total_seconds ( ) > timeout_s : raise queue . Empty ( 'No response received.' ) return response_queue . get ( ) else : return response_queue . get ( timeout = timeout_s ) | Send payload to serial device and wait for response . |
41,122 | def connection_made ( self , transport ) : self . port = transport . serial . port logger . debug ( 'connection_made: `%s` `%s`' , self . port , transport ) self . transport = transport self . connected . set ( ) self . disconnected . clear ( ) | Called when reader thread is started |
41,123 | def connection_lost ( self , exception ) : if isinstance ( exception , Exception ) : logger . debug ( 'Connection to port `%s` lost: %s' , self . port , exception ) else : logger . debug ( 'Connection to port `%s` closed' , self . port ) self . connected . clear ( ) self . disconnected . set ( ) | Called when the serial port is closed or the reader loop terminated otherwise . |
41,124 | def write ( self , data , timeout_s = None ) : self . connected . wait ( timeout_s ) self . protocol . transport . write ( data ) | Write to serial port . |
41,125 | def fieldname_to_dtype ( fieldname ) : if fieldname == 'mode' : return ( 'mode' , np . uint8 ) elif fieldname in ( "ox/red" , "error" , "control changes" , "Ns changes" , "counter inc." ) : return ( fieldname , np . bool_ ) elif fieldname in ( "time/s" , "P/W" , "(Q-Qo)/mA.h" , "x" , "control/V" , "control/V/mA" , "(Q-Qo)/C" , "dQ/C" , "freq/Hz" , "|Ewe|/V" , "|I|/A" , "Phase(Z)/deg" , "|Z|/Ohm" , "Re(Z)/Ohm" , "-Im(Z)/Ohm" ) : return ( fieldname , np . float_ ) elif fieldname in ( "cycle number" , "I Range" , "Ns" , "half cycle" ) : return ( fieldname , np . int_ ) elif fieldname in ( "dq/mA.h" , "dQ/mA.h" ) : return ( "dQ/mA.h" , np . float_ ) elif fieldname in ( "I/mA" , "<I>/mA" ) : return ( "I/mA" , np . float_ ) elif fieldname in ( "Ewe/V" , "<Ewe>/V" ) : return ( "Ewe/V" , np . float_ ) else : raise ValueError ( "Invalid column header: %s" % fieldname ) | Converts a column header from the MPT file into a tuple of canonical name and appropriate numpy dtype |
41,126 | def comma_converter ( float_string ) : trans_table = maketrans ( b',' , b'.' ) return float ( float_string . translate ( trans_table ) ) | Convert numbers to floats whether the decimal point is a period or a comma . |
41,127 | def MPTfile ( file_or_path ) : if isinstance ( file_or_path , str ) : mpt_file = open ( file_or_path , 'rb' ) else : mpt_file = file_or_path magic = next ( mpt_file ) if magic != b'EC-Lab ASCII FILE\r\n' : raise ValueError ( "Bad first line for EC-Lab file: '%s'" % magic ) nb_headers_match = re . match ( b'Nb header lines : (\d+)\s*$' , next ( mpt_file ) ) nb_headers = int ( nb_headers_match . group ( 1 ) ) if nb_headers < 3 : raise ValueError ( "Too few header lines: %d" % nb_headers ) comments = [ next ( mpt_file ) for i in range ( nb_headers - 3 ) ] fieldnames = str3 ( next ( mpt_file ) ) . strip ( ) . split ( '\t' ) record_type = np . dtype ( list ( map ( fieldname_to_dtype , fieldnames ) ) ) converter_dict = dict ( ( ( i , comma_converter ) for i in range ( len ( fieldnames ) ) ) ) mpt_array = np . loadtxt ( mpt_file , dtype = record_type , converters = converter_dict ) return mpt_array , comments | Opens . mpt files as numpy record arrays |
41,128 | def MPTfileCSV ( file_or_path ) : if isinstance ( file_or_path , str ) : mpt_file = open ( file_or_path , 'r' ) else : mpt_file = file_or_path magic = next ( mpt_file ) if magic . rstrip ( ) != 'EC-Lab ASCII FILE' : raise ValueError ( "Bad first line for EC-Lab file: '%s'" % magic ) nb_headers_match = re . match ( 'Nb header lines : (\d+)\s*$' , next ( mpt_file ) ) nb_headers = int ( nb_headers_match . group ( 1 ) ) if nb_headers < 3 : raise ValueError ( "Too few header lines: %d" % nb_headers ) comments = [ next ( mpt_file ) for i in range ( nb_headers - 3 ) ] mpt_csv = csv . DictReader ( mpt_file , dialect = 'excel-tab' ) expected_fieldnames = ( [ "mode" , "ox/red" , "error" , "control changes" , "Ns changes" , "counter inc." , "time/s" , "control/V/mA" , "Ewe/V" , "dq/mA.h" , "P/W" , "<I>/mA" , "(Q-Qo)/mA.h" , "x" ] , [ 'mode' , 'ox/red' , 'error' , 'control changes' , 'Ns changes' , 'counter inc.' , 'time/s' , 'control/V' , 'Ewe/V' , 'dq/mA.h' , '<I>/mA' , '(Q-Qo)/mA.h' , 'x' ] , [ "mode" , "ox/red" , "error" , "control changes" , "Ns changes" , "counter inc." , "time/s" , "control/V" , "Ewe/V" , "I/mA" , "dQ/mA.h" , "P/W" ] , [ "mode" , "ox/red" , "error" , "control changes" , "Ns changes" , "counter inc." , "time/s" , "control/V" , "Ewe/V" , "<I>/mA" , "dQ/mA.h" , "P/W" ] ) if mpt_csv . fieldnames not in expected_fieldnames : raise ValueError ( "Unrecognised headers for MPT file format" ) return mpt_csv , comments | Simple function to open MPT files as csv . DictReader objects |
41,129 | def get_headers_global ( ) : headers = dict ( ) headers [ "applications_path_txt" ] = 'Applications_Path' headers [ "channel_index_txt" ] = 'Channel_Index' headers [ "channel_number_txt" ] = 'Channel_Number' headers [ "channel_type_txt" ] = 'Channel_Type' headers [ "comments_txt" ] = 'Comments' headers [ "creator_txt" ] = 'Creator' headers [ "daq_index_txt" ] = 'DAQ_Index' headers [ "item_id_txt" ] = 'Item_ID' headers [ "log_aux_data_flag_txt" ] = 'Log_Aux_Data_Flag' headers [ "log_chanstat_data_flag_txt" ] = 'Log_ChanStat_Data_Flag' headers [ "log_event_data_flag_txt" ] = 'Log_Event_Data_Flag' headers [ "log_smart_battery_data_flag_txt" ] = 'Log_Smart_Battery_Data_Flag' headers [ "mapped_aux_conc_cnumber_txt" ] = 'Mapped_Aux_Conc_CNumber' headers [ "mapped_aux_di_cnumber_txt" ] = 'Mapped_Aux_DI_CNumber' headers [ "mapped_aux_do_cnumber_txt" ] = 'Mapped_Aux_DO_CNumber' headers [ "mapped_aux_flow_rate_cnumber_txt" ] = 'Mapped_Aux_Flow_Rate_CNumber' headers [ "mapped_aux_ph_number_txt" ] = 'Mapped_Aux_PH_Number' headers [ "mapped_aux_pressure_number_txt" ] = 'Mapped_Aux_Pressure_Number' headers [ "mapped_aux_temperature_number_txt" ] = 'Mapped_Aux_Temperature_Number' headers [ "mapped_aux_voltage_number_txt" ] = 'Mapped_Aux_Voltage_Number' headers [ "schedule_file_name_txt" ] = 'Schedule_File_Name' headers [ "start_datetime_txt" ] = 'Start_DateTime' headers [ "test_id_txt" ] = 'Test_ID' headers [ "test_name_txt" ] = 'Test_Name' return headers | Defines the so - called global column headings for Arbin . res - files |
41,130 | def _save_multi ( data , file_name , sep = ";" ) : logger . debug ( "saving multi" ) with open ( file_name , "w" , newline = '' ) as f : logger . debug ( f"{file_name} opened" ) writer = csv . writer ( f , delimiter = sep ) try : writer . writerows ( itertools . zip_longest ( * data ) ) except Exception as e : logger . info ( f"Exception encountered in batch._save_multi: {e}" ) raise ExportFailed logger . debug ( "wrote rows using itertools in _save_multi" ) | convenience function for storing data column - wise in a csv - file . |
41,131 | def _extract_dqdv ( cell_data , extract_func , last_cycle ) : from cellpy . utils . ica import dqdv list_of_cycles = cell_data . get_cycle_numbers ( ) if last_cycle is not None : list_of_cycles = [ c for c in list_of_cycles if c <= int ( last_cycle ) ] logger . debug ( f"only processing up to cycle {last_cycle}" ) logger . debug ( f"you have {len(list_of_cycles)} cycles to process" ) out_data = [ ] for cycle in list_of_cycles : try : c , v = extract_func ( cycle ) v , dq = dqdv ( v , c ) v = v . tolist ( ) dq = dq . tolist ( ) except NullData as e : v = list ( ) dq = list ( ) logger . info ( " Ups! Could not process this (cycle %i)" % cycle ) logger . info ( " %s" % e ) header_x = "dQ cycle_no %i" % cycle header_y = "voltage cycle_no %i" % cycle dq . insert ( 0 , header_x ) v . insert ( 0 , header_y ) out_data . append ( v ) out_data . append ( dq ) return out_data | Simple wrapper around the cellpy . utils . ica . dqdv function . |
41,132 | def make_df_from_batch ( batch_name , batch_col = "b01" , reader = None , reader_label = None ) : batch_name = batch_name batch_col = batch_col logger . debug ( f"batch_name, batch_col: {batch_name}, {batch_col}" ) if reader is None : reader_obj = get_db_reader ( reader_label ) reader = reader_obj ( ) srnos = reader . select_batch ( batch_name , batch_col ) logger . debug ( "srnos:" + str ( srnos ) ) info_dict = _create_info_dict ( reader , srnos ) info_df = pd . DataFrame ( info_dict ) info_df = info_df . sort_values ( [ "groups" , "filenames" ] ) info_df = _make_unique_groups ( info_df ) info_df [ "labels" ] = info_df [ "filenames" ] . apply ( create_labels ) info_df . set_index ( "filenames" , inplace = True ) return info_df | Create a pandas DataFrame with the info needed for cellpy to load the runs . |
41,133 | def create_folder_structure ( project_name , batch_name ) : out_data_dir = prms . Paths [ "outdatadir" ] project_dir = os . path . join ( out_data_dir , project_name ) batch_dir = os . path . join ( project_dir , batch_name ) raw_dir = os . path . join ( batch_dir , "raw_data" ) if not os . path . isdir ( project_dir ) : os . mkdir ( project_dir ) if not os . path . isdir ( batch_dir ) : os . mkdir ( batch_dir ) if not os . path . isdir ( raw_dir ) : os . mkdir ( raw_dir ) info_file = "cellpy_batch_%s.json" % batch_name info_file = os . path . join ( project_dir , info_file ) return info_file , ( project_dir , batch_dir , raw_dir ) | This function creates a folder structure for the batch project . |
41,134 | def save_summaries ( frames , keys , selected_summaries , batch_dir , batch_name ) : if not frames : logger . info ( "Could save summaries - no summaries to save!" ) logger . info ( "You have no frames - aborting" ) return None if not keys : logger . info ( "Could save summaries - no summaries to save!" ) logger . info ( "You have no keys - aborting" ) return None selected_summaries_dict = create_selected_summaries_dict ( selected_summaries ) summary_df = pd . concat ( frames , keys = keys , axis = 1 ) for key , value in selected_summaries_dict . items ( ) : _summary_file_name = os . path . join ( batch_dir , "summary_%s_%s.csv" % ( key , batch_name ) ) _summary_df = summary_df . iloc [ : , summary_df . columns . get_level_values ( 1 ) == value ] _header = _summary_df . columns _summary_df . to_csv ( _summary_file_name , sep = ";" ) logger . info ( "saved summary (%s) to:\n %s" % ( key , _summary_file_name ) ) logger . info ( "finished saving summaries" ) return summary_df | Writes the summaries to csv - files |
41,135 | def pick_summary_data ( key , summary_df , selected_summaries ) : selected_summaries_dict = create_selected_summaries_dict ( selected_summaries ) value = selected_summaries_dict [ key ] return summary_df . iloc [ : , summary_df . columns . get_level_values ( 1 ) == value ] | picks the selected pandas . DataFrame |
41,136 | def plot_summary_data ( ax , df , info_df , color_list , symbol_list , is_charge = False , plot_style = None ) : logger . debug ( "trying to plot summary data" ) if plot_style is None : logger . debug ( "no plot_style given, using default" ) plot_style = DEFAULT_PLOT_STYLE else : logger . debug ( "plot_style given" ) list_of_lines = list ( ) for datacol in df . columns : group = info_df . get_value ( datacol [ 0 ] , "groups" ) sub_group = info_df . get_value ( datacol [ 0 ] , "sub_groups" ) color = color_list [ group - 1 ] marker = symbol_list [ sub_group - 1 ] plot_style [ "marker" ] = marker plot_style [ "markeredgecolor" ] = color plot_style [ "color" ] = color plot_style [ "markerfacecolor" ] = 'none' logger . debug ( "selecting color for group: " + str ( color ) ) if not is_charge : plot_style [ "markerfacecolor" ] = color lines = ax . plot ( df [ datacol ] , ** plot_style ) list_of_lines . extend ( lines ) return list_of_lines , plot_style | creates a plot of the selected df - data in the given axes . |
41,137 | def init ( * args , ** kwargs ) : default_log_level = kwargs . pop ( "default_log_level" , None ) import cellpy . log as log log . setup_logging ( custom_log_dir = prms . Paths [ "filelogdir" ] , default_level = default_log_level ) b = Batch ( * args , ** kwargs ) return b | Returns an initialized instance of the Batch class |
41,138 | def debugging ( ) : print ( "In debugging" ) json_file = r"C:\Scripting\Processing\Cell" r"data\outdata\SiBEC\cellpy_batch_bec_exp02.json" b = init ( default_log_level = "DEBUG" ) b . load_info_df ( json_file ) print ( b . info_df . head ( ) ) b . export_raw = False b . export_cycles = False b . export_ica = False b . save_cellpy_file = True b . force_raw_file = False b . force_cellpy_file = True b . load_and_save_raw ( parent_level = "cellpydata" ) | This one I use for debugging ... |
41,139 | def save_info_df ( self ) : logger . debug ( "running save_info_df" ) info_df = self . info_df top_level_dict = { 'info_df' : info_df , 'metadata' : self . _prm_packer ( ) } jason_string = json . dumps ( top_level_dict , default = lambda info_df : json . loads ( info_df . to_json ( ) ) ) with open ( self . info_file , 'w' ) as outfile : outfile . write ( jason_string ) logger . info ( "Saved file to {}" . format ( self . info_file ) ) | Saves the DataFrame with info about the runs to a JSON file |
41,140 | def create_folder_structure ( self ) : self . info_file , directories = create_folder_structure ( self . project , self . name ) self . project_dir , self . batch_dir , self . raw_dir = directories logger . debug ( "create folders:" + str ( directories ) ) | Creates a folder structure based on the project and batch name . |
41,141 | def make_summaries ( self ) : self . summary_df = save_summaries ( self . frames , self . keys , self . selected_summaries , self . batch_dir , self . name ) logger . debug ( "made and saved summaries" ) | Make and save summary csv files each containing values from all cells |
41,142 | def plot_summaries ( self , show = False , save = True , figure_type = None ) : if not figure_type : figure_type = self . default_figure_type if not figure_type in self . default_figure_types : logger . debug ( "unknown figure type selected" ) figure_type = self . default_figure_type color_list , symbol_list = self . _create_colors_markers_list ( ) summary_df = self . summary_df selected_summaries = self . selected_summaries batch_dir = self . batch_dir batch_name = self . name fig , ax = plot_summary_figure ( self . info_df , summary_df , color_list , symbol_list , selected_summaries , batch_dir , batch_name , show = show , save = save , figure_type = figure_type ) self . figure [ figure_type ] = fig self . axes [ figure_type ] = ax | Plot summary graphs . |
41,143 | def link ( self ) : logging . info ( "[estblishing links]" ) logging . debug ( "checking and establishing link to data" ) cell_data_frames = dict ( ) counter = 0 errors = [ ] try : for indx , row in self . journal . pages . iterrows ( ) : counter += 1 l_txt = "starting to process file # %i (index=%s)" % ( counter , indx ) logging . debug ( l_txt ) logging . info ( f"linking cellpy-file: {row.cellpy_file_names}" ) if not os . path . isfile ( row . cellpy_file_names ) : logging . error ( "File does not exist" ) raise IOError cell_data_frames [ indx ] = cellreader . CellpyData ( initialize = True ) step_table = helper . look_up_and_get ( row . cellpy_file_names , "step_table" ) cell_data_frames [ indx ] . dataset . step_table = step_table self . cell_data_frames = cell_data_frames except IOError as e : logging . warning ( e ) e_txt = "links not established - try update" logging . warning ( e_txt ) errors . append ( e_txt ) self . errors [ "link" ] = errors | Ensure that an appropriate link to the cellpy - files exists for each cell . |
41,144 | def get_default_config_file_path ( init_filename = None ) : prm_dir = get_package_prm_dir ( ) if not init_filename : init_filename = DEFAULT_FILENAME src = os . path . join ( prm_dir , init_filename ) return src | gets the path to the default config - file |
41,145 | def get_user_dir_and_dst ( init_filename ) : user_dir = get_user_dir ( ) dst_file = os . path . join ( user_dir , init_filename ) return user_dir , dst_file | gets the name of the user directory and full prm filepath |
41,146 | def setup ( interactive , not_relative , dry_run , reset , root_dir , testuser ) : click . echo ( "[cellpy] (setup)" ) init_filename = create_custom_init_filename ( ) userdir , dst_file = get_user_dir_and_dst ( init_filename ) if testuser : if not root_dir : root_dir = os . getcwd ( ) click . echo ( f"[cellpy] (setup) DEV-MODE testuser: {testuser}" ) init_filename = create_custom_init_filename ( testuser ) userdir = root_dir dst_file = get_dst_file ( userdir , init_filename ) click . echo ( f"[cellpy] (setup) DEV-MODE userdir: {userdir}" ) click . echo ( f"[cellpy] (setup) DEV-MODE dst_file: {dst_file}" ) if not pathlib . Path ( dst_file ) . is_file ( ) : reset = True if interactive : click . echo ( " interactive mode " . center ( 80 , "-" ) ) _update_paths ( root_dir , not not_relative , dry_run = dry_run , reset = reset ) _write_config_file ( userdir , dst_file , init_filename , dry_run , ) _check ( ) else : _write_config_file ( userdir , dst_file , init_filename , dry_run ) _check ( ) | This will help you to setup cellpy . |
41,147 | def _parse_g_dir ( repo , gdirpath ) : for f in repo . get_contents ( gdirpath ) : if f . type == "dir" : for sf in repo . get_contents ( f . path ) : yield sf else : yield f | parses a repo directory two - levels deep |
41,148 | def look_up_and_get ( cellpy_file_name , table_name ) : root = '/CellpyData' table_path = '/' . join ( [ root , table_name ] ) logging . debug ( f"look_up_and_get({cellpy_file_name}, {table_name}" ) store = pd . HDFStore ( cellpy_file_name ) table = store . select ( table_path ) store . close ( ) return table | Extracts table from cellpy hdf5 - file . |
41,149 | def fix_groups ( groups ) : _groups = [ ] for g in groups : try : if not float ( g ) > 0 : _groups . append ( 1000 ) else : _groups . append ( int ( g ) ) except TypeError as e : logging . info ( "Error in reading group number (check your db)" ) logging . debug ( g ) logging . debug ( e ) _groups . append ( 1000 ) return _groups | Takes care of strange group numbers . |
41,150 | def create_selected_summaries_dict ( summaries_list ) : headers_summary = cellpy . parameters . internal_settings . get_headers_summary ( ) selected_summaries = dict ( ) for h in summaries_list : selected_summaries [ h ] = headers_summary [ h ] return selected_summaries | Creates a dictionary with summary column headers . |
41,151 | def generate_folder_names ( name , project ) : out_data_dir = prms . Paths . outdatadir project_dir = os . path . join ( out_data_dir , project ) batch_dir = os . path . join ( project_dir , name ) raw_dir = os . path . join ( batch_dir , "raw_data" ) return out_data_dir , project_dir , batch_dir , raw_dir | Creates sensible folder names . |
41,152 | def _interpolate_df_col ( df , x = None , y = None , new_x = None , dx = 10.0 , number_of_points = None , direction = 1 , ** kwargs ) : if x is None : x = df . columns [ 0 ] if y is None : y = df . columns [ 1 ] xs = df [ x ] . values ys = df [ y ] . values if direction > 0 : x_min = xs . min ( ) x_max = xs . max ( ) else : x_max = xs . min ( ) x_min = xs . max ( ) dx = - dx bounds_error = kwargs . pop ( "bounds_error" , False ) f = interpolate . interp1d ( xs , ys , bounds_error = bounds_error , ** kwargs ) if new_x is None : if number_of_points : new_x = np . linspace ( x_min , x_max , number_of_points ) else : new_x = np . arange ( x_min , x_max , dx ) new_y = f ( new_x ) new_df = pd . DataFrame ( { x : new_x , y : new_y } ) return new_df | Interpolate a column based on another column . |
41,153 | def _collect_capacity_curves ( data , direction = "charge" ) : minimum_v_value = np . Inf maximum_v_value = - np . Inf charge_list = [ ] cycles = data . get_cycle_numbers ( ) for cycle in cycles : try : if direction == "charge" : q , v = data . get_ccap ( cycle ) else : q , v = data . get_dcap ( cycle ) except NullData as e : logging . warning ( e ) break else : d = pd . DataFrame ( { "q" : q , "v" : v } ) d . name = cycle charge_list . append ( d ) v_min = v . min ( ) v_max = v . max ( ) if v_min < minimum_v_value : minimum_v_value = v_min if v_max > maximum_v_value : maximum_v_value = v_max return charge_list , cycles , minimum_v_value , maximum_v_value | Create a list of pandas . DataFrames one for each charge step . |
41,154 | def cell ( filename = None , mass = None , instrument = None , logging_mode = "INFO" , cycle_mode = None , auto_summary = True ) : from cellpy import log log . setup_logging ( default_level = logging_mode ) cellpy_instance = setup_cellpy_instance ( ) if instrument is not None : cellpy_instance . set_instrument ( instrument = instrument ) if cycle_mode is not None : cellpy_instance . cycle_mode = cycle_mode if filename is not None : filename = Path ( filename ) if filename . suffix in [ ".h5" , ".hdf5" , ".cellpy" , ".cpy" ] : logging . info ( f"Loading cellpy-file: {filename}" ) cellpy_instance . load ( filename ) else : logging . info ( f"Loading raw-file: {filename}" ) cellpy_instance . from_raw ( filename ) if mass is not None : logging . info ( "Setting mass" ) cellpy_instance . set_mass ( mass ) if auto_summary : logging . info ( "Creating step table" ) cellpy_instance . make_step_table ( ) logging . info ( "Creating summary data" ) cellpy_instance . make_summary ( ) logging . info ( "Created CellpyData object" ) return cellpy_instance | Create a CellpyData object |
41,155 | def load_and_save_resfile ( filename , outfile = None , outdir = None , mass = 1.00 ) : d = CellpyData ( ) if not outdir : outdir = prms . Paths [ "cellpydatadir" ] if not outfile : outfile = os . path . basename ( filename ) . split ( "." ) [ 0 ] + ".h5" outfile = os . path . join ( outdir , outfile ) print ( "filename:" , filename ) print ( "outfile:" , outfile ) print ( "outdir:" , outdir ) print ( "mass:" , mass , "mg" ) d . from_raw ( filename ) d . set_mass ( mass ) d . make_step_table ( ) d . make_summary ( ) d . save ( filename = outfile ) d . to_csv ( datadir = outdir , cycles = True , raw = True , summary = True ) return outfile | Load a raw data file and save it as cellpy - file . |
41,156 | def load_and_print_resfile ( filename , info_dict = None ) : if info_dict is None : info_dict = dict ( ) info_dict [ "mass" ] = 1.23 info_dict [ "nom_cap" ] = 3600 info_dict [ "tot_mass" ] = 2.33 d = CellpyData ( ) print ( "filename:" , filename ) print ( "info_dict in:" , end = ' ' ) print ( info_dict ) d . from_raw ( filename ) d . set_mass ( info_dict [ "mass" ] ) d . make_step_table ( ) d . make_summary ( ) for test in d . datasets : print ( "newtest" ) print ( test ) return info_dict | Load a raw data file and print information . |
41,157 | def set_raw_datadir ( self , directory = None ) : if directory is None : self . logger . info ( "no directory name given" ) return if not os . path . isdir ( directory ) : self . logger . info ( directory ) self . logger . info ( "directory does not exist" ) return self . raw_datadir = directory | Set the directory containing . res - files . |
41,158 | def set_cellpy_datadir ( self , directory = None ) : if directory is None : self . logger . info ( "no directory name given" ) return if not os . path . isdir ( directory ) : self . logger . info ( "directory does not exist" ) return self . cellpy_datadir = directory | Set the directory containing . hdf5 - files . |
41,159 | def _check_raw ( self , file_names , abort_on_missing = False ) : strip_file_names = True check_on = self . filestatuschecker if not self . _is_listtype ( file_names ) : file_names = [ file_names , ] ids = dict ( ) for f in file_names : self . logger . debug ( f"checking res file {f}" ) fid = FileID ( f ) if fid . name is None : warnings . warn ( f"file does not exist: {f}" ) if abort_on_missing : sys . exit ( - 1 ) else : if strip_file_names : name = os . path . basename ( f ) else : name = f if check_on == "size" : ids [ name ] = int ( fid . size ) elif check_on == "modified" : ids [ name ] = int ( fid . last_modified ) else : ids [ name ] = int ( fid . last_accessed ) return ids | Get the file - ids for the res_files . |
41,160 | def _check_cellpy_file ( self , filename ) : strip_filenames = True check_on = self . filestatuschecker self . logger . debug ( "checking cellpy-file" ) self . logger . debug ( filename ) if not os . path . isfile ( filename ) : self . logger . debug ( "cellpy-file does not exist" ) return None try : store = pd . HDFStore ( filename ) except Exception as e : self . logger . debug ( f"could not open cellpy-file ({e})" ) return None try : fidtable = store . select ( "CellpyData/fidtable" ) except KeyError : self . logger . warning ( "no fidtable -" " you should update your hdf5-file" ) fidtable = None finally : store . close ( ) if fidtable is not None : raw_data_files , raw_data_files_length = self . _convert2fid_list ( fidtable ) txt = "contains %i res-files" % ( len ( raw_data_files ) ) self . logger . debug ( txt ) ids = dict ( ) for fid in raw_data_files : full_name = fid . full_name size = fid . size mod = fid . last_modified self . logger . debug ( f"fileID information for: {full_name}" ) self . logger . debug ( f" modified: {mod}" ) self . logger . debug ( f" size: {size}" ) if strip_filenames : name = os . path . basename ( full_name ) else : name = full_name if check_on == "size" : ids [ name ] = int ( fid . size ) elif check_on == "modified" : ids [ name ] = int ( fid . last_modified ) else : ids [ name ] = int ( fid . last_accessed ) return ids else : return None | Get the file - ids for the cellpy_file . |
41,161 | def loadcell ( self , raw_files , cellpy_file = None , mass = None , summary_on_raw = False , summary_ir = True , summary_ocv = False , summary_end_v = True , only_summary = False , only_first = False , force_raw = False , use_cellpy_stat_file = None ) : self . logger . info ( "started loadcell" ) if cellpy_file is None : similar = False elif force_raw : similar = False else : similar = self . check_file_ids ( raw_files , cellpy_file ) self . logger . debug ( "checked if the files were similar" ) if only_summary : self . load_only_summary = True else : self . load_only_summary = False if not similar : self . logger . info ( "cellpy file(s) needs updating - loading raw" ) self . logger . debug ( raw_files ) self . from_raw ( raw_files ) self . logger . debug ( "loaded files" ) if self . status_datasets : if mass : self . set_mass ( mass ) if summary_on_raw : self . make_summary ( all_tests = False , find_ocv = summary_ocv , find_ir = summary_ir , find_end_voltage = summary_end_v , use_cellpy_stat_file = use_cellpy_stat_file ) else : self . logger . warning ( "Empty run!" ) else : self . load ( cellpy_file ) return self | Loads data for given cells . |
41,162 | def from_raw ( self , file_names = None , ** kwargs ) : if file_names : self . file_names = file_names if not isinstance ( file_names , ( list , tuple ) ) : self . file_names = [ file_names , ] raw_file_loader = self . loader set_number = 0 test = None counter = 0 self . logger . debug ( "start iterating through file(s)" ) for f in self . file_names : self . logger . debug ( "loading raw file:" ) self . logger . debug ( f"{f}" ) new_tests = raw_file_loader ( f , ** kwargs ) if new_tests : if test is not None : self . logger . debug ( "continuing reading files..." ) _test = self . _append ( test [ set_number ] , new_tests [ set_number ] ) if not _test : self . logger . warning ( f"EMPTY TEST: {f}" ) continue test [ set_number ] = _test self . logger . debug ( "added this test - started merging" ) for j in range ( len ( new_tests [ set_number ] . raw_data_files ) ) : raw_data_file = new_tests [ set_number ] . raw_data_files [ j ] file_size = new_tests [ set_number ] . raw_data_files_length [ j ] test [ set_number ] . raw_data_files . append ( raw_data_file ) test [ set_number ] . raw_data_files_length . append ( file_size ) counter += 1 if counter > 10 : self . logger . debug ( "ERROR? Too many files to merge" ) raise ValueError ( "Too many files to merge - " "could be a p2-p3 zip thing" ) else : self . logger . debug ( "getting data from first file" ) if new_tests [ set_number ] . no_data : self . logger . debug ( "NO DATA" ) else : test = new_tests else : self . logger . debug ( "NOTHING LOADED" ) self . logger . debug ( "finished loading the raw-files" ) test_exists = False if test : if test [ 0 ] . no_data : self . logging . debug ( "the first dataset (or only dataset) loaded from the raw data file is empty" ) else : test_exists = True if test_exists : if not prms . Reader . sorted_data : self . logger . debug ( "sorting data" ) test [ set_number ] = self . _sort_data ( test [ set_number ] ) self . datasets . append ( test [ set_number ] ) else : self . logger . warning ( "No new datasets added!" ) self . number_of_datasets = len ( self . datasets ) self . status_datasets = self . _validate_datasets ( ) self . _invent_a_name ( ) return self | Load a raw data - file . |
41,163 | def check ( self ) : if len ( self . status_datasets ) == 0 : return False if all ( self . status_datasets ) : return True return False | Returns False if no datasets exists or if one or more of the datasets are empty |
41,164 | def load ( self , cellpy_file , parent_level = "CellpyData" ) : try : self . logger . debug ( "loading cellpy-file (hdf5):" ) self . logger . debug ( cellpy_file ) new_datasets = self . _load_hdf5 ( cellpy_file , parent_level ) self . logger . debug ( "cellpy-file loaded" ) except AttributeError : new_datasets = [ ] self . logger . warning ( "This cellpy-file version is not supported by" "current reader (try to update cellpy)." ) if new_datasets : for dataset in new_datasets : self . datasets . append ( dataset ) else : self . logger . warning ( "Could not load" ) self . logger . warning ( str ( cellpy_file ) ) self . number_of_datasets = len ( self . datasets ) self . status_datasets = self . _validate_datasets ( ) self . _invent_a_name ( cellpy_file ) return self | Loads a cellpy file . |
41,165 | def _load_hdf5 ( self , filename , parent_level = "CellpyData" ) : if not os . path . isfile ( filename ) : self . logger . info ( f"file does not exist: {filename}" ) raise IOError store = pd . HDFStore ( filename ) required_keys = [ 'dfdata' , 'dfsummary' , 'info' ] required_keys = [ "/" + parent_level + "/" + _ for _ in required_keys ] for key in required_keys : if key not in store . keys ( ) : self . logger . info ( f"This hdf-file is not good enough - " f"at least one key is missing: {key}" ) raise Exception ( f"OH MY GOD! At least one crucial key" f"is missing {key}!" ) self . logger . debug ( f"Keys in current hdf5-file: {store.keys()}" ) data = DataSet ( ) if parent_level != "CellpyData" : self . logger . debug ( "Using non-default parent label for the " "hdf-store: {}" . format ( parent_level ) ) infotable = store . select ( parent_level + "/info" ) try : data . cellpy_file_version = self . _extract_from_dict ( infotable , "cellpy_file_version" ) except Exception as e : data . cellpy_file_version = 0 warnings . warn ( f"Unhandled exception raised: {e}" ) if data . cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION : raise WrongFileVersion if data . cellpy_file_version > CELLPY_FILE_VERSION : raise WrongFileVersion data . dfsummary = store . select ( parent_level + "/dfsummary" ) data . dfdata = store . select ( parent_level + "/dfdata" ) try : data . step_table = store . select ( parent_level + "/step_table" ) except Exception as e : self . logging . debug ( "could not get step_table from cellpy-file" ) data . step_table = pd . DataFrame ( ) warnings . warn ( f"Unhandled exception raised: {e}" ) try : fidtable = store . select ( parent_level + "/fidtable" ) fidtable_selected = True except Exception as e : self . logging . debug ( "could not get fid-table from cellpy-file" ) fidtable = [ ] warnings . warn ( "no fidtable - you should update your hdf5-file" ) fidtable_selected = False self . logger . debug ( " h5" ) newtests = [ ] data = self . _load_infotable ( data , infotable , filename ) if fidtable_selected : data . raw_data_files , data . raw_data_files_length = self . _convert2fid_list ( fidtable ) else : data . raw_data_files = None data . raw_data_files_length = None newtests . append ( data ) store . close ( ) return newtests | Load a cellpy - file . |
41,166 | def merge ( self , datasets = None , separate_datasets = False ) : self . logger . info ( "merging" ) if separate_datasets : warnings . warn ( "The option seperate_datasets=True is" "not implemented yet. Performing merging, but" "neglecting the option." ) else : if datasets is None : datasets = list ( range ( len ( self . datasets ) ) ) first = True for dataset_number in datasets : if first : dataset = self . datasets [ dataset_number ] first = False else : dataset = self . _append ( dataset , self . datasets [ dataset_number ] ) for raw_data_file , file_size in zip ( self . datasets [ dataset_number ] . raw_data_files , self . datasets [ dataset_number ] . raw_data_files_length ) : dataset . raw_data_files . append ( raw_data_file ) dataset . raw_data_files_length . append ( file_size ) self . datasets = [ dataset ] self . number_of_datasets = 1 return self | This function merges datasets into one set . |
41,167 | def print_step_table ( self , dataset_number = None ) : dataset_number = self . _validate_dataset_number ( dataset_number ) if dataset_number is None : self . _report_empty_dataset ( ) return st = self . datasets [ dataset_number ] . step_table print ( st ) | Print the step table . |
41,168 | def load_step_specifications ( self , file_name , short = False , dataset_number = None ) : dataset_number = self . _validate_dataset_number ( dataset_number ) if dataset_number is None : self . _report_empty_dataset ( ) return step_specs = pd . read_csv ( file_name , sep = prms . Reader . sep ) if "step" not in step_specs . columns : self . logger . info ( "step col is missing" ) raise IOError if "type" not in step_specs . columns : self . logger . info ( "type col is missing" ) raise IOError if not short and "cycle" not in step_specs . columns : self . logger . info ( "cycle col is missing" ) raise IOError self . make_step_table ( custom_step_definition = True , step_specifications = step_specs , short = short ) | Load a table that contains step - type definitions . |
41,169 | def sget_voltage ( self , cycle , step , set_number = None ) : time_00 = time . time ( ) set_number = self . _validate_dataset_number ( set_number ) if set_number is None : self . _report_empty_dataset ( ) return cycle_index_header = self . headers_normal . cycle_index_txt voltage_header = self . headers_normal . voltage_txt step_index_header = self . headers_normal . step_index_txt test = self . datasets [ set_number ] . dfdata if isinstance ( step , ( list , tuple ) ) : warnings . warn ( f"The varialbe step is a list." f"Should be an integer." f"{step}" ) step = step [ 0 ] c = test [ ( test [ cycle_index_header ] == cycle ) & ( test [ step_index_header ] == step ) ] self . logger . debug ( f"(dt: {(time.time() - time_00):4.2f}s)" ) if not self . is_empty ( c ) : v = c [ voltage_header ] return v else : return None | Returns voltage for cycle step . |
41,170 | def sget_steptime ( self , cycle , step , dataset_number = None ) : dataset_number = self . _validate_dataset_number ( dataset_number ) if dataset_number is None : self . _report_empty_dataset ( ) return cycle_index_header = self . headers_normal . cycle_index_txt step_time_header = self . headers_normal . step_time_txt step_index_header = self . headers_normal . step_index_txt test = self . datasets [ dataset_number ] . dfdata if isinstance ( step , ( list , tuple ) ) : warnings . warn ( f"The varialbe step is a list." f"Should be an integer." f"{step}" ) step = step [ 0 ] c = test . loc [ ( test [ cycle_index_header ] == cycle ) & ( test [ step_index_header ] == step ) , : ] if not self . is_empty ( c ) : t = c [ step_time_header ] return t else : return None | Returns step time for cycle step . |
41,171 | def sget_timestamp ( self , cycle , step , dataset_number = None ) : dataset_number = self . _validate_dataset_number ( dataset_number ) if dataset_number is None : self . _report_empty_dataset ( ) return cycle_index_header = self . headers_normal . cycle_index_txt timestamp_header = self . headers_normal . test_time_txt step_index_header = self . headers_normal . step_index_txt test = self . datasets [ dataset_number ] . dfdata if isinstance ( step , ( list , tuple ) ) : warnings . warn ( f"The varialbe step is a list." f"Should be an integer." f"{step}" ) step = step [ 0 ] c = test [ ( test [ cycle_index_header ] == cycle ) & ( test [ step_index_header ] == step ) ] if not self . is_empty ( c ) : t = c [ timestamp_header ] return t else : return pd . Series ( ) | Returns timestamp for cycle step . |
41,172 | def get_ocv ( self , cycles = None , direction = "up" , remove_first = False , interpolated = False , dx = None , number_of_points = None ) : if cycles is None : cycles = self . get_cycle_numbers ( ) else : if not isinstance ( cycles , ( list , tuple ) ) : cycles = [ cycles , ] else : remove_first = False ocv_rlx_id = "ocvrlx" if direction == "up" : ocv_rlx_id += "_up" elif direction == "down" : ocv_rlx_id += "_down" step_table = self . dataset . step_table dfdata = self . dataset . dfdata ocv_steps = step_table . loc [ step_table [ "cycle" ] . isin ( cycles ) , : ] ocv_steps = ocv_steps . loc [ ocv_steps . type . str . startswith ( ocv_rlx_id ) , : ] if remove_first : ocv_steps = ocv_steps . iloc [ 1 : , : ] step_time_label = self . headers_normal . step_time_txt voltage_label = self . headers_normal . voltage_txt cycle_label = self . headers_normal . cycle_index_txt step_label = self . headers_normal . step_index_txt selected_df = dfdata . where ( dfdata [ cycle_label ] . isin ( ocv_steps . cycle ) & dfdata [ step_label ] . isin ( ocv_steps . step ) ) . dropna ( ) selected_df = selected_df . loc [ : , [ cycle_label , step_label , step_time_label , voltage_label ] ] if interpolated : if dx is None and number_of_points is None : dx = prms . Reader . time_interpolation_step new_dfs = list ( ) groupby_list = [ cycle_label , step_label ] for name , group in selected_df . groupby ( groupby_list ) : new_group = _interpolate_df_col ( group , x = step_time_label , y = voltage_label , dx = dx , number_of_points = number_of_points , ) for i , j in zip ( groupby_list , name ) : new_group [ i ] = j new_dfs . append ( new_group ) selected_df = pd . concat ( new_dfs ) return selected_df | get the open circuit voltage relaxation curves . |
41,173 | def get_number_of_cycles ( self , dataset_number = None , steptable = None ) : if steptable is None : dataset_number = self . _validate_dataset_number ( dataset_number ) if dataset_number is None : self . _report_empty_dataset ( ) return d = self . datasets [ dataset_number ] . dfdata no_cycles = np . amax ( d [ self . headers_normal . cycle_index_txt ] ) else : no_cycles = np . amax ( steptable [ self . headers_step_table . cycle ] ) return no_cycles | Get the number of cycles in the test . |
41,174 | def get_cycle_numbers ( self , dataset_number = None , steptable = None ) : if steptable is None : dataset_number = self . _validate_dataset_number ( dataset_number ) if dataset_number is None : self . _report_empty_dataset ( ) return d = self . datasets [ dataset_number ] . dfdata cycles = np . unique ( d [ self . headers_normal . cycle_index_txt ] ) else : cycles = np . unique ( steptable [ self . headers_step_table . cycle ] ) return cycles | Get a list containing all the cycle numbers in the test . |
41,175 | def get_converter_to_specific ( self , dataset = None , mass = None , to_unit = None , from_unit = None ) : if not dataset : dataset_number = self . _validate_dataset_number ( None ) if dataset_number is None : self . _report_empty_dataset ( ) return dataset = self . datasets [ dataset_number ] if not mass : mass = dataset . mass if not to_unit : to_unit_cap = self . cellpy_units [ "charge" ] to_unit_mass = self . cellpy_units [ "specific" ] to_unit = to_unit_cap / to_unit_mass if not from_unit : from_unit_cap = self . raw_units [ "charge" ] from_unit_mass = self . raw_units [ "mass" ] from_unit = from_unit_cap / from_unit_mass return from_unit / to_unit / mass | get the conversion values |
41,176 | def set_col_first ( df , col_names ) : column_headings = df . columns column_headings = column_headings . tolist ( ) try : for col_name in col_names : i = column_headings . index ( col_name ) column_headings . pop ( column_headings . index ( col_name ) ) column_headings . insert ( 0 , col_name ) finally : df = df . reindex ( columns = column_headings ) return df | set selected columns first in a pandas . DataFrame . |
41,177 | def get_summary ( self , dataset_number = None , use_dfsummary_made = False ) : dataset_number = self . _validate_dataset_number ( dataset_number ) if dataset_number is None : self . _report_empty_dataset ( ) return None test = self . get_dataset ( dataset_number ) if use_dfsummary_made : dfsummary_made = test . dfsummary_made else : dfsummary_made = True if not dfsummary_made : warnings . warn ( "Summary is not made yet" ) return None else : self . logger . info ( "returning datasets[test_no].dfsummary" ) return test . dfsummary | Retrieve summary returned as a pandas DataFrame . |
41,178 | def make_summary ( self , find_ocv = False , find_ir = False , find_end_voltage = False , use_cellpy_stat_file = None , all_tests = True , dataset_number = 0 , ensure_step_table = True , convert_date = False ) : if self . tester == "arbin" : convert_date = True if ensure_step_table is None : ensure_step_table = self . ensure_step_table if use_cellpy_stat_file is None : use_cellpy_stat_file = prms . Reader . use_cellpy_stat_file self . logger . debug ( "using use_cellpy_stat_file from prms" ) self . logger . debug ( f"use_cellpy_stat_file: {use_cellpy_stat_file}" ) if all_tests is True : for j in range ( len ( self . datasets ) ) : txt = "creating summary for file " test = self . datasets [ j ] if not self . _is_not_empty_dataset ( test ) : self . logger . info ( "empty test %i" % j ) return if isinstance ( test . loaded_from , ( list , tuple ) ) : for f in test . loaded_from : txt += f txt += "\n" else : txt += str ( test . loaded_from ) if not test . mass_given : txt += " mass for test %i is not given" % j txt += " setting it to %f mg" % test . mass self . logger . debug ( txt ) self . _make_summary ( j , find_ocv = find_ocv , find_ir = find_ir , find_end_voltage = find_end_voltage , use_cellpy_stat_file = use_cellpy_stat_file , ensure_step_table = ensure_step_table , convert_date = convert_date , ) else : self . logger . debug ( "creating summary for only one test" ) dataset_number = self . _validate_dataset_number ( dataset_number ) if dataset_number is None : self . _report_empty_dataset ( ) return self . _make_summary ( dataset_number , find_ocv = find_ocv , find_ir = find_ir , find_end_voltage = find_end_voltage , use_cellpy_stat_file = use_cellpy_stat_file , ensure_step_table = ensure_step_table , convert_date = convert_date , ) return self | Convenience function that makes a summary of the cycling data . |
41,179 | def single_html ( epub_file_path , html_out = sys . stdout , mathjax_version = None , numchapters = None , includes = None ) : epub = cnxepub . EPUB . from_file ( epub_file_path ) if len ( epub ) != 1 : raise Exception ( 'Expecting an epub with one book' ) package = epub [ 0 ] binder = cnxepub . adapt_package ( package ) partcount . update ( { } . fromkeys ( parts , 0 ) ) partcount [ 'book' ] += 1 html = cnxepub . SingleHTMLFormatter ( binder , includes = includes ) logger . debug ( 'Full binder: {}' . format ( cnxepub . model_to_tree ( binder ) ) ) if numchapters is not None : apply_numchapters ( html . get_node_type , binder , numchapters ) logger . debug ( 'Truncated Binder: {}' . format ( cnxepub . model_to_tree ( binder ) ) ) if mathjax_version : etree . SubElement ( html . head , 'script' , src = MATHJAX_URL . format ( mathjax_version = mathjax_version ) ) print ( str ( html ) , file = html_out ) if hasattr ( html_out , 'name' ) : html_out . close ( ) | Generate complete book HTML . |
41,180 | def _pack_prms ( ) : config_dict = { "Paths" : prms . Paths . to_dict ( ) , "FileNames" : prms . FileNames . to_dict ( ) , "Db" : prms . Db . to_dict ( ) , "DbCols" : prms . DbCols . to_dict ( ) , "DataSet" : prms . DataSet . to_dict ( ) , "Reader" : prms . Reader . to_dict ( ) , "Instruments" : prms . Instruments . to_dict ( ) , "Batch" : prms . Batch . to_dict ( ) , } return config_dict | if you introduce new save - able parameter dictionaries then you have to include them here |
41,181 | def _read_prm_file ( prm_filename ) : logger . debug ( "Reading config-file: %s" % prm_filename ) try : with open ( prm_filename , "r" ) as config_file : prm_dict = yaml . load ( config_file ) except yaml . YAMLError : raise ConfigFileNotRead else : _update_prms ( prm_dict ) | read the prm file |
41,182 | def _get_prm_file ( file_name = None , search_order = None ) : if file_name is not None : if os . path . isfile ( file_name ) : return file_name else : logger . info ( "Could not find the prm-file" ) default_name = prms . _prm_default_name prm_globtxt = prms . _prm_globtxt script_dir = os . path . abspath ( os . path . dirname ( __file__ ) ) search_path = dict ( ) search_path [ "curdir" ] = os . path . abspath ( os . path . dirname ( sys . argv [ 0 ] ) ) search_path [ "filedir" ] = script_dir search_path [ "userdir" ] = os . path . expanduser ( "~" ) if search_order is None : search_order = [ "userdir" , ] else : search_order = search_order prm_default = os . path . join ( script_dir , default_name ) search_dict = OrderedDict ( ) for key in search_order : search_dict [ key ] = [ None , None ] prm_directory = search_path [ key ] default_file = os . path . join ( prm_directory , default_name ) if os . path . isfile ( default_file ) : search_dict [ key ] [ 0 ] = default_file prm_globtxt_full = os . path . join ( prm_directory , prm_globtxt ) user_files = glob . glob ( prm_globtxt_full ) for f in user_files : if os . path . basename ( f ) != os . path . basename ( default_file ) : search_dict [ key ] [ 1 ] = f break prm_file = None for key , file_list in search_dict . items ( ) : if file_list [ - 1 ] : prm_file = file_list [ - 1 ] break else : if not prm_file : prm_file = file_list [ 0 ] if prm_file : prm_filename = prm_file else : prm_filename = prm_default return prm_filename | returns name of the prm file |
41,183 | def info ( ) : print ( "convenience function for listing prms" ) print ( type ( prms ) ) print ( prms . __name__ ) print ( f"prm file: {_get_prm_file()}" ) for key in prms . __dict__ : if isinstance ( prms . __dict__ [ key ] , box . Box ) : print ( ) print ( 80 * "=" ) print ( f"prms.{key}:" ) print ( 80 * "-" ) for subkey in prms . __dict__ [ key ] : print ( f"prms.{key}.{subkey} = " , f"{prms.__dict__[key][subkey]}" ) print ( 80 * "=" ) | this function will show only the box - type attributes and their content in the cellpy . prms module |
41,184 | def _replace_tex_math ( node , mml_url , mc_client = None , retry = 0 ) : math = node . attrib [ 'data-math' ] or node . text if math is None : return None eq = { } if mc_client : math_key = hashlib . md5 ( math . encode ( 'utf-8' ) ) . hexdigest ( ) eq = json . loads ( mc_client . get ( math_key ) or '{}' ) if not eq : res = requests . post ( mml_url , { 'math' : math . encode ( 'utf-8' ) , 'mathType' : 'TeX' , 'mml' : 'true' } ) if res : eq = res . json ( ) if mc_client : mc_client . set ( math_key , res . text ) if 'components' in eq and len ( eq [ 'components' ] ) > 0 : for component in eq [ 'components' ] : if component [ 'format' ] == 'mml' : mml = etree . fromstring ( component [ 'source' ] ) if node . tag . endswith ( 'span' ) : mml . set ( 'display' , 'inline' ) elif node . tag . endswith ( 'div' ) : mml . set ( 'display' , 'block' ) mml . tail = node . tail return mml else : logger . warning ( 'Retrying math TeX conversion: ' '{}' . format ( json . dumps ( eq , indent = 4 ) ) ) retry += 1 if retry < 2 : return _replace_tex_math ( node , mml_url , mc_client , retry ) return None | call mml - api service to replace TeX math in body of node with mathml |
41,185 | def exercise_callback_factory ( match , url_template , mc_client = None , token = None , mml_url = None ) : def _replace_exercises ( elem ) : item_code = elem . get ( 'href' ) [ len ( match ) : ] url = url_template . format ( itemCode = item_code ) exercise = { } if mc_client : mc_key = item_code + ( token or '' ) exercise = json . loads ( mc_client . get ( mc_key ) or '{}' ) if not exercise : if token : headers = { 'Authorization' : 'Bearer {}' . format ( token ) } res = requests . get ( url , headers = headers ) else : res = requests . get ( url ) if res : exercise = res . json ( ) if mc_client : mc_client . set ( mc_key , res . text ) if exercise [ 'total_count' ] == 0 : logger . warning ( 'MISSING EXERCISE: {}' . format ( url ) ) XHTML = '{{{}}}' . format ( HTML_DOCUMENT_NAMESPACES [ 'xhtml' ] ) missing = etree . Element ( XHTML + 'div' , { 'class' : 'missing-exercise' } , nsmap = HTML_DOCUMENT_NAMESPACES ) missing . text = 'MISSING EXERCISE: tag:{}' . format ( item_code ) nodes = [ missing ] else : html = EXERCISE_TEMPLATE . render ( data = exercise ) try : nodes = etree . fromstring ( '<div>{}</div>' . format ( html ) ) except etree . XMLSyntaxError : nodes = etree . HTML ( html ) [ 0 ] if mml_url : for node in nodes . xpath ( '//*[@data-math]' ) : mathml = _replace_tex_math ( node , mml_url , mc_client ) if mathml is not None : mparent = node . getparent ( ) mparent . replace ( node , mathml ) else : mathtext = node . get ( 'data-math' ) or node . text or '' logger . warning ( 'BAD TEX CONVERSION: "%s" URL: %s' % ( mathtext . encode ( 'utf-8' ) , url ) ) parent = elem . getparent ( ) if etree . QName ( parent . tag ) . localname == 'p' : elem = parent parent = elem . getparent ( ) parent . remove ( elem ) for child in nodes : parent . append ( child ) xpath = '//xhtml:a[contains(@href, "{}")]' . format ( match ) return ( xpath , _replace_exercises ) | Create a callback function to replace an exercise by fetching from a server . |
41,186 | def html_listify ( tree , root_xl_element , extensions , list_type = 'ol' ) : for node in tree : li_elm = etree . SubElement ( root_xl_element , 'li' ) if node [ 'id' ] not in extensions : span_elm = lxml . html . fragment_fromstring ( node [ 'title' ] , create_parent = 'span' ) li_elm . append ( span_elm ) else : a_elm = lxml . html . fragment_fromstring ( node [ 'title' ] , create_parent = 'a' ) a_elm . set ( 'href' , '' . join ( [ node [ 'id' ] , extensions [ node [ 'id' ] ] ] ) ) li_elm . append ( a_elm ) if node [ 'id' ] is not None and node [ 'id' ] != 'subcol' : li_elm . set ( 'cnx-archive-uri' , node [ 'id' ] ) if node [ 'shortId' ] is not None : li_elm . set ( 'cnx-archive-shortid' , node [ 'shortId' ] ) if 'contents' in node : elm = etree . SubElement ( li_elm , list_type ) html_listify ( node [ 'contents' ] , elm , extensions ) | Convert a node tree into an xhtml nested list - of - lists . |
41,187 | def _generate_ids ( self , document , content ) : existing_ids = content . xpath ( '//*/@id' ) elements = [ 'p' , 'dl' , 'dt' , 'dd' , 'table' , 'div' , 'section' , 'figure' , 'blockquote' , 'q' , 'code' , 'pre' , 'object' , 'img' , 'audio' , 'video' , ] elements_xpath = '|' . join ( [ './/{}|.//xhtml:{}' . format ( elem , elem ) for elem in elements ] ) data_types = [ 'equation' , 'list' , 'exercise' , 'rule' , 'example' , 'note' , 'footnote-number' , 'footnote-ref' , 'problem' , 'solution' , 'media' , 'proof' , 'statement' , 'commentary' ] data_types_xpath = '|' . join ( [ './/*[@data-type="{}"]' . format ( data_type ) for data_type in data_types ] ) xpath = '|' . join ( [ elements_xpath , data_types_xpath ] ) mapping = { } for node in content . xpath ( xpath , namespaces = HTML_DOCUMENT_NAMESPACES ) : old_id = node . attrib . get ( 'id' ) document_id = document . id . replace ( '_' , '' ) if old_id : new_id = 'auto_{}_{}' . format ( document_id , old_id ) else : random_number = random . randint ( 0 , 100000 ) new_id = 'auto_{}_{}' . format ( document_id , random_number ) while new_id in existing_ids : random_number = random . randint ( 0 , 100000 ) new_id = 'auto_{}_{}' . format ( document_id , random_number ) node . attrib [ 'id' ] = new_id if old_id : mapping [ old_id ] = new_id existing_ids . append ( new_id ) for a in content . xpath ( '//a[@href]|//xhtml:a[@href]' , namespaces = HTML_DOCUMENT_NAMESPACES ) : href = a . attrib [ 'href' ] if href . startswith ( '#' ) and href [ 1 : ] in mapping : a . attrib [ 'href' ] = '#{}' . format ( mapping [ href [ 1 : ] ] ) | Generate unique ids for html elements in page content so that it s possible to link to them . |
41,188 | def to_file ( package , directory ) : opf_filepath = os . path . join ( directory , package . name ) for name in ( 'contents' , 'resources' , ) : path = os . path . join ( directory , name ) if not os . path . exists ( path ) : os . mkdir ( path ) locations = { } for item in package : if item . media_type == 'application/xhtml+xml' : base = os . path . join ( directory , 'contents' ) else : base = os . path . join ( directory , 'resources' ) filename = item . name filepath = os . path . join ( base , filename ) locations [ item ] = os . path . relpath ( filepath , directory ) with open ( filepath , 'wb' ) as item_file : item_file . write ( item . data . read ( ) ) template = jinja2 . Template ( OPF_TEMPLATE , trim_blocks = True , lstrip_blocks = True ) with open ( opf_filepath , 'wb' ) as opf_file : opf = template . render ( package = package , locations = locations ) if not isinstance ( opf , bytes ) : opf = opf . encode ( 'utf-8' ) opf_file . write ( opf ) return opf_filepath | Write the package to the given directory . Returns the OPF filename . |
41,189 | def from_file ( self , file_name = None ) : file_name = self . _check_file_name ( file_name ) with open ( file_name , 'r' ) as infile : top_level_dict = json . load ( infile ) pages_dict = top_level_dict [ 'info_df' ] pages = pd . DataFrame ( pages_dict ) self . pages = pages self . file_name = file_name self . _prm_packer ( top_level_dict [ 'metadata' ] ) self . generate_folder_names ( ) self . paginate ( ) | Loads a DataFrame with all the needed info about the experiment |
41,190 | def to_file ( self , file_name = None ) : file_name = self . _check_file_name ( file_name ) pages = self . pages top_level_dict = { 'info_df' : pages , 'metadata' : self . _prm_packer ( ) } jason_string = json . dumps ( top_level_dict , default = lambda info_df : json . loads ( info_df . to_json ( ) ) ) self . paginate ( ) with open ( file_name , 'w' ) as outfile : outfile . write ( jason_string ) self . file_name = file_name logging . info ( "Saved file to {}" . format ( file_name ) ) | Saves a DataFrame with all the needed info about the experiment |
41,191 | def generate_folder_names ( self ) : self . project_dir = os . path . join ( prms . Paths . outdatadir , self . project ) self . batch_dir = os . path . join ( self . project_dir , self . name ) self . raw_dir = os . path . join ( self . batch_dir , "raw_data" ) | Set appropriate folder names . |
41,192 | def paginate ( self ) : project_dir = self . project_dir raw_dir = self . raw_dir batch_dir = self . batch_dir if project_dir is None : raise UnderDefined ( "no project directory defined" ) if raw_dir is None : raise UnderDefined ( "no raw directory defined" ) if batch_dir is None : raise UnderDefined ( "no batcb directory defined" ) if not os . path . isdir ( project_dir ) : os . mkdir ( project_dir ) logging . info ( f"created folder {project_dir}" ) if not os . path . isdir ( batch_dir ) : os . mkdir ( batch_dir ) logging . info ( f"created folder {batch_dir}" ) if not os . path . isdir ( raw_dir ) : os . mkdir ( raw_dir ) logging . info ( f"created folder {raw_dir}" ) return project_dir , batch_dir , raw_dir | Make folders where we would like to put results etc . |
41,193 | def generate_file_name ( self ) : if not self . project : raise UnderDefined ( "project name not given" ) out_data_dir = prms . Paths . outdatadir project_dir = os . path . join ( out_data_dir , self . project ) file_name = "cellpy_batch_%s.json" % self . name self . file_name = os . path . join ( project_dir , file_name ) | generate a suitable file name for the experiment |
41,194 | def info ( self ) : print ( "Sorry, but I don't have much to share." ) print ( "This is me:" ) print ( self ) print ( "And these are the experiments assigned to me:" ) print ( self . experiments ) | Delivers some info to you about the class . |
41,195 | def assign ( self , experiment ) : self . experiments . append ( experiment ) self . farms . append ( empty_farm ) | Assign an experiment . |
41,196 | def flatten_model ( model ) : yield model if isinstance ( model , ( TranslucentBinder , Binder , ) ) : for m in model : for x in flatten_model ( m ) : yield x | Flatten a model to a list of models . This is used to flatten a Binder ish model down to a list of contained models . |
41,197 | def _discover_uri_type ( uri ) : parsed_uri = urlparse ( uri ) if not parsed_uri . netloc : if parsed_uri . scheme == 'data' : type_ = INLINE_REFERENCE_TYPE else : type_ = INTERNAL_REFERENCE_TYPE else : type_ = EXTERNAL_REFERENCE_TYPE return type_ | Given a uri determine if it is internal or external . |
41,198 | def _parse_references ( xml ) : references = [ ] ref_finder = HTMLReferenceFinder ( xml ) for elm , uri_attr in ref_finder : type_ = _discover_uri_type ( elm . get ( uri_attr ) ) references . append ( Reference ( elm , type_ , uri_attr ) ) return references | Parse the references to Reference instances . |
41,199 | def _set_uri_from_bound_model ( self ) : value = self . _uri_template . format ( self . _bound_model . id ) self . elm . set ( self . _uri_attr , value ) | Using the bound model set the uri . |