Dataset columns: docstring (string, lengths 52 to 499 chars), function (string, lengths 67 to 35.2k chars), __index_level_0__ (int64, values 52.6k to 1.16M).
Executes a prepared CQL (Cassandra Query Language) statement, identified by its id token, binding the given list of values, and returns a CqlResult containing the results. Parameters: - itemId - values
def execute_prepared_cql_query(self, itemId, values):
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_execute_prepared_cql_query(itemId, values)
    return d
673,020
@deprecated This is now a no-op. Please use the CQL3 specific methods instead. Parameters: - version
def set_cql_version(self, version):
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_set_cql_version(version)
    return d
673,024
Return the topmost item located at ``pos`` (x, y). Parameters: - selected: if False returns first non-selected item - exclude: if specified don't check for these items
def get_item_at_point_exclude(self, pos, selected=True, exclude=None):
    items = self._qtree.find_intersect((pos[0], pos[1], 1, 1))
    for item in self._canvas.sort(items, reverse=True):
        if not selected and item in self.selected_items:
            continue  # skip selected items
        if exclude and item in exclude:
            # guard against exclude=None (the original `item in exclude`
            # would raise TypeError with the default argument)
            continue
        v2i = self.get_matrix_v2i(item)
        ix, iy = v2i.transform_point(*pos)
        if item.point((ix, iy)) < 0.5:
            return item
    return None
675,178
Initialize a wrapper for the array Args: ary: (list-like, or ArrayWrapper)
def __init__(self, ary):
    self._dirty = True
    self._typed = None
    # collections.abc.Sequence (collections.Sequence was removed in Python 3.10)
    if isinstance(ary, (list, tuple, collections.abc.Sequence)):
        self.data = ary
    elif isinstance(ary, ArrayWrapper):
        self.data = ary.data
    else:
        raise TypeError("Invalid value given to array validator: {0}".format(ary))
    logger.debug(fmt("Initializing ArrayWrapper {} with {}", self, ary))
676,192
Create an object directly from a JSON string. Applies general validation after creating the object to check whether all required fields are present. Args: jsonmsg (str): An object encoded as a JSON string Returns: An object of the generated type Raises: ValidationError: if `jsonmsg` does not match the schema `cls` was generated from
def from_json(cls, jsonmsg):
    import json
    msg = json.loads(jsonmsg)
    obj = cls(**msg)
    obj.validate()
    return obj
676,241
Gets technical analysis features from market data JSONs Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. Returns: Dict of market features and their values
def eval_features(json):
    return {'close': json[-1]['close'],
            'sma': SMA.eval_from_json(json),
            'rsi': RSI.eval_from_json(json),
            'so': SO.eval_from_json(json),
            'obv': OBV.eval_from_json(json)}
676,486
Converts an int target code to a target name Since self.TARGET_CODES is a 1:1 mapping, perform a reverse lookup to get the more readable name. Args: code: Value from self.TARGET_CODES Returns: String target name corresponding to the given code.
def target_code_to_name(code):
    TARGET_NAMES = {v: k for k, v in TARGET_CODES.items()}
    return TARGET_NAMES[code]
676,487
Initializes a machine learning model Args: x: Pandas DataFrame, X axis of features y: Pandas Series, Y axis of targets model_type: Machine Learning model to use Valid values: 'random_forest' ('rf'), 'deep_neural_network' ('dnn') seed: Random state to use when splitting sets and creating the model **kwargs: Scikit Learn's RandomForestClassifier kwargs Returns: Trained model instance of model_type
def setup_model(x, y, model_type='random_forest', seed=None, **kwargs):
    assert len(x) > 1 and len(y) > 1, \
        'Not enough data objects to train on (minimum is at least two, ' \
        'you have (x: {0}) and (y: {1}))'.format(len(x), len(y))
    sets = namedtuple('Datasets', ['train', 'test'])
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=seed, shuffle=False)
    x = sets(x_train, x_test)
    y = sets(y_train, y_test)
    if model_type == 'random_forest' or model_type == 'rf':
        model = rf.RandomForest(x, y, random_state=seed, **kwargs)
    elif model_type == 'deep_neural_network' or model_type == 'dnn':
        model = dnn.DeepNeuralNetwork(x, y, **kwargs)
    else:
        raise ValueError('Invalid model type kwarg')
    return model
676,488
Parses market data JSON for technical analysis indicators Args: partition: Int of how many dates to take into consideration when evaluating technical analysis indicators. Returns: Pandas DataFrame instance with columns as numpy.float32 features.
def set_features(self, partition=1):
    if len(self.json) < partition + 1:
        raise ValueError('Not enough dates for the specified partition size: '
                         '{0}. Try a smaller partition.'.format(partition))
    data = []
    for offset in range(len(self.json) - partition):
        json = self.json[offset : offset + partition]
        data.append(eval_features(json))
    return pd.DataFrame(data=data, dtype=np.float32)
676,491
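For intuition, the partitioning above is a plain sliding window; a minimal sketch with hypothetical data (note that range(len(json) - partition) means the window ending at the very last entry is not produced):

    json = [{'close': 1.0}, {'close': 1.1}, {'close': 1.2}, {'close': 1.3}]
    partition = 2
    windows = [json[offset:offset + partition] for offset in range(len(json) - partition)]
    # windows[0] == [{'close': 1.0}, {'close': 1.1}]
    # windows[1] == [{'close': 1.1}, {'close': 1.2}]
    # each window would be passed to eval_features()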
Inits a Random Forest Classifier with a market attribute Args: features: namedtuple of train and test feature sets targets: namedtuple of train and test target sets **kwargs: Scikit Learn's RandomForestClassifier kwargs
def __init__(self, features, targets, **kwargs):
    # Init model
    super().__init__(**kwargs)
    # Set axes
    self.features = features
    self.targets = targets
    # Train model
    self.fit(features.train, targets.train)
676,495
Evaluates OBV Args: curr: Dict of current volume and close prev: Dict of previous OBV and close Returns: Float of OBV
def eval_algorithm(curr, prev):
    if curr['close'] > prev['close']:
        v = curr['volume']
    elif curr['close'] < prev['close']:
        v = curr['volume'] * -1
    else:
        v = 0
    return prev['obv'] + v
676,522
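A worked example of the rule above, with hypothetical candles: OBV adds the volume on an up-close, subtracts it on a down-close, and carries the previous value on a flat close.

    prev = {'close': 10.0, 'obv': 500.0}
    up = {'close': 10.5, 'volume': 120.0}
    down = {'close': 9.5, 'volume': 120.0}
    print(OBV.eval_algorithm(up, prev))    # 620.0 (500 + 120)
    print(OBV.eval_algorithm(down, prev))  # 380.0 (500 - 120)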
Evaluates OBV from JSON (typically Poloniex API response) Args: json: List of dates where each entry is a dict of raw market data. Returns: Float of OBV
def eval_from_json(json):
    closes = poloniex.get_attribute(json, 'close')
    volumes = poloniex.get_attribute(json, 'volume')
    obv = 0
    for date in range(1, len(json)):
        curr = {'close': closes[date], 'volume': volumes[date]}
        prev = {'close': closes[date - 1], 'obv': obv}
        obv = OBV.eval_algorithm(curr, prev)
    return obv
676,523
Evaluates the RS variable in the RSI algorithm Args: gains: List of price gains. losses: List of price losses. Returns: Float of average gains over average losses.
def eval_rs(gains, losses):
    # Number of days that the data was collected through
    count = len(gains) + len(losses)
    avg_gains = stats.avg(gains, count=count) if gains else 1
    avg_losses = stats.avg(losses, count=count) if losses else 1
    if avg_losses == 0:
        return avg_gains
    else:
        return avg_gains / avg_losses
676,596
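For context, RS feeds the standard RSI formula RSI = 100 - 100 / (1 + RS). A worked example with hypothetical gains and losses (the RSI.eval_algorithm wrapper itself is not shown in this section):

    gains = [1.0, 2.0]   # hypothetical daily gains
    losses = [1.5]       # hypothetical daily losses (stored as positive values)
    count = len(gains) + len(losses)                   # 3 days of data
    rs = (sum(gains) / count) / (sum(losses) / count)  # 1.0 / 0.5 = 2.0
    rsi = 100 - 100 / (1 + rs)                         # ~66.67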
Evaluates RSI from JSON (typically Poloniex API response) Args: json: List of dates where each entry is a dict of raw market data. Returns: Float between 0 and 100, momentum indicator of a market measuring the speed and change of price movements.
def eval_from_json(json):
    changes = poloniex.get_gains_losses(poloniex.parse_changes(json))
    return RSI.eval_algorithm(changes['gains'], changes['losses'])
676,597
Converts a JSON to the Poloniex API URL that would return it. Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. symbol: String of currency pair, like a ticker symbol. Returns: String URL to the Poloniex API representing the given JSON.
def json_to_url(json, symbol):
    start = json[0]['date']
    end = json[-1]['date']
    diff = end - start
    # Get period by a ratio from calculated period to valid periods
    # Ratio closest to 1 is the period
    # Valid values: 300, 900, 1800, 7200, 14400, 86400
    periods = [300, 900, 1800, 7200, 14400, 86400]
    diffs = {}
    for p in periods:
        diffs[p] = abs(1 - (p / (diff / len(json))))  # Get ratio
    period = min(diffs, key=diffs.get)  # Find closest period
    url = ('https://poloniex.com/public?command'
           '=returnChartData&currencyPair={0}&start={1}'
           '&end={2}&period={3}').format(symbol, start, end, period)
    return url
676,606
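A quick worked example of the period selection above, with hypothetical timestamps: 10 entries spanning 3000 seconds give an observed spacing of 300 s, so the 300 s candle period wins.

    start, end, n = 1500000000, 1500003000, 10
    spacing = (end - start) / n                 # 300.0 seconds per entry
    periods = [300, 900, 1800, 7200, 14400, 86400]
    ratios = {p: abs(1 - p / spacing) for p in periods}
    print(min(ratios, key=ratios.get))          # 300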
Gets price changes from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. Returns: List of floats of price changes between entries in JSON.
def parse_changes(json):
    changes = []
    dates = len(json)
    for date in range(1, dates):
        last_close = json[date - 1]['close']
        now_close = json[date]['close']
        changes.append(now_close - last_close)
    logger.debug('Market Changes (from JSON):\n{0}'.format(changes))
    return changes
676,608
Categorizes changes into gains and losses Args: changes: List of floats of price changes between entries in JSON. Returns: Dict of changes with keys 'gains' and 'losses'. All values are positive.
def get_gains_losses(changes):
    res = {'gains': [], 'losses': []}
    for change in changes:
        if change > 0:
            res['gains'].append(change)
        else:
            res['losses'].append(change * -1)
    logger.debug('Gains: {0}'.format(res['gains']))
    logger.debug('Losses: {0}'.format(res['losses']))
    return res
676,609
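A minimal usage sketch with hypothetical price changes; note that a change of exactly zero would be routed to 'losses' by the else-branch:

    changes = [0.5, -0.2, 1.1]   # hypothetical output of parse_changes
    print(get_gains_losses(changes))
    # {'gains': [0.5, 1.1], 'losses': [0.2]}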
Gets the values of an attribute from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. attr: String of attribute in JSON file to collect. Returns: List of values of specified attribute from JSON
def get_attribute(json, attr):
    res = [json[entry][attr] for entry, _ in enumerate(json)]
    logger.debug('{0}s (from JSON):\n{1}'.format(attr, res))
    return res
676,610
Evaluates the SO algorithm Args: closing: Float of current closing price. low: Float of lowest low closing price throughout some duration. high: Float of highest high closing price throughout some duration. Returns: Float SO between 0 and 100.
def eval_algorithm(closing, low, high):
    if high - low == 0:
        # High and low are the same; avoid a zero division error
        return 100 * (closing - low)
    else:
        return 100 * (closing - low) / (high - low)
676,629
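A worked example of the %K formula above, SO = 100 * (closing - low) / (high - low), with hypothetical prices:

    print(SO.eval_algorithm(closing=105.0, low=100.0, high=110.0))  # 50.0
    print(SO.eval_algorithm(closing=100.0, low=100.0, high=100.0))  # 0.0 (degenerate range)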
Evaluates SO from JSON (typically Poloniex API response) Args: json: List of dates where each entry is a dict of raw market data. Returns: Float SO between 0 and 100.
def eval_from_json(json):
    close = json[-1]['close']  # Latest closing price
    low = min(poloniex.get_attribute(json, 'low'))  # Lowest low
    high = max(poloniex.get_attribute(json, 'high'))  # Highest high
    return SO.eval_algorithm(close, low, high)
676,630
Returns the average value Args: vals: List of numbers to calculate average from. count: Int of total count that vals was part of. Returns: Float average value throughout a count.
def avg(vals, count=None):
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for v in vals:
        total += v
    if count is None:
        count = len(vals)
    return float(total) / count
676,633
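Note the count override: when count is given, the divisor is count rather than len(vals), which is how eval_rs averages the gains over the combined number of days. A quick sketch (assuming avg is imported directly from the stats module above):

    print(avg([2.0, 4.0]))           # 3.0 (divides by len(vals))
    print(avg([2.0, 4.0], count=4))  # 1.5 (divides by the full day count)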
Converts date arguments to a Delorean instance in UTC Args: year: int between 1 and 9999. month: int between 1 and 12. day: int between 1 and 31. Returns: Delorean instance in UTC of date.
def date_to_delorean(year, month, day):
    return Delorean(datetime=dt(year, month, day), timezone='UTC')
676,648
Converts a date to epoch in UTC Args: year: int between 1 and 9999. month: int between 1 and 12. day: int between 1 and 31. Returns: Int epoch in UTC from date.
def date_to_epoch(year, month, day):
    return int(date_to_delorean(year, month, day).epoch)
676,649
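A usage sketch (assumes the delorean package is installed and dt is datetime.datetime, as the functions above imply):

    print(date_to_epoch(2017, 1, 1))  # 1483228800 (midnight UTC, 2017-01-01)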
Load a raw data-file Args: file_name (path) Returns: loaded test
def load(self, file_name):
    new_rundata = self.loader(file_name)
    new_rundata = self.inspect(new_rundata)
    return new_rundata
676,817
Loads data from biologics .mpr files. Args: file_name (str): path to .mpr file. bad_steps (list of tuples): (c, s) tuples of steps s (in cycle c) to skip loading. Returns: new_tests (list of data objects)
def loader(self, file_name, bad_steps=None, **kwargs):
    new_tests = []
    if not os.path.isfile(file_name):
        self.logger.info("Missing file:\n %s" % file_name)
        return None
    filesize = os.path.getsize(file_name)
    hfilesize = humanize_bytes(filesize)
    txt = "Filesize: %i (%s)" % (filesize, hfilesize)
    self.logger.debug(txt)

    # creating temporary file and connection
    temp_dir = tempfile.gettempdir()
    temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
    shutil.copy2(file_name, temp_dir)
    self.logger.debug("tmp file: %s" % temp_filename)
    self.logger.debug("HERE WE LOAD THE DATA")

    data = DataSet()
    fid = FileID(file_name)

    # div parameters and information (probably load this last)
    test_no = 1
    data.test_no = test_no
    data.loaded_from = file_name

    # some overall prms
    data.channel_index = None
    data.channel_number = None
    data.creator = None
    data.item_ID = None
    data.schedule_file_name = None
    data.start_datetime = None
    data.test_ID = None
    data.test_name = None
    data.raw_data_files.append(fid)

    # --------- read raw-data (normal-data) -------------------------
    self.logger.debug("reading raw-data")
    self.mpr_data = None
    self.mpr_log = None
    self.mpr_settings = None

    self._load_mpr_data(temp_filename, bad_steps)
    length_of_test = self.mpr_data.shape[0]
    self.logger.debug(f"length of test: {length_of_test}")

    self.logger.debug("renaming columns")
    self._rename_headers()

    # --------- stats-data (summary-data) ---------------------------
    summary_df = self._create_summary_data()
    if summary_df.empty:
        txt = "\nCould not find any summary (stats-file)!"
        txt += " (summary_df.empty = True)"
        txt += "\n -> issue make_summary(use_cellpy_stat_file=False)"
        warnings.warn(txt)

    data.dfsummary = summary_df
    data.dfdata = self.mpr_data
    data.raw_data_files_length.append(length_of_test)
    new_tests.append(data)
    self._clean_up(temp_filename)
    return new_tests
676,829
Run dumper (once per engine). Args: dumper: dumper to run (function or method). The dumper takes the attributes experiments, farms, and barn as input. It does not return anything, but it may modify the input objects (for example experiments).
def run_dumper(self, dumper):
    logging.debug("start dumper::")
    dumper(
        experiments=self.experiments,
        farms=self.farms,
        barn=self.barn,
        engine=self.current_engine,
    )
    logging.debug("::dumper ended")
676,865
Function for dumping values from a file. Should only be used by developers. Args: file_name: name of the file headers: list of headers to pick default: ["Discharge_Capacity", "Charge_Capacity"] Returns: pandas.DataFrame
def _iterdump(self, file_name, headers=None):
    if headers is None:
        headers = ["Discharge_Capacity", "Charge_Capacity"]

    step_txt = self.headers_normal['step_index_txt']
    point_txt = self.headers_normal['data_point_txt']
    cycle_txt = self.headers_normal['cycle_index_txt']

    self.logger.debug("iterating through file: %s" % file_name)
    if not os.path.isfile(file_name):
        print("Missing file:\n %s" % file_name)

    filesize = os.path.getsize(file_name)
    hfilesize = humanize_bytes(filesize)
    txt = "Filesize: %i (%s)" % (filesize, hfilesize)
    self.logger.info(txt)

    table_name_global = TABLE_NAMES["global"]
    table_name_stats = TABLE_NAMES["statistic"]
    table_name_normal = TABLE_NAMES["normal"]

    # creating temporary file and connection
    temp_dir = tempfile.gettempdir()
    temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
    shutil.copy2(file_name, temp_dir)

    constr = self.__get_res_connector(temp_filename)
    if use_ado:
        conn = dbloader.connect(constr)
    else:
        conn = dbloader.connect(constr, autocommit=True)

    self.logger.debug("tmp file: %s" % temp_filename)
    self.logger.debug("constr str: %s" % constr)

    # --------- read global-data ------------------------------------
    self.logger.debug("reading global data table")
    sql = "select * from %s" % table_name_global
    global_data_df = pd.read_sql_query(sql, conn)
    # col_names = list(global_data_df.columns.values)
    self.logger.debug("sql statement: %s" % sql)

    tests = global_data_df[self.headers_normal['test_id_txt']]
    number_of_sets = len(tests)
    self.logger.debug("number of datasets: %i" % number_of_sets)
    self.logger.debug("only selecting first test")
    test_no = 0
    self.logger.debug("setting data for test number %i" % test_no)
    loaded_from = file_name
    # fid = FileID(file_name)
    start_datetime = global_data_df[self.headers_global['start_datetime_txt']][test_no]
    test_ID = int(global_data_df[self.headers_normal['test_id_txt']][test_no])  # OBS
    test_name = global_data_df[self.headers_global['test_name_txt']][test_no]

    # --------- read raw-data (normal-data) -------------------------
    self.logger.debug("reading raw-data")
    columns = ["Data_Point", "Step_Index", "Cycle_Index"]
    columns.extend(headers)
    columns_txt = ", ".join(["%s"] * len(columns)) % tuple(columns)
    sql_1 = "select %s " % columns_txt
    sql_2 = "from %s " % table_name_normal
    sql_3 = "where %s=%s " % (self.headers_normal['test_id_txt'], test_ID)
    sql_5 = "order by %s" % self.headers_normal['data_point_txt']
    import time

    info_list = []
    info_header = ["cycle", "row_count", "start_point", "end_point"]
    info_header.extend(headers)
    self.logger.info(" ".join(info_header))
    self.logger.info("-------------------------------------------------")

    for cycle_number in range(1, 2000):
        t1 = time.time()
        self.logger.debug("picking cycle %i" % cycle_number)
        sql_4 = "AND %s=%i " % (cycle_txt, cycle_number)
        sql = sql_1 + sql_2 + sql_3 + sql_4 + sql_5
        self.logger.debug("sql statement: %s" % sql)
        normal_df = pd.read_sql_query(sql, conn)
        t2 = time.time()
        dt = t2 - t1
        self.logger.debug("time: %f" % dt)
        if normal_df.empty:
            self.logger.debug("reached the end")
            break
        row_count, _ = normal_df.shape
        start_point = normal_df[point_txt].min()
        end_point = normal_df[point_txt].max()
        last = normal_df.iloc[-1, :]
        step_list = [cycle_number, row_count, start_point, end_point]
        step_list.extend([last[x] for x in headers])
        info_list.append(step_list)

    self._clean_up_loadres(None, conn, temp_filename)
    info_dict = pd.DataFrame(info_list, columns=info_header)
    return info_dict
676,908
Loads data from arbin .res files. Args: file_name (str): path to .res file. bad_steps (list of tuples): (c, s) tuples of steps s (in cycle c) to skip loading. Returns: new_tests (list of data objects)
def loader(self, file_name, bad_steps=None, **kwargs):
    # TODO: @jepe - insert kwargs - current chunk, only normal data, etc
    new_tests = []
    if not os.path.isfile(file_name):
        self.logger.info("Missing file:\n %s" % file_name)
        return None

    self.logger.debug("in loader")
    self.logger.debug("filename: %s" % file_name)
    filesize = os.path.getsize(file_name)
    hfilesize = humanize_bytes(filesize)
    txt = "Filesize: %i (%s)" % (filesize, hfilesize)
    self.logger.debug(txt)
    if filesize > prms.Instruments["max_res_filesize"] and not prms.Reader["load_only_summary"]:
        error_message = "\nERROR (loader):\n"
        error_message += "%s > %s - File is too big!\n" % (
            hfilesize, humanize_bytes(prms.Instruments["max_res_filesize"]))
        error_message += "(edit prms.Instruments['max_res_filesize'])\n"
        print(error_message)
        return None

    table_name_global = TABLE_NAMES["global"]
    table_name_stats = TABLE_NAMES["statistic"]
    table_name_normal = TABLE_NAMES["normal"]

    # creating temporary file and connection
    temp_dir = tempfile.gettempdir()
    temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
    shutil.copy2(file_name, temp_dir)
    self.logger.debug("tmp file: %s" % temp_filename)

    use_mdbtools = False
    if use_subprocess:
        use_mdbtools = True
    if is_posix:
        use_mdbtools = True

    # windows with same python bit as windows bit (the ideal case)
    if not use_mdbtools:
        constr = self.__get_res_connector(temp_filename)
        if use_ado:
            conn = dbloader.connect(constr)
        else:
            conn = dbloader.connect(constr, autocommit=True)
        self.logger.debug("constr str: %s" % constr)
        self.logger.debug("reading global data table")
        sql = "select * from %s" % table_name_global
        self.logger.debug("sql statement: %s" % sql)
        global_data_df = pd.read_sql_query(sql, conn)
        # col_names = list(global_data_df.columns.values)
    else:
        import subprocess
        if is_posix:
            if is_macos:
                self.logger.debug("\nMAC OSX USING MDBTOOLS")
            else:
                self.logger.debug("\nPOSIX USING MDBTOOLS")
        else:
            self.logger.debug("\nWINDOWS USING MDBTOOLS-WIN")

        # creating tmp-filenames
        temp_csv_filename_global = os.path.join(temp_dir, "global_tmp.csv")
        temp_csv_filename_normal = os.path.join(temp_dir, "normal_tmp.csv")
        temp_csv_filename_stats = os.path.join(temp_dir, "stats_tmp.csv")

        # making the cmds
        mdb_prms = [(table_name_global, temp_csv_filename_global),
                    (table_name_normal, temp_csv_filename_normal),
                    (table_name_stats, temp_csv_filename_stats)]

        # executing cmds
        for table_name, tmp_file in mdb_prms:
            with open(tmp_file, "w") as f:
                subprocess.call([sub_process_path, temp_filename, table_name], stdout=f)
                self.logger.debug(f"ran mdb-export {str(f)} {table_name}")

        # use pandas to load in the data
        global_data_df = pd.read_csv(temp_csv_filename_global)

    tests = global_data_df[self.headers_normal['test_id_txt']]  # OBS
    number_of_sets = len(tests)
    self.logger.debug("number of datasets: %i" % number_of_sets)

    for counter, test_no in enumerate(range(number_of_sets)):
        if counter > 0:
            self.logger.warning("***MULTITEST-FILE (not recommended)")
            if not ALLOW_MULTI_TEST_FILE:
                break
        data = DataSet()
        data.test_no = test_no
        data.loaded_from = file_name
        fid = FileID(file_name)
        # data.parent_filename = os.path.basename(file_name)
        # (name of the .res file it is loaded from)
        data.channel_index = int(global_data_df[self.headers_global['channel_index_txt']][test_no])
        data.channel_number = int(global_data_df[self.headers_global['channel_number_txt']][test_no])
        data.creator = global_data_df[self.headers_global['creator_txt']][test_no]
        data.item_ID = global_data_df[self.headers_global['item_id_txt']][test_no]
        data.schedule_file_name = global_data_df[self.headers_global['schedule_file_name_txt']][test_no]
        data.start_datetime = global_data_df[self.headers_global['start_datetime_txt']][test_no]
        data.test_ID = int(global_data_df[self.headers_normal['test_id_txt']][test_no])  # OBS
        data.test_name = global_data_df[self.headers_global['test_name_txt']][test_no]
        data.raw_data_files.append(fid)

        self.logger.debug("reading raw-data")
        if not use_mdbtools:
            # --------- read raw-data (normal-data) -------------------------
            length_of_test, normal_df = self._load_res_normal_table(conn, data.test_ID, bad_steps)
            # --------- read stats-data (summary-data) ----------------------
            sql = "select * from %s where %s=%s order by %s" % (
                table_name_stats,
                self.headers_normal['test_id_txt'],
                data.test_ID,
                self.headers_normal['data_point_txt'])
            summary_df = pd.read_sql_query(sql, conn)
            if counter > number_of_sets:
                self._clean_up_loadres(None, conn, temp_filename)
        else:
            normal_df = pd.read_csv(temp_csv_filename_normal)
            # filter on test ID
            normal_df = normal_df[normal_df[self.headers_normal['test_id_txt']] == data.test_ID]
            # sort on data point
            if prms._sort_if_subprocess:
                normal_df = normal_df.sort_values(self.headers_normal['data_point_txt'])
            length_of_test = normal_df.shape[0]
            summary_df = pd.read_csv(temp_csv_filename_stats)
            # clean up
            for f in [temp_filename, temp_csv_filename_stats,
                      temp_csv_filename_normal, temp_csv_filename_global]:
                if os.path.isfile(f):
                    try:
                        os.remove(f)
                    except WindowsError as e:
                        self.logger.warning(f"could not remove tmp-file\n{f} {e}")

        if summary_df.empty and prms.Reader.use_cellpy_stat_file:
            txt = "\nCould not find any summary (stats-file)!"
            txt += "\n -> issue make_summary(use_cellpy_stat_file=False)"
            logging.debug(txt)
        # normal_df = normal_df.set_index("Data_Point")

        data.dfsummary = summary_df
        data.dfdata = normal_df
        data.raw_data_files_length.append(length_of_test)
        new_tests.append(data)

    return new_tests
676,909
Create a pandas DataFrame with the info needed for ``cellpy`` to load the runs. Args: batch_name (str): Name of the batch. batch_col (str): The column where the batch name is in the db. reader (method): the db-loader method. reader_label (str): the label for the db-loader (if db-loader method is not given) Returns: info_df (pandas DataFrame)
def make_df_from_batch(batch_name, batch_col="b01", reader=None, reader_label=None):
    logger.debug(f"batch_name, batch_col: {batch_name}, {batch_col}")
    if reader is None:
        reader_obj = get_db_reader(reader_label)
        reader = reader_obj()
    srnos = reader.select_batch(batch_name, batch_col)
    logger.debug("srnos:" + str(srnos))
    info_dict = _create_info_dict(reader, srnos)
    info_df = pd.DataFrame(info_dict)
    info_df = info_df.sort_values(["groups", "filenames"])
    info_df = _make_unique_groups(info_df)
    info_df["labels"] = info_df["filenames"].apply(create_labels)
    info_df.set_index("filenames", inplace=True)
    return info_df
676,917
Writes the summaries to csv-files Args: frames: list of ``cellpy`` summary DataFrames keys: list of indexes (typically run-names) for the different runs selected_summaries: list defining which summary data to save batch_dir: directory to save to batch_name: the batch name (will be used for making the file-name(s)) Returns: a pandas DataFrame with your selected summaries.
def save_summaries(frames, keys, selected_summaries, batch_dir, batch_name):
    if not frames:
        logger.info("Could not save summaries - no summaries to save!")
        logger.info("You have no frames - aborting")
        return None
    if not keys:
        logger.info("Could not save summaries - no summaries to save!")
        logger.info("You have no keys - aborting")
        return None
    selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
    summary_df = pd.concat(frames, keys=keys, axis=1)
    # saving the selected summaries
    for key, value in selected_summaries_dict.items():
        _summary_file_name = os.path.join(batch_dir, "summary_%s_%s.csv" % (key, batch_name))
        _summary_df = summary_df.iloc[:, summary_df.columns.get_level_values(1) == value]
        # include function to tweak headers here (need to learn MultiIndex)
        _header = _summary_df.columns
        _summary_df.to_csv(_summary_file_name, sep=";")
        logger.info("saved summary (%s) to:\n %s" % (key, _summary_file_name))
    logger.info("finished saving summaries")
    return summary_df
676,920
Exports dQ/dV data from a CellpyData instance. Args: cell_data: CellpyData instance savedir: path to the folder where the files should be saved sep: separator for the .csv-files. last_cycle: only export up to this cycle (if not None)
def export_dqdv(cell_data, savedir, sep, last_cycle=None):
    logger.debug("exporting dqdv")
    filename = cell_data.dataset.loaded_from
    no_merged_sets = ""
    firstname, extension = os.path.splitext(filename)
    firstname += no_merged_sets
    if savedir:
        firstname = os.path.join(savedir, os.path.basename(firstname))
        logger.debug(f"savedir is true: {firstname}")
    outname_charge = firstname + "_dqdv_charge.csv"
    outname_discharge = firstname + "_dqdv_discharge.csv"
    list_of_cycles = cell_data.get_cycle_numbers()
    number_of_cycles = len(list_of_cycles)
    logger.debug("%s: you have %i cycles" % (filename, number_of_cycles))

    # extracting charge
    out_data = _extract_dqdv(cell_data, cell_data.get_ccap, last_cycle)
    logger.debug("extracted ica for charge")
    try:
        _save_multi(data=out_data, file_name=outname_charge, sep=sep)
    except ExportFailed:
        logger.info("could not export ica for charge")
    else:
        logger.debug("saved ica for charge")

    # extracting discharge
    out_data = _extract_dqdv(cell_data, cell_data.get_dcap, last_cycle)
    logger.debug("extracted ica for discharge")
    try:
        _save_multi(data=out_data, file_name=outname_discharge, sep=sep)
    except ExportFailed:
        logger.info("could not export ica for discharge")
    else:
        logger.debug("saved ica for discharge")
676,924
Plot summary graphs. Args: show: shows the figure if True. save: saves the figure if True. figure_type: optional, figure type to create.
def plot_summaries(self, show=False, save=True, figure_type=None):
    if not figure_type:
        figure_type = self.default_figure_type
    if figure_type not in self.default_figure_types:
        logger.debug("unknown figure type selected")
        figure_type = self.default_figure_type
    color_list, symbol_list = self._create_colors_markers_list()
    summary_df = self.summary_df
    selected_summaries = self.selected_summaries
    batch_dir = self.batch_dir
    batch_name = self.name
    fig, ax = plot_summary_figure(self.info_df, summary_df, color_list,
                                  symbol_list, selected_summaries,
                                  batch_dir, batch_name, show=show,
                                  save=save, figure_type=figure_type)
    self.figure[figure_type] = fig
    self.axes[figure_type] = ax
676,943
Updates the selected datasets. Args: all_in_memory (bool): store the cellpydata in memory (default False)
def update(self, all_in_memory=None):
    logging.info("[update experiment]")
    if all_in_memory is not None:
        self.all_in_memory = all_in_memory

    pages = self.journal.pages
    summary_frames = dict()
    cell_data_frames = dict()
    number_of_runs = len(pages)
    counter = 0
    errors = []

    for indx, row in pages.iterrows():
        counter += 1
        h_txt = "[" + counter * "|" + (number_of_runs - counter) * "." + "]"
        l_txt = "starting to process file # %i (index=%s)" % (counter, indx)
        logging.debug(l_txt)
        print(h_txt)

        if not row.raw_file_names and not self.force_cellpy:
            logging.info("File(s) not found!")
            logging.info(indx)
            logging.debug("File(s) not found for index=%s" % indx)
            errors.append(indx)
            continue
        else:
            logging.info(f"Processing {indx}")

        cell_data = cellreader.CellpyData()
        if not self.force_cellpy or self.force_recalc:
            logging.info("setting cycle mode (%s)..." % row.cell_type)
            cell_data.cycle_mode = row.cell_type

        logging.info("loading cell")
        if not self.force_cellpy:
            logging.debug("not forcing to load cellpy-file instead of raw file.")
            try:
                cell_data.loadcell(
                    raw_files=row.raw_file_names,
                    cellpy_file=row.cellpy_file_names,
                    mass=row.masses,
                    summary_on_raw=True,
                    force_raw=self.force_raw_file,
                    use_cellpy_stat_file=prms.Reader.use_cellpy_stat_file
                )
            except Exception as e:
                logging.info('Failed to load: ' + str(e))
                errors.append("loadcell:" + str(indx))
                if not self.accept_errors:
                    raise e
                continue
        else:
            logging.info("forcing")
            try:
                cell_data.load(row.cellpy_file_names, parent_level=self.parent_level)
            except Exception as e:
                logging.info(f"Critical exception encountered {type(e)} "
                             "- skipping this file")
                logging.debug('Failed to load. Error-message: ' + str(e))
                errors.append("load:" + str(indx))
                if not self.accept_errors:
                    raise e
                continue

        if not cell_data.check():
            logging.info("...not loaded...")
            logging.debug("Did not pass check(). Could not load cell!")
            errors.append("check:" + str(indx))
            continue

        logging.info("...loaded successfully...")
        summary_tmp = cell_data.dataset.dfsummary
        logging.info("Trying to get summary_data")

        if cell_data.dataset.step_table is None or self.force_recalc:
            logging.info("Running make_step_table")
            cell_data.make_step_table()
        if summary_tmp is None or self.force_recalc:
            logging.info("Running make_summary")
            cell_data.make_summary(find_end_voltage=True, find_ir=True)

        if summary_tmp.index.name == b"Cycle_Index":
            logging.debug("Strange: 'Cycle_Index' is a byte-string")
            summary_tmp.index.name = 'Cycle_Index'

        if not summary_tmp.index.name == "Cycle_Index":
            logging.debug("Setting index to Cycle_Index")
            # check if it is a byte-string
            if b"Cycle_Index" in summary_tmp.columns:
                logging.debug("Seems to be a byte-string in the column-headers")
                summary_tmp.rename(columns={b"Cycle_Index": 'Cycle_Index'}, inplace=True)
            summary_tmp.set_index("Cycle_Index", inplace=True)

        summary_frames[indx] = summary_tmp

        if self.all_in_memory:
            cell_data_frames[indx] = cell_data
        else:
            cell_data_frames[indx] = cellreader.CellpyData(initialize=True)
            cell_data_frames[indx].dataset.step_table = cell_data.dataset.step_table
            # cell_data_frames[indx].dataset.step_table_made = True

        if self.save_cellpy:
            logging.info("saving to cellpy-format")
            if not row.fixed:
                logging.info("saving cell to %s" % row.cellpy_file_names)
                cell_data.ensure_step_table = True
                cell_data.save(row.cellpy_file_names)
            else:
                logging.debug("saving cell skipped (set to 'fixed' in info_df)")

        if self.export_raw or self.export_cycles:
            export_text = "exporting"
            if self.export_raw:
                export_text += " [raw]"
            if self.export_cycles:
                export_text += " [cycles]"
            logging.info(export_text)
            cell_data.to_csv(
                self.journal.raw_dir,
                sep=prms.Reader.sep,
                cycles=self.export_cycles,
                shifted=self.shifted_cycles,
                raw=self.export_raw,
                last_cycle=self.last_cycle
            )

        if self.export_ica:
            logging.info("exporting [ica]")
            try:
                helper.export_dqdv(
                    cell_data,
                    savedir=self.journal.raw_dir,
                    sep=prms.Reader.sep,
                    last_cycle=self.last_cycle
                )
            except Exception as e:
                logging.error("Could not make/export dq/dv data")
                logging.debug("Failed to make/export "
                              "dq/dv data (%s): %s" % (indx, str(e)))
                errors.append("ica:" + str(indx))

    self.errors["update"] = errors
    self.summary_frames = summary_frames
    self.cell_data_frames = cell_data_frames
676,945
Simply load a dataset based on its serial number (srno). This convenience function reads a dataset based on a serial number. The serial number (srno) must be defined in your database. It is mainly used to check that things are set up correctly. Args: prm_filename: name of parameter file (optional). srno (int): serial number Example: >>> srno = 918 >>> just_load_srno(srno) srno: 918 read prms ....
def just_load_srno(srno, prm_filename=None):
    from cellpy import dbreader, filefinder
    print("just_load_srno: srno: %i" % srno)

    # ------------reading parameters-----------------------------------------
    # print "just_load_srno: read prms"
    # prm = prmreader.read(prm_filename)
    # print prm
    print("just_load_srno: making class and setting prms")
    d = CellpyData()

    # ------------reading db-------------------------------------------------
    print()
    print("just_load_srno: starting to load reader")
    # reader = dbreader.reader(prm_filename)
    reader = dbreader.Reader()
    print("------ok------")

    run_name = reader.get_cell_name(srno)
    print("just_load_srno: run_name:")
    print(run_name)

    m = reader.get_mass(srno)
    print("just_load_srno: mass: %f" % m)
    print()

    # ------------loadcell---------------------------------------------------
    print("just_load_srno: getting file_names")
    raw_files, cellpy_file = filefinder.search_for_files(run_name)
    print("raw_files:", raw_files)
    print("cellpy_file:", cellpy_file)

    print("just_load_srno: running loadcell")
    d.loadcell(raw_files, cellpy_file, mass=m)
    print("------ok------")

    # ------------do stuff---------------------------------------------------
    print("just_load_srno: getting step_numbers for charge")
    v = d.get_step_numbers("charge")
    print(v)

    print()
    print("just_load_srno: finding C-rates")
    d.find_C_rates(v, silent=False)

    print()
    print("just_load_srno: OK")
    return True
676,991
Load a raw data file and save it as cellpy-file. Args: mass (float): active material mass [mg]. outdir (path): optional, path to directory for saving the hdf5-file. outfile (str): optional, name of hdf5-file. filename (str): name of the resfile. Returns: out_file_name (str): name of saved file.
def load_and_save_resfile(filename, outfile=None, outdir=None, mass=1.00):
    d = CellpyData()
    if not outdir:
        outdir = prms.Paths["cellpydatadir"]
    if not outfile:
        outfile = os.path.basename(filename).split(".")[0] + ".h5"
        outfile = os.path.join(outdir, outfile)
    print("filename:", filename)
    print("outfile:", outfile)
    print("outdir:", outdir)
    print("mass:", mass, "mg")
    d.from_raw(filename)
    d.set_mass(mass)
    d.make_step_table()
    d.make_summary()
    d.save(filename=outfile)
    d.to_csv(datadir=outdir, cycles=True, raw=True, summary=True)
    return outfile
676,992
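A hedged usage sketch of the convenience function above; the file name is hypothetical, and mass is the active-material mass in mg:

    out = load_and_save_resfile('20160805_test001_45_cc_01.res', mass=0.982)
    print(out)  # path to the saved .h5 file (csv files are also exported via to_csv)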
Load a raw data file and print information. Args: filename (str): name of the resfile. info_dict (dict): parameter dict (mass, nom_cap, tot_mass); a default is created if None. Returns: info_dict (dict): the parameter dict that was used.
def load_and_print_resfile(filename, info_dict=None):
    # self.test_no = None
    # self.mass = 1.0  # mass of (active) material (in mg)
    # self.no_cycles = 0.0
    # self.charge_steps = None  # not in use at the moment
    # self.discharge_steps = None  # not in use at the moment
    # self.ir_steps = None  # dict # not in use at the moment
    # self.ocv_steps = None  # dict # not in use at the moment
    # self.nom_cap = 3579  # mAh/g (used for finding c-rates)
    # self.mass_given = False
    # self.c_mode = True
    # self.starts_with = "discharge"
    # self.material = "noname"
    # self.merged = False
    # self.file_errors = None  # not in use at the moment
    # self.loaded_from = None  # name of the .res file it is loaded from
    #                          # (can be list if merged)
    # self.raw_data_files = []
    # self.raw_data_files_length = []
    # self.parent_filename = None  # basename of the .res file it is loaded
    #                              # from (can be list if merged)
    # self.channel_index = None
    # self.channel_number = None
    # self.creator = None
    # self.item_ID = None
    # self.schedule_file_name = None
    # self.start_datetime = None
    # self.test_ID = None
    # self.name = None
    # NEXT: include nom_cap, tot_mass and parameters table in save/load hdf5
    if info_dict is None:
        info_dict = dict()
        info_dict["mass"] = 1.23  # mg
        info_dict["nom_cap"] = 3600  # mAh/g (active material)
        info_dict["tot_mass"] = 2.33  # total mass of material
    d = CellpyData()
    print("filename:", filename)
    print("info_dict in:", end=' ')
    print(info_dict)
    d.from_raw(filename)
    d.set_mass(info_dict["mass"])
    d.make_step_table()
    d.make_summary()
    for test in d.datasets:
        print("newtest")
        print(test)
    return info_dict
676,993
CellpyData object Args: filenames: list of files to load. selected_scans: profile: experimental feature. filestatuschecker: property to compare cellpy and raw-files; default read from prms-file. fetch_one_liners: experimental feature. tester: instrument used (e.g. "arbin") (checks prms-file as default). initialize: create a dummy (empty) dataset; defaults to False.
def __init__(self, filenames=None,
             selected_scans=None,
             profile=False,
             filestatuschecker=None,  # "modified"
             fetch_one_liners=False,
             tester=None,
             initialize=False):
    if tester is None:
        self.tester = prms.Instruments.tester
    else:
        self.tester = tester
    self.loader = None  # this will be set in the function set_instrument
    self.logger = logging.getLogger(__name__)
    self.logger.debug("created CellpyData instance")
    self.name = None
    self.profile = profile
    self.minimum_selection = {}
    if filestatuschecker is None:
        self.filestatuschecker = prms.Reader.filestatuschecker
    else:
        self.filestatuschecker = filestatuschecker
    self.forced_errors = 0
    self.summary_exists = False

    if not filenames:
        self.file_names = []
    else:
        self.file_names = filenames
    if not self._is_listtype(self.file_names):
        self.file_names = [self.file_names]

    if not selected_scans:
        self.selected_scans = []
    else:
        self.selected_scans = selected_scans
    if not self._is_listtype(self.selected_scans):
        self.selected_scans = [self.selected_scans]

    self.datasets = []
    self.status_datasets = []
    self.selected_dataset_number = 0
    self.number_of_datasets = 0

    self.capacity_modifiers = ['reset', ]
    self.list_of_step_types = ['charge', 'discharge',
                               'cv_charge', 'cv_discharge',
                               'charge_cv', 'discharge_cv',
                               'ocvrlx_up', 'ocvrlx_down', 'ir',
                               'rest', 'not_known']

    # - options
    self.force_step_table_creation = prms.Reader.force_step_table_creation
    self.force_all = prms.Reader.force_all
    self.sep = prms.Reader.sep
    self._cycle_mode = prms.Reader.cycle_mode
    # self.max_res_filesize = prms.Reader.max_res_filesize
    self.load_only_summary = prms.Reader.load_only_summary
    self.select_minimal = prms.Reader.select_minimal
    # self.chunk_size = prms.Reader.chunk_size  # 100000
    # self.max_chunks = prms.Reader.max_chunks
    # self.last_chunk = prms.Reader.last_chunk
    self.limit_loaded_cycles = prms.Reader.limit_loaded_cycles
    # self.load_until_error = prms.Reader.load_until_error
    self.ensure_step_table = prms.Reader.ensure_step_table
    self.daniel_number = prms.Reader.daniel_number
    # self.raw_datadir = prms.Reader.raw_datadir
    self.raw_datadir = prms.Paths.rawdatadir
    # self.cellpy_datadir = prms.Reader.cellpy_datadir
    self.cellpy_datadir = prms.Paths.cellpydatadir
    # search in prm-file for res and hdf5 dirs in loadcell:
    self.auto_dirs = prms.Reader.auto_dirs

    # - headers and instruments
    self.headers_normal = get_headers_normal()
    self.headers_summary = get_headers_summary()
    self.headers_step_table = get_headers_step_table()

    self.table_names = None  # dictionary defined in set_instruments
    self.set_instrument()

    # - units used by cellpy
    self.cellpy_units = get_cellpy_units()

    if initialize:
        self.initialize()
676,996
Set the instrument (i.e. tell cellpy the file-type you use). Args: instrument: (str) in ["arbin", "bio-logic-csv", "bio-logic-bin",...] Sets the instrument used for obtaining the data (i.e. sets fileformat)
def set_instrument(self, instrument=None):
    if instrument is None:
        instrument = self.tester
    if instrument in ["arbin", "arbin_res"]:
        self._set_arbin()
        self.tester = "arbin"
    elif instrument == "arbin_sql":
        self._set_arbin_sql()
        self.tester = "arbin"
    elif instrument == "arbin_experimental":
        self._set_arbin_experimental()
        self.tester = "arbin"
    elif instrument in ["pec", "pec_csv"]:
        self._set_pec()
        self.tester = "pec"
    elif instrument in ["biologics", "biologics_mpr"]:
        self._set_biologic()
        self.tester = "biologic"
    elif instrument == "custom":
        self._set_custom()
        self.tester = "custom"
    else:
        raise Exception(f"option does not exist: '{instrument}'")
676,997
Set the directory containing .res-files. Used for setting the directory to look for res-files in. A valid directory name is required. Args: directory (str): path to res-directory Example: >>> d = CellpyData() >>> directory = "MyData/Arbindata" >>> d.set_raw_datadir(directory)
def set_raw_datadir(self, directory=None):
    if directory is None:
        self.logger.info("no directory name given")
        return
    if not os.path.isdir(directory):
        self.logger.info(directory)
        self.logger.info("directory does not exist")
        return
    self.raw_datadir = directory
677,004
Set the directory containing .hdf5-files. Used for setting the directory to look for hdf5-files in. A valid directory name is required. Args: directory (str): path to hdf5-directory Example: >>> d = CellpyData() >>> directory = "MyData/HDF5" >>> d.set_cellpy_datadir(directory)
def set_cellpy_datadir(self, directory=None):
    if directory is None:
        self.logger.info("no directory name given")
        return
    if not os.path.isdir(directory):
        self.logger.info("directory does not exist")
        return
    self.cellpy_datadir = directory
677,005
Load a raw data-file. Args: file_names (list of raw-file names): uses CellpyData.file_names if None. If the list contains more than one file name, then the runs will be merged together.
def from_raw(self, file_names=None, **kwargs):
    # This function only loads one test at a time (but could contain
    # several files). The function from_res() also implements loading
    # several datasets (using list of lists as input).
    if file_names:
        self.file_names = file_names
        if not isinstance(file_names, (list, tuple)):
            self.file_names = [file_names, ]
    # file_type = self.tester
    raw_file_loader = self.loader
    set_number = 0
    test = None
    counter = 0
    self.logger.debug("start iterating through file(s)")

    for f in self.file_names:
        self.logger.debug("loading raw file:")
        self.logger.debug(f"{f}")
        new_tests = raw_file_loader(f, **kwargs)
        if new_tests:
            if test is not None:
                self.logger.debug("continuing reading files...")
                _test = self._append(test[set_number], new_tests[set_number])
                if not _test:
                    self.logger.warning(f"EMPTY TEST: {f}")
                    continue
                test[set_number] = _test
                self.logger.debug("added this test - started merging")
                for j in range(len(new_tests[set_number].raw_data_files)):
                    raw_data_file = new_tests[set_number].raw_data_files[j]
                    file_size = new_tests[set_number].raw_data_files_length[j]
                    test[set_number].raw_data_files.append(raw_data_file)
                    test[set_number].raw_data_files_length.append(file_size)
                    counter += 1
                    if counter > 10:
                        self.logger.debug("ERROR? Too many files to merge")
                        raise ValueError("Too many files to merge - "
                                         "could be a p2-p3 zip thing")
            else:
                self.logger.debug("getting data from first file")
                if new_tests[set_number].no_data:
                    self.logger.debug("NO DATA")
                else:
                    test = new_tests
        else:
            self.logger.debug("NOTHING LOADED")

    self.logger.debug("finished loading the raw-files")
    test_exists = False
    if test:
        if test[0].no_data:
            # was `self.logging.debug`, which would raise AttributeError
            self.logger.debug("the first dataset (or only dataset) loaded "
                              "from the raw data file is empty")
        else:
            test_exists = True

    if test_exists:
        if not prms.Reader.sorted_data:
            self.logger.debug("sorting data")
            test[set_number] = self._sort_data(test[set_number])
        self.datasets.append(test[set_number])
    else:
        self.logger.warning("No new datasets added!")

    self.number_of_datasets = len(self.datasets)
    self.status_datasets = self._validate_datasets()
    self._invent_a_name()
    return self
677,011
Loads a cellpy file. Args: cellpy_file (path, str): Full path to the cellpy file. parent_level (str, optional): Parent level
def load(self, cellpy_file, parent_level="CellpyData"):
    try:
        self.logger.debug("loading cellpy-file (hdf5):")
        self.logger.debug(cellpy_file)
        new_datasets = self._load_hdf5(cellpy_file, parent_level)
        self.logger.debug("cellpy-file loaded")
    except AttributeError:
        new_datasets = []
        # a space was missing between the two string fragments here
        self.logger.warning("This cellpy-file version is not supported by "
                            "the current reader (try to update cellpy).")
    if new_datasets:
        for dataset in new_datasets:
            self.datasets.append(dataset)
    else:
        # raise LoadError
        self.logger.warning("Could not load")
        self.logger.warning(str(cellpy_file))
    self.number_of_datasets = len(self.datasets)
    self.status_datasets = self._validate_datasets()
    self._invent_a_name(cellpy_file)
    return self
677,015
Load a cellpy-file. Args: filename (str): Name of the cellpy file. parent_level (str) (optional): name of the parent level (defaults to "CellpyData") Returns: loaded datasets (DataSet-object)
def _load_hdf5(self, filename, parent_level="CellpyData"):
    if not os.path.isfile(filename):
        self.logger.info(f"file does not exist: {filename}")
        raise IOError

    store = pd.HDFStore(filename)
    # required_keys = ['dfdata', 'dfsummary', 'fidtable', 'info']
    required_keys = ['dfdata', 'dfsummary', 'info']
    required_keys = ["/" + parent_level + "/" + _ for _ in required_keys]
    for key in required_keys:
        if key not in store.keys():
            self.logger.info(f"This hdf-file is not good enough - "
                             f"at least one key is missing: {key}")
            raise Exception(f"OH MY GOD! At least one crucial key "
                            f"is missing {key}!")
    self.logger.debug(f"Keys in current hdf5-file: {store.keys()}")

    data = DataSet()
    if parent_level != "CellpyData":
        self.logger.debug("Using non-default parent label for the "
                          "hdf-store: {}".format(parent_level))

    # checking file version
    infotable = store.select(parent_level + "/info")
    try:
        data.cellpy_file_version = self._extract_from_dict(infotable, "cellpy_file_version")
    except Exception as e:
        data.cellpy_file_version = 0
        warnings.warn(f"Unhandled exception raised: {e}")
    if data.cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:
        raise WrongFileVersion
    if data.cellpy_file_version > CELLPY_FILE_VERSION:
        raise WrongFileVersion

    data.dfsummary = store.select(parent_level + "/dfsummary")
    data.dfdata = store.select(parent_level + "/dfdata")
    try:
        data.step_table = store.select(parent_level + "/step_table")
    except Exception as e:
        # was `self.logging.debug`, which would raise AttributeError
        self.logger.debug("could not get step_table from cellpy-file")
        data.step_table = pd.DataFrame()
        warnings.warn(f"Unhandled exception raised: {e}")
    try:
        fidtable = store.select(parent_level + "/fidtable")
        # remark! changed spelling from lower letter to camel-case!
        fidtable_selected = True
    except Exception as e:
        self.logger.debug("could not get fid-table from cellpy-file")
        fidtable = []
        warnings.warn("no fidtable - you should update your hdf5-file")
        fidtable_selected = False

    self.logger.debug(" h5")
    # this does not yet allow multiple sets
    newtests = []  # but this is ready when that time comes

    # The infotable stores "meta-data". The following statement loads the
    # content of infotable and updates div. DataSet attributes.
    # Maybe better to use it as a dict?
    data = self._load_infotable(data, infotable, filename)

    if fidtable_selected:
        data.raw_data_files, data.raw_data_files_length = self._convert2fid_list(fidtable)
    else:
        data.raw_data_files = None
        data.raw_data_files_length = None
    newtests.append(data)
    store.close()
    # self.datasets.append(data)
    return newtests
677,016
Returns voltage for cycle, step. Convenience function; same as issuing dfdata[(dfdata[cycle_index_header] == cycle) & (dfdata[step_index_header] == step)][voltage_header] Args: cycle: cycle number step: step number set_number: the dataset number (automatic selection if None) Returns: pandas.Series or None if empty
def sget_voltage(self, cycle, step, set_number=None):
    time_00 = time.time()
    set_number = self._validate_dataset_number(set_number)
    if set_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    voltage_header = self.headers_normal.voltage_txt
    step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[set_number].dfdata
    if isinstance(step, (list, tuple)):
        warnings.warn(f"The variable step is a list. "
                      f"Should be an integer. {step}")
        step = step[0]
    c = test[(test[cycle_index_header] == cycle) &
             (test[step_index_header] == step)]
    self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
    if not self.is_empty(c):
        v = c[voltage_header]
        return v
    else:
        return None
677,040
Returns voltage (in V). Args: cycle: cycle number (all cycles if None) dataset_number: first dataset if None full: valid only for cycle=None (i.e. all cycles), returns the full pandas.Series if True, else a list of pandas.Series Returns: pandas.Series (or list of pandas.Series if cycle=None and full=False)
def get_voltage(self, cycle=None, dataset_number=None, full=True):
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    voltage_header = self.headers_normal.voltage_txt
    # step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[dataset_number].dfdata
    if cycle:
        self.logger.debug("getting voltage curve for cycle")
        c = test[(test[cycle_index_header] == cycle)]
        if not self.is_empty(c):
            v = c[voltage_header]
            return v
    else:
        if not full:
            self.logger.debug("getting list of voltage-curves for all cycles")
            v = []
            no_cycles = np.amax(test[cycle_index_header])
            for j in range(1, no_cycles + 1):
                txt = "Cycle %i: " % j
                self.logger.debug(txt)
                c = test[(test[cycle_index_header] == j)]
                v.append(c[voltage_header])
        else:
            self.logger.debug("getting frame of all voltage-curves")
            v = test[voltage_header]
        return v
677,041
Returns current (in mA). Args: cycle: cycle number (all cycles if None) dataset_number: first dataset if None full: valid only for cycle=None (i.e. all cycles), returns the full pandas.Series if True, else a list of pandas.Series Returns: pandas.Series (or list of pandas.Series if cycle=None and full=False)
def get_current(self, cycle=None, dataset_number=None, full=True):
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    current_header = self.headers_normal.current_txt
    # step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[dataset_number].dfdata
    if cycle:
        self.logger.debug(f"getting current for cycle {cycle}")
        c = test[(test[cycle_index_header] == cycle)]
        if not self.is_empty(c):
            v = c[current_header]
            return v
    else:
        if not full:
            self.logger.debug("getting a list of current-curves for all cycles")
            v = []
            no_cycles = np.amax(test[cycle_index_header])
            for j in range(1, no_cycles + 1):
                txt = "Cycle %i: " % j
                self.logger.debug(txt)
                c = test[(test[cycle_index_header] == j)]
                v.append(c[current_header])
        else:
            self.logger.debug("getting all current-curves")
            v = test[current_header]
        return v
677,042
Returns step time for cycle, step. Convenience function; same as issuing dfdata[(dfdata[cycle_index_header] == cycle) & (dfdata[step_index_header] == step)][step_time_header] Args: cycle: cycle number step: step number dataset_number: the dataset number (automatic selection if None) Returns: pandas.Series or None if empty
def sget_steptime(self, cycle, step, dataset_number=None):
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    step_time_header = self.headers_normal.step_time_txt
    step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[dataset_number].dfdata
    if isinstance(step, (list, tuple)):
        warnings.warn(f"The variable step is a list. "
                      f"Should be an integer. {step}")
        step = step[0]
    c = test.loc[
        (test[cycle_index_header] == cycle) &
        (test[step_index_header] == step), :
    ]
    if not self.is_empty(c):
        t = c[step_time_header]
        return t
    else:
        return None
677,043
Returns timestamp for cycle, step. Convenience function; same as issuing dfdata[(dfdata[cycle_index_header] == cycle) & (dfdata[step_index_header] == step)][timestamp_header] Args: cycle: cycle number step: step number dataset_number: the dataset number (automatic selection if None) Returns: pandas.Series
def sget_timestamp(self, cycle, step, dataset_number=None):
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    timestamp_header = self.headers_normal.test_time_txt
    step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[dataset_number].dfdata
    if isinstance(step, (list, tuple)):
        warnings.warn(f"The variable step is a list. "
                      f"Should be an integer. {step}")
        step = step[0]
    c = test[(test[cycle_index_header] == cycle) &
             (test[step_index_header] == step)]
    if not self.is_empty(c):
        t = c[timestamp_header]
        return t
    else:
        return pd.Series()
677,044
Returns timestamps (in sec, or minutes if in_minutes==True). Args: cycle: cycle number (all if None) dataset_number: first dataset if None in_minutes: return values in minutes instead of seconds if True full: valid only for cycle=None (i.e. all cycles), returns the full pandas.Series if True, else a list of pandas.Series Returns: pandas.Series (or list of pandas.Series if cycle=None and full=False)
def get_timestamp(self, cycle=None, dataset_number=None,
                  in_minutes=False, full=True):
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    timestamp_header = self.headers_normal.test_time_txt

    v = pd.Series()
    test = self.datasets[dataset_number].dfdata
    if cycle:
        c = test[(test[cycle_index_header] == cycle)]
        if not self.is_empty(c):
            v = c[timestamp_header]
    else:
        if not full:
            self.logger.debug("getting timestamp for all cycles")
            v = []
            no_cycles = np.amax(test[cycle_index_header])
            for j in range(1, no_cycles + 1):
                txt = "Cycle %i: " % j
                self.logger.debug(txt)
                c = test[(test[cycle_index_header] == j)]
                v.append(c[timestamp_header])
        else:
            self.logger.debug("returning full timestamp col")
            v = test[timestamp_header]

    # the original repeated this block, dividing by 60 twice
    if in_minutes and v is not None:
        v /= 60.0
    return v
677,045
Returns full cycle dqdv data for all cycles as one pd.DataFrame. Args: cell: CellpyData-object Returns: pandas.DataFrame with the following columns: cycle: cycle number voltage: voltage dq: the incremental capacity
def _dqdv_combinded_frame(cell, **kwargs):
    cycles = cell.get_cap(
        method="forth-and-forth",
        categorical_column=True,
        label_cycle_number=True,
    )
    ica_df = dqdv_cycles(cycles, **kwargs)
    assert isinstance(ica_df, pd.DataFrame)
    return ica_df
677,186
Performs a fit of the OCV steps in the cycles set by set_cycles(), on the data set by set_data(). r is found by calculating v0 / i_start --> err(r) = err(v0) + err(i_start). c is found from tau / r --> err(c) = err(r) + err(tau). Args: cellpydata (CellpyData): data object from cellreader cycle (int): cycle number to get from the CellpyData object Returns: None
def set_cellpydata(self, cellpydata, cycle):
    self.data = cellpydata
    self.step_table = self.data.dataset  # hope it works...
    time_voltage = self.data.get_ocv(direction='up', cycles=cycle)
    time = time_voltage.Step_Time
    voltage = time_voltage.Voltage
    self.time = np.array(time)
    self.voltage = np.array(voltage)
677,227
Converts an xls date stamp to a more sensible format. Args: xldate (float): date stamp in Excel format. datemode (int): 0 for 1900-based, 1 for 1904-based. option (str): one of "to_datetime", "to_float", "to_string"; sets the return type. Returns: datetime (datetime object, float, or string).
def xldate_as_datetime(xldate, datemode=0, option="to_datetime"):
    # This does not work for numpy-arrays
    if option == "to_float":
        d = (xldate - 25589) * 86400.0
    else:
        try:
            d = datetime.datetime(1899, 12, 30) + \
                datetime.timedelta(days=xldate + 1462 * datemode)
            # date_format = "%Y-%m-%d %H:%M:%S:%f"  # with microseconds,
            # excel cannot cope with this!
            if option == "to_string":
                date_format = "%Y-%m-%d %H:%M:%S"  # without microseconds
                d = d.strftime(date_format)
        except TypeError:
            logging.info(f'The date is not of correct type [{xldate}]')
            d = xldate
    return d
677,276
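A quick usage sketch of the conversion above (1900 date mode; Excel serial 43831 corresponds to 2020-01-01):

    print(xldate_as_datetime(43831))                      # 2020-01-01 00:00:00
    print(xldate_as_datetime(43831, option='to_string'))  # '2020-01-01 00:00:00'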
Finds the file-stats and populates the class with stat values. Args: filename (str): name of the file.
def populate(self, filename):
    if os.path.isfile(filename):
        fid_st = os.stat(filename)
        self.name = os.path.abspath(filename)
        self.full_name = filename
        self.size = fid_st.st_size
        self.last_modified = fid_st.st_mtime
        self.last_accessed = fid_st.st_atime
        self.last_info_changed = fid_st.st_ctime
        self.location = os.path.dirname(filename)
677,279
Select the row with identification number serial_number. Args: serial_number: serial number Returns: pandas.DataFrame
def select_serial_number_row(self, serial_number):
    sheet = self.table
    col = self.db_sheet_cols.id
    rows = sheet.loc[:, col] == serial_number
    return sheet.loc[rows, :]
677,295
Select rows matching a list of serial numbers. Args: serial_numbers: list (or ndarray) of serial numbers Returns: pandas.DataFrame
def select_all(self, serial_numbers):
    sheet = self.table
    col = self.db_sheet_cols.id
    rows = sheet.loc[:, col].isin(serial_numbers)
    return sheet.loc[rows, :]
677,296
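A usage sketch for the two selection helpers above; `reader` (an instance of the database reader class these methods belong to) and the serial numbers are hypothetical:

# Hypothetical usage sketch.
row = reader.select_serial_number_row(615)     # one run as a DataFrame row
rows = reader.select_all([615, 616, 620])      # several runs at once
print(rows.head())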
Print information about the run. Args: serial_number: serial number. print_to_screen: runs the print statement if True, returns txt if not. Returns: txt if print_to_screen is False, else None.
def print_serial_number_info(self, serial_number, print_to_screen=True):
        r = self.select_serial_number_row(serial_number)
        if r.empty:
            warnings.warn("missing serial number")
            return

        txt1 = 80 * "="
        txt1 += "\n"
        txt1 += f"  serial number {serial_number}\n"
        txt1 += 80 * "-"
        txt1 += "\n"
        txt2 = ""
        for label, value in zip(r.columns, r.values[0]):
            if label in self.headers:
                txt1 += f"{label}:    \t {value}\n"
            else:
                txt2 += f"({label}:    \t {value})\n"
        if print_to_screen:
            print(txt1)
            print(80 * "-")
            print(txt2)
            print(80 * "=")
            return
        else:
            return txt1 + txt2
677,297
Filters sheet/table by slurry name.

    Input is a slurry name or a list of slurry names, for example 'es030' or
    ["es012", "es033", "es031"].

    Args:
        slurry (str or list of strings): slurry names.
        appender (str): single character that surrounds slurry names.

    Returns:
        ndarray of serial numbers (ints).
def filter_by_slurry(self, slurry, appender="_"):
        sheet = self.table
        identity = self.db_sheet_cols.id
        exists_col = self.db_sheet_cols.exists
        cellname_col = self.db_sheet_cols.cell_name

        if not isinstance(slurry, (list, tuple)):
            slurry = [slurry]

        # build a regex such as "_es012_|_es033_|_es031_"
        search_string = "|".join(appender + slur + appender for slur in slurry)

        criterion = sheet.loc[:, cellname_col].str.contains(search_string)
        exists = sheet.loc[:, exists_col] > 0
        sheet = sheet[criterion & exists]

        return sheet.loc[:, identity].values.astype(int)
677,315
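A usage sketch for filter_by_slurry above; the reader instance and slurry names are hypothetical:

# Hypothetical usage sketch.
srnos = reader.filter_by_slurry(["es012", "es033"])
# -> ndarray of serial numbers whose cell name contains _es012_ or _es033_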
Filters sheet/table by columns (input is one or more column headers).

    The routine returns the serial numbers of existing runs with values > 0
    in all the selected columns.

    Args:
        column_names (list): the column headers.

    Returns:
        ndarray of serial numbers (ints).
def filter_by_col(self, column_names): if not isinstance(column_names, (list, tuple)): column_names = [column_names, ] sheet = self.table identity = self.db_sheet_cols.id exists = self.db_sheet_cols.exists criterion = True for column_name in column_names: _criterion = sheet.loc[:, column_name] > 0 _exists = sheet.loc[:, exists] > 0 criterion = criterion & _criterion & _exists return sheet.loc[criterion, identity].values.astype(int)
677,316
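A usage sketch for filter_by_col above; the reader instance and the column headers are hypothetical:

# Hypothetical usage sketch.
srnos = reader.filter_by_col(["FEC", "VC"])
# -> serial numbers of existing runs with values > 0 in both columns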
Filters sheet/table by one column.

    The routine returns the serial numbers of existing runs with
    min_val <= value <= max_val in the selected column.

    Args:
        column_name (str): column name.
        min_val (int): minimum value in the selected column.
        max_val (int): maximum value in the selected column.

    Returns:
        ndarray of serial numbers (ints).
def filter_by_col_value(self, column_name, min_val=None, max_val=None): sheet = self.table identity = self.db_sheet_cols.id exists_col_number = self.db_sheet_cols.exists exists = sheet.loc[:, exists_col_number] > 0 if min_val is not None and max_val is not None: criterion1 = sheet.loc[:, column_name] >= min_val criterion2 = sheet.loc[:, column_name] <= max_val sheet = sheet[criterion1 & criterion2 & exists] elif min_val is not None or max_val is not None: if min_val is not None: criterion = sheet.loc[:, column_name] >= min_val if max_val is not None: criterion = sheet.loc[:, column_name] <= max_val # noinspection PyUnboundLocalVariable sheet = sheet[criterion & exists] else: sheet = sheet[exists] return sheet.loc[:, identity].values.astype(int)
677,317
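A usage sketch for filter_by_col_value above; the reader instance, column header, and bounds are hypothetical:

# Hypothetical usage sketch.
srnos = reader.filter_by_col_value("mass", min_val=0.3, max_val=0.5)
# -> serial numbers of existing runs with 0.3 <= mass <= 0.5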
Creates an embed UI containing a hex color message Args: channel (discord.Channel): The Discord channel to bind the embed to image (str): The url of the image to add hex_str (str): The hex value Returns: ui (ui_embed.UI): The embed UI object that was created
def success(channel, image, hex_str): hex_number = int(hex_str, 16) # Create embed UI object gui = ui_embed.UI( channel, "", "#{}".format(hex_str), modulename=modulename, colour=hex_number, thumbnail=image, ) return gui
677,325
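A usage sketch for the hex success embed above, placed inside an async command handler; the channel object and image url are hypothetical:

# Hypothetical usage sketch (inside an async command handler).
embed = success(channel, "https://dummyimage.com/50x50/ff8800/ff8800.png", "FF8800")
await embed.send()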
The on_message event handler for this module Args: message (discord.Message): Input message
async def on_message(message):
    # Simplify message info
    server = message.server
    author = message.author
    channel = message.channel
    content = message.content

    # Only reply to server messages and don't reply to myself
    if server is None or author == channel.server.me:
        return

    data = datatools.get_data()
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return

    # Commands section
    prefix = data["discord"]["servers"][server.id]["prefix"]
    if content.startswith(prefix):
        # Parse message
        package = content.split(" ")
        command = package[0][len(prefix):]
        args = package[1:]

        # Commands
        if command == 'rlstats':
            if not args:
                return
            player_name = args[0]

            alias_steam = ["steam", "pc"]
            alias_ps = ["ps", "psn", "playstation", "ps4", "playstation 4"]
            alias_xbox = ["xbox", "xb", "xb1", "xbone", "xbox one"]

            platform = "steam"
            if len(args) > 1:
                platform = ' '.join(args[1:]).lower()
                if platform in alias_steam:
                    platform = "steam"
                elif platform in alias_ps:
                    platform = "ps"
                elif platform in alias_xbox:
                    platform = "xbox"

            await client.send_typing(channel)

            # Get Rocket League stats from stats API
            success, rldata = api_rocketleaguestats.check_rank(player_name, platform)

            # Create embed UI
            if success:
                embed = ui_embed.success(channel, rldata[0], rldata[1], rldata[2], rldata[3])
            else:
                embed = ui_embed.fail_api(channel)

            await embed.send()
677,331
Create a new UI for the module Args: parent: A tk or ttk object
def __init__(self, parent): super(ModuleUIFrame, self).__init__(parent) self.columnconfigure(0, weight=1) self.rowconfigure(1, weight=1) # Set default values from ....datatools import get_data data = get_data() # API Frame api_frame = ttk.LabelFrame(self, padding=8, text="Google API") api_frame.grid(row=0, column=0, sticky="W E N S") api_frame.columnconfigure(0, weight=1) # Add key fields self.google_api_key = tk.StringVar() ttk.Label(api_frame, text="Google API Key").grid(column=0, row=0, sticky="W E N S") ttk.Entry(api_frame, textvariable=self.google_api_key).grid( column=0, row=1, padx=0, pady=4, sticky="W E N S") self.soundcloud_client_id = tk.StringVar() ttk.Label(api_frame, text="SoundCloud Client ID").grid(column=0, row=2, sticky="W E N S") ttk.Entry(api_frame, textvariable=self.soundcloud_client_id).grid( column=0, row=3, padx=0, pady=4, sticky="W E N S") ttk.Button(api_frame, command=lambda: self.update_keys(), text="Update API Data").grid( column=0, row=4, padx=0, pady=4, sticky="W E N S") if "google_api_key" in data["discord"]["keys"]: self.google_api_key.set(data["discord"]["keys"]["google_api_key"]) if "soundcloud_client_id" in data["discord"]["keys"]: self.soundcloud_client_id.set(data["discord"]["keys"]["soundcloud_client_id"])
677,332
Creates an embed UI containing the module modified message Args: channel (discord.Channel): The Discord channel to bind the embed to module_name (str): The name of the module that was updated module_state (bool): The current state of the module Returns: embed: The created embed
def modify_module(channel, module_name, module_state): # Create embed UI object gui = ui_embed.UI( channel, "{} updated".format(module_name), "{} is now {}".format(module_name, "activated" if module_state else "deactivated"), modulename=modulename ) return gui
677,334
Creates an embed UI containing the prefix modified message Args: channel (discord.Channel): The Discord channel to bind the embed to new_prefix (str): The value of the new prefix Returns: embed: The created embed
def modify_prefix(channel, new_prefix): # Create embed UI object gui = ui_embed.UI( channel, "Prefix updated", "Modis prefix is now `{}`".format(new_prefix), modulename=modulename ) return gui
677,335
Creates an embed UI containing a user warning message

    Args:
        channel (discord.Channel): The Discord channel to bind the embed to
        user (discord.User): The user to warn
        warnings (int): The number of warnings the user now has
        max_warnings (int): The maximum warnings before the user is banned

    Returns:
        ui (ui_embed.UI): The embed UI object
def user_warning(channel, user, warnings, max_warnings):
    username = user.name
    if isinstance(user, discord.Member):
        if user.nick is not None:
            username = user.nick

    warning_count_text = "warnings" if warnings != 1 else "warning"
    warning_text = "{} {}".format(warnings, warning_count_text)

    result_text = "at {} warnings you will be banned".format(max_warnings)
    if warnings >= max_warnings:
        result_text = "you are now being banned for reaching the maximum number of warnings"

    # Create embed UI object
    gui = ui_embed.UI(
        channel,
        "Warning {}".format(username),
        "{}, you now have {}; {}".format(username, warning_text, result_text),
        modulename=modulename
    )

    return gui
677,336
Creates an embed UI containing a user ban message

    Args:
        channel (discord.Channel): The Discord channel to bind the embed to
        user (discord.User): The user to ban

    Returns:
        ui (ui_embed.UI): The embed UI object
def user_ban(channel, user): username = user.name if isinstance(user, discord.Member): if user.nick is not None: username = user.nick # Create embed UI object gui = ui_embed.UI( channel, "Banned {}".format(username), "{} has been banned from this server".format(username), modulename=modulename ) return gui
677,337
Creates an embed UI containing the maximum warnings changed message

    Args:
        channel (discord.Channel): The Discord channel to bind the embed to
        max_warnings (int): The new maximum warnings

    Returns:
        ui (ui_embed.UI): The embed UI object
def warning_max_changed(channel, max_warnings): # Create embed UI object gui = ui_embed.UI( channel, "Maximum Warnings Changed", "Users must now have {} warnings to be banned " "(this won't ban existing users with warnings)".format(max_warnings), modulename=modulename ) return gui
677,338
Creates an embed UI containing an error message Args: channel (discord.Channel): The Discord channel to bind the embed to title (str): The title of the embed description (str): The description for the error Returns: ui (ui_embed.UI): The embed UI object
def error(channel, title, description): # Create embed UI object gui = ui_embed.UI( channel, title, description, modulename=modulename ) return gui
677,339
Updates the server info for the given server Args: server: The Discord server to update info for
async def update_server_data(server): data = datatools.get_data() # Add the server to server data if it doesn't yet exist send_welcome_message = False if server.id not in data["discord"]["servers"]: logger.debug("Adding new server to serverdata") data["discord"]["servers"][server.id] = {"prefix": "!"} if "mute_intro" not in data or not data["mute_intro"]: send_welcome_message = True # Make sure all modules are in the server _dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) _dir_modules = "{}/../".format(_dir) for module_name in os.listdir(_dir_modules): if module_name.startswith("_") or module_name.startswith("!"): continue if not os.path.isfile("{}/{}/_data.py".format(_dir_modules, module_name)): logger.warning("No _data.py file found for module {}".format(module_name)) continue try: import_name = ".discord_modis.modules.{}.{}".format(module_name, "_data") _data = importlib.import_module(import_name, "modis") if _data.modulename not in data["discord"]["servers"][server.id]: data["discord"]["servers"][server.id][_data.modulename] = _data.sd_structure datatools.write_data(data) except Exception as e: logger.error("Could not initialise module {}".format(module_name)) logger.exception(e) datatools.write_data(data) # Send a welcome message now if send_welcome_message: default_channel = server.default_channel if not default_channel: for channel in server.channels: if channel.name == "general": default_channel = channel break if not default_channel: for channel in server.channels: if "general" in channel.name: default_channel = channel break if not default_channel: for channel in server.channels: if channel.type == discord.ChannelType.text: default_channel = channel break # Display a welcome message if default_channel: hello_message = "Hello! I'm Modis.\n\n" + \ "The prefix is currently `!`, and can be changed at any time using `!prefix`\n\n" + \ "You can use `!help` to get help commands for all modules, " + \ "or {} me to get the server prefix and help commands.".format(server.me.mention) await client.send_message(default_channel, hello_message)
677,340
Remove a server from the server data

    Args:
        server_id (str): The id of the server to remove from the server data
def remove_server_data(server_id): logger.debug("Removing server from serverdata") # Remove the server from data data = datatools.get_data() if server_id in data["discord"]["servers"]: data["discord"]["servers"].pop(server_id) datatools.write_data(data)
677,341
Create a new main window frame.

    Args:
        parent: A tk or ttk object
        discord_token (str): The bot token for your Discord application
        discord_client_id (str): The bot's client ID
def __init__(self, parent, discord_token, discord_client_id): super(Frame, self).__init__(parent) logger.debug("Initialising frame") # Status bar statusbar = StatusBar(self) statusbar.grid(column=0, row=1, sticky="W E S") # Create the main control panel nav = ttk.Notebook(self) module_frame = ModuleFrame(nav) nav.add(GlobalFrame(nav, discord_token, discord_client_id, module_frame, statusbar), text="Global") nav.add(module_frame, text="Modules") nav.grid(column=0, row=0, sticky="W E N S") def on_closing(): try: from ._client import client if client.loop: asyncio.run_coroutine_threadsafe(client.logout(), client.loop) except RuntimeError: pass except Exception as e: logger.exception(e) parent.destroy() import sys sys.exit(0) parent.protocol("WM_DELETE_WINDOW", on_closing) # Configure stretch ratios self.columnconfigure(0, weight=1) self.rowconfigure(0, weight=1) # Welcome! logger.info("Welcome to Modis v{} ({})".format(datatools.version, datatools.version_nickname)) # Update with version data state, response = datatools.get_compare_version() logger.info("{}\n".format(response))
677,343
Create a new module frame and add it to the given parent. Args: parent: A tk or ttk object
def __init__(self, parent): super(ModuleFrame, self).__init__(parent) logger.debug("Initialising module tabs") # Setup styles style = ttk.Style() style.configure("Module.TFrame", background="white") self.module_buttons = {} self.current_button = None # Module view self.module_list = ttk.Frame(self, width=150, style="Module.TFrame") self.module_list.grid(column=0, row=0, padx=0, pady=0, sticky="W E N S") self.module_list.columnconfigure(0, weight=1) self.module_list.rowconfigure(0, weight=0) self.module_list.rowconfigure(1, weight=1) # Header header = tk.Label(self.module_list, text="Modules", bg="white", fg="#484848") header.grid(column=0, row=0, padx=0, pady=0, sticky="W E N") # Module selection list self.module_selection = ttk.Frame(self.module_list, style="Module.TFrame") self.module_selection.grid(column=0, row=1, padx=0, pady=0, sticky="W E N S") self.module_selection.columnconfigure(0, weight=1) # Module UI view self.module_ui = ttk.Frame(self) self.module_ui.grid(column=1, row=0, padx=0, pady=0, sticky="W E N S") self.module_ui.columnconfigure(0, weight=1) self.module_ui.rowconfigure(0, weight=1) self.clear_modules() # Configure stretch ratios self.columnconfigure(0, minsize=150) self.columnconfigure(1, weight=1) self.rowconfigure(0, weight=1)
677,345
Adds a module to the list Args: module_name (str): The name of the module module_ui: The function to call to create the module's UI
def add_module(self, module_name, module_ui): m_button = tk.Label(self.module_selection, text=module_name, bg="white", anchor="w") m_button.grid(column=0, row=len(self.module_selection.winfo_children()), padx=0, pady=0, sticky="W E N S") self.module_buttons[module_name] = m_button m_button.bind("<Button-1>", lambda e: self.module_selected(module_name, module_ui))
677,347
Called when a module is selected Args: module_name (str): The name of the module module_ui: The function to call to create the module's UI
def module_selected(self, module_name, module_ui): if self.current_button == self.module_buttons[module_name]: return self.module_buttons[module_name].config(bg="#cacaca") if self.current_button is not None: self.current_button.config(bg="white") self.current_button = self.module_buttons[module_name] self.clear_ui() try: # Create the UI module_ui_frame = ModuleUIBaseFrame(self.module_ui, module_name, module_ui) module_ui_frame.grid(column=0, row=0, sticky="W E N S") except Exception as e: logger.error("Could not load UI for {}".format(module_name)) logger.exception(e) # Create a error UI tk.Label(self.module_ui, text="Could not load UI for {}".format(module_name)).grid( column=0, row=0, padx=0, pady=0, sticky="W E N S")
677,348
Create a new base for a module UI Args: parent: A tk or ttk object module_name (str): The name of the module module_ui: The _ui.py file to add for the module
def __init__(self, parent, module_name, module_ui): super(ModuleUIBaseFrame, self).__init__(parent, padding=8) self.columnconfigure(0, weight=1) self.rowconfigure(1, weight=1) if module_ui is not None: # Module UI frame module_ui.ModuleUIFrame(self).grid(row=0, column=0, sticky="W E N S") else: logger.debug("No _ui.py found for '{}'".format(module_name)) # Help frame help_frame = ttk.LabelFrame(self, padding=8, text="Help") help_frame.grid(row=1, column=0, sticky="W E N S") help_frame.columnconfigure(0, weight=1) help_frame.rowconfigure(0, weight=1) # Find the help path _dir = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) help_path = "{}/modules/{}/{}".format(_dir, module_name, "_help.json") if os.path.isfile(help_path): # Load the text helptools.add_help_text(help_frame, help_path) else: # Default message tk.Label(help_frame, text="No _help.json file found for '{}'".format(module_name)).grid(row=0, column=0, sticky="W E N S")
677,349
Create a new control panel and add it to the parent.

    Args:
        parent: A tk or ttk object
        discord_token (str): The bot token for your Discord application
        discord_client_id (str): The bot's client ID
        module_frame: The ModuleFrame holding the module tabs
        status_bar: The StatusBar to update with Modis' state
def __init__(self, parent, discord_token, discord_client_id, module_frame, status_bar): logger.debug("Initialising main control panel") super(BotControl, self).__init__( parent, padding=8, text="Modis control panel") self.discord_thread = None # Key name self.key_name = tk.StringVar() ttk.Label(self, text="API Key Name:").grid(column=0, row=0, padx=4, pady=4, sticky="W E S") self.text_key_name = ttk.Entry(self, textvariable=self.key_name) self.text_key_name.grid(column=0, row=1, padx=4, pady=4, sticky="W E N S") # Key value self.key_val = tk.StringVar() ttk.Label(self, text="API Key Value:").grid(column=1, row=0, padx=4, pady=4, sticky="W E S") self.text_key_value = ttk.Entry(self, textvariable=self.key_val) self.text_key_value.grid(column=1, row=1, padx=4, pady=4, sticky="W E N S") # Callbacks for text edit self.key_name.trace("w", lambda name, index, mode, sv=self.key_name: self.key_changed()) self.key_val.trace("w", lambda name, index, mode, sv=self.key_val: self.key_changed()) # Add key button self.button_key_add = ttk.Button( self, command=lambda: self.key_add(), text="Add API Key") self.button_key_add.grid(column=2, row=1, padx=4, pady=4, sticky="W E N S") self.button_key_add.state(["disabled"]) # Module frame self.module_frame = module_frame # Status bar self.status_bar = status_bar # Toggle button self.state = "off" self.button_toggle_text = tk.StringVar(value="Start Modis") self.button_toggle = ttk.Button( self, command=lambda: self.toggle(discord_token, discord_client_id), textvariable=self.button_toggle_text) self.button_toggle.grid(column=3, row=1, padx=4, pady=4, sticky="W E N S") # Configure stretch ratios self.columnconfigure(0, weight=1) self.columnconfigure(1, weight=2)
677,350
Create a new text box for the console log. Args: parent: A tk or ttk object
def __init__(self, parent):
        logger.debug("Initialising log panel")

        super(Log, self).__init__(parent, padding=8, text="Python console log")

        # Log text box
        log = tk.Text(self, wrap="none")
        log.grid(column=0, row=0, sticky="W E N S")

        # Config tags
        log.tag_config('critical', foreground="red", underline=True)
        log.tag_config('error', foreground="red")
        log.tag_config('warning', foreground="orange")
        log.tag_config('info')
        log.tag_config('debug', foreground="#444444")

        # Vertical Scrollbar
        scrollbar = ttk.Scrollbar(self, orient="vertical", command=log.yview)
        scrollbar.grid(column=1, row=0, sticky="N S")
        log['yscrollcommand'] = scrollbar.set

        # Horizontal Scrollbar
        scrollbar = ttk.Scrollbar(self, orient="horizontal", command=log.xview)
        scrollbar.grid(column=0, row=1, sticky="W E")
        log['xscrollcommand'] = scrollbar.set

        # Redirect Python console output to log text box
        class LogHandler(logging.Handler):
            def __init__(self, text_widget):
                logging.Handler.__init__(self)

                self.text_widget = text_widget
                self.text_widget.config(state=tk.DISABLED)

            def flush(self):
                try:
                    self.text_widget.see("end")
                except Exception:
                    pass

            def emit(self, record):
                msg = self.format(record)
                # Strip the fixed-width logger name field, keeping level and message
                msg = msg[:9] + msg[29:]

                tags = ()
                if msg.startswith("CRITICAL"):
                    tags = 'critical'
                elif msg.startswith("ERROR"):
                    tags = 'error'
                elif msg.startswith("WARNING"):
                    tags = 'warning'
                elif msg.startswith("INFO"):
                    tags = 'info'
                elif msg.startswith("DEBUG"):
                    tags = 'debug'

                self.text_widget.config(state=tk.NORMAL)
                self.text_widget.insert("end", msg + "\n", tags)
                self.text_widget.config(state=tk.DISABLED)
                self.flush()

        discord_logger = logging.getLogger("modis.discord_modis")
        formatter = logging.Formatter(
            "{levelname:8} {name} - {message}", style="{")
        discord_handler = LogHandler(log)
        discord_handler.setFormatter(formatter)
        discord_logger.addHandler(discord_handler)

        # Configure stretch ratios
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
677,356
Create a new status bar. Args: parent: A tk or ttk object
def __init__(self, parent):
        logger.debug("Initialising status bar")

        super(StatusBar, self).__init__(parent)

        self.status = tk.StringVar()

        # Status bar
        self.statusbar = ttk.Label(self, textvariable=self.status, padding=2, anchor="center")
        self.statusbar.grid(column=0, row=0, sticky="W E")

        # Configure stretch ratios
        self.columnconfigure(0, weight=1)

        # Set default status to offline
        self.set_status(0)
677,357
Updates the status text Args: status (int): The offline/starting/online status of Modis 0: offline, 1: starting, 2: online
def set_status(self, status): text = "" colour = "#FFFFFF" if status == 0: text = "OFFLINE" colour = "#EF9A9A" elif status == 1: text = "STARTING" colour = "#FFE082" elif status == 2: text = "ONLINE" colour = "#A5D6A7" self.status.set(text) self.statusbar.config(background=colour)
677,358
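A usage sketch for set_status above; `statusbar` is a StatusBar instance created elsewhere:

# Usage sketch.
statusbar.set_status(1)   # label shows "STARTING" on an amber background
statusbar.set_status(2)   # label shows "ONLINE" on a green background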
Get the json data from a help file Args: filepath (str): The file path for the help file Returns: data: The json data from a help file
def get_help_data(filepath): try: with open(filepath, 'r') as file: return _json.load(file, object_pairs_hook=OrderedDict) except Exception as e: logger.error("Could not load file {}".format(filepath)) logger.exception(e) return {}
677,359
Load help text from a file and return it as datapacks

    Args:
        filepath (str): The file to load help text from
        prefix (str): The prefix to use for commands

    Returns:
        datapacks (list): The datapacks built from the file
def get_help_datapacks(filepath, prefix="!"): help_contents = get_help_data(filepath) datapacks = [] # Add the content for d in help_contents: heading = d content = "" if "commands" in d.lower(): for c in help_contents[d]: if "name" not in c: continue content += "- `" command = prefix + c["name"] content += "{}".format(command) if "params" in c: for param in c["params"]: content += " [{}]".format(param) content += "`: " if "description" in c: content += c["description"] content += "\n" else: content += help_contents[d] datapacks.append((heading, content, False)) return datapacks
677,360
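A sketch of the expected _help.json shape and the resulting datapacks; the file contents below are made up:

# Hypothetical _help.json contents:
# {
#   "Commands": [
#     {"name": "hex", "params": ["value"], "description": "Convert a hex colour"}
#   ]
# }
datapacks = get_help_datapacks("_help.json", prefix="!")
# -> [("Commands", "- `!hex [value]`: Convert a hex colour\n", False)]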
Loads help text from a file and adds it to the parent

    Args:
        parent: A tk or ttk object
        filepath (str): The file to load help text from
        prefix (str): The prefix to use for commands
def add_help_text(parent, filepath, prefix="!"): import tkinter as tk import tkinter.ttk as ttk help_contents = get_help_data(filepath) text = tk.Text(parent, wrap='word', font=("Helvetica", 10)) text.grid(row=0, column=0, sticky="W E N S") text.tag_config("heading", font=("Helvetica", 14)) text.tag_config("command", font=("Courier", 10)) text.tag_config("param", font=("Courier", 10)) text.tag_config("description") # Vertical Scrollbar scrollbar = ttk.Scrollbar(parent, orient="vertical", command=text.yview) scrollbar.grid(column=1, row=0, sticky="N S") text['yscrollcommand'] = scrollbar.set # Add the content for d in help_contents: text.insert('end', d, "heading") text.insert('end', '\n') if "commands" in d.lower(): for c in help_contents[d]: if "name" not in c: continue command = prefix + c["name"] text.insert('end', command, ("command", "description")) if "params" in c: for param in c["params"]: text.insert('end', " [{}]".format(param), ("param", "description")) text.insert('end', ": ") if "description" in c: text.insert('end', c["description"], "description") text.insert('end', '\n') text.insert('end', '\n') else: text.insert('end', help_contents[d], "description") text.insert('end', '\n\n') text.config(state=tk.DISABLED)
677,361
The on_reaction_add event handler for this module

    Args:
        reaction (discord.Reaction): Input reaction
        user (discord.User): The user that added the reaction
async def on_reaction_add(reaction, user):
    # Simplify reaction info
    server = reaction.message.server
    emoji = reaction.emoji

    # Only handle server reactions and ignore my own reactions
    if server is None or user == reaction.message.channel.server.me:
        return

    data = datatools.get_data()
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return

    # Commands section
    if server.id not in _data.cache or _data.cache[server.id].state == 'destroyed':
        return

    try:
        valid_reaction = reaction.message.id == _data.cache[server.id].embed.sent_embed.id
    except AttributeError:
        pass
    else:
        if valid_reaction:
            # Remove reaction
            try:
                await client.remove_reaction(reaction.message, emoji, user)
            except discord.errors.NotFound:
                pass
            except discord.errors.Forbidden:
                pass

            # Commands
            if emoji == "⏯":
                await _data.cache[server.id].toggle()
            elif emoji == "⏹":
                await _data.cache[server.id].stop()
            elif emoji == "⏭":
                await _data.cache[server.id].skip("1")
            elif emoji == "⏮":
                await _data.cache[server.id].rewind("1")
            elif emoji == "🔀":
                await _data.cache[server.id].shuffle()
            elif emoji == "🔉":
                await _data.cache[server.id].setvolume('-')
            elif emoji == "🔊":
                await _data.cache[server.id].setvolume('+')
677,362
Start Modis in console format. Args: discord_token (str): The bot token for your Discord application discord_client_id: The bot's client ID
def console(discord_token, discord_client_id): state, response = datatools.get_compare_version() logger.info("Starting Modis in console") logger.info(response) import threading import asyncio logger.debug("Loading packages") from modis.discord_modis import main as discord_modis_console from modis.reddit_modis import main as reddit_modis_console from modis.facebook_modis import main as facebook_modis_console # Create threads logger.debug("Initiating threads") loop = asyncio.get_event_loop() discord_thread = threading.Thread( target=discord_modis_console.start, args=[discord_token, discord_client_id, loop]) reddit_thread = threading.Thread( target=reddit_modis_console.start, args=[]) facebook_thread = threading.Thread( target=facebook_modis_console.start, args=[]) # Run threads logger.debug("Starting threads") discord_thread.start() reddit_thread.start() facebook_thread.start() logger.debug("Root startup completed")
677,363
Start Modis in gui format. Args: discord_token (str): The bot token for your Discord application discord_client_id: The bot's client ID
def gui(discord_token, discord_client_id): logger.info("Starting Modis in GUI") import tkinter as tk logger.debug("Loading packages") from modis.discord_modis import gui as discord_modis_gui from modis.reddit_modis import gui as reddit_modis_gui from modis.facebook_modis import gui as facebook_modis_gui logger.debug("Initialising window") # Setup the root window root = tk.Tk() root.minsize(width=800, height=400) root.geometry("800x600") root.title("Modis Control Panel") # Icon root.iconbitmap(r"{}/assets/modis.ico".format(file_dir)) # Setup the notebook discord = discord_modis_gui.Frame(root, discord_token, discord_client_id) discord.grid(column=0, row=0, padx=0, pady=0, sticky="W E N S") # Configure stretch ratios root.columnconfigure(0, weight=1) root.rowconfigure(0, weight=1) discord.columnconfigure(0, weight=1) discord.rowconfigure(0, weight=1) logger.debug("GUI initialised") # Run the window UI root.mainloop()
677,364
Write the data to the data.json file Args: data (dict): The updated data dictionary for Modis
def write_data(data): sorted_dict = sort_recursive(data) with open(_datafile, 'w') as file: _json.dump(sorted_dict, file, indent=2)
677,365
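A usage sketch for write_data above; the server id and the new prefix are made up, and get_data is assumed to be the companion loader referenced throughout this module:

# Hypothetical usage sketch.
data = get_data()
data["discord"]["servers"]["123456789"]["prefix"] = "?"
write_data(data)   # sorts the dict recursively and saves it to data.json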
Recursively sorts all elements in a dictionary Args: data (dict): The dictionary to sort Returns: sorted_dict (OrderedDict): The sorted data dict
def sort_recursive(data):
    newdict = {}
    for key, value in data.items():
        if type(value) is dict:
            newdict[key] = sort_recursive(value)
        else:
            newdict[key] = value

    return OrderedDict(sorted(newdict.items(),
                              key=lambda item: (compare_type(type(item[1])), item[0])))
677,366
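A usage sketch for sort_recursive above; the exact ordering of mixed value types depends on compare_type, which is defined elsewhere:

# Usage sketch; output order for mixed types depends on compare_type.
print(sort_recursive({"b": 1, "a": {"y": 2, "x": 3}}))
# e.g. OrderedDict([('a', OrderedDict([('x', 3), ('y', 2)])), ('b', 1)])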
Creates an embed UI containing the Rocket League stats

    Args:
        channel (discord.Channel): The Discord channel to bind the embed to
        stats (tuple): Tuples of (field, value, percentile)
        name (str): The name of the player
        platform (str): The display name of the platform the stats are from
        dp (str): URL to the player's dp

    Returns:
        ui (ui_embed.UI): The embed UI object
def success(channel, stats, name, platform, dp): # Create datapacks datapacks = [("Platform", platform, False)] for stat in stats: # Add stats if stat[0] in ("Duel 1v1", "Doubles 2v2", "Solo Standard 3v3", "Standard 3v3"): stat_name = "__" + stat[0] + "__" stat_value = "**" + stat[1] + "**" else: stat_name = stat[0] stat_value = stat[1] # Add percentile if it exists if stat[2]: stat_value += " *(Top " + stat[2] + "%)*" datapacks.append((stat_name, stat_value, True)) # Create embed UI object gui = ui_embed.UI( channel, "Rocket League Stats: {}".format(name), "*Stats obtained from [Rocket League Tracker Network](https://rocketleague.tracker.network/)*", modulename=modulename, colour=0x0088FF, thumbnail=dp, datapacks=datapacks ) return gui
677,369
Creates an embed UI for invalid SteamIDs Args: channel (discord.Channel): The Discord channel to bind the embed to Returns: ui (ui_embed.UI): The embed UI object
def fail_steamid(channel): gui = ui_embed.UI( channel, "That SteamID doesn't exist.", "You can get your SteamID by going to your profile page and looking at the url, " "or you can set a custom ID by going to edit profile on your profile page.", modulename=modulename, colour=0x0088FF ) return gui
677,370
Creates an embed UI for when the API call didn't work Args: channel (discord.Channel): The Discord channel to bind the embed to Returns: ui (ui_embed.UI): The embed UI object
def fail_api(channel): gui = ui_embed.UI( channel, "Couldn't get stats off RLTrackerNetwork.", "Maybe the API changed, please tell Infraxion.", modulename=modulename, colour=0x0088FF ) return gui
677,371
The on_message event handler for this module Args: message (discord.Message): Input message
async def on_message(message):
    # Simplify message info
    server = message.server
    author = message.author
    channel = message.channel
    content = message.content

    # Only reply to server messages and don't reply to myself
    if server is None or author == channel.server.me:
        return

    data = datatools.get_data()
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return

    # Commands section
    prefix = data["discord"]["servers"][server.id]["prefix"]
    if content.startswith(prefix):
        # Parse message
        package = content.split(" ")
        command = package[0][len(prefix):]
        args = package[1:]
        arg = ' '.join(args)

        # Commands
        if command == 'hex':
            await client.send_typing(channel)

            # Parse message
            hex_strs = api_hexconvert.convert_hex_value(arg)

            # Create embed UI
            if len(hex_strs) > 0:
                for hex_str in hex_strs:
                    image_url = convert_hex_to_url(hex_str)
                    embed = ui_embed.success(channel, image_url, hex_str)
                    await embed.send()
            else:
                embed = ui_embed.fail_api(channel)
                await embed.send()
    else:
        # Parse message
        hex_strs = api_hexconvert.convert_hex_value(content)

        # Create embed UI
        if len(hex_strs) > 0:
            for hex_str in hex_strs:
                await client.send_typing(channel)
                image_url = convert_hex_to_url(hex_str)
                embed = ui_embed.success(channel, image_url, hex_str)
                await embed.send()
677,372
The on_message event handler for this module Args: message (discord.Message): Input message
async def on_message(message):
    # Simplify message info
    server = message.server
    author = message.author
    channel = message.channel
    content = message.content

    # Only reply to server messages and don't reply to myself
    if server is None or author == channel.server.me:
        return

    data = datatools.get_data()
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return

    # Commands section
    prefix = data["discord"]["servers"][server.id]["prefix"]
    if content.startswith(prefix):
        # Parse message
        package = content.split(" ")
        command = package[0][len(prefix):]

        # Commands
        if command == 'gamedeals':
            await client.send_typing(channel)

            # Get posts from Reddit API
            posts = api_reddit.get_top10()

            if posts:
                for post in posts:
                    # Create embed UI
                    embed = ui_embed.success(channel, post)
                    await embed.send()
            else:
                embed = ui_embed.no_results(channel)
                await embed.send()
677,374
Creates an embed UI containing a Reddit post

    Args:
        channel (discord.Channel): The Discord channel to bind the embed to
        post (tuple): A tuple of (title, link, upvotes) for a single post

    Returns:
        ui (ui_embed.UI): The embed UI object
def success(channel, post): # Create datapacks datapacks = [("Game", post[0], True), ("Upvotes", post[2], True)] # Create embed UI object gui = ui_embed.UI( channel, "Link", post[1], modulename=modulename, colour=0xFF8800, thumbnail=post[1], datapacks=datapacks ) return gui
677,376
Creates an embed UI for when there were no results Args: channel (discord.Channel): The Discord channel to bind the embed to Returns: ui (ui_embed.UI): The embed UI object
def no_results(channel): gui = ui_embed.UI( channel, "No results", ":c", modulename=modulename, colour=0xFF8800 ) return gui
677,377
Makes a new time bar string Args: progress: How far through the current song we are (in seconds) duration: The duration of the current song (in seconds) Returns: timebar (str): The time bar string
def make_timebar(progress=0, duration=0):
    if duration <= 0:
        return "---"

    duration_string = api_music.duration_to_string(duration)

    time_counts = int(round((progress / duration) * TIMEBAR_LENGTH))
    if time_counts > TIMEBAR_LENGTH:
        time_counts = TIMEBAR_LENGTH

    bar = "│" + (TIMEBAR_PCHAR * time_counts) + (TIMEBAR_ECHAR * (TIMEBAR_LENGTH - time_counts)) + "│"
    time_bar = "{} {}".format(bar, duration_string)

    return time_bar
677,378
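A usage sketch for make_timebar above; TIMEBAR_LENGTH = 10, the fill characters, and the "1:00" duration format are assumptions about module constants defined elsewhere:

# Usage sketch, assuming TIMEBAR_LENGTH = 10 and "▰"/"▱" as the progress/empty characters.
print(make_timebar(progress=30, duration=60))
# e.g. "│▰▰▰▰▰▱▱▱▱▱│ 1:00" - a half-filled bar followed by the formatted duration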
The on_message event handler for this module Args: message (discord.Message): Input message
async def on_message(message):
    # Simplify message info
    server = message.server
    author = message.author
    channel = message.channel
    content = message.content

    # Only reply to server messages and don't reply to myself
    if server is None or author == channel.server.me:
        return

    data = datatools.get_data()
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return

    # Only reply to mentions
    if channel.server.me in message.mentions:
        logger.info("Bot was mentioned, summoning Mitsuku")
        await client.send_typing(channel)

        # Get a new botcust2 from Mitsuku if one does not exist for this channel in serverdata
        if channel.id not in data["discord"]["servers"][server.id][_data.modulename]["channels"]:
            new_serverdata = data
            new_serverdata["discord"]["servers"][server.id][_data.modulename]["channels"][channel.id] = \
                api_mitsuku.get_botcust2()
            datatools.write_data(new_serverdata)

        # Get botcust2 from serverdata
        botcust2 = data["discord"]["servers"][server.id][_data.modulename]["channels"][channel.id]

        # Remove the mention from the message content so Mitsuku doesn't see it
        content = content.replace("<@{}>".format(str(channel.server.me.id)), ' ')
        content = content.replace("<@!{}>".format(str(channel.server.me.id)), ' ')

        # Send Mitsuku's reply
        if botcust2:
            response = api_mitsuku.query(botcust2, content)
            if response:
                await client.send_message(channel, response)
            else:
                await client.send_message(channel, "```Couldn't get a readable response from Mitsuku.```")
        else:
            await client.send_message(channel, "```Couldn't initialise with Mitsuku.```")
677,379
Updates a particular datapack's data Args: index (int): The index of the datapack data (str): The new value to set for this datapack
def update_data(self, index, data): datapack = self.built_embed.to_dict()["fields"][index] self.built_embed.set_field_at(index, name=datapack["name"], value=data, inline=datapack["inline"])
677,383
Gets the Rocket League stats, name, and dp for a UserID

    Args:
        player (str): The UserID of the player we want to rank check
        platform (str): The platform to check for, can be 'steam', 'ps', or 'xbox'

    Returns:
        success (bool): Whether the rank check was successful
        package (tuple): If successful, the retrieved stats, in order (stats, name, platform, dp)
def check_rank(player, platform="steam"):
    # Get player ID and name from the Rocket League Tracker Network webpage
    webpage = requests.get(
        "https://rocketleague.tracker.network/profile/{}/{}".format(platform, player)
    ).text

    try:
        # Get player ID; the id is assumed to end at the closing quote of the url
        playerid_index = webpage.index("/live?ids=") + len("/live?ids=")
        playerid_end_index = webpage.index('"', playerid_index)
        playerid = webpage[playerid_index:playerid_end_index]
        # Get player name; the name is assumed to end at the next html tag
        name_index = webpage.index("Stats Profile : ") + len("Stats Profile : ")
        name_end_index = webpage.index("<", name_index)
        name = webpage[name_index:name_end_index]
    except (ValueError, IndexError):
        return False, ()

    # Get player stats from Rocket League Tracker Network
    livedata = json.loads(
        requests.post(
            "https://rocketleague.tracker.network/live/data",
            json={"playerIds": [playerid]}
        ).text
    )

    stats = []
    try:
        for statpack in livedata['players'][0]['Stats']:
            field = statpack['Value']['Label']
            value = str(statpack['Value']['DisplayValue'])
            if statpack['Value']['Percentile']:
                percentile = str(statpack['Value']['Percentile'])
            else:
                percentile = None
            stats.append((field, value, percentile))
    except (IndexError, KeyError):
        return False, ()

    dp = "https://rocketleague.media.zestyio.com/rocket-league-logos-vr-white.f1cb27a519bdb5b6ed34049a5b86e317.png"

    platform_display = platform
    if platform == "steam":
        platform_display = "Steam"
    elif platform == "ps":
        platform_display = "PlayStation"
    elif platform == "xbox":
        platform_display = "Xbox"

    return True, (stats, name, platform_display, dp)
677,388
Send a message to a channel Args: channel_id (str): The id of the channel to send the message to message (str): The message to send to the channel
def send_message(channel_id, message):
    channel = client.get_channel(channel_id)
    if channel is None:
        logger.info("{} is not a channel".format(channel_id))
        return

    # Check that the module is enabled in the server
    data = datatools.get_data()
    if not data["discord"]["servers"][channel.server.id][modulename]["activated"]:
        logger.info("This module has been disabled in {} ({})".format(channel.server.name, channel.server.id))
        return

    try:
        runcoro(client.send_message(channel, message))
    except Exception as e:
        logger.exception(e)
677,389
Runs an asynchronous function without needing to use await - useful for lambdas

    Args:
        async_function (Coroutine): The asynchronous function to run

    Returns:
        The result of the coroutine
def runcoro(async_function): future = _asyncio.run_coroutine_threadsafe(async_function, client.loop) result = future.result() return result
677,390
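A usage sketch for runcoro above; the frame, channel, and message are hypothetical, and client.send_message is the discord.py 0.16-style coroutine used throughout this module:

# Hypothetical usage sketch: call an async discord.py method from a synchronous tkinter callback.
button = ttk.Button(frame, command=lambda: runcoro(client.send_message(channel, "Hello!")))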