code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
import os
import numpy as np
import pandas as pd
import scipy.io.wavfile
import scipy.signal as signal
from scipy.fft import rfft, fftfreq
from libquantum import synthetics
import matplotlib.pyplot as plt
from typing import List, Optional

# Supported wav sample rates
permitted_wav_fs_values = 8000., 16000., 48000., 96000., 192000.
exception_str = "Wav sample rate must be 8000, 16000, 48000, 96000, or 192000 Hz"
lowest_wav_fs_value = 8000.


def stretch_factor_str(sig_sample_rate_hz: float, wav_sample_rate_hz: float) -> str:
    """
    Compute file string for speedup and slowdown options

    :param sig_sample_rate_hz: input signal sample rate
    :param wav_sample_rate_hz: wav sample rate; supports permitted_wav_fs_values
    :return: string describing the playback time stretch, e.g. '_speedup_6.0x_to'
    """
    stretch_factor = wav_sample_rate_hz / sig_sample_rate_hz
    # If stretch factor is unity, no change
    stretch_str = '_preserve'
    if stretch_factor > 1:
        # Factor is rounded down to one decimal place for the filename
        stretch_str = '_speedup_' + str(int(10.*stretch_factor)/10) + 'x_to'
    elif 1 > stretch_factor > 0:
        stretch_str = '_slowdown_' + str(int(10./stretch_factor)/10) + 'x_to'
    else:
        # Zero/negative factor means an invalid sample rate was passed in;
        # keep the permissive '_preserve' fallback but warn.
        print("Stretch factor is zero or negative, address")
    return stretch_str


def resample_factor_str(sig_sample_rate_hz: float, wav_sample_rate_hz: float) -> str:
    """
    Compute file string for oversample and downsample options

    :param sig_sample_rate_hz: input signal sample rate
    :param wav_sample_rate_hz: wav sample rate; supports permitted_wav_fs_values
    :return: string with resample factor, e.g. '_decimate_6.0x_to'
    """
    resample_factor = wav_sample_rate_hz / sig_sample_rate_hz
    # If resample factor is unity, no change
    resample_str = '_preserve'
    if resample_factor > 1:
        resample_str = '_upsample_' + str(int(10.*resample_factor)/10) + 'x_to'
    elif 1 > resample_factor > 0:
        resample_str = '_decimate_' + str(int(10./resample_factor)/10) + 'x_to'
    elif resample_factor < 0:
        # Invalid rate; fall through with the '_preserve' label but warn.
        print("Resample factor is negative: address")
    return resample_str


def sample_rate_str(wav_sample_rate_hz: float) -> str:
    """
    Generate the sample rate string for the exported sound file

    :param wav_sample_rate_hz: target wav sample rate
    :return: string with sample rate in kHz, e.g. '_8khz.wav'
    """
    wav_fs_str = '_' + str(int(wav_sample_rate_hz / 1000)) + 'khz.wav'
    return wav_fs_str


def resample_fourier(sig_wf: np.ndarray,
                     sig_sample_rate_hz: float,
                     new_sample_rate_hz: float = 8000.) -> np.ndarray:
    """
    Resample the Fourier way; can upsample or downsample.
    Downsample will be aliased, so use decimate in that case.

    :param sig_wf: input signal waveform, reasonably well preprocessed
    :param sig_sample_rate_hz: signal sample rate
    :param new_sample_rate_hz: resampling sample rate
    :return: resampled signal
    """
    sig_len = len(sig_wf)
    new_len = int(sig_len * new_sample_rate_hz / sig_sample_rate_hz)
    sig_resampled = signal.resample(x=sig_wf, num=new_len)
    return sig_resampled


def decimate_to_aud(sig_wf: np.ndarray,
                    sig_sample_rate_hz: float,
                    new_sample_rate_hz: float = 8000.) -> np.ndarray:
    """
    Decimate with anti-aliasing, min of 8 kHz.
    Assumed preprocessed for gaps, DC offset, slope, etc.

    :param sig_wf: input signal waveform, reasonably well preprocessed
    :param sig_sample_rate_hz: signal sample rate
    :param new_sample_rate_hz: target wav sample rate
    :return: decimated signal
    :raises ValueError: if the integer decimation factor is below 2
    """
    decimation_factor = int(np.round(sig_sample_rate_hz/new_sample_rate_hz))
    if decimation_factor >= 2:
        sig_resampled = signal.decimate(x=sig_wf, q=decimation_factor, zero_phase=True)
        return sig_resampled
    # BUG FIX: the original printed a message and called exit(), killing the
    # interpreter from library code; raise an exception instead.
    raise ValueError("Decimation factor must be >= 2; "
                     "check sig_sample_rate_hz vs new_sample_rate_hz")


def save_to_elastic_wav(sig_wf: np.ndarray,
                        sig_sample_rate_hz: float,
                        wav_filename: str,
                        wav_sample_rate_hz: float = 8000.) -> None:
    """
    Save input signal to wav file, preserving the samples and relabeling the rate
    (time-stretch on playback).

    :param sig_wf: input signal waveform, reasonably well preprocessed
    :param sig_sample_rate_hz: input signal sample rate
    :param wav_filename: wav file name, with directory path
    :param wav_sample_rate_hz: wav sample rate; supports permitted_wav_fs_values
    :return: Export to wav file
    """
    if int(wav_sample_rate_hz) in permitted_wav_fs_values:
        stretch_str = stretch_factor_str(sig_sample_rate_hz=sig_sample_rate_hz,
                                         wav_sample_rate_hz=wav_sample_rate_hz)
        khz_str = sample_rate_str(wav_sample_rate_hz=wav_sample_rate_hz)
        export_filename = wav_filename + stretch_str + khz_str
        # Normalize real part to 0.9 full scale; a silent (all-zero) signal
        # would divide by zero here — callers are expected to pass real data.
        synth_wav = 0.9 * np.real(sig_wf) / np.max(np.abs((np.real(sig_wf))))
        scipy.io.wavfile.write(export_filename, int(wav_sample_rate_hz), synth_wav)
    else:
        print(exception_str)


def save_to_resampled_wav(sig_wf: np.ndarray,
                          sig_sample_rate_hz: float,
                          wav_filename: str,
                          wav_sample_rate_hz: float = 8000.) -> None:
    """
    Save an already-resampled signal to wav file; the filename records the
    resample (not stretch) factor relative to the original rate.

    :param sig_wf: input signal waveform, reasonably well preprocessed
    :param sig_sample_rate_hz: input signal sample rate
    :param wav_filename: wav file name, with directory path
    :param wav_sample_rate_hz: wav sample rate; only 8kHz, 16kHz, and 48kHz
    :return: Export to wav file
    """
    # Export to wav directory
    if int(wav_sample_rate_hz) in permitted_wav_fs_values:
        resample_str = resample_factor_str(sig_sample_rate_hz=sig_sample_rate_hz,
                                           wav_sample_rate_hz=wav_sample_rate_hz)
        khz_str = sample_rate_str(wav_sample_rate_hz=wav_sample_rate_hz)
        export_filename = wav_filename + resample_str + khz_str
        synth_wav = 0.9 * np.real(sig_wf) / np.max(np.abs((np.real(sig_wf))))
        scipy.io.wavfile.write(export_filename, int(wav_sample_rate_hz), synth_wav)
    else:
        print(exception_str)


def pandas_to_resampled_wav(df: pd.DataFrame,
                            sig_wf_label: str,
                            sig_sample_rate_hz_label: str,
                            output_wav_directory: str,
                            output_wav_prefix: str = 'redvox',
                            sig_id_label: str = "index",
                            wav_sample_rate_hz: float = 8000.,
                            sample_rate_tolerance_percent: float = 1.) -> None:
    """
    Ensonify a pandas data frame
    Tested for REDVOX AUDIO

    :param df: data frame
    :param sig_wf_label: label of signal to be ensonified
    :param sig_sample_rate_hz_label: label of sample rate
    :param sig_id_label: label to be used to id the signal
    :param output_wav_directory: output directory where .wav files are stored
    :param output_wav_prefix: output name prefix for .wav files
    :param wav_sample_rate_hz: nominal wav sample rate, default of 8 kHz
    :param sample_rate_tolerance_percent: percent of permitted difference in sig and wav sample rates
    :return: export to .wav
    """
    wav_directory = os.path.join(output_wav_directory, "wav")
    os.makedirs(wav_directory, exist_ok=True)

    for n in df.index:
        sig_sample_rate_hz = df[sig_sample_rate_hz_label][n]
        if sig_id_label == "index":
            sig_id_str = str(df.index[n])
        else:
            sig_id_str = df[sig_id_label][n]
        wav_prefix = output_wav_prefix + sig_id_str
        wav_pd_filename = os.path.join(wav_directory, wav_prefix)
        # Criteria to decimate to downsample or resample to upsample
        decimation_factor = int(np.round(sig_sample_rate_hz/wav_sample_rate_hz))
        # Some variability is expected; don't resample if difference is less than tolerance
        threshold = sample_rate_tolerance_percent/100.*sig_sample_rate_hz
        if np.abs(sig_sample_rate_hz - wav_sample_rate_hz) > threshold:
            if decimation_factor >= 2:
                # Decimate (anti-aliased) when going down by 2x or more
                sig_resampled = \
                    decimate_to_aud(sig_wf=df[sig_wf_label][n],
                                    sig_sample_rate_hz=df[sig_sample_rate_hz_label][n],
                                    new_sample_rate_hz=wav_sample_rate_hz)
            else:
                # Resample the Fourier way otherwise (upsample or small change)
                sig_resampled = \
                    resample_fourier(sig_wf=df[sig_wf_label][n],
                                     sig_sample_rate_hz=df[sig_sample_rate_hz_label][n],
                                     new_sample_rate_hz=wav_sample_rate_hz)
            save_to_resampled_wav(sig_wf=sig_resampled,
                                  sig_sample_rate_hz=df[sig_sample_rate_hz_label][n],
                                  wav_filename=wav_pd_filename,
                                  wav_sample_rate_hz=wav_sample_rate_hz)
        else:
            # Save unchanged waveform
            save_to_resampled_wav(sig_wf=df[sig_wf_label][n],
                                  sig_sample_rate_hz=df[sig_sample_rate_hz_label][n],
                                  wav_filename=wav_pd_filename,
                                  wav_sample_rate_hz=wav_sample_rate_hz)


def pandas_to_elastic_wav(df: pd.DataFrame,
                          sig_wf_label: str,
                          sig_sample_rate_hz_label: str,
                          output_wav_directory: str,
                          output_wav_prefix: str = 'redvox',
                          sig_id_label: str = "index",
                          wav_sample_rate_hz: float = 8000.) -> None:
    """
    Ensonify a pandas data frame
    Tested for REDVOX AUDIO

    :param df: data frame
    :param sig_wf_label: label of signal to be ensonified
    :param sig_sample_rate_hz_label: label of sample rate
    :param sig_id_label: label to be used to id the signal
    :param output_wav_directory: output directory where .wav files are stored
    :param output_wav_prefix: output name prefix for .wav files
    :param wav_sample_rate_hz: nominal wav sample rate, default of 8 kHz
    :return: export to .wav
    """
    wav_directory = os.path.join(output_wav_directory, "wav")
    os.makedirs(wav_directory, exist_ok=True)

    for n in df.index:
        if sig_id_label == "index":
            sig_id_str = str(df.index[n])
        else:
            sig_id_str = df[sig_id_label][n]
        wav_prefix = output_wav_prefix + sig_id_str
        wav_pd_filename = os.path.join(wav_directory, wav_prefix)
        save_to_elastic_wav(sig_wf=df[sig_wf_label][n],
                            sig_sample_rate_hz=df[sig_sample_rate_hz_label][n],
                            wav_filename=wav_pd_filename,
                            wav_sample_rate_hz=wav_sample_rate_hz)


def dual_tone_test():
    """
    Sound check: two-tone synthetic, anti-aliased, resampled, plotted, and exported.

    :return:
    """
    dir_filename = "./test"
    # Test tone
    sample_rate = 48000.
    new_rate = 8000.
    duration_s = 1.
    center_frequency = np.min([sample_rate, new_rate]) / 8.
    t = np.arange(0, duration_s, 1 / sample_rate)
    peak_amp = np.sqrt(2)
    # Tone at fc plus a tone at the new-rate Nyquist to exercise the filters
    y = peak_amp * np.sin(2 * np.pi * center_frequency * t) + \
        peak_amp * np.sin(2 * np.pi * new_rate/2. * t)
    z = synthetics.antialias_halfNyquist(y)
    lz = len(z)
    print('Original Number of Points: ', lz)
    fz = 2 * rfft(z) / lz
    fz_f = fftfreq(lz, 1 / sample_rate)
    z_rs = resample_fourier(sig_wf=z, sig_sample_rate_hz=sample_rate, new_sample_rate_hz=new_rate)
    lz_rs = len(z_rs)
    print('Resampled Number of Points: ', lz_rs)
    t_rs = np.arange(lz_rs) / new_rate
    fz_rs = 2 * rfft(z_rs) / lz_rs
    fz_rs_f = fftfreq(lz_rs, 1 / new_rate)

    plt.figure()
    plt.subplot(211), plt.plot(t, z)
    plt.title('Unit rms test tone, fc = ' + str(int(center_frequency)) + ' Hz')
    plt.subplot(212), plt.loglog(fz_f[1:lz // 2], np.abs(fz[1:lz // 2]))

    plt.figure()
    plt.subplot(211), plt.plot(t_rs, z_rs)
    plt.title('Resampled test tone, fc = ' + str(int(center_frequency)) + ' Hz + Nyquist at new rate')
    plt.subplot(212), plt.loglog(fz_rs_f[1:lz_rs // 2], np.abs(fz_rs[1:lz_rs // 2]))

    save_to_resampled_wav(sig_wf=z_rs,
                          sig_sample_rate_hz=sample_rate,
                          wav_filename=dir_filename,
                          wav_sample_rate_hz=new_rate)
    save_to_elastic_wav(sig_wf=z,
                        sig_sample_rate_hz=sample_rate,
                        wav_filename=dir_filename,
                        wav_sample_rate_hz=new_rate)
    plt.show()


def ensonify_sensors_pandas(df: pd.DataFrame,
                            sig_id_label: str,
                            sensor_column_label_list: List[str],
                            sig_sample_rate_label_list: List[str],
                            wav_sample_rate_hz: float,
                            output_wav_directory: str,
                            output_wav_filename: str = 'redvox',
                            sensor_name_list: Optional[List[str]] = None) -> None:
    """
    Channel sensor data sonification
    Tested for REDVOX SENSOR (API M)

    :param df: input pandas data frame
    :param sig_id_label: string for column name with station ids in df
    :param sensor_column_label_list: list of strings with column name with sensor waveform data in df
    :param sig_sample_rate_label_list: list of strings with the sensor sample rate in Hz column name in df
    :param wav_sample_rate_hz: sample rate in Hz which to resample to. One of:
        8000., 16000., 48000., 96000., 192000.
    :param output_wav_directory: output directory where .wav files are stored
    :param output_wav_filename: output name for .wav files
    :param sensor_name_list: optional list of strings with channel names per sensor
    :return: .wav files, plot
    """
    wav_directory = os.path.join(output_wav_directory, "wav")
    os.makedirs(wav_directory, exist_ok=True)
    print("Exporting wav files to " + wav_directory)

    for station in df.index:
        print(f'\nStation: {df[sig_id_label][station]}')
        # sensor_name_list is indexed by channel across all sensors of a station
        sensor_channel_index = 0
        for index_sensor_label, sensor_label in enumerate(sensor_column_label_list):
            sensor_fs_column_label = sig_sample_rate_label_list[index_sensor_label]
            sig_j = df[sensor_label][station]
            fs_j = df[sensor_fs_column_label][station]
            print(f'\nSensor for {sensor_label}')
            print('Sample rate:', fs_j)
            if sig_j.ndim == 1:  # single-channel sensor (audio basically)
                print('Sensor signal shape:', sig_j.shape)
                # Exporting .wav
                if sensor_name_list is None:
                    full_filename = f"{output_wav_filename}_{df[sig_id_label][station]}_{sensor_label}"
                else:
                    full_filename = f"{output_wav_filename}_{df[sig_id_label][station]}_{sensor_name_list[sensor_channel_index]}"
                # BUG FIX: write into the freshly created "wav" subdirectory
                # announced above (was output_wav_directory)
                filename_with_path = os.path.join(wav_directory, full_filename)
                print(filename_with_path)
                # Save to 48, 96, 192 kHz
                save_to_elastic_wav(sig_wf=sig_j,
                                    sig_sample_rate_hz=fs_j,
                                    wav_filename=filename_with_path,
                                    wav_sample_rate_hz=wav_sample_rate_hz)
                sensor_channel_index += 1
            else:  # three-channel sensor
                print('Sensor signal shape:', sig_j.shape)
                names_index_channel = ['_X', '_Y', '_Z']
                for index_channel, _ in enumerate(sig_j):
                    sig_j_ch_m = sig_j[index_channel]  # get x,y,z of sensor
                    # Exporting .wav
                    if sensor_name_list is None:
                        full_filename = f"{output_wav_filename}_{df[sig_id_label][station]}_{sensor_label + names_index_channel[index_channel]}"
                    else:
                        full_filename = f"{output_wav_filename}_{df[sig_id_label][station]}_{sensor_name_list[sensor_channel_index]}"
                    filename_with_path = os.path.join(wav_directory, full_filename)
                    print(filename_with_path)
                    # BUG FIX: honor the requested wav_sample_rate_hz
                    # (was hard-coded to 192000., unlike the 1-D branch)
                    save_to_elastic_wav(sig_wf=sig_j_ch_m,
                                        sig_sample_rate_hz=fs_j,
                                        wav_filename=filename_with_path,
                                        wav_sample_rate_hz=wav_sample_rate_hz)
                    sensor_channel_index += 1
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_ensonify.py
0.802865
0.394376
redpd_ensonify.py
pypi
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from typing import List

# RedVox modules
from redvox.common.data_window import DataWindow, DataWindowConfig
from redvox.common.station import Station
from redvox.common.date_time_utils import MICROSECONDS_IN_SECOND
import redvox.common.date_time_utils as dt_utils

# RedPandas config
from redpandas.redpd_config import RedpdConfig


def dw_from_redpd_config(config: RedpdConfig) -> DataWindow:
    """
    Create RedVox DataWindow object from RedPandas configuration file with start/end times in epoch s

    :param config: RedpdConfig. REQUIRED
    :return: RedVox DataWindow object
    """
    # Translate the RedPandas configuration into a DataWindow configuration
    window_config = DataWindowConfig(
        input_dir=config.input_dir,
        station_ids=config.station_ids,
        start_datetime=dt_utils.datetime_from_epoch_seconds_utc(config.event_start_epoch_s),
        end_datetime=dt_utils.datetime_from_epoch_seconds_utc(config.event_end_epoch_s),
        structured_layout=True,
        apply_correction=True,
        start_buffer_td=dt_utils.timedelta(minutes=config.start_buffer_minutes),
        end_buffer_td=dt_utils.timedelta(minutes=config.end_buffer_minutes))

    # Load RedVox Datawindow
    data_window: DataWindow = DataWindow(event_name=config.event_name,
                                         # event_origin=,
                                         config=window_config,
                                         # out_dir=,
                                         # out_type=,
                                         debug=False)
    return data_window


def plot_dw_mic(data_window: DataWindow) -> Figure:
    """
    Plot audio data for all stations in RedVox DataWindow

    :param data_window: RedVox DataWindow object. REQUIRED
    :return: matplotlib figure instance
    """
    station: Station
    fig, ax = plt.subplots(figsize=(10, 8))  # Adjust to your screen
    for station in data_window.stations():
        if not station.has_audio_data():
            continue
        mic_wf_raw = station.audio_sensor().get_data_channel("microphone")
        mic_epoch_s = station.audio_sensor().data_timestamps() / MICROSECONDS_IN_SECOND
        # Normalize each trace to unit peak and plot against record-relative time
        ax.plot(mic_epoch_s - mic_epoch_s[0],
                mic_wf_raw / np.nanmax(np.abs(mic_wf_raw)),
                label=station.id())
    ax.legend(loc='upper right')
    ax.set_title("Audio raw normalized waveforms")
    ax.set_xlabel("Time from record start, s")
    return fig


def plot_dw_baro(data_window: DataWindow) -> Figure:
    """
    Plot barometer data for all stations in RedVox DataWindow

    :param data_window: RedVox DataWindow object. REQUIRED
    :return: matplotlib figure instance
    """
    station: Station
    fig, ax = plt.subplots(figsize=(10, 8))  # Adjust to your screen
    for station in data_window.stations():
        if not station.has_barometer_data():
            continue
        baro_wf_raw = station.barometer_sensor().get_data_channel("pressure")
        baro_epoch_s = station.barometer_sensor().data_timestamps() / MICROSECONDS_IN_SECOND
        # Remove the mean pressure, then normalize to unit peak
        baro_wf = baro_wf_raw - np.nanmean(baro_wf_raw)
        ax.plot(baro_epoch_s - baro_epoch_s[0],
                baro_wf / np.nanmax(np.abs(baro_wf)),
                label=station.id())
    ax.legend(loc='upper right')
    ax.set_title("Pressure raw normalized waveforms")
    ax.set_xlabel("Time from record start, s")
    return fig
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_datawin.py
0.884389
0.450178
redpd_datawin.py
pypi
from typing import Optional, Tuple
import numpy as np
import pandas as pd
from libquantum import atoms, spectra, utils

import redpandas.redpd_preprocess as rpd_prep
from redpandas.redpd_preprocess import find_nearest_idx


def frame_panda_no_offset(df: pd.DataFrame,
                          sig_wf_label: str,
                          sig_epoch_s_label: str,
                          sig_epoch_s_start: float,
                          sig_epoch_s_end: float,
                          new_column_aligned_wf: str = 'sig_aligned_wf',
                          new_column_aligned_epoch: str = 'sig_aligned_epoch_s') -> pd.DataFrame:
    """
    Align signals in dataframe (no seconds offset)

    :param df: input pandas data frame
    :param sig_wf_label: string for the waveform column name in df
    :param sig_epoch_s_label: string for column name with the waveform timestamp (in epoch s) in df
    :param sig_epoch_s_start: first timestamp in epoch s
    :param sig_epoch_s_end: last timestamp in epoch s
    :param new_column_aligned_wf: label for new column containing aligned waveform
    :param new_column_aligned_epoch: label for new column containing aligned timestamps in epoch s
    :return: input df with new columns
    """
    aligned_wf = []
    aligned_epoch_s = []
    for n in df.index:
        # A plain float cell (NaN) marks a station with no data for this column
        if sig_wf_label not in df.columns or type(df[sig_wf_label][n]) == float:
            aligned_wf.append(float("NaN"))
            aligned_epoch_s.append(float("NaN"))
            continue
        if df[sig_wf_label][n].ndim == 1:
            sig_wf, sig_epoch_s = \
                utils.sig_frame(sig=df[sig_wf_label][n],
                                time_epoch_s=df[sig_epoch_s_label][n],
                                epoch_s_start=sig_epoch_s_start,
                                epoch_s_stop=sig_epoch_s_end)
            aligned_wf.append(sig_wf)
            aligned_epoch_s.append(sig_epoch_s)
        else:
            # Multichannel sensor: frame each axis against the shared timestamps
            aligned_wf_3c = []
            for index_sensor_array, _ in enumerate(df[sig_wf_label][n]):
                sig_wf, sig_epoch_s = \
                    utils.sig_frame(sig=df[sig_wf_label][n][index_sensor_array],
                                    time_epoch_s=df[sig_epoch_s_label][n],
                                    epoch_s_start=sig_epoch_s_start,
                                    epoch_s_stop=sig_epoch_s_end)
                aligned_wf_3c.append(sig_wf)
            aligned_wf.append(np.array(aligned_wf_3c))
            aligned_epoch_s.append(sig_epoch_s)

    df[new_column_aligned_wf] = aligned_wf
    df[new_column_aligned_epoch] = aligned_epoch_s
    return df


def frame_panda(df: pd.DataFrame,
                sig_wf_label: str,
                sig_epoch_s_label: str,
                sig_epoch_s_start: float,
                sig_epoch_s_end: float,
                offset_seconds_label: str = "xcorr_offset_seconds",
                new_column_aligned_wf: str = 'sig_aligned_wf',
                new_column_aligned_epoch: str = 'sig_aligned_epoch_s') -> pd.DataFrame:
    """
    Align signals in dataframe (with seconds offset)

    :param df: input pandas data frame
    :param sig_wf_label: string for the waveform column name in df
    :param sig_epoch_s_label: string for column name with the waveform timestamp (in epoch s) in df
    :param sig_epoch_s_start: first timestamp in epoch s
    :param sig_epoch_s_end: last timestamp in epoch s
    :param offset_seconds_label: time offset correction in seconds
    :param new_column_aligned_wf: label for new column containing aligned waveform
    :param new_column_aligned_epoch: label for new column containing aligned timestamps in epoch s
    :return: input df with new columns
    """
    aligned_wf = []
    aligned_epoch_s = []
    for n in df.index:
        if sig_wf_label not in df.columns or type(df[sig_wf_label][n]) == float:
            aligned_wf.append(float("NaN"))
            aligned_epoch_s.append(float("NaN"))
            continue
        # Apply per-station time offset before framing
        sig_wf, sig_epoch_s = \
            utils.sig_frame(sig=df[sig_wf_label][n],
                            time_epoch_s=df[sig_epoch_s_label][n] + df[offset_seconds_label][n],
                            epoch_s_start=sig_epoch_s_start,
                            epoch_s_stop=sig_epoch_s_end)
        aligned_wf.append(sig_wf)
        aligned_epoch_s.append(sig_epoch_s)

    df[new_column_aligned_wf] = aligned_wf
    df[new_column_aligned_epoch] = aligned_epoch_s
    return df


def _tfr_of_wf(sig_wf: np.ndarray,
               sample_rate_hz: float,
               order_number_input: float,
               tfr_type: str) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Taper a copy of a 1-D waveform and compute its time-frequency representation.

    :param sig_wf: 1-D waveform
    :param sample_rate_hz: waveform sample rate in Hz
    :param order_number_input: band order Nth
    :param tfr_type: 'cwt' or 'stft'
    :return: tuple of (tfr bits, tfr time in s, tfr frequency in Hz)
    :raises ValueError: if tfr_type is neither 'cwt' nor 'stft'
    """
    sig_wf_n = np.copy(sig_wf)
    sig_wf_n *= rpd_prep.taper_tukey(sig_wf_or_time=sig_wf_n, fraction_cosine=0.1)
    if tfr_type == "cwt":
        # Compute complex wavelet transform (cwt) from signal duration
        _, tfr_bits, tfr_time_s, tfr_frequency_hz = \
            atoms.cwt_chirp_from_sig(sig_wf=sig_wf_n,
                                     frequency_sample_rate_hz=sample_rate_hz,
                                     band_order_Nth=order_number_input)
    elif tfr_type == "stft":
        # Compute short-time Fourier transform (stft) from signal duration
        _, tfr_bits, tfr_time_s, tfr_frequency_hz = \
            spectra.stft_from_sig(sig_wf=sig_wf_n,
                                  frequency_sample_rate_hz=sample_rate_hz,
                                  band_order_Nth=order_number_input)
    else:
        # BUG FIX: the original silently appended nothing for an unknown
        # tfr_type, which later crashed the dataframe column assignment with a
        # cryptic length mismatch; fail fast with a clear message instead.
        raise ValueError("tfr_type must be 'cwt' or 'stft', got: " + str(tfr_type))
    return tfr_bits, tfr_time_s, tfr_frequency_hz


# INPUT ALIGNED DATA
def tfr_bits_panda(df: pd.DataFrame,
                   sig_wf_label: str,
                   sig_sample_rate_label: str,
                   order_number_input: float = 3,
                   tfr_type: str = 'cwt',
                   new_column_tfr_bits: str = 'tfr_bits',
                   new_column_tfr_time_s: str = 'tfr_time_s',
                   new_column_tfr_frequency_hz: str = 'tfr_frequency_hz') -> pd.DataFrame:
    """
    Calculate Time Frequency Representation for a signal

    :param df: input pandas data frame
    :param sig_wf_label: string for the waveform column name in df
    :param sig_sample_rate_label: string for column name with sample rate in Hz information in df
    :param order_number_input: band order Nth
    :param tfr_type: 'cwt' or 'stft'
    :param new_column_tfr_bits: label for new column containing tfr in bits
    :param new_column_tfr_time_s: label for new column containing tfr timestamps in epoch s
    :param new_column_tfr_frequency_hz: label for new column containing tfr frequency in Hz
    :return: input dataframe with new columns
    :raises ValueError: if tfr_type is neither 'cwt' nor 'stft'
    """
    tfr_bits = []
    tfr_time_s = []
    tfr_frequency_hz = []
    for n in df.index:
        if sig_wf_label not in df.columns or type(df[sig_wf_label][n]) == float:
            tfr_bits.append(float("NaN"))
            tfr_time_s.append(float("NaN"))
            tfr_frequency_hz.append(float("NaN"))
            continue
        if df[sig_wf_label][n].ndim == 1:  # audio basically
            bits, time_s, frequency_hz = \
                _tfr_of_wf(sig_wf=df[sig_wf_label][n],
                           sample_rate_hz=df[sig_sample_rate_label][n],
                           order_number_input=order_number_input,
                           tfr_type=tfr_type)
            tfr_bits.append(bits)
            tfr_time_s.append(time_s)
            tfr_frequency_hz.append(frequency_hz)
        else:  # sensor that is acceleration/gyroscope/magnetometer/barometer
            tfr_3c_bits = []
            tfr_3c_time = []
            tfr_3c_frequency = []
            for index_dimension, _ in enumerate(df[sig_wf_label][n]):
                bits, time_s, frequency_hz = \
                    _tfr_of_wf(sig_wf=df[sig_wf_label][n][index_dimension],
                               sample_rate_hz=df[sig_sample_rate_label][n],
                               order_number_input=order_number_input,
                               tfr_type=tfr_type)
                tfr_3c_bits.append(bits)
                tfr_3c_time.append(time_s)
                tfr_3c_frequency.append(frequency_hz)
            # append 3c tfr into 'main' list
            tfr_bits.append(np.array(tfr_3c_bits))
            tfr_time_s.append(np.array(tfr_3c_time))
            tfr_frequency_hz.append(np.array(tfr_3c_frequency))

    df[new_column_tfr_bits] = tfr_bits
    df[new_column_tfr_time_s] = tfr_time_s
    df[new_column_tfr_frequency_hz] = tfr_frequency_hz
    return df


def _window_slice(timestamps: np.ndarray,
                  start_time_window: float,
                  end_time_window: float) -> Tuple[int, Optional[int]]:
    """
    Convert a (start, end) time window into slice indices over timestamps.
    A value of 0.0 means "unbounded" on that side.

    :param timestamps: 1-D timestamp array
    :param start_time_window: window start time, 0.0 for start of record
    :param end_time_window: window end time, 0.0 for end of record
    :return: (start index, end index or None for "to the end")
    """
    idx_time_start = find_nearest_idx(timestamps, start_time_window) if start_time_window > 0.0 else 0
    # BUG FIX: the original used -1 as the open end of the slice, which always
    # dropped the final sample; None includes it.
    idx_time_end = find_nearest_idx(timestamps, end_time_window) if end_time_window > 0.0 else None
    return idx_time_start, idx_time_end


# INPUT ALIGNED DATA
def tfr_bits_panda_window(df: pd.DataFrame,
                          sig_wf_label: str,
                          sig_sample_rate_label: str,
                          sig_timestamps_label: str,
                          order_number_input: float = 3,
                          tfr_type: str = 'cwt',
                          new_column_tfr_bits: str = 'tfr_bits',
                          new_column_tfr_time_s: str = 'tfr_time_s',
                          new_column_tfr_frequency_hz: str = 'tfr_frequency_hz',
                          start_time_window: Optional[float] = 0.0,
                          end_time_window: Optional[float] = 0.0) -> pd.DataFrame:
    """
    Calculate Time Frequency Representation for a signal within a time window

    :param df: input pandas data frame
    :param sig_wf_label: string for the waveform column name in df
    :param sig_sample_rate_label: string for column name with sample rate in Hz information in df
    :param sig_timestamps_label: string for timestamp column name in df
    :param order_number_input: band order Nth
    :param tfr_type: 'cwt' or 'stft'
    :param new_column_tfr_bits: label for new column containing tfr in bits
    :param new_column_tfr_time_s: label for new column containing tfr timestamps in epoch s
    :param new_column_tfr_frequency_hz: label for new column containing tfr frequency in Hz
    :param start_time_window: float, start time window (within sig_timestamps_label)
    :param end_time_window: float, end time window (within sig_timestamps_label)
    :return: input dataframe with new columns
    :raises ValueError: if the window is inverted or tfr_type is invalid
    """
    tfr_bits = []
    tfr_time_s = []
    tfr_frequency_hz = []

    # Check zooming window
    if start_time_window > 0.0 and end_time_window > 0.0:
        if end_time_window <= start_time_window:
            raise ValueError(f"end_time_window parameter ('{end_time_window}') "
                             f"cannot be smaller than start_time_window parameter ('{start_time_window}')")

    for n in df.index:
        if sig_wf_label not in df.columns or type(df[sig_wf_label][n]) == float:
            tfr_bits.append(float("NaN"))
            tfr_time_s.append(float("NaN"))
            tfr_frequency_hz.append(float("NaN"))
            continue
        if df[sig_wf_label][n].ndim == 1:  # audio basically
            idx_time_start, idx_time_end = \
                _window_slice(timestamps=df[sig_timestamps_label][n],
                              start_time_window=start_time_window,
                              end_time_window=end_time_window)
            bits, time_s, frequency_hz = \
                _tfr_of_wf(sig_wf=df[sig_wf_label][n][idx_time_start:idx_time_end],
                           sample_rate_hz=df[sig_sample_rate_label][n],
                           order_number_input=order_number_input,
                           tfr_type=tfr_type)
            tfr_bits.append(bits)
            tfr_time_s.append(time_s)
            tfr_frequency_hz.append(frequency_hz)
        else:  # sensor that is acceleration/gyroscope/magnetometer/barometer
            # NOTE(review): as in the original, the time window is NOT applied
            # to multichannel sensors — the full waveform is transformed.
            tfr_3c_bits = []
            tfr_3c_time = []
            tfr_3c_frequency = []
            for index_dimension, _ in enumerate(df[sig_wf_label][n]):
                bits, time_s, frequency_hz = \
                    _tfr_of_wf(sig_wf=df[sig_wf_label][n][index_dimension],
                               sample_rate_hz=df[sig_sample_rate_label][n],
                               order_number_input=order_number_input,
                               tfr_type=tfr_type)
                tfr_3c_bits.append(bits)
                tfr_3c_time.append(time_s)
                tfr_3c_frequency.append(frequency_hz)
            # append 3c tfr into 'main' list
            tfr_bits.append(np.array(tfr_3c_bits))
            tfr_time_s.append(np.array(tfr_3c_time))
            tfr_frequency_hz.append(np.array(tfr_3c_frequency))

    df[new_column_tfr_bits] = tfr_bits
    df[new_column_tfr_time_s] = tfr_time_s
    df[new_column_tfr_frequency_hz] = tfr_frequency_hz
    return df
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_tfr.py
0.933385
0.410343
redpd_tfr.py
pypi
import numpy as np
from scipy.integrate import cumulative_trapezoid
from typing import List, Optional, Tuple


def remove_dc_offset(sensor_wf: np.ndarray,
                     start_loc: Optional[int] = None,
                     end_loc: Optional[int] = None) -> np.ndarray:
    """
    Remove the "DC offset" from the data by subtracting the mean of the specified
    subsection of the data. If both start and end locations are None, the whole
    array is used; if only one is given, the range extends to the corresponding
    end of the array.

    :param sensor_wf: data to remove the "DC offset" from
    :param start_loc: index of the start of the DC offset subset
    :param end_loc: index of the end of the DC offset subset
    :return: data with DC offset removed
    """
    # FIX: the original condition `if start_loc and end_loc is None` parsed as
    # `start_loc and (end_loc is None)`, so any truthy start_loc with
    # end_loc=None subtracted the whole-array mean instead of mean([start_loc:]).
    if start_loc is None and end_loc is None:
        return sensor_wf - np.nanmean(sensor_wf)
    if start_loc is None:
        return sensor_wf - np.nanmean(sensor_wf[:end_loc])
    if end_loc is None:
        return sensor_wf - np.nanmean(sensor_wf[start_loc:])
    return sensor_wf - np.nanmean(sensor_wf[start_loc:end_loc])


def remove_dc_offset_s(timestamps_s: np.ndarray,
                       sensor_wf: np.ndarray,
                       start_s: Optional[float] = None,
                       end_s: Optional[float] = None) -> np.ndarray:
    """
    Remove the "DC offset" from the data by subtracting the mean of the specified
    subsection of the data. If both start and end times are None, the whole array
    is used; if only one is given, the range extends to the corresponding end.

    :param timestamps_s: timestamps corresponding to the data in seconds
    :param sensor_wf: data to remove the "DC offset" from
    :param start_s: seconds from the first timestamp to use as the start of the range for the DC offset subset
    :param end_s: seconds from the first timestamp to use as the end of the range for the DC offset subset
    :return: data with DC offset removed
    """
    # adjust timestamps to be relative from the start
    timestamps_s_adj = timestamps_s - timestamps_s[0]

    # find locations closest to the given start and end times
    # FIX: same operator-precedence bug as remove_dc_offset originally had;
    # both values must be tested against None explicitly.
    if start_s is None and end_s is None:
        start_loc = None
        end_loc = None
    elif start_s is None:
        start_loc = None
        end_loc = np.abs(timestamps_s_adj - end_s).argmin()
    elif end_s is None:
        start_loc = np.abs(timestamps_s_adj - start_s).argmin()
        end_loc = None
    else:
        start_loc = np.abs(timestamps_s_adj - start_s).argmin()
        end_loc = np.abs(timestamps_s_adj - end_s).argmin()

    # use remove_dc_offset to find the offset
    return remove_dc_offset(sensor_wf=sensor_wf, start_loc=start_loc, end_loc=end_loc)


def integrate_cumtrapz(timestamps_s: np.ndarray,
                       sensor_wf: np.ndarray,
                       initial_value: float = 0) -> np.ndarray:
    """
    Cumulative trapezoid integration using scipy.integrate.cumulative_trapezoid

    :param timestamps_s: timestamps corresponding to the data in seconds
    :param sensor_wf: data to integrate using cumulative trapezoid
    :param initial_value: value prepended to the result so its length matches the input (default 0)
    :return: integrated data with the same length as the input
    """
    return cumulative_trapezoid(x=timestamps_s, y=sensor_wf, initial=initial_value)


def get_roll_pitch(accel_x: float, accel_y: float, accel_z: float) -> Tuple[float, float]:
    """
    Return the roll (rotation around x axis) and pitch (rotation around y axis)
    from accelerometer data.
    http://www.geekmomprojects.com/gyroscopes-and-accelerometers-on-a-chip/

    :param accel_x: x-axis acceleration value
    :param accel_y: y-axis acceleration value
    :param accel_z: z-axis acceleration value
    :return: roll, pitch (in radians)
    """
    # Angles in radians. No degree conversion is performed here, despite the
    # comment in the original implementation.
    roll = np.arctan2(accel_y, np.sqrt(accel_x * accel_x + accel_z * accel_z))
    pitch = np.arctan2(-accel_x, np.sqrt(accel_y * accel_y + accel_z * accel_z))
    return roll, pitch


def get_yaw(roll: float, pitch: float, mag_x: float, mag_y: float, mag_z: float) -> np.ndarray:
    """
    Return yaw based on roll / pitch data and the magnetometer data
    https://roboticsclubiitk.github.io/2017/12/21/Beginners-Guide-to-IMU.html

    :param roll: rotation around the x-axis
    :param pitch: rotation around the y-axis
    :param mag_x: x-axis magnetometer value
    :param mag_y: y-axis magnetometer value
    :param mag_z: z-axis magnetometer value
    :return: yaw (in radians)
    """
    # Tilt-compensate the magnetometer reading before taking the heading
    mag_x_adj = mag_x*np.cos(pitch) + mag_y*np.sin(roll)*np.sin(pitch) + mag_z*np.cos(roll)*np.sin(pitch)
    mag_y_adj = mag_y * np.cos(roll) - mag_z*np.sin(roll)
    return np.arctan2(-mag_y_adj, mag_x_adj)


def get_roll_pitch_array(accelerometers: List) -> Tuple[np.ndarray, np.ndarray]:
    """
    Return the roll (rotation around x axis) and pitch (rotation around y axis)
    arrays from accelerometer data, looping through get_roll_pitch.

    :param accelerometers: List of the xyz components of accelerometer data
    :return: roll_array, pitch_array
    """
    roll_array = []
    pitch_array = []
    for i in range(len(accelerometers[0])):
        # FIX: get_roll_pitch returns (roll, pitch); the original unpacked the
        # pair as (pitch, roll), silently swapping the two output arrays.
        roll, pitch = get_roll_pitch(accel_x=accelerometers[0][i],
                                     accel_y=accelerometers[1][i],
                                     accel_z=accelerometers[2][i])
        roll_array.append(roll)
        pitch_array.append(pitch)
    return np.array(roll_array), np.array(pitch_array)


def get_yaw_array(roll_array: np.ndarray,
                  pitch_array: np.ndarray,
                  magnetometers: List) -> np.ndarray:
    """
    Return the yaw array from roll (rotation around x axis), pitch (rotation
    around y axis), and magnetometer data, looping through get_yaw.

    :param roll_array: roll (rotation around x axis) calculated from sensors
    :param pitch_array: pitch (rotation around y axis) calculated from sensors
    :param magnetometers: List of xyz components of magnetometer data
    :return: yaw_array
    """
    yaw_array = []
    for i in range(len(magnetometers[0])):
        yaw = get_yaw(roll=roll_array[i],
                      pitch=pitch_array[i],
                      mag_x=magnetometers[0][i],
                      mag_y=magnetometers[1][i],
                      mag_z=magnetometers[2][i])
        yaw_array.append(yaw)
    return np.array(yaw_array)


def complimentary_filtering(gyroscope_time_s: np.ndarray,
                            gyroscope_angle: np.ndarray,
                            accelerometer_angle: np.ndarray,
                            smoothing_factor: float) -> np.ndarray:
    """
    Complementary filter for accelerometer and gyroscope angles; returns the
    filtered angle. Based on the works from
    https://stackoverflow.com/questions/1586658/combine-gyroscope-and-accelerometer-data and
    http://blog.bitify.co.uk/2013/11/using-complementary-filter-to-combine.html

    :param gyroscope_time_s: timestamps corresponding to the gyroscope data in seconds
    :param gyroscope_angle: the calculated angle from the gyroscope (roll, pitch, yaw)
    :param accelerometer_angle: the calculated angle from the accelerometer (roll, pitch, yaw)
    :param smoothing_factor: determines the sensitivity of the accelerometer
    :return: filtered angle
    """
    # Change in gyroscope angle between consecutive samples
    # NOTE(review): multiplying diff(angle) by diff(time) below follows the
    # original code; the cited references multiply a gyro *rate* by dt --
    # confirm the intended units of gyroscope_angle.
    gyroscope_angle_change = np.diff(gyroscope_angle)
    gyroscope_time_delta = np.diff(gyroscope_time_s)

    # FIX: copy so the caller's gyroscope_angle array is not mutated in place
    filtered_angle = np.copy(gyroscope_angle)
    for i in range(len(accelerometer_angle) - 1):
        # FIX: complementary filter weights must sum to 1 -- the accelerometer
        # term is weighted (1 - smoothing_factor), not smoothing_factor
        filtered_angle[i + 1] = \
            smoothing_factor * (filtered_angle[i] + gyroscope_angle_change[i] * gyroscope_time_delta[i]) \
            + (1 - smoothing_factor) * accelerometer_angle[i + 1]
    return filtered_angle
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_orientation.py
0.93213
0.843766
redpd_orientation.py
pypi
import os
from typing import List, Optional

import numpy as np
import pandas as pd

from redvox.common.data_window import DataWindow
import redpandas.redpd_build_station as rpd_build_sta
import redpandas.redpd_scales as rpd_scales


def redpd_dataframe(input_dw: DataWindow,
                    sensor_labels: Optional[List[str]] = None,
                    highpass_type: Optional[str] = 'obspy',
                    frequency_filter_low: Optional[float] = 1./rpd_scales.Slice.T100S,
                    filter_order: Optional[int] = 4) -> pd.DataFrame:
    """
    Construct pandas dataframe from RedVox DataWindow. Default sensor extracted is audio,
    for more options see sensor_labels parameter.

    :param input_dw: REQUIRED. Redvox DataWindow
    :param sensor_labels: optional list of strings, list of sensors available
        ['audio', 'barometer', 'accelerometer', 'gyroscope', 'magnetometer', 'health',
        'location', 'synchronization', 'clock', 'best_location'].
        For example: sensor_labels = ['audio', 'accelerometer']. Default is ["audio"]
    :param highpass_type: optional string, type of highpass applied. One of: 'obspy',
        'butter', or 'rc'. Default is 'obspy'
    :param frequency_filter_low: optional float, lowest frequency for highpass filter.
        Default is 100 second periods
    :param filter_order: optional integer, the order of the filter. Default is 4
    :return: pd.DataFrame
    """
    # TODO: make debug/verbose option
    print("Initiating conversion from RedVox DataWindow to RedPandas:")
    rdvx_data: DataWindow = input_dw

    # Fall back to audio only when no usable list of labels was provided.
    # FIX: isinstance instead of `type(...) is not list`, so list subclasses
    # supplied by callers are accepted rather than silently replaced.
    if not isinstance(sensor_labels, list):
        sensor_labels = ["audio"]

    # BEGIN RED PANDAS
    print("\nInitiating RedVox Redpandas:")
    df_all_sensors_all_stations = \
        pd.DataFrame([rpd_build_sta.station_to_dict_from_dw(station=station,
                                                            sdk_version=rdvx_data.sdk_version(),
                                                            sensor_labels=sensor_labels,
                                                            highpass_type=highpass_type,
                                                            frequency_filter_low=frequency_filter_low,
                                                            filter_order=filter_order)
                      for station in rdvx_data.stations()])
    df_all_sensors_all_stations.sort_values(by="station_id", ignore_index=True, inplace=True)

    # Offer glimpse of what the DataFrame contains
    print(f"\nTotal stations in DataFrame: {len(df_all_sensors_all_stations['station_id'])}")
    print(f"Available stations: \n{df_all_sensors_all_stations['station_id'].to_string(index=False)}")
    print(f"Total columns in DataFrame: {len(df_all_sensors_all_stations.columns)}")

    return df_all_sensors_all_stations


def export_df_to_parquet(df: pd.DataFrame,
                         output_dir_pqt: str,
                         output_filename_pqt: Optional[str] = None,
                         event_name: Optional[str] = "Redvox") -> str:
    """
    Export RedPandas DataFrame to parquet

    :param df: input pandas DataFrame. REQUIRED
    :param output_dir_pqt: string, output directory for parquet. REQUIRED
    :param output_filename_pqt: optional string for parquet filename. Default is None
    :param event_name: optional string with name of event. Default is "Redvox"
    :return: string with full path (output directory and filename) of parquet
    """
    for column in df.columns:
        for row in df.index:
            # Check for multi-dimensional array cells by inspecting every row;
            # parquet cannot store them directly, so they are flattened below.
            if len(np.shape(df[column][row])) >= 2:
                # Record the original shape in a companion "<column>_ndim"
                # column so the arrays can be reshaped after reading back.
                # NOTE(review): DataFrame.applymap is deprecated in pandas>=2.1
                # (renamed DataFrame.map); kept for compatibility with older pandas.
                df[[f'{column}_ndim']] = df[[f'{column}']].applymap(np.shape)
                # Change tuples to 1D np.array to save it to parquet
                df[[f'{column}_ndim']] = df[[f'{column}_ndim']].applymap(np.asarray)
                # Flatten each row in wf columns
                df[[f'{column}']] = df[[f'{column}']].applymap(np.ravel)
                # One multi-dimensional cell is enough to flag the whole column
                break

    # Make filename if none given
    if output_filename_pqt is None:
        output_filename_pqt = event_name + "_df.parquet"

    # Append the .parquet extension unless one is already present
    if ".parquet" not in output_filename_pqt and ".pqt" not in output_filename_pqt:
        full_output_dir_path_parquet = os.path.join(output_dir_pqt, output_filename_pqt + ".parquet")
    else:
        full_output_dir_path_parquet = os.path.join(output_dir_pqt, output_filename_pqt)

    df.to_parquet(full_output_dir_path_parquet)
    print(f"\nExported Parquet RedPandas DataFrame to {full_output_dir_path_parquet}")

    return full_output_dir_path_parquet
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_df.py
0.732209
0.40592
redpd_df.py
pypi
import warnings

import numpy as np
import pandas as pd
from scipy import signal
from libquantum import utils

import redpandas.redpd_plot.coherence as rpd_plt


def coherence_numpy(sig_in: np.ndarray,
                    sig_in_ref: np.ndarray,
                    sig_sample_rate_hz: int,
                    sig_ref_sample_rate_hz: int,
                    window_seconds: float = 2.,
                    window_overlap_fractional: float = 0.5,
                    frequency_ref_hz: float = 40.,
                    frequency_min_hz: float = 1.,
                    frequency_max_hz: float = 320.,
                    sig_calib: float = 1.,
                    sig_ref_calib: float = 1.):
    """
    Find coherence between a signal and a reference signal, plot results

    :param sig_in: signal
    :param sig_in_ref: reference signal
    :param sig_sample_rate_hz: sample rate of signal in Hz
    :param sig_ref_sample_rate_hz: sample rate of reference signal in Hz
    :param window_seconds: seconds duration of window. Default is 2.0
    :param window_overlap_fractional: fraction of window to overlap between segments. Default is 0.5
    :param frequency_ref_hz: reference frequency in Hz. Default is 40.0
    :param frequency_min_hz: minimum frequency to plot in Hz (x min limit). Default is 1.0
    :param frequency_max_hz: maximum frequency to plot in Hz (x max limit). Default is 320.
    :param sig_calib: calibration of signal. Default is 1.0
    :param sig_ref_calib: calibration of reference signal. Default is 1.0
    :return: plots
    """
    # Started from WACT IMS ref code, increased consistency.
    # Core computation is standard scipy.signal.
    # Compute PSDs and response - /4 calib divider removed
    sig_ref = np.copy(sig_in_ref) * sig_ref_calib
    sig = np.copy(sig_in) * sig_calib

    window_points = int(window_seconds*sig_sample_rate_hz)
    window_overlap_points = int(window_overlap_fractional*window_points)

    # Compute PSDs for each and coherence between the two
    f, pxx_ref = signal.welch(x=sig_ref,
                              fs=sig_ref_sample_rate_hz,
                              nperseg=window_points,
                              noverlap=window_overlap_points)
    f, pxx_sig = signal.welch(x=sig,
                              fs=sig_sample_rate_hz,
                              nperseg=window_points,
                              noverlap=window_overlap_points)

    # Bits, absolute
    # NOTE(review): despite the original "add EPSILON" comment no epsilon is
    # added, so zero PSD bins produce -inf -- confirm whether utils.log2epsilon
    # was intended here as in coherence_re_ref_pandas.
    psd_ref_bits = 0.5 * np.log2(abs(pxx_ref))
    psd_sig_bits = 0.5 * np.log2(abs(pxx_sig))

    # Compute cross-power spectral density with ref sample rate
    # Original code had no overlap - fixed
    f, Pxy = signal.csd(x=sig,
                        y=sig_ref,
                        fs=sig_ref_sample_rate_hz,
                        nperseg=window_points,
                        noverlap=window_overlap_points)
    cross_spectrum_bits = 0.5 * np.log2(abs(Pxy))

    # Coherence, same as from PSD
    f, Cxy = signal.coherence(x=sig,
                              y=sig_ref,
                              fs=sig_ref_sample_rate_hz,
                              nperseg=window_points,
                              noverlap=window_overlap_points)

    # Compute response assuming incoherent comp in ref.
    # Ref sensor response already removed
    H_x = pxx_sig / Pxy

    # compute magnitude and phase in degrees
    mag = np.abs(H_x)
    # FIX: unwrap the phase in radians first, then convert to degrees.
    # The original unwrapped degree-valued phase with np.unwrap's default
    # 2*pi period, which corrupts any phase step larger than ~pi degrees.
    ph = 180 / np.pi * np.unwrap(np.angle(H_x))

    # get new mag and phase values at frequency closest to ref frequency
    frequency_ref_index = np.argmin(np.abs(f - frequency_ref_hz))
    frequency_coherence_max_index = np.argmax(Cxy)
    calmag = mag[frequency_ref_index]
    calph = ph[frequency_ref_index]
    calcoh = Cxy[frequency_ref_index]
    maxcoh_f = f[frequency_coherence_max_index]
    maxcoh = Cxy[frequency_coherence_max_index]

    calflab = '%s, %.2f Hz' % ('Ref frequency', frequency_ref_hz)
    calmaglab = 'Mag=%.2f' % calmag
    calphlab = 'Phase=%.2f' % calph
    calcohlab = 'Coherence=%.2f' % calcoh
    print(calflab)
    print(calmaglab)
    print(calphlab)
    print(calcohlab)
    print('Max coherence frequency, level:')
    print(maxcoh_f, maxcoh)

    rpd_plt.plot_psd_coh(psd_sig=psd_sig_bits,
                         psd_ref=psd_ref_bits,
                         coherence_sig_ref=Cxy,
                         f_hz=f,
                         f_min_hz=frequency_min_hz,
                         f_max_hz=frequency_max_hz,
                         f_scale='linear')
    rpd_plt.plot_psd_coh(psd_sig=cross_spectrum_bits,
                         psd_ref=psd_ref_bits,
                         coherence_sig_ref=Cxy,
                         f_hz=f,
                         f_min_hz=frequency_min_hz,
                         f_max_hz=frequency_max_hz,
                         f_scale='linear',
                         sig_label='Cross spectrum')
    rpd_plt.plot_response_scatter(h_magnitude=mag,
                                  h_phase_deg=ph,
                                  color_guide=Cxy,
                                  f_hz=f,
                                  f_min_hz=frequency_min_hz,
                                  f_max_hz=frequency_max_hz,
                                  f_scale='linear')


def coherence_re_ref_pandas(df: pd.DataFrame,
                            ref_id: str,
                            sig_id_label: str,
                            sig_wf_label: str,
                            sig_sample_rate_label: str,
                            fs_fractional_tolerance: float = 0.02,
                            window_seconds: float = 2.,
                            window_overlap_fractional: float = 0.5,
                            frequency_ref_hz: float = 40.,
                            frequency_min_hz: float = 1,
                            frequency_max_hz: float = 320.,
                            sig_calib: float = 1.,
                            sig_ref_calib: float = 1.,
                            export_option: str = 'max_coherence',
                            plot_response: bool = False,
                            new_column_label_cohere_frequency: str = 'coherence_frequency',
                            new_column_label_cohere_value: str = 'coherence_value',
                            new_column_label_cohere_response_magnitude_bits: str =
                            'coherence_response_magnitude_bits',
                            new_column_label_cohere_response_phase_degrees: str =
                            'coherence_response_phase_degrees') -> pd.DataFrame:
    """
    Find coherence between signals stored in dataframe, plot results

    :param df: input pandas DataFrame
    :param ref_id: name of reference signal in sig_id_label column in df
    :param sig_id_label: string for column name with station ids in df
    :param sig_wf_label: string for column name with waveform data in df
    :param sig_sample_rate_label: string for column name with sample rate in Hz information in df
    :param fs_fractional_tolerance: difference in sample rate (in Hz) tolerated. Default is 0.02
    :param window_seconds: seconds duration of window. Default is 2.0
    :param window_overlap_fractional: fraction of window to overlap between segments. Default is 0.5
    :param frequency_ref_hz: reference frequency in Hz. Default is 40.0
    :param frequency_min_hz: minimum frequency to plot in Hz (x min limit). Default is 1.0
    :param frequency_max_hz: maximum frequency to plot in Hz (x max limit). Default is 320.
    :param sig_calib: calibration of signal. Default is 1.0
    :param sig_ref_calib: calibration of reference signal. Default is 1.0
    :param export_option: 'max_coherence' or 'ref_frequency'. Default is 'max_coherence'
    :param plot_response: plot results. Default is False
    :param new_column_label_cohere_frequency: string for new column containing coherence frequency
    :param new_column_label_cohere_value: string for new column containing coherence values
    :param new_column_label_cohere_response_magnitude_bits: string for new column containing coherence response in bits
    :param new_column_label_cohere_response_phase_degrees: string for new column containing coherence phase in degrees
    :return: input pandas dataframe with new columns
    """
    number_sig = len(df.index)
    print("Coherence, number of signals excluding reference:", number_sig-1)
    print("Reference station: ", ref_id)

    # Locate the reference station row; first match wins
    m_list = df.index[df[sig_id_label] == ref_id]
    m = m_list[0]
    if len(m_list) > 1:
        # FIX: this is an advisory condition -- the first match is used and
        # execution continues. The original `raise Warning(...)` aborted the
        # function, contradicting its own "Picked first instance" message.
        warnings.warn("More than one station meets the id spec. Picked first instance")

    # Initialize
    # NOTE(review): stations skipped by the sample-rate tolerance check leave
    # these lists shorter than the dataframe, which would make the column
    # assignments at the end fail -- confirm intended behavior.
    coherence_frequency = []
    coherence_value = []
    coherence_response_magnitude_bits = []
    coherence_response_phase_degrees = []

    if m is not None:
        print("Coherence Reference station ", df[sig_id_label][m])
        sig_m = np.copy(df[sig_wf_label][m]) * sig_ref_calib

        for n in df.index:
            sample_rate_condition = np.abs(df[sig_sample_rate_label][m] - df[sig_sample_rate_label][n]) \
                                    > fs_fractional_tolerance*df[sig_sample_rate_label][m]
            if sample_rate_condition:
                print("Sample rates out of tolerance")
                continue
            else:
                # Generalized sensor cross correlations, including unequal lengths
                sig_n = np.copy(df[sig_wf_label][n]) * sig_calib

                # Compute PSDs for each and coherence between the two
                window_points = int(window_seconds * df[sig_sample_rate_label][m])
                window_overlap_points = int(window_overlap_fractional*window_points)

                frequency_auto, auto_spectrum_sig = signal.welch(x=sig_n,
                                                                 fs=df[sig_sample_rate_label][n],
                                                                 nperseg=window_points,
                                                                 noverlap=window_overlap_points)
                _, auto_spectrum_ref = signal.welch(x=sig_m,
                                                    fs=df[sig_sample_rate_label][m],
                                                    nperseg=window_points,
                                                    noverlap=window_overlap_points)

                # Compute cross-power spectral density with ref sample rate
                frequency_cross, cross_spectrum = signal.csd(x=sig_n,
                                                             y=sig_m,
                                                             fs=df[sig_sample_rate_label][m],
                                                             nperseg=window_points,
                                                             noverlap=window_overlap_points)

                psd_ref_bits = 0.5 * utils.log2epsilon(abs(auto_spectrum_ref))
                psd_sig_bits = 0.5 * utils.log2epsilon(abs(auto_spectrum_sig))
                cross_spectrum_bits = 0.5 * utils.log2epsilon(abs(cross_spectrum))

                # Coherence, same as coherence from PSD
                frequency_coherence, coherence_welch = signal.coherence(x=sig_n,
                                                                        y=sig_m,
                                                                        fs=df[sig_sample_rate_label][m],
                                                                        nperseg=window_points,
                                                                        noverlap=window_overlap_points)

                # Compute response
                h_complex_response_sig = auto_spectrum_sig / cross_spectrum

                # Compute magnitude and phase in degrees
                magnitude_norm = np.abs(h_complex_response_sig)
                # FIX: unwrap the phase in radians first, then convert to
                # degrees; unwrapping degree-valued phase with the default
                # 2*pi period corrupts steps larger than ~pi degrees.
                phase_degrees = 180 / np.pi * np.unwrap(np.angle(h_complex_response_sig))

                # Assumes all the frequencies are the same - must verify
                frequency_ref_index = np.argmin(np.abs(frequency_coherence - frequency_ref_hz))
                frequency_coherence_max_index = np.argmax(coherence_welch)

                # New magnitude_norm and phase values at coherence frequency closest to ref frequency
                # FIX: index with frequency_ref_index -- the original used
                # frequency_coherence_max_index, so the 'ref_frequency' export
                # reported the max-coherence frequency instead of the one
                # nearest the requested reference frequency.
                ref_frequency_hz = frequency_coherence[frequency_ref_index]
                ref_frequency_coherence = coherence_welch[frequency_ref_index]
                ref_frequency_response_magnitude_bits = 0.5*utils.log2epsilon(magnitude_norm[frequency_ref_index])
                ref_frequency_response_phase_degrees = phase_degrees[frequency_ref_index]

                # Return max coherence values
                max_coherence_frequency_hz = frequency_coherence[frequency_coherence_max_index]
                max_coherence = np.max(coherence_welch)
                max_coherence_response_magnitude_bits = \
                    0.5*utils.log2epsilon(magnitude_norm[frequency_coherence_max_index])
                max_coherence_response_phase_degrees = phase_degrees[frequency_coherence_max_index]

                # The reference station is perfectly coherent with itself; flag it
                if n == m:
                    max_coherence_frequency_hz = np.nan

                if 'max_coherence' == export_option:
                    coherence_frequency.append(max_coherence_frequency_hz)
                    coherence_value.append(max_coherence)
                    coherence_response_magnitude_bits.append(max_coherence_response_magnitude_bits)
                    coherence_response_phase_degrees.append(max_coherence_response_phase_degrees)
                if 'ref_frequency' == export_option:
                    coherence_frequency.append(ref_frequency_hz)
                    coherence_value.append(ref_frequency_coherence)
                    coherence_response_magnitude_bits.append(ref_frequency_response_magnitude_bits)
                    coherence_response_phase_degrees.append(ref_frequency_response_phase_degrees)

                if plot_response:
                    rpd_plt.plot_psd_coh(psd_sig=psd_sig_bits,
                                         psd_ref=psd_ref_bits,
                                         coherence_sig_ref=coherence_welch,
                                         f_hz=frequency_coherence,
                                         f_min_hz=frequency_min_hz,
                                         f_max_hz=frequency_max_hz,
                                         f_scale='linear')
                    rpd_plt.plot_psd_coh(psd_sig=cross_spectrum_bits,
                                         psd_ref=psd_ref_bits,
                                         coherence_sig_ref=coherence_welch,
                                         f_hz=frequency_coherence,
                                         f_min_hz=frequency_min_hz,
                                         f_max_hz=frequency_max_hz,
                                         f_scale='linear',
                                         sig_label='Cross spectrum')
                    rpd_plt.plot_response_scatter(h_magnitude=magnitude_norm,
                                                  h_phase_deg=phase_degrees,
                                                  color_guide=coherence_welch,
                                                  f_hz=frequency_coherence,
                                                  f_min_hz=frequency_min_hz,
                                                  f_max_hz=frequency_max_hz,
                                                  f_scale='linear')

    df[new_column_label_cohere_frequency] = coherence_frequency
    df[new_column_label_cohere_value] = coherence_value
    df[new_column_label_cohere_response_magnitude_bits] = coherence_response_magnitude_bits
    df[new_column_label_cohere_response_phase_degrees] = coherence_response_phase_degrees

    return df
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_cohere.py
0.863837
0.616186
redpd_cohere.py
pypi
import numpy as np
from typing import Tuple, Iterator

# RC filter response: mag first contribution to stack overflow as slipstream
# https://stackoverflow.com/questions/62448904/how-to-implement-continuous-time-high-low-pass-filter-in-python


def rc_high_pass(x_new, x_old, y_old, sample_rate_hz: int, frequency_cut_low_hz: float) -> float:
    """
    Single step of a discrete-time RC high-pass filter.

    :param x_new: current input sample
    :param x_old: previous input sample
    :param y_old: previous output sample
    :param sample_rate_hz: sample rate in Hz
    :param frequency_cut_low_hz: low cutoff frequency in Hz
    :return: new filtered output sample
    """
    sample_interval_s = 1/sample_rate_hz
    # RC time constant corresponding to the cutoff frequency
    rc = 1/(2 * np.pi * frequency_cut_low_hz)
    alpha = rc/(rc + sample_interval_s)
    y_new = alpha * (y_old + x_new - x_old)
    return y_new


def rc_low_pass(x_new, y_old, sample_rate_hz: int, frequency_cut_high_hz: float) -> float:
    """
    Single step of a discrete-time RC low-pass filter.

    :param x_new: current input sample
    :param y_old: previous output sample
    :param sample_rate_hz: sample rate in Hz
    :param frequency_cut_high_hz: high cutoff frequency in Hz
    :return: new filtered output sample
    """
    sample_interval_s = 1/sample_rate_hz
    # RC time constant corresponding to the cutoff frequency
    rc = 1/(2 * np.pi * frequency_cut_high_hz)
    alpha = sample_interval_s/(rc + sample_interval_s)
    y_new = x_new * alpha + (1 - alpha) * y_old
    return y_new


def rc_iterator_highlow(sig_wf: np.ndarray,
                        sample_rate_hz: int,
                        frequency_cut_low_hz: float,
                        frequency_cut_high_hz: float) -> Iterator[Tuple[float, float]]:
    """
    RC filter high and low pass iterator over a waveform.

    :param sig_wf: signal waveform
    :param sample_rate_hz: sample rate in Hz
    :param frequency_cut_low_hz: low cutoff frequency in Hz
    :param frequency_cut_high_hz: high cutoff frequency in Hz
    :return: yields (new y high pass, new y low pass) per input sample
    """
    # Initialize filter state at zero. This can be improved to match wikipedia.
    x_prev = 0
    y_prev_high = 0
    y_prev_low = 0

    for x in sig_wf:
        y_prev_high = rc_high_pass(x, x_prev, y_prev_high, sample_rate_hz, frequency_cut_low_hz)
        y_prev_low = rc_low_pass(x, y_prev_low, sample_rate_hz, frequency_cut_high_hz)
        x_prev = x
        yield y_prev_high, y_prev_low


def rc_iterator_high_pass(sig_wf: np.ndarray,
                          sample_rate_hz: int,
                          frequency_cut_low_hz: float) -> Iterator[float]:
    """
    RC filter high pass iterator over a waveform.

    :param sig_wf: signal waveform
    :param sample_rate_hz: sample rate in Hz
    :param frequency_cut_low_hz: low cutoff frequency in Hz
    :return: yields new y high pass per input sample
    """
    # FIX: return annotation corrected from Iterator[Tuple[float, float]] --
    # this generator yields a single float per sample.
    # Initialize filter state at zero. This can be improved to match wikipedia.
    # x_prev = np.mean(sensor_wf)
    x_prev = 0
    y_prev_high = 0

    for x in sig_wf:
        y_prev_high = rc_high_pass(x, x_prev, y_prev_high, sample_rate_hz, frequency_cut_low_hz)
        x_prev = x
        yield y_prev_high


def rc_iterator_lowpass(sig_wf: np.ndarray,
                        sample_rate_hz: int,
                        frequency_cut_high_hz: float) -> Iterator[float]:
    """
    RC filter low pass iterator over a waveform.

    :param sig_wf: signal waveform
    :param sample_rate_hz: sample rate in Hz
    :param frequency_cut_high_hz: high cutoff frequency in Hz
    :return: yields new y low pass per input sample
    """
    # Initialize filter state at zero. This can be improved to match wikipedia.
    y_prev_low = 0

    for x in sig_wf:
        y_prev_low = rc_low_pass(x, y_prev_low, sample_rate_hz, frequency_cut_high_hz)
        yield y_prev_low
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_iterator.py
0.943601
0.581838
redpd_iterator.py
pypi
import os
import numpy as np
import pandas as pd
import pymap3d as pm
from typing import Any

from redpandas.redpd_scales import EPSILON, NANOS_TO_S, DEGREES_TO_METERS, PRESSURE_SEA_LEVEL_KPA


def redvox_loc(df_pqt_path: str) -> pd.DataFrame:
    """
    Extract the location, temperature, and DC pressure payload from the microphones.

    :param df_pqt_path: path/to/parquet file with data stored in a pd.DataFrame
    :return: pd.DataFrame with columns {'station_id', 'location_epoch_s', 'location_latitude',
        'location_longitude', 'location_altitude', 'location_speed', 'location_horizontal_accuracy',
        'barometer_epoch_s', 'barometer_wf_raw'}
    """
    # Check
    if not os.path.exists(df_pqt_path):
        print("Input file does not exist, check path:")
        print(df_pqt_path)
        # NOTE(review): exit() with no argument terminates with status 0 (success);
        # a raised FileNotFoundError would be friendlier to callers -- confirm before changing.
        exit()

    df = pd.read_parquet(df_pqt_path)
    print('Read parquet with pandas DataFrame')

    # Extract selected fields
    loc_fields = ['station_id',
                  'location_epoch_s',
                  'location_latitude',
                  'location_longitude',
                  'location_altitude',
                  'location_speed',
                  'location_horizontal_accuracy',
                  'barometer_epoch_s',
                  'barometer_wf_raw']
    df_loc = df[loc_fields]

    return df_loc


def bounder_data(path_bounder_csv: str, file_bounder_csv: str, file_bounder_parquet: str) -> None:
    """
    Load data from balloon-based Bounder platform and save it as parquet.

    :param path_bounder_csv: path/to/bounder csv and parquet files
    :param file_bounder_csv: name bounder csv file
    :param file_bounder_parquet: name bounder parquet file
    :return: save as parquet
    """
    # Event-specific start date and curated file
    # Bounder Skyfall starts at 13:45:00, end at 14:16:00
    # NOTE(review): the date and the row window below are hard-coded for the Skyfall event.
    yyyymmdd = "2020-10-27 "
    rows = np.arange(5320, 7174)

    input_path = os.path.join(path_bounder_csv, file_bounder_csv)
    print('Input', input_path)
    output_path = os.path.join(path_bounder_csv, file_bounder_parquet)

    # Only the curated row window is read; column indices select the telemetry fields.
    df = pd.read_csv(input_path, usecols=[5, 6, 7, 8, 9, 10, 11],
                     skiprows=lambda x: x not in rows,
                     names=['Pres_kPa', 'Temp_C', 'Batt_V', 'Lon_deg', 'Lat_deg', 'Alt_m', 'Time_hhmmss'])
    dtime = pd.to_datetime(yyyymmdd + df['Time_hhmmss'], origin='unix')

    # Convert datetime to unix nanoseconds, then to seconds
    # dtime_unix_s = dtime.astype('int64')*NANOS_TO_S  # Deprecated
    dtime_unix_s = dtime.view('int64')*NANOS_TO_S  # Python 3.9

    skyfall_bounder_loc = df.filter(['Lat_deg', 'Lon_deg', 'Alt_m', 'Pres_kPa', 'Temp_C', 'Batt_V'])
    skyfall_bounder_loc.insert(0, 'Epoch_s', dtime_unix_s)
    skyfall_bounder_loc.insert(1, 'Datetime', dtime)

    print(skyfall_bounder_loc['Epoch_s'])

    # Save to parquet
    skyfall_bounder_loc.to_parquet(output_path)


def bounder_model_height_from_pressure(pressure_kPa: np.ndarray) -> np.ndarray:
    """
    Returns empirical height in m from input pressure.

    :param pressure_kPa: Atmospheric pressure in kPa
    :return: Height in m above WGS84 Geoid
    """
    pressure_ref_kPa = PRESSURE_SEA_LEVEL_KPA
    # Log-pressure ratio relative to sea level; positive above sea level
    scaled_pressure = -np.log(pressure_kPa/pressure_ref_kPa)
    # Empirical model constructed from
    c = [1.52981286e+02, 7.39552295e+03, 2.44663285e+03, -3.57402081e+03, 2.02653051e+03,
         -6.26581722e+02, 1.11758211e+02, -1.08674469e+01, 4.46784010e-01]
    elevation_m = np.polynomial.polynomial.polyval(scaled_pressure, c, tensor=False)
    return elevation_m


def compute_t_xyz_uvw(unix_s: Any,
                      lat_deg: Any,
                      lon_deg: Any,
                      alt_m: Any,
                      ref_unix_s: Any,
                      ref_lat_deg: Any,
                      ref_lon_deg: Any,
                      ref_alt_m: Any,
                      geodetic_type: str = 'enu') -> pd.DataFrame:
    """
    Compute time and location relative to a reference value; compute speed.

    :param unix_s: target timestamp
    :param lat_deg: target geodetic latitude
    :param lon_deg: target geodetic longitude
    :param alt_m: target altitude above ellipsoid (meters)
    :param ref_unix_s: observer timestamp
    :param ref_lat_deg: observer geodetic latitude
    :param ref_lon_deg: observer geodetic longitude
    :param ref_alt_m: observer altitude above geodetic ellipsoid (meters)
    :param geodetic_type: 'enu' or 'ned'; any other value falls back to a flat-earth
        degrees-to-meters approximation
    :return: pandas DataFrame with columns:
        {'T_s', 'X_m', 'Y_m', 'Z_m', 'U_mps', 'V_mps', 'W_mps', 'Speed_mps'}
    """
    if geodetic_type == 'enu':
        x_m, y_m, z_m = pm.geodetic2enu(lat=lat_deg, lon=lon_deg, h=alt_m,
                                        lat0=ref_lat_deg, lon0=ref_lon_deg, h0=ref_alt_m)
        t_s = (unix_s - ref_unix_s).astype(float)
    elif geodetic_type == 'ned':
        # NED returns (north, east, down); assignment order remaps into x, y, z here.
        y_m, x_m, z_m = pm.geodetic2ned(lat=lat_deg, lon=lon_deg, h=alt_m,
                                        lat0=ref_lat_deg, lon0=ref_lon_deg, h0=ref_alt_m)
        t_s = (unix_s - ref_unix_s).astype(float)
    else:
        # Flat-earth fallback: scale degree offsets to meters
        x_m = (lon_deg - ref_lon_deg).astype(float) * DEGREES_TO_METERS
        y_m = (lat_deg - ref_lat_deg).astype(float) * DEGREES_TO_METERS
        z_m = (alt_m - ref_alt_m).astype(float)
        t_s = (unix_s - ref_unix_s).astype(float)

    # Speed in mps. Compute diff, add EPSILON to avoid divide by zero on repeat values
    u_mps = np.gradient(x_m)/(np.gradient(t_s)+EPSILON)
    v_mps = np.gradient(y_m)/(np.gradient(t_s)+EPSILON)
    w_mps = np.gradient(z_m)/(np.gradient(t_s)+EPSILON)
    speed_mps = np.sqrt(u_mps**2 + v_mps**2 + w_mps**2)

    t_xyzuvw_s_m = pd.DataFrame(data={'T_s': t_s,
                                      'X_m': x_m,
                                      'Y_m': y_m,
                                      'Z_m': z_m,
                                      'U_mps': u_mps,
                                      'V_mps': v_mps,
                                      'W_mps': w_mps,
                                      'Speed_mps': speed_mps})
    return t_xyzuvw_s_m


def compute_t_r_z_speed(unix_s: Any,
                        lat_deg: Any,
                        lon_deg: Any,
                        alt_m: Any,
                        ref_unix_s: Any,
                        ref_lat_deg: Any,
                        ref_lon_deg: Any,
                        ref_alt_m: Any,
                        geodetic_type: str = 'enu') -> pd.DataFrame:
    """
    Compute time and location relative to a reference value; compute speed.

    :param unix_s: target timestamp
    :param lat_deg: target geodetic latitude
    :param lon_deg: target geodetic longitude
    :param alt_m: target altitude above ellipsoid (meters)
    :param ref_unix_s: observer timestamp
    :param ref_lat_deg: observer geodetic latitude
    :param ref_lon_deg: observer geodetic longitude
    :param ref_alt_m: observer altitude above geodetic ellipsoid (meters)
    :param geodetic_type: 'enu' or 'ned'; any other value falls back to a flat-earth
        degrees-to-meters approximation
    :return: pandas DataFrame with columns: {'Elapsed_s', 'Range_m', 'Z_m', 'LatLon_speed_mps'}
    """
    if geodetic_type == 'enu':
        x_m, y_m, z_m = pm.geodetic2enu(lat=lat_deg, lon=lon_deg, h=alt_m,
                                        lat0=ref_lat_deg, lon0=ref_lon_deg, h0=ref_alt_m)
        t_s = (unix_s - ref_unix_s).astype(float)
    elif geodetic_type == 'ned':
        # NED returns (north, east, down); assignment order remaps into x, y, z here.
        y_m, x_m, z_m = pm.geodetic2ned(lat=lat_deg, lon=lon_deg, h=alt_m,
                                        lat0=ref_lat_deg, lon0=ref_lon_deg, h0=ref_alt_m)
        t_s = (unix_s - ref_unix_s).astype(float)
    else:
        # Flat-earth fallback: scale degree offsets to meters
        x_m = (lon_deg - ref_lon_deg).astype(float) * DEGREES_TO_METERS
        y_m = (lat_deg - ref_lat_deg).astype(float) * DEGREES_TO_METERS
        z_m = (alt_m - ref_alt_m).astype(float)
        t_s = (unix_s - ref_unix_s).astype(float)

    # Speed in mps. Compute diff, add EPSILON to avoid divide by zero on repeat values
    u_mps = np.gradient(x_m)/(np.gradient(t_s)+EPSILON)
    v_mps = np.gradient(y_m)/(np.gradient(t_s)+EPSILON)
    w_mps = np.gradient(z_m)/(np.gradient(t_s)+EPSILON)

    # Horizontal range from the reference point
    range_m = np.sqrt(x_m**2 + y_m**2)
    # NOTE(review): np.ma.sqrt (masked) is used here while compute_t_xyz_uvw uses
    # np.sqrt -- presumably to tolerate masked gradients; confirm the asymmetry is intended.
    speed_mps = np.ma.sqrt(u_mps**2 + v_mps**2 + w_mps**2)

    time_range_z_speed_s_m = pd.DataFrame(data={'Elapsed_s': t_s,
                                                'Range_m': range_m,
                                                'Z_m': z_m,
                                                'LatLon_speed_mps': speed_mps})
    return time_range_z_speed_s_m
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_geospatial.py
0.806472
0.447521
redpd_geospatial.py
pypi
import os
import enum
from typing import List, Optional, Dict, Union
import pprint


class DataLoadMethod(enum.Enum):
    """Supported mechanisms for loading RedVox data."""
    UNKNOWN = 0
    DATAWINDOW = 1
    PICKLE = 2
    PARQUET = 3

    @staticmethod
    def method_from_str(method_str: str) -> "DataLoadMethod":
        """
        Map a case-insensitive method name to its enum member.

        :param method_str: "datawindow", "pickle", or "parquet" (any case)
        :return: matching DataLoadMethod, or UNKNOWN if unrecognized
        """
        method = method_str.lower()
        if method == "datawindow":
            return DataLoadMethod.DATAWINDOW
        elif method == "pickle":
            return DataLoadMethod.PICKLE
        elif method == "parquet":
            return DataLoadMethod.PARQUET
        else:
            return DataLoadMethod.UNKNOWN


class RedpdConfig:

    def __init__(self, input_directory: str,
                 event_name: str = "Redvox",
                 output_directory: Optional[str] = None,
                 output_filename_pkl_pqt: Optional[str] = None,
                 station_ids: Optional[List[str]] = None,
                 sensor_labels: Optional[List[str]] = None,
                 event_start_epoch_s: Optional[float] = None,
                 duration_s: Optional[int] = None,
                 start_buffer_minutes: Optional[int] = 3,
                 end_buffer_minutes: Optional[int] = 3,
                 tdr_load_method: Optional[str] = "datawindow"):
        """
        Configuration parameters for RedPandas.

        :param input_directory: string, directory that contains the files to read data from. REQUIRED
        :param event_name: optional string, name of event. Default is "Redvox"
        :param output_directory: optional string, directory to save created pickle/JSON/parquet files
        :param output_filename_pkl_pqt: optional string, name of created parquet and pickle files
        :param station_ids: optional list of strings, list of station ids to filter on
        :param sensor_labels: optional list of strings, list of sensors. Default is "audio"
        :param event_start_epoch_s: optional float, start time in epoch s. Default is None
        :param duration_s: optional int, duration of event in minutes. Default is None
        :param start_buffer_minutes: float representing the amount of minutes to include before the start datetime
            when filtering data. Default is 3
        :param end_buffer_minutes: float representing the amount of minutes to include before the end datetime
            when filtering data. Default is 3
        :param tdr_load_method: optional string, choose loading data method: "datawindow", "pickle", or "parquet".
            Default is "datawindow"
        """
        self.input_dir = input_directory
        self.event_name = event_name

        # Check if input and output dir exists
        if not os.path.exists(self.input_dir):
            print(f"Input directory does not exist, check path: {self.input_dir}")
            exit()
        if output_directory is not None:
            self.output_dir = output_directory
            if not os.path.exists(self.output_dir):
                print(f"Creating output directory: {self.output_dir}")
                os.mkdir(self.output_dir)
        else:
            # Default output location lives under the input directory
            self.output_dir = os.path.join(self.input_dir, "rpd_files")

        if output_filename_pkl_pqt is None:
            self.output_filename_pkl_pqt = event_name
        else:
            self.output_filename_pkl_pqt = output_filename_pkl_pqt

        self.dw_file: str = self.output_filename_pkl_pqt + ".pkl"
        self.pd_pqt_file: str = self.output_filename_pkl_pqt + "_df.parquet"

        self.station_ids = station_ids

        if sensor_labels is not None:
            self.sensor_labels = sensor_labels
        else:
            self.sensor_labels = ["audio"]

        self.event_start_epoch_s = event_start_epoch_s
        self.duration_s = duration_s
        # FIX: original computed start + duration whenever duration_s was set, which
        # raised TypeError when event_start_epoch_s was None. End time now requires both.
        if duration_s is not None and event_start_epoch_s is not None:
            self.event_end_epoch_s: float = self.event_start_epoch_s + self.duration_s
        else:
            self.event_end_epoch_s = None

        self.start_buffer_minutes = start_buffer_minutes
        self.end_buffer_minutes = end_buffer_minutes

        self.tdr_load_method = DataLoadMethod.method_from_str(tdr_load_method)

    def pretty(self) -> str:
        """Return a pretty-printed string of all configuration attributes."""
        # noinspection Mypy
        return pprint.pformat(vars(self))


class TFRConfig:

    def __init__(self, tfr_type: str,
                 tfr_order_number_N: int,
                 show_fig_titles: bool,
                 mesh_color_scale: Optional[Union[Dict[str, str], str]] = 'range',
                 mesh_color_range: Optional[Union[Dict[str, float], float]] = 18.,
                 sensor_highpass: Optional[Union[Dict[str, bool], bool]] = True,
                 tfr_load_method: Optional[str] = "datawindow"):
        """
        Configuration parameters for skyfall_tfr_rpd.

        FIX: the Union annotations used a boolean `or` (e.g. `Union[Dict[str, str] or str]`),
        which evaluates to just the dict type; corrected to proper Union members.

        :param tfr_type: string, 'stft' or 'cwt'
        :param tfr_order_number_N: int, order number of the transform
        :param show_fig_titles: bool, display or hide figure titles
        :param mesh_color_scale: string or dictionary of strings, color scale mode for spectrograms
        :param mesh_color_range: float or dictionary of floats, color range for spectrograms
        :param sensor_highpass: boolean or dictionary of booleans, use highpass of data if available
        :param tfr_load_method: optional string, choose loading data method: "datawindow", "pickle", or "parquet"
        """
        self.tfr_type = tfr_type
        self.tfr_order_number_N = tfr_order_number_N
        self.show_fig_titles = show_fig_titles
        self.tfr_load_method = DataLoadMethod.method_from_str(tfr_load_method)
        self.sensor_labels = ['Audio', 'Bar', 'Acc', 'Gyr', 'Mag']

        n = len(self.sensor_labels)
        # A scalar applies to every sensor; a dict overrides the default per label.
        if isinstance(mesh_color_scale, str):
            self.mc_scale = dict(zip(self.sensor_labels, n*[mesh_color_scale]))
        else:
            self.mc_scale = dict(zip(self.sensor_labels, n*['range']))
            for label in mesh_color_scale.keys():
                self.mc_scale[label] = mesh_color_scale[label]

        if isinstance(mesh_color_range, float):
            self.mc_range = dict(zip(self.sensor_labels, n*[mesh_color_range]))
        else:
            self.mc_range = dict(zip(self.sensor_labels, n*[18.]))
            for label in mesh_color_range.keys():
                self.mc_range[label] = mesh_color_range[label]

        # Audio (first label) has no highpass entry
        if isinstance(sensor_highpass, bool):
            self.sensor_hp = dict(zip(self.sensor_labels[1:], (n-1)*[sensor_highpass]))
        else:
            self.sensor_hp = dict(zip(self.sensor_labels[1:], (n-1)*[True]))
            for label in sensor_highpass.keys():
                self.sensor_hp[label] = sensor_highpass[label]

        # Which sensors carry three-component (x, y, z) waveforms
        self.sensor_3d = dict(zip(['Audio', 'Bar', 'Acc', 'Gyr', 'Mag'],
                                  [False, False, True, True, True]))
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_config.py
0.788543
0.337422
redpd_config.py
pypi
from typing import Tuple
import pandas as pd
from matplotlib.figure import Figure

import redpandas.redpd_tfr as rpd_tfr
from redpandas import redpd_scales as rpd_scales
from redpandas.redpd_plot.mesh import plot_mesh_pandas
from redpandas.redpd_plot.wiggles import plot_wiggles_pandas, plot_wiggles_3c_pandas


def plot_wiggles_pandas_audio(df: pd.DataFrame,
                              start_time_window: float = 0.0,
                              end_time_window: float = 0.0) -> Figure:
    """
    Plot wiggles, set for Audio.

    :param df: input pandas dataframe
    :param start_time_window: float, epoch s
    :param end_time_window: float, epoch s
    :return: matplotlib figure instance
    """
    fig_audio = plot_wiggles_pandas(df=df,
                                    sig_wf_label="audio_wf",
                                    sig_timestamps_label="audio_epoch_s",
                                    sig_id_label="station_id",
                                    show_figure=False,
                                    fig_title="Normalized Signals for Audio",
                                    start_time_window=start_time_window,
                                    end_time_window=end_time_window)
    return fig_audio


def plot_wiggles_pandas_bar(df: pd.DataFrame,
                            start_time_window: float = 0.0,
                            end_time_window: float = 0.0) -> Figure:
    """
    Plot wiggles, set for Barometer (highpassed waveform).

    :param df: input pandas dataframe
    :param start_time_window: float, epoch s
    :param end_time_window: float, epoch s
    :return: matplotlib figure instance
    """
    fig_bar = plot_wiggles_pandas(df=df,
                                  sig_wf_label="barometer_wf_highpass",
                                  sig_timestamps_label="barometer_epoch_s",
                                  sig_id_label="station_id",
                                  show_figure=False,
                                  fig_title="Normalized Signals for Barometer",
                                  start_time_window=start_time_window,
                                  end_time_window=end_time_window)
    return fig_bar


def plot_wiggles_pandas_acc(df: pd.DataFrame,
                            start_time_window: float = 0.0,
                            end_time_window: float = 0.0) -> Tuple[Figure, Figure, Figure]:
    """
    Plot wiggles, set for Accelerometer (three components).

    :param df: input pandas dataframe
    :param start_time_window: float, epoch s
    :param end_time_window: float, epoch s
    :return: matplotlib figure instance x3 (x, y, z components)
    """
    fig_x, fig_y, fig_z = plot_wiggles_3c_pandas(df=df,
                                                 sig_wf_label='accelerometer_wf_highpass',
                                                 sig_timestamps_label='accelerometer_epoch_s',
                                                 fig_title="Normalized Signals for Accelerometer",
                                                 show_figure=False,
                                                 start_time_window=start_time_window,
                                                 end_time_window=end_time_window)
    return fig_x, fig_y, fig_z


def plot_wiggles_pandas_gyr(df: pd.DataFrame,
                            start_time_window: float = 0.0,
                            end_time_window: float = 0.0) -> Tuple[Figure, Figure, Figure]:
    """
    Plot wiggles, set for Gyroscope (three components).

    :param df: input pandas dataframe
    :param start_time_window: float, epoch s
    :param end_time_window: float, epoch s
    :return: matplotlib figure instance x3 (x, y, z components)
    """
    fig_x, fig_y, fig_z = plot_wiggles_3c_pandas(df=df,
                                                 sig_wf_label='gyroscope_wf_highpass',
                                                 sig_timestamps_label='gyroscope_epoch_s',
                                                 fig_title="Normalized Signals for Gyroscope",
                                                 show_figure=False,
                                                 start_time_window=start_time_window,
                                                 end_time_window=end_time_window)
    return fig_x, fig_y, fig_z


def plot_wiggles_pandas_mag(df: pd.DataFrame,
                            start_time_window: float = 0.0,
                            end_time_window: float = 0.0) -> Tuple[Figure, Figure, Figure]:
    """
    Plot wiggles, set for Magnetometer (three components).

    :param df: input pandas dataframe
    :param start_time_window: float, epoch s
    :param end_time_window: float, epoch s
    :return: matplotlib figure instance x3 (x, y, z components)
    """
    fig_x, fig_y, fig_z = plot_wiggles_3c_pandas(df=df,
                                                 sig_wf_label='magnetometer_wf_highpass',
                                                 sig_timestamps_label='magnetometer_epoch_s',
                                                 fig_title="Normalized Signals for Magnetometer",
                                                 show_figure=False,
                                                 start_time_window=start_time_window,
                                                 end_time_window=end_time_window)
    return fig_x, fig_y, fig_z


def tfr_bits_panda_audio(df: pd.DataFrame,
                         start_time_window: float = 0.0,
                         end_time_window: float = 0.0,
                         tfr_type: str = 'stft',
                         order_number_input: int = 12):
    """
    Calculate TFR (time-frequency representation), set for Audio.

    :param df: input pandas dataframe
    :param start_time_window: float, epoch s
    :param end_time_window: float, epoch s
    :param tfr_type: 'stft' or 'cwt'. Default is 'stft'
    :param order_number_input: default is 12
    :return: dataframe with added audio TFR columns
    """
    # Audio TFR
    df_tfr = rpd_tfr.tfr_bits_panda_window(df=df,
                                           sig_wf_label="audio_wf",
                                           sig_sample_rate_label="audio_sample_rate_nominal_hz",
                                           sig_timestamps_label="audio_epoch_s",
                                           order_number_input=order_number_input,
                                           tfr_type=tfr_type,
                                           new_column_tfr_bits="audio_tfr_bits",
                                           new_column_tfr_frequency_hz="audio_tfr_frequency_hz",
                                           new_column_tfr_time_s="audio_tfr_time_s",
                                           start_time_window=start_time_window,
                                           end_time_window=end_time_window)
    return df_tfr


def plot_mesh_pandas_audio(df: pd.DataFrame,
                           frequency_hz_ymin: float = 1.0,
                           frequency_hz_ymax: float = 0.0,
                           frequency_scaling: str = "log",
                           start_time_window: float = 0.0,):
    """
    Plot mesh (spectrogram), set for Audio.

    :param df: input pandas dataframe
    :param frequency_hz_ymin: float, default is 1.0
    :param frequency_hz_ymax: float, sets to Nyquist if 0.0
    :param frequency_scaling: 'log' or 'lin', default is 'log'
    :param start_time_window: float, epoch s
    :return: matplotlib figure instance
    """
    # NOTE(review): ymax falls back to Slice.F0 and ymin to Slice.FU -- presumably
    # the top and bottom of the default frequency slice; confirm against redpd_scales,
    # the pairing looks like it could be swapped.
    if frequency_hz_ymax == 0.0:
        frequency_hz_ymax = rpd_scales.Slice.F0
    if frequency_hz_ymin == 0.0:
        frequency_hz_ymin = rpd_scales.Slice.FU
    # Guard against an inverted frequency window
    if frequency_hz_ymin >= frequency_hz_ymax and frequency_hz_ymin != 0.0:
        frequency_hz_ymin = 1.0
        frequency_hz_ymax = rpd_scales.Slice.F0

    if start_time_window == 0.0:
        # Default datum: first timestamp of the first station
        start_mesh = df['audio_epoch_s'][0][0]
    else:
        start_mesh = start_time_window

    spectr_audio = plot_mesh_pandas(df=df,
                                    mesh_time_label="audio_tfr_time_s",
                                    mesh_frequency_label="audio_tfr_frequency_hz",
                                    mesh_tfr_label="audio_tfr_bits",
                                    sig_id_label='station_id',
                                    t0_sig_epoch_s=start_mesh,
                                    fig_title="STFT Audio",
                                    frequency_scaling=frequency_scaling,
                                    frequency_hz_ymin=frequency_hz_ymin,
                                    frequency_hz_ymax=frequency_hz_ymax,
                                    mesh_color_scaling="range",
                                    mesh_color_range=16.0,
                                    show_figure=False)
    return spectr_audio
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_report.py
0.936836
0.479626
redpd_report.py
pypi
from enum import Enum
from typing import Tuple, Union

import numpy as np
from scipy import signal
import obspy.signal.filter
import pandas as pd

# RedVox and RedPandas
from redvox.common import date_time_utils as dt
import redpandas.redpd_iterator as rdp_iter
import redpandas.redpd_scales as rpd_scales


# Define classes
class NormType(Enum):
    """
    Enumeration of normalization types.
    """
    MAX: str = "max"
    L1: str = "l1"
    L2: str = "l2"
    OTHER: str = "other"


# Auxiliary modules for building stations
def find_nearest_idx(array: np.ndarray, value: float):
    """
    Return index of the element of array nearest to value.

    :param array: np.array
    :param value: float/int
    :return: index of nearest element
    """
    array = np.asarray(array)
    idx_array = (np.abs(array - value)).argmin()
    return idx_array


def datetime_now_epoch_s() -> float:
    """
    Returns the invocation Unix time in seconds.

    :return: The current epoch timestamp as seconds since the epoch UTC
    """
    return dt.datetime_to_epoch_seconds_utc(dt.now())


def datetime_now_epoch_micros() -> float:
    """
    Returns the invocation Unix time in microseconds.

    :return: The current epoch timestamp as microseconds since the epoch UTC
    """
    return dt.datetime_to_epoch_microseconds_utc(dt.now())


def normalize(sig_wf: np.ndarray,
              scaling: float = 1.,
              norm_type: NormType = NormType.MAX) -> np.ndarray:
    """
    Scale a 1D time series.

    :param sig_wf: signal waveform
    :param scaling: scaling parameter, division (used only for NormType.OTHER)
    :param norm_type: {'max', l1, l2}, optional
    :return: The scaled series
    """
    if norm_type == NormType.MAX:
        return sig_wf / np.nanmax(np.abs(sig_wf))
    elif norm_type == NormType.L1:
        # NOTE(review): a true L1 norm would use nansum of |sig_wf|; this divides by the
        # signed sum -- confirm intent before changing, callers may depend on it.
        return sig_wf / np.nansum(sig_wf)
    elif norm_type == NormType.L2:
        return sig_wf / np.sqrt(np.nansum(sig_wf * sig_wf))
    else:  # Must be NormType.OTHER
        return sig_wf / scaling


def demean_nan(sig_wf: np.ndarray) -> np.ndarray:
    """
    Remove the nan-aware mean from a 1D time series and zero out NaNs.

    :param sig_wf: signal waveform
    :return: Demeaned time series with NaNs replaced by zero
    """
    return np.nan_to_num(sig_wf - np.nanmean(sig_wf))


def detrend_nan(sig_wf: np.ndarray) -> np.ndarray:
    """
    Detrend a demeaned 1D time series.

    :param sig_wf: signal waveform
    :return: Detrended time series
    """
    return signal.detrend(demean_nan(sig_wf))


def demean_nan_norm(sig_wf: np.ndarray,
                    scaling: float = 1.,
                    norm_type: NormType = NormType.MAX) -> np.ndarray:
    """
    Demean and normalize a 1D time series.

    :param sig_wf: signal waveform
    :param scaling: scaling parameter, division
    :param norm_type: {'max', l1, l2}, overrides scikit default of 'l2' by 'max'
    :return: The demeaned and normalized series
    """
    return normalize(demean_nan(sig_wf), scaling=scaling, norm_type=norm_type)


def demean_nan_matrix(sig_wf: np.ndarray) -> np.ndarray:
    """
    Remove the nan-aware row mean from a matrix of time series.

    :param sig_wf: signal waveform matrix, one time series per row
    :return: The demeaned matrix with NaNs replaced by zero
    """
    return np.nan_to_num(np.subtract(sig_wf.transpose(), np.nanmean(sig_wf, axis=1))).transpose()


def taper_tukey(sig_wf_or_time: np.ndarray,
                fraction_cosine: float) -> np.ndarray:
    """
    Construct a symmetric Tukey window with the same dimensions as a time or signal numpy array.
    fraction_cosine = 0 is a rectangular window, 1 is a Hann window.

    :param sig_wf_or_time: input signal or time
    :param fraction_cosine: fraction of the window inside the cosine tapered window,
        shared between the head and tail
    :return: tukey taper window amplitude
    """
    return signal.windows.tukey(M=np.size(sig_wf_or_time), alpha=fraction_cosine, sym=True)


def pad_reflection_symmetric(sig_wf: np.ndarray) -> Tuple[np.ndarray, int]:
    """
    Apply reflection transformation: pad half the signal length on each edge, then taper.

    :param sig_wf: signal waveform
    :return: input signal with reflected edges, number of points folded per edge
    """
    number_points_to_flip_per_edge = int(len(sig_wf)//2)
    wf_folded = np.pad(np.copy(sig_wf),
                       (number_points_to_flip_per_edge, number_points_to_flip_per_edge),
                       'reflect')
    wf_folded *= taper_tukey(wf_folded, fraction_cosine=0.5)
    return wf_folded, number_points_to_flip_per_edge


def filter_reflection_highpass(sig_wf: np.ndarray,
                               sample_rate_hz: Union[float, int],
                               filter_cutoff_hz: float) -> np.ndarray:
    """
    Apply fold filter to input signal (edges reflected) and highpass, then crop the pads.

    :param sig_wf: signal waveform
    :param sample_rate_hz: sampling rate in Hz
    :param filter_cutoff_hz: filter corner frequency in Hz
    :return: signal folded and filtered, original length
    """
    wf_folded, number_points_to_flip_per_edge = pad_reflection_symmetric(sig_wf)
    sig_folded_filtered = obspy.signal.filter.highpass(np.copy(wf_folded),
                                                       filter_cutoff_hz,
                                                       sample_rate_hz, corners=4,
                                                       zerophase=True)
    return sig_folded_filtered[number_points_to_flip_per_edge:-number_points_to_flip_per_edge]


def height_asl_from_pressure_below10km(bar_waveform: np.ndarray) -> np.ndarray:
    """
    Simple model for troposphere.

    :param bar_waveform: barometric pressure in kPa
    :return: height ASL in m
    """
    return -np.log(bar_waveform/rpd_scales.Slice.PREF_KPA)/rpd_scales.MG_RT


def model_height_from_pressure_skyfall(pressure_kpa: np.ndarray) -> np.ndarray:
    """
    Returns empirical height in m from input pressure.

    :param pressure_kpa: barometric pressure in kPa
    :return: height in m
    """
    scaled_pressure = -np.log(pressure_kpa / rpd_scales.PRESSURE_REF_kPa)
    # Empirical model constructed from
    # c, stats = np.polynomial.polynomial.polyfit(poly_x, bounder_loc['Alt_m'], 8, full=True)
    c = [1.52981286e+02, 7.39552295e+03, 2.44663285e+03, -3.57402081e+03, 2.02653051e+03,
         -6.26581722e+02, 1.11758211e+02, -1.08674469e+01, 4.46784010e-01]
    return np.polynomial.polynomial.polyval(scaled_pressure, c, tensor=False)


def rc_high_pass_signal(sig_wf: np.ndarray,
                        sample_rate_hz: int,
                        highpass_cutoff: float) -> np.ndarray:
    """
    Apply RC high pass filter to signal.

    :param sig_wf: signal waveform
    :param sample_rate_hz: sampling rate in Hz
    :param highpass_cutoff: filter corner frequency in Hz
    :return: highpassed signal as a column array of shape (len(sig_wf), 1)
    """
    return np.array([[high]
                     for high in rdp_iter.rc_iterator_high_pass(sig_wf, sample_rate_hz, highpass_cutoff)])


# "Traditional" solution, up to Nyquist
def bandpass_butter_uneven(sig_wf: np.ndarray,
                           sample_rate_hz: int,
                           frequency_cut_low_hz: float,
                           filter_order: int) -> np.ndarray:
    """
    Apply a zero-phase Butterworth bandpass filter to a 1D signal.

    :param sig_wf: signal waveform
    :param sample_rate_hz: sampling rate in Hz
    :param frequency_cut_low_hz: filter corner frequency in Hz
    :param filter_order: filter corners / order
    :return: bandpassed signal
    """
    # Frequencies are scaled by Nyquist, with 1 = Nyquist
    nyquist = 0.5 * sample_rate_hz
    edge_low = frequency_cut_low_hz / nyquist
    # Upper band edge fixed at half Nyquist (i.e. a quarter of the sample rate)
    edge_high = 0.5
    [b, a] = signal.butter(N=filter_order, Wn=[edge_low, edge_high], btype='bandpass')
    return signal.filtfilt(b, a, np.copy(sig_wf))


def xcorr_uneven(sig_x: np.ndarray, sig_ref: np.ndarray):
    """
    Variation of cross-correlation function cross_stas.xcorr_all for unevenly sampled data
    with identical sampling and duration.

    FIX: the original printed a warning for unequal lengths but then fell through and
    implicitly returned None, while the failure branch returned a 5-tuple; all error
    paths now return the same sentinel tuple. The unreachable final `else` was removed.

    :param sig_x: processed signal
    :param sig_ref: reference signal
    :return: xcorr, xcorr_indexes, xcorr_peak, xcorr_offset_index, xcorr_offset_samples;
        empty arrays / NaNs on length mismatch
    """
    nx = len(sig_x)
    nref = len(sig_ref)
    if nx != nref:
        print('Vectors must have equal sampling and lengths')
        return np.array([]), np.array([]), np.nan, np.nan, np.array([])

    # Cross correlation is centered in the middle of the record and has length NX
    # Fastest, o(NX) and can use FFT solution
    if nx % 2 == 0:
        xcorr_indexes = np.arange(-int(nx/2), int(nx/2))
    else:
        xcorr_indexes = np.arange(-int(nx/2), int(nx/2)+1)

    xcorr = signal.correlate(sig_ref, sig_x, mode='same')
    # Normalize by record length and standard deviations
    xcorr /= nx * sig_x.std() * sig_ref.std()
    # Allows negative peak in cross correlation (pi phase shift)
    xcorr_offset_index = np.argmax(np.abs(xcorr))
    xcorr_offset_samples = xcorr_indexes[xcorr_offset_index]
    xcorr_peak = xcorr[xcorr_offset_index]

    return xcorr, xcorr_indexes, xcorr_peak, xcorr_offset_index, xcorr_offset_samples


def highpass_from_diff(sig_wf: np.ndarray,
                       sig_epoch_s: np.ndarray,
                       sample_rate_hz: Union[int, float],
                       fold_signal: bool = True,
                       highpass_type: str = 'obspy',
                       frequency_filter_low: float = 1./rpd_scales.Slice.T100S,
                       filter_order: int = 4) -> Tuple[np.ndarray, float]:
    """
    Preprocess barometer data:
    - remove nans and DC offset by getting the differential pressure in kPa
    - apply highpass filter at 100 second periods
    - reconstruct Pressure in kPa from differential pressure: P(i) = dP(i) + P(i-1)

    FIX: annotation `int or float` evaluated to `int`; corrected to Union[int, float].

    :param sig_wf: signal waveform
    :param sig_epoch_s: signal time in epoch s
    :param sample_rate_hz: sampling rate in Hz
    :param fold_signal: apply reflection transformation and fold edges
    :param highpass_type: 'obspy', 'butter', 'rc'
    :param frequency_filter_low: apply highpass filter. Default is 100 second periods
    :param filter_order: filter corners / order. Default is 4. Zero phase filters are acausal
    :return: filtered signal waveform, frequency_filter_low value used
    """
    # Apply diff to remove DC offset; difference of nans is a nan
    # Replace nans with zeros, otherwise most things don't run
    # Using gradient instead of diff seems to fix off by zero issue!
    sensor_waveform_grad_dm = demean_nan(np.gradient(sig_wf))

    # Override default high pass at 100 seconds if signal is too short
    # May be able to zero pad ... with ringing. Or fold as needed.
    if sig_epoch_s[-1] - sig_epoch_s[0] < 2/frequency_filter_low:
        frequency_filter_low = 2/(sig_epoch_s[-1] - sig_epoch_s[0])
        print('Default 100s highpass override. New highpass period = ', 1/frequency_filter_low)

    # Fold edges of wf
    if fold_signal is True:
        sensor_waveform_fold, number_points_folded = pad_reflection_symmetric(sensor_waveform_grad_dm)
    else:
        sensor_waveform_fold = sensor_waveform_grad_dm

    if highpass_type == "obspy":
        # Zero phase, acausal
        sensor_waveform_dp_filtered = \
            obspy.signal.filter.highpass(corners=filter_order,
                                         data=np.copy(sensor_waveform_fold),
                                         freq=frequency_filter_low,
                                         df=sample_rate_hz,
                                         zerophase=True)
    elif highpass_type == "butter":
        [b, a] = signal.butter(N=filter_order,
                               Wn=frequency_filter_low,
                               fs=sample_rate_hz,
                               btype='highpass',
                               output='ba')
        # Zero phase, acausal
        sensor_waveform_dp_filtered = signal.filtfilt(b, a, sensor_waveform_fold)
    elif highpass_type == "rc":
        # RC is slow and not zero-phase, does not need a taper to work (but it doesn't hurt)
        sensor_waveform_dp_filtered = \
            rc_high_pass_signal(sig_wf=np.copy(sensor_waveform_fold),
                                sample_rate_hz=sample_rate_hz,
                                highpass_cutoff=frequency_filter_low)
    else:
        # ValueError is a subclass of Exception, so existing broad handlers still catch it
        raise ValueError("No filter selected. Type 'obspy', 'butter', or 'rc'.")

    if fold_signal is True:
        # Cut fold edges of wf
        sensor_waveform_dp_filtered = sensor_waveform_dp_filtered[number_points_folded:-number_points_folded]

    # Reconstruct Function dP: P(0), P(i) = dP(i) + P(i-1)
    sensor_waveform_reconstruct = np.zeros((len(sensor_waveform_dp_filtered)))
    # Initialize
    sensor_waveform_reconstruct[0] = sensor_waveform_dp_filtered[0]

    # FIX: original looped to len-1, leaving the final reconstructed sample at zero
    for i in range(1, len(sensor_waveform_dp_filtered)):
        sensor_waveform_reconstruct[i] = sensor_waveform_dp_filtered[i] + sensor_waveform_reconstruct[i-1]

    return sensor_waveform_reconstruct, frequency_filter_low


# Auxiliary functions to open parquets
def df_unflatten(df: pd.DataFrame) -> None:
    """
    Restores original shape of elements in all columns (in place). Used for loading
    a dataframe from parquet, where multi-dimensional arrays were stored flattened
    alongside a companion '<name>_ndim' column holding the original shape.

    :param df: pandas DataFrame
    :return: None; reshapes column values in place
    """
    df_ndim = df.filter(like='_ndim', axis=1)
    og_names = [col.replace('_ndim', '') for col in df_ndim.columns]

    for col_name in og_names:
        col_ndim_label = col_name + "_ndim"
        col_values = df[col_name].to_numpy()
        for index_array in df.index:
            if len(df[col_ndim_label][index_array]) > 1:  # check that there is data
                if len(df[col_ndim_label][index_array]) == 2:
                    col_values[index_array].shape = (int(df[col_ndim_label][index_array][0]),
                                                     int(df[col_ndim_label][index_array][1]))
                if len(df[col_ndim_label][index_array]) == 3:  # tfr
                    col_values[index_array].shape = (int(df[col_ndim_label][index_array][0]),
                                                     int(df[col_ndim_label][index_array][1]),
                                                     int(df[col_ndim_label][index_array][2]))


def df_column_unflatten(df: pd.DataFrame,
                        col_wf_label: str,
                        col_ndim_label: str) -> None:
    """
    Restores original shape of elements in one column (in place). Used for loading
    columns in a dataframe from parquet.

    :param df: pandas DataFrame
    :param col_wf_label: column label for data that needs reshaping, usually waveform arrays
    :param col_ndim_label: column label with dimensions for reshaping. Elements in column
        need to be a numpy array
    :return: None; replaces column values with reshaped ones
    """
    col_values = df[col_wf_label].to_numpy()
    for index_array in df.index:
        if len(df[col_ndim_label][index_array]) > 1:  # check that there is data
            if len(df[col_ndim_label][index_array]) == 2:
                col_values[index_array].shape = (int(df[col_ndim_label][index_array][0]),
                                                 int(df[col_ndim_label][index_array][1]))
            if len(df[col_ndim_label][index_array]) == 3:  # tfr
                col_values[index_array].shape = (int(df[col_ndim_label][index_array][0]),
                                                 int(df[col_ndim_label][index_array][1]),
                                                 int(df[col_ndim_label][index_array][2]))
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_preprocess.py
0.949623
0.613815
redpd_preprocess.py
pypi
import numpy as np
import pandas as pd
from scipy import signal
from typing import Tuple


def find_nearest(array: np.ndarray, value) -> np.ndarray:
    """
    Find index of the nearest value in a numpy array.

    NOTE(review): np.ceil rounds the differences before taking the minimum, so
    the result is not always the strictly closest sample — confirm intended.

    :param array: a numpy array
    :param value: value (or values) to search for in array
    :return: array of indices of the nearest entries
    """
    # https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    xi = np.argmin(np.abs(np.ceil(array[None].T - value)), axis=0)
    return xi


def plot_square(xnorm_max, xoffset_s, xoffset_points, sig_descriptor: str = "Signal") -> None:
    """
    Plot cross-correlation results as three matrix images.

    :param xnorm_max: normalized cross correlation
    :param xoffset_s: offset cross correlation in seconds
    :param xoffset_points: offset points in cross correlation
    :param sig_descriptor: label to describe signal. Default is "Signal"
    :return: None; draws three figures
    """
    # Deferred import so the numeric functions in this module can be used
    # without a matplotlib installation/backend.
    import matplotlib.pyplot as plt

    color_map = plt.get_cmap("Spectral_r")
    for matrix, title_suffix in ((xnorm_max, ' max cross-correlation in the time domain'),
                                 (xoffset_s, ' cross-correlation offset in s'),
                                 (xoffset_points, ' cross-correlation offset in points')):
        fig, ax = plt.subplots()
        im = ax.imshow(matrix, cmap=color_map, origin='lower')
        fig.colorbar(im)
        ax.set_title(sig_descriptor + title_suffix)


def most_similar_station_index(xnorm_max) -> Tuple[int, float]:
    """
    Sum over columns, subtract self xcorr (1), divide by number of stations - 1.

    :param xnorm_max: normalized cross correlation matrix
    :return: index of the station most similar to the ensemble, and its mean max xcorr
    """
    xnorm_max_sum = np.sum(xnorm_max, axis=1)
    xnorm_stats = (xnorm_max_sum - 1) / (len(xnorm_max_sum) - 1)
    xcorr_ref_index = int(np.argmax(xnorm_stats))
    xcorr_mean_max = xnorm_stats[xcorr_ref_index]
    return xcorr_ref_index, xcorr_mean_max


def _xcorr_pair(sig_m, sig_n, abs_xcorr: bool = True) -> Tuple[float, int]:
    """
    Normalized cross-correlation of one signal pair, handling unequal lengths.

    :param sig_m: first signal (reference in the re-ref sense)
    :param sig_n: second signal
    :param abs_xcorr: if True, the peak may be a negative lobe (pi phase shift)
    :return: (xcorr value at the peak, lag of the peak in samples)
    """
    # Cast to float so the in-place normalization below cannot fail on integer
    # waveforms (the original only carried a comment "Ensure it is a float").
    sig_m = np.asarray(sig_m, dtype=float)
    sig_n = np.asarray(sig_n, dtype=float)
    m_points = len(sig_m)
    n_points = len(sig_n)
    if n_points > m_points:
        # Cross correlation 'full' sums over the dimension of sig_n
        xcorr_indexes = np.arange(1 - n_points, m_points)
        xcorr = signal.correlate(sig_m, sig_n, mode='full')
        lag_sign = 1
    elif n_points < m_points:
        # Cross correlation 'full' sums over the dimension of sig_m; flip lag sign
        xcorr_indexes = np.arange(1 - m_points, n_points)
        xcorr = signal.correlate(sig_n, sig_m, mode='full')
        lag_sign = -1
    else:
        # Equal lengths: correlation is centered and has length n_points.
        # Fastest, O(N log N) via the FFT solution.
        half = int(n_points / 2)
        if n_points % 2 == 0:
            xcorr_indexes = np.arange(-half, half)
        else:
            xcorr_indexes = np.arange(-half, half + 1)
        xcorr = signal.correlate(sig_m, sig_n, mode='same')
        lag_sign = 1
    # Normalize by geometric mean of lengths and standard deviations
    xcorr /= np.sqrt(n_points * m_points) * sig_n.std() * sig_m.std()
    if abs_xcorr:
        # Allows negative peak in cross correlation (pi phase shift)
        xcorr_offset_index = np.argmax(np.abs(xcorr))
    else:
        # Must be in phase - for array processing
        xcorr_offset_index = np.argmax(xcorr)
    return xcorr[xcorr_offset_index], lag_sign * xcorr_indexes[xcorr_offset_index]


# Sort out time first: time gate input, refer to shared datum, correct times
def xcorr_pandas(df: pd.DataFrame,
                 sig_wf_label: str,
                 sig_sample_rate_label: str,
                 fs_fractional_tolerance: float = 0.02,
                 abs_xcorr: bool = True) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Return square matrices: a concise snapshot of the self-similarity of the input data set.

    :param df: input pandas data frame
    :param sig_wf_label: string for the waveform column name in df
    :param sig_sample_rate_label: string for the sample rate in Hz column name in df
    :param fs_fractional_tolerance: fractional difference in sample rate tolerated. Default is 0.02
    :param abs_xcorr: if True, the xcorr peak may be a negative lobe. Default is True
    :return: xcorr normalized, offset in seconds, and offset points (each number_sig x number_sig)
    """
    number_sig = len(df.index)
    print("Number of signals:", number_sig)
    # Initialize
    xcorr_offset_points = np.zeros((number_sig, number_sig))
    xcorr_offset_seconds = np.copy(xcorr_offset_points)
    xcorr_normalized_max = np.copy(xcorr_offset_points)
    # Enumerate positions: the original indexed these numpy matrices with the
    # dataframe index *labels*, which breaks for non-RangeIndex dataframes.
    for i, m in enumerate(df.index):
        for j, n in enumerate(df.index):
            fs_m = df[sig_sample_rate_label][m]
            fs_n = df[sig_sample_rate_label][n]
            if np.abs(fs_m - fs_n) > fs_fractional_tolerance * fs_m:
                print("Sample rates out of tolerance for index m,n =" + str(m) + "," + str(n))
                continue
            # Generalized sensor cross correlations, including unequal lengths
            peak_value, offset_samples = _xcorr_pair(df[sig_wf_label][m], df[sig_wf_label][n], abs_xcorr)
            xcorr_normalized_max[i, j] = peak_value
            xcorr_offset_seconds[i, j] = offset_samples / fs_n
            xcorr_offset_points[i, j] = offset_samples
    return xcorr_normalized_max, xcorr_offset_seconds, xcorr_offset_points
def xcorr_re_ref_pandas(df: pd.DataFrame,
                        ref_id_label: str,
                        sig_id_label: str,
                        sig_wf_label: str,
                        sig_sample_rate_label: str,
                        fs_fractional_tolerance: float = 0.02,
                        abs_xcorr: bool = True,
                        return_xcorr_full: bool = False,
                        new_column_label_xcorr_offset_points: str = 'xcorr_offset_points',
                        new_column_label_xcorr_offset_seconds: str = 'xcorr_offset_seconds',
                        new_column_label_xcorr_normalized_max: str = 'xcorr_normalized_max',
                        new_column_label_xcorr_full_array: str = 'xcorr_full') -> pd.DataFrame:
    """
    Add new columns per station with cross-correlation results relative to a reference station.

    :param df: input pandas data frame
    :param ref_id_label: reference station id, matched against the sig_id_label column
    :param sig_id_label: string for station id column name in df
    :param sig_wf_label: string for the waveform column name in df
    :param sig_sample_rate_label: string for the sample rate in Hz column name in df
    :param fs_fractional_tolerance: fractional sample-rate mismatch tolerated. Default is 0.02
    :param abs_xcorr: if True, the xcorr peak may be a negative lobe. Default is True
    :param return_xcorr_full: also store the full xcorr arrays. Default is False
    :param new_column_label_xcorr_offset_points: label for new column with xcorr offset points
    :param new_column_label_xcorr_offset_seconds: label for new column with xcorr offset seconds
    :param new_column_label_xcorr_normalized_max: label for new column with xcorr normalized max
    :param new_column_label_xcorr_full_array: label for new column with xcorr full array
    :return: input dataframe with new columns; stations skipped for sample-rate mismatch get NaN
    :raises ValueError: if no station matches ref_id_label
    """
    import warnings  # stdlib; imported locally to leave the module imports untouched

    number_sig = len(df.index)
    # BUG FIX: log message typo "Nmber" corrected
    print("XCORR Number of signals:", number_sig)

    m_list = df.index[df[sig_id_label] == ref_id_label]
    if len(m_list) == 0:
        # BUG FIX: the original indexed m_list[0] unconditionally, so its
        # 'Incorrect reference station id' branch was unreachable — an
        # IndexError escaped instead. Keep the message, raise a clear error.
        print('ERROR: Incorrect reference station id')
        raise ValueError("No station id matches " + str(ref_id_label))
    if len(m_list) > 1:
        # BUG FIX: the original *raised* Warning here, aborting execution
        # despite the message promising to pick the first instance.
        warnings.warn("More than one station meets the id spec. Picked first instance")
    m = m_list[0]

    def _pair(sig_ref, sig_other):
        """Normalized xcorr of one pair; returns (peak value, lag in samples, full xcorr)."""
        p_ref = len(sig_ref)
        p_other = len(sig_other)
        if p_other > p_ref:
            # Cross correlation 'full' sums over the dimension of sig_other
            idxs = np.arange(1 - p_other, p_ref)
            xc = signal.correlate(sig_ref, sig_other, mode='full')
            lag_sign = 1
        elif p_other < p_ref:
            # Cross correlation 'full' sums over the dimension of sig_ref; flip lag sign
            idxs = np.arange(1 - p_ref, p_other)
            xc = signal.correlate(sig_other, sig_ref, mode='full')
            lag_sign = -1
        else:
            # Equal lengths: correlation is centered and has length p_ref
            half = int(p_ref / 2)
            idxs = np.arange(-half, half) if p_ref % 2 == 0 else np.arange(-half, half + 1)
            xc = signal.correlate(sig_ref, sig_other, mode='same')
            lag_sign = 1
        # Normalize
        xc /= np.sqrt(p_ref * p_other) * sig_ref.std() * sig_other.std()
        if abs_xcorr:
            # Allows negative peak in cross correlation (pi phase shift)
            peak = np.argmax(np.abs(xc))
        else:
            # Must be in phase - for array processing
            peak = np.argmax(xc)
        return xc[peak], lag_sign * idxs[peak], xc

    print("XCORR Reference station ", df[sig_id_label][m])
    # Cast to float so the in-place normalization cannot fail on integer waveforms
    sig_m = np.asarray(df[sig_wf_label][m], dtype=float)

    # Initialize
    xcorr_offset_points = []
    xcorr_offset_seconds = []
    xcorr_normalized_max = []
    xcorr_full = []
    for n in df.index:
        fs_mismatch = np.abs(df[sig_sample_rate_label][m] - df[sig_sample_rate_label][n]) \
            > fs_fractional_tolerance * df[sig_sample_rate_label][m]
        if fs_mismatch:
            print("Sample rates out of tolerance")
            # BUG FIX: the original `continue`d without appending, leaving the
            # result lists shorter than len(df) so the column assignment below
            # raised. Keep row alignment with NaN placeholders instead.
            xcorr_normalized_max.append(np.nan)
            xcorr_offset_points.append(np.nan)
            xcorr_offset_seconds.append(np.nan)
            if return_xcorr_full:
                xcorr_full.append(None)
            continue
        # Generalized sensor cross correlations, including unequal lengths
        sig_n = np.asarray(df[sig_wf_label][n], dtype=float)
        peak_value, offset_samples, xc = _pair(sig_m, sig_n)
        # Main export parameters
        xcorr_normalized_max.append(peak_value)
        xcorr_offset_points.append(offset_samples)
        xcorr_offset_seconds.append(offset_samples / df[sig_sample_rate_label][n])
        if return_xcorr_full:
            xcorr_full.append(xc)

    # Convert to columns and add them to df
    df[new_column_label_xcorr_normalized_max] = xcorr_normalized_max
    df[new_column_label_xcorr_offset_points] = xcorr_offset_points
    df[new_column_label_xcorr_offset_seconds] = xcorr_offset_seconds
    if return_xcorr_full:
        df[new_column_label_xcorr_full_array] = xcorr_full
    return df
def spectcorr_re_ref_pandas(df: pd.DataFrame,
                            ref_id_label: str,
                            sig_id_label: str,
                            sig_tfr_label: str,
                            sig_tfr_frequency_label: str,
                            sig_sample_rate_label: str,
                            sig_tfr_frequency_low_hz_label: str,
                            sig_tfr_frequency_high_hz_label: str,
                            return_xcorr_full: bool = False,
                            new_column_label_xcorr_offset_points: str = 'spectcorr_offset_points',
                            new_column_label_xcorr_offset_seconds: str = 'spectcorr_offset_seconds',
                            new_column_label_xcorr_normalized_max: str = 'spectcorr_normalized_max',
                            new_column_label_xcorr_peak_frequency_hz: str = 'spectcorr_peak_frequency_hz',
                            new_column_label_xcorr_full_array: str = 'spectcorr_full',
                            new_column_label_xcorr_full_per_band: str = 'spectcorr_per_band_full',
                            new_column_label_xcorr_full_frequency_hz: str = 'spectcorr_frequency_hz') -> pd.DataFrame:
    """
    Returns new pandas columns per station with spectral correlation results relative to a reference station.

    Each station's time-frequency representation (TFR) is band-limited to the
    reference station's passband and cross-correlated against the reference
    TFR band by band along the time axis.

    :param df: input pandas data frame
    :param ref_id_label: string for reference station id column name in df
    :param sig_id_label: string for station id column name in df
    :param sig_tfr_label: string for tfr column name in df
    :param sig_tfr_frequency_label: string for tfr frequency column name in df
    :param sig_sample_rate_label: string for sample rate in Hz column name in df
    :param sig_tfr_frequency_low_hz_label: string for tfr low frequency values column name in df
    :param sig_tfr_frequency_high_hz_label: string for tfr high frequency values column name in df
    :param return_xcorr_full: default is False
    :param new_column_label_xcorr_offset_points: label for new column with xcorr offset points
    :param new_column_label_xcorr_offset_seconds: label for new column with xcorr offset seconds
    :param new_column_label_xcorr_normalized_max: label for new column with xcorr normalized max
    :param new_column_label_xcorr_peak_frequency_hz: label for new column with xcorr peak frequency
    :param new_column_label_xcorr_full_array: label for new column with xcorr full array
    :param new_column_label_xcorr_full_per_band: label for new column with xcorr full per band
    :param new_column_label_xcorr_full_frequency_hz: label for new column with xcorr frequencies
    :return: input df with new columns
    """
    # Have to learn how to use/validate correlate2D
    number_sig = len(df.index)
    print("SPECTCORR number of signals:", number_sig)
    # M is the reference station
    m_list = df.index[df[sig_id_label] == ref_id_label]
    # NOTE(review): m_list[0] raises IndexError when no station matches, so the
    # 'Incorrect reference station id' else-branch below can never run — and
    # raising Warning here aborts despite promising to pick the first instance.
    m = m_list[0]
    if len(m_list) > 1:
        raise Warning("More than one station meets the id spec. Picked first instance")
    # Find frequency edges of the reference passband.
    # NOTE(review): find_nearest(array, value) is called here with the scalar
    # band edge as `array` and the frequency vector as `value` — confirm the
    # argument order is intentional (it relies on broadcasting).
    freq_index_low = find_nearest(df[sig_tfr_frequency_low_hz_label][m], df[sig_tfr_frequency_label][m])
    freq_index_high = find_nearest(df[sig_tfr_frequency_high_hz_label][m], df[sig_tfr_frequency_label][m])
    print(freq_index_low, freq_index_high)
    # Initialize per-station result accumulators
    xcorr_offset_points = []
    xcorr_offset_seconds = []
    xcorr_normalized_max = []
    xcorr_peak_frequency_hz = []
    xcorr_full = []
    xcorr_full_per_band = []
    xcorr_full_frequency = []
    if m is not None:
        print("XCORR Reference station ", df[sig_id_label][m])
        # Extract the passband of interest (copy so the stored TFR is untouched)
        ref_tfr_m = np.copy(df[sig_tfr_label][m])[freq_index_low:freq_index_high, :]
        spect_corr_frequency = np.copy(df[sig_tfr_frequency_label][m])[freq_index_low:freq_index_high]
        # Improve error check
        ref_rows, ref_columns = ref_tfr_m.shape
        # MUST have equal rectangular matrices
        # Cross correlation is centered in the middle of the record and has length n_points
        # Fastest, o(NX) and can use FFT solution
        if ref_columns % 2 == 0:
            xcorr_index = np.arange(-int(ref_columns/2), int(ref_columns/2))
        else:
            xcorr_index = np.arange(-int(ref_columns/2), int(ref_columns/2)+1)
        # Index matrix: one row of lag indexes per frequency band
        xcorr_index_mat = np.tile(xcorr_index, (ref_rows, 1))
        # Shift an all-nonpositive TFR so there is a positive component to correlate
        if np.amax(ref_tfr_m) <= 0:
            ref_tfr_m -= np.min(ref_tfr_m)
        for n in df.index:
            # Generalized sensor cross correlations, including unequal time lengths
            sig_tfr_n = np.copy(df[sig_tfr_label][n])[freq_index_low:freq_index_high, :]
            n_rows, n_columns = sig_tfr_n.shape
            # Skip stations whose TFR grid does not match the reference grid.
            # NOTE(review): skipped stations are not appended, so the column
            # assignments below will fail on a length mismatch if any station
            # is skipped — confirm upstream guarantees matching grids.
            if n_rows != ref_rows:
                print("TFR does not have the same frequency dimensions:", df[sig_id_label][n])
                continue
            if n_columns != ref_columns:
                print("TFR does not have the same time grid dimensions:", df[sig_id_label][n])
                continue
            # Frequency-by-frequency correlation accumulators
            spect_corr = np.zeros(xcorr_index_mat.shape)
            spect_corr_per_band = np.zeros(xcorr_index_mat.shape)
            # Condition so there is always a positive component
            if np.amax(sig_tfr_n) <= 0:
                sig_tfr_n -= np.min(sig_tfr_n)
            # normalize per band
            for k in np.arange(ref_rows):
                spect_corr[k, :] = signal.correlate(ref_tfr_m[k, :], sig_tfr_n[k, :], mode='same')
                spect_corr_per_band[k, :] = spect_corr[k, :]/np.max(np.abs(spect_corr[k, :]))
            # Normalize by max over the whole passband
            spect_corr /= np.max(np.abs(spect_corr))
            # Peak location over (frequency band, time lag)
            frequency_index, time_index = np.unravel_index(np.argmax(spect_corr), spect_corr.shape)
            # After the max-normalization above the peak is 1 by construction
            spect_xcorr_normalized_max = 1.  # By definition, refine
            # Main export parameters
            xcorr_normalized_max.append(spect_xcorr_normalized_max)
            xcorr_offset_points.append(xcorr_index_mat[frequency_index, time_index])
            xcorr_offset_seconds.append(xcorr_index_mat[frequency_index, time_index]/df[sig_sample_rate_label][n])
            xcorr_peak_frequency_hz.append(spect_corr_frequency[frequency_index])
            if return_xcorr_full:
                xcorr_full.append(spect_corr)
                xcorr_full_per_band.append(spect_corr_per_band)
                xcorr_full_frequency.append(spect_corr_frequency)
        # Convert to columns and add it to df
        df[new_column_label_xcorr_normalized_max] = xcorr_normalized_max
        df[new_column_label_xcorr_offset_points] = xcorr_offset_points
        df[new_column_label_xcorr_offset_seconds] = xcorr_offset_seconds
        df[new_column_label_xcorr_peak_frequency_hz] = xcorr_peak_frequency_hz
        if return_xcorr_full:
            df[new_column_label_xcorr_full_array] = xcorr_full
            df[new_column_label_xcorr_full_per_band] = xcorr_full_per_band
            df[new_column_label_xcorr_full_frequency_hz] = xcorr_full_frequency
    else:
        print('ERROR: Incorrect reference station id')
        exit()
    return df
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_xcorr.py
0.905674
0.734239
redpd_xcorr.py
pypi
import matplotlib.pyplot as plt
from matplotlib.figure import Figure


def plot_psd_coh(psd_sig, psd_ref, coherence_sig_ref, f_hz, f_min_hz, f_max_hz,
                 f_scale: str = "log",
                 sig_label: str = "PSD Sig",
                 ref_label: str = "PSD Ref",
                 psd_label: str = "PSD (bits)",
                 coh_label: str = "Coherence",
                 f_label: str = "Frequency (Hz)",
                 fig_title: str = "Power spectral density and coherence",
                 show_figure: bool = True) -> Figure:
    """
    Plot coherence and power spectral density.

    :param psd_sig: Power spectral density of signal
    :param psd_ref: Power spectral density of reference signal
    :param coherence_sig_ref: magnitude squared coherence of x and y
    :param f_hz: sample frequencies of PSD
    :param f_min_hz: minimum frequency to plot in Hz (x min limit)
    :param f_max_hz: maximum frequency to plot in Hz (x max limit)
    :param f_scale: scale of x axis. One of {"linear", "log", "symlog", "logit"}. Default is "log"
    :param sig_label: label for signal. Default is "PSD Sig"
    :param ref_label: label for reference. Default is "PSD Ref"
    :param psd_label: label for PSD. Default is "PSD (bits)"
    :param coh_label: label for coherence. Default is "Coherence"
    :param f_label: x axis label. Default is "Frequency (Hz)"
    :param fig_title: title of figure. Default is "Power spectral density and coherence"
    :param show_figure: show figure if True. Default is True
    :return: matplotlib figure instance
    """
    # Plot PSDs in the top panel
    fig1 = plt.figure()
    fig1.set_size_inches(8, 6)
    plt.clf()
    ax1 = plt.subplot(211)
    ax1.plot(f_hz, psd_ref, 'r-', linewidth=2, label=ref_label)
    ax1.plot(f_hz, psd_sig, 'k-', label=sig_label)
    ax1.set_xscale(f_scale)
    ax1.legend()
    ax1.set_xlim([f_min_hz, f_max_hz])
    ax1.set_ylim([-16, 1])
    ax1.set_ylabel(psd_label)
    ax1.grid('on', which='both')
    ax1.set_title(fig_title)

    # Coherence in the bottom panel
    ax2 = plt.subplot(212)
    ax2.plot(f_hz, coherence_sig_ref, 'k-')
    ax2.set_xscale(f_scale)
    ax2.set_xlim([f_min_hz, f_max_hz])
    # BUG FIX: the original re-applied ax1.set_ylim([-16, 1]) here (a no-op
    # copy-paste) and never set limits on the coherence axis; magnitude
    # squared coherence is bounded to [0, 1].
    ax2.set_ylim([0, 1])
    ax2.set_xlabel(f_label)
    ax2.set_ylabel(coh_label)
    ax2.grid('on', which='both')
    if show_figure is True:
        plt.show()
    return fig1


def plot_response_scatter(h_magnitude, h_phase_deg, color_guide, f_hz, f_min_hz, f_max_hz,
                          f_scale: str = 'log',
                          fig_title: str = 'Response only valid at high coherence',
                          show_figure: bool = True) -> Figure:
    """
    Plot coherence response: magnitude and phase scatter colored by a guide value.

    :param h_magnitude: magnitude, e.g. absolute magnitude of response (PSD / cross-PSD)
    :param h_phase_deg: coherence phase degrees
    :param color_guide: per-point color values, e.g. magnitude squared coherence of x and y
    :param f_hz: frequency of coherence in Hz
    :param f_min_hz: minimum frequency to plot in Hz (x min limit)
    :param f_max_hz: maximum frequency to plot in Hz (x max limit)
    :param f_scale: scale of x axis. One of {"linear", "log", "symlog", "logit"}. Default is "log"
    :param fig_title: title of figure
    :param show_figure: show figure if True. Default is True
    :return: matplotlib figure instance
    """
    # Magnitude on top, phase below; both share the coherence color scale
    fig = plt.figure()
    fig.set_size_inches(8, 6)
    ax1 = plt.subplot(211)
    im1 = ax1.scatter(x=f_hz, y=h_magnitude, c=color_guide, marker='o')
    ax1.set_xscale(f_scale)
    ax1.set_xlim([f_min_hz, f_max_hz])
    ax1.grid('on', which='both')
    hc = fig.colorbar(im1)
    hc.set_label('Coherence')
    ax1.set_ylabel('Magnitude ')
    ax1.set_title(fig_title)

    ax2 = plt.subplot(212)
    im2 = ax2.scatter(x=f_hz, y=h_phase_deg, c=color_guide, marker='o')
    ax2.set_xscale(f_scale)
    ax2.set_xlim([f_min_hz, f_max_hz])
    ax2.grid('on', which='both')
    ax2.set_xlabel('Frequency [Hz]')
    ax2.set_ylabel('Phase [deg]')
    hc = plt.colorbar(im2)
    hc.set_label('Coherence')
    if show_figure is True:
        plt.show()
    return fig
/redvox-pandas-1.4.8.tar.gz/redvox-pandas-1.4.8/redpandas/redpd_plot/coherence.py
0.931279
0.6747
coherence.py
pypi
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd


def rf_clf(dfs, feature_lists, y_val, plt_max_depth=30):
    '''
    Fit random forest classifiers per dataframe, plot feature importances,
    and grid-search max_depth on a held-out split.

    Parameters
    ----------
    dfs : iterable of pandas dataframes
    feature_lists : list of lists of strings
        Each string must be a column in the matching dataframe.
    y_val : string
        Target variable.
    plt_max_depth : int, optional
        Max depth of the classifier used for the feature-importance plot.
        The default is 30.

    Returns
    -------
    models : list of (fitted RandomForestClassifier, dict of depth -> accuracy)
    '''
    # Deferred imports: only this function needs sklearn/seaborn.
    from sklearn.ensemble import RandomForestClassifier
    from sklearn import metrics
    import seaborn as sns

    models = []
    for df, feature_list in zip(dfs, feature_lists):
        x_feat_list = feature_list
        # Scale each feature by its std (no centering) and carry the target.
        df_standard = df[x_feat_list] / df[x_feat_list].std()
        df_standard[y_val] = df[y_val]

        # Quick fit on a 25% sample just to plot feature importances.
        sample = df_standard.sample(frac=.25)
        x = sample.loc[:, x_feat_list].values
        y = sample.loc[:, y_val].values
        importance_clf = RandomForestClassifier(max_depth=plt_max_depth, n_estimators=100)
        importance_clf.fit(x, y)
        sns.set()
        plot_feat_import(x_feat_list, importance_clf.feature_importances_)

        # Train on a 30% sample; the remaining data is the testing data.
        training_df = df_standard.sample(frac=0.3)
        testing_df = df_standard.drop(training_df.index)
        X = training_df[x_feat_list]
        Y = training_df[y_val]
        x = testing_df[x_feat_list]
        y_true = testing_df[y_val]

        ac_scores = {}
        best_depth = 0
        best_ac = 0
        for depth in range(10, 70, 5):
            # BUG FIX: the original fit every candidate with max_depth=max_depth
            # (the plotting depth), so the depth search never varied anything.
            candidate = RandomForestClassifier(max_depth=depth, n_estimators=100)
            candidate.fit(X, Y)
            y_pred = candidate.predict(x)
            ac_score = metrics.accuracy_score(y_true, y_pred)
            ac_scores[depth] = ac_score
            if ac_score > best_ac:
                # BUG FIX: best_ac was never updated, so best_depth simply
                # tracked the last depth whose score beat zero.
                best_ac = ac_score
                best_depth = depth

        best_clf = RandomForestClassifier(max_depth=best_depth, n_estimators=100)
        best_clf.fit(X, Y)
        models.append((best_clf, ac_scores))
    return models


def plot_feat_import(feat_list, feat_import, sort=True, limit=None):
    """
    Plot feature importances in a horizontal bar chart.

    Args:
        feat_list (list): str names of features
        feat_import (np.array): feature importances (mean gini reduce)
        sort (bool): if True, sorts features in decreasing importance from top
            to bottom of plot
        limit (int): if passed, limits the number of features shown to this value
    """
    if sort:
        # sort features in decreasing importance
        idx = np.argsort(feat_import).astype(int)
        feat_list = [feat_list[_idx] for _idx in idx]
        feat_import = feat_import[idx]
    if limit is not None:
        # limit to the first `limit` features
        feat_list = feat_list[:limit]
        feat_import = feat_import[:limit]
    # plot and label feature importance
    plt.barh(feat_list, feat_import)
    plt.gcf().set_size_inches(5, len(feat_list) / 2)
    plt.xlabel('Feature importance\n(Mean decrease in Gini across all Decision Trees)')


def inning_game_ids(pxp):
    """
    Categorize plays based on game names and innings.

    pxp: DataFrame of play by play data (reads the 'Notes' and 'Inning' columns)
    Returns: DataFrame of categorized plays with game_id and inning_id columns
    """
    plays = []
    game_id = 0
    inning_id = 0
    game = None
    inning = None
    for play in pxp.to_dict('records'):
        current_game = play.get('Notes')
        current_inning = play.get('Inning')
        if current_game != game:
            # New game also implies a new inning
            game_id += 1
            inning_id += 1
            game = current_game
            inning = current_inning
        elif current_inning != inning:
            inning_id += 1
            inning = current_inning
        play['game_id'] = game_id
        play['inning_id'] = inning_id
        plays.append(play)
    return pd.DataFrame(plays)


def base_count_score(pxp):
    """
    Process play-by-play data by splitting scores, counts, and runners on base.

    pxp: DataFrame of play by play data (reads 'Score', 'Inning', 'Count',
        'Runners On Base')
    Returns: DataFrame with added columns bat_score, balls, strikes,
        on_1b, on_2b, on_3b
    """
    data = pxp.to_dict('records')
    for play in data:
        score = play['Score'].split('-')
        # Top of the inning means the away (first-listed) team is batting
        if play['Inning'][0] == 'T':
            play['bat_score'] = int(score[0])
        else:
            play['bat_score'] = int(score[1])
        count = play['Count'].split('-')
        play['balls'] = count[0]
        play['strikes'] = count[1]
        play['on_1b'] = 1 if '1B' in play['Runners On Base'] else 0
        play['on_2b'] = 1 if '2B' in play['Runners On Base'] else 0
        play['on_3b'] = 1 if '3B' in play['Runners On Base'] else 0
    return pd.DataFrame(data)


def calculate_scores(pxp):
    """
    Calculate runs scored and delta run expectancy for each play.

    pxp: DataFrame of play by play data (needs inning_id, game_id, bat_score,
        run_expectancy columns)
    Returns: DataFrame of plays with runs_scored and delta_run_expectancy columns
    """
    pxp['runs_scored'] = pxp['bat_score'].shift(-1) - pxp['bat_score']
    # No runs carry across an inning or game boundary
    mask = (pxp['inning_id'].eq(pxp['inning_id'].shift(-1))) & (pxp['game_id'].eq(pxp['game_id'].shift(-1)))
    pxp.loc[~mask, 'runs_scored'] = 0
    mask = pxp['inning_id'].eq(pxp['inning_id'].shift(-1))
    # NOTE(review): shifting the masked selection (pxp.loc[mask, ...].shift(-1))
    # skips the rows excluded by the mask, which differs from shifting the full
    # column and then masking — confirm this alignment is intended.
    pxp.loc[mask, 'delta_run_expectancy'] = pxp.loc[mask, 'run_expectancy'].shift(
        -1) - pxp['run_expectancy'] + pxp.loc[mask, 'runs_scored']
    pxp.loc[~mask, 'delta_run_expectancy'] -= pxp.loc[~mask, 'run_expectancy']
    return pxp


def slope_intercept(df, x, y):
    '''
    Fit a simple linear regression y ~ x.

    Parameters
    ----------
    df : DataFrame
        A dataframe with an x column and y column.
    x : String
        Name of column in df that is the independent variable.
    y : String
        Name of column in df that is the target variable.

    Returns
    -------
    intercept : float
    coefficient : float
    '''
    # Deferred import: only this function needs sklearn.
    from sklearn.linear_model import LinearRegression

    X = df[[x]]
    y = df[y]
    model = LinearRegression().fit(X, y)
    intercept = model.intercept_
    coefficient = model.coef_[0]
    return intercept, coefficient


def linnear_func(df, x, y, intercept, coefficient):
    '''
    Evaluate f(x) = coefficient * x + intercept, clipped at zero, into column y.

    Parameters
    ----------
    df : DataFrame
        A dataframe with an x column; column y is created/overwritten.
    x : String
        Name of column in df that is the independent variable.
    y : String
        Name of column in df that is the target variable.
    intercept : float
    coefficient : float

    Returns
    -------
    Series
        The y predicted from f(x), clipped so no value is below zero.
    '''
    df[y] = df[x] * coefficient + intercept
    df[y] = df[y].clip(lower=0)
    return df[y]
/reece_calvin-0.0.2-py3-none-any.whl/reece_calvin.py
0.813424
0.4953
reece_calvin.py
pypi
import pandas as pd

from reeco_ml_preprocessing.base import BasePreprocessor
from reeco_ml_preprocessing.time_series.simple_imputer import _mean_fill, _median_fill, _backward_fill, _forward_fill


class TimeSeriesImputer(BasePreprocessor):
    """Impute missing values in a time-series dataframe with a chosen strategy."""

    # Strategies accepted by `method`
    _VALID_METHODS = ("forward", "backward", "median", "mean", "linear")

    def __init__(self, method):
        """
        Method initialization for imputer.

        Args:
        -------
        method: String
            Custom kind to fill missing value.
            'linear': Use linear interpolation within each column.
            'backward': Use backward fill. Apply forward fill with remaining
                missing values in last columns.
            'forward': Use forward fill. Apply backward fill with remaining
                missing values in first columns.
            'mean': Replace by mean.
            'median': Replace by median.
        """
        self.method = method
        self.columns_ = None
        self.indices_ = None

    def fit(self, X: pd.DataFrame = None):
        """
        Validate the configured method.

        ``X`` is accepted (and ignored) for API symmetry with
        fit_transform/transform; the default keeps the original
        zero-argument call working.
        """
        if self.method not in self._VALID_METHODS:
            # BUG FIX (message grammar): was "Method {} is not exists"
            raise ValueError("Method {} does not exist".format(self.method))
        return self

    def fit_transform(self, X: pd.DataFrame):
        """
        Fill missing values and remember the frame's columns and indices
        for a later inverse transform.

        (The original docstring documented ``save_columns_and_indices`` and
        ``create_date_col`` parameters that do not exist; removed.)

        Args:
        -------
        X: DataFrame
            Samples.

        Returns:
        -------
        X_new: DataFrame
            Samples with filled values.
        """
        self.fit()
        X_new = self._impute(X, self.method)
        self.columns_ = X.columns
        self.indices_ = X.index
        return X_new

    def transform(self, X: pd.DataFrame):
        """Fill missing values with the configured method; remembers indices only."""
        self.indices_ = X.index
        X_new = self._impute(X, self.method)
        return X_new

    def _impute(self, X: pd.DataFrame, method: str):
        """Dispatch to the fill implementation; any values still missing
        afterwards are zero-filled."""
        if method == "forward":
            X = _forward_fill(X)
        elif method == "backward":
            X = _backward_fill(X)
        elif method == "median":
            X = _median_fill(X)
        elif method == "mean":
            X = _mean_fill(X)
        elif method == "linear":
            X = self._linear_fill(X)
        else:
            # Defensive; unreachable after fit() validation.
            # BUG FIX (message grammar): was "Cannot found method {}"
            raise ValueError("Cannot find method {}".format(method))
        return X.fillna(0.0)

    def _linear_fill(self, X):
        """Linear interpolation; leading NaNs are backward-filled."""
        return _backward_fill(X.interpolate(method='linear'))
/reeco_ml_preprocessing-0.1.5.tar.gz/reeco_ml_preprocessing-0.1.5/reeco_ml_preprocessing/time_series/ts_imputer.py
0.917946
0.422326
ts_imputer.py
pypi
from typing import Tuple, Union

import numpy as np
import pandas as pd

from reeco_ml_preprocessing.base import BasePreprocessor


class SlidingWindow(BasePreprocessor):
    """
    For every Series containing values `(t_0, t_1, t_2,..., t_l)`, transform
    into two 2D frames with `slides` columns. For example `(0, 1, 2, 3, 4)`
    when transforming with `slides = 2` will return:

    ```
    np.array([
        [0, 1],
        [1, 2],
        [2, 3]
    ])
    ```
    and np.array([[2], [3], [4]])

    The SlidingWindow returns 3D numpy arrays for DataFrame input.

    Args:
    -------
    input_timesteps: int
        Input timesteps, i.e., number of columns in the past frame.
    output_timesteps: int. Deprecated
        Output timesteps, i.e., number of desired timesteps to be predicted.
    target: str
        Name of the target column used to build the future frame.
    """

    def __init__(self, input_timesteps: int, output_timesteps: int, target: str):
        self.target = target
        self.input_timesteps = input_timesteps
        self.output_timesteps = output_timesteps

    def fit_transform(self, X: Union[pd.Series, pd.DataFrame]) -> Tuple[np.ndarray, np.ndarray]:
        """Window X into (examples, attributes, input_timesteps) plus targets."""
        if isinstance(X, pd.Series):
            # BUG FIX: get_historical_attributes already returns an ndarray
            # (df.values); the original called `.values` on it again, which
            # raised AttributeError for every Series input.
            rnp = self.get_historical_attributes(X)
            return (rnp, rnp)
        elif isinstance(X, pd.DataFrame):
            n_examples = X.shape[0] - self.input_timesteps - self.output_timesteps + 1
            n_attr = len(X.columns)
            if n_examples < 1:
                # Not enough history for a full window: emit a single example,
                # with no target frame.
                # NOTE(review): this assigns a length-X.shape[0] vector into a
                # row of width input_timesteps, which only works when
                # X.shape[0] == input_timesteps — confirm upstream guarantees.
                rnp = np.zeros((1, n_attr, self.input_timesteps))
                for i, col in enumerate(X.columns):
                    rnp[:, i, :] = X[col].replace([np.inf, -np.inf], 0.0).fillna(0.0).T.values
                return rnp, None
            rnp = np.zeros((n_examples, n_attr, self.input_timesteps))
            tnp = np.zeros((n_examples, self.output_timesteps))
            for i, col in enumerate(X.columns):
                rnp[:, i, :] = self.get_historical_attributes(X[col])
            tnp[:] = self.get_historical_attributes(X[self.target], target=True)
            return (rnp, tnp)

    def transform(self, X: pd.Series) -> Tuple[np.ndarray, np.ndarray]:
        """Stateless: identical to fit_transform."""
        return self.fit_transform(X)

    def get_historical_attributes(self, series: pd.Series, target: bool = False) -> np.ndarray:
        """
        Build the lagged (windowed) view of one series as a 2D ndarray.

        :param series: input series
        :param target: if True, return the future (prediction) window;
            otherwise the past window ordered oldest -> newest
        :return: 2D ndarray of windowed values
        """
        # BUG FIX: fillna was applied with inplace=True, silently mutating the
        # caller's series (and thereby the source dataframe column).
        series = series.fillna(0.0)
        df = pd.DataFrame()
        n_features = self.input_timesteps + self.output_timesteps
        for i in range(n_features):
            df[f'lag-{i}-{series.name}'] = series.shift(i)
        df.dropna(inplace=True)
        if not target:
            # Past window columns, reversed so time runs oldest -> newest
            df = df.iloc[:, :self.input_timesteps].iloc[:, ::-1]
        else:
            # Future (prediction) window
            df = df.iloc[:, -self.output_timesteps:]
        return df.values
/reeco_ml_preprocessing-0.1.5.tar.gz/reeco_ml_preprocessing-0.1.5/reeco_ml_preprocessing/time_series/sliding_window.py
0.930703
0.926769
sliding_window.py
pypi
from typing import Union

import pandas as pd


class TimeSeriesCleaner():
    """
    The TimeSeriesCleaner performs the must-do preprocessing steps for
    timeseries data.  These include:

    1. Set the datetime column as index.
    2. Remove rows that contain no data at all.
    3. Align the datetime index to a fixed sampling grid.
    """

    def __init__(
        self,
        date_col: Union[str, pd.Index],
        label_col: Union[str, pd.Index],
        sampling_rule: str,
        time_lag: str,
        limit: int = None
    ):
        self.date_col = date_col
        self.label_col = label_col
        self.sampling_rule = sampling_rule
        self.time_lag = time_lag
        self.limit = limit

    def remove_null_rows(self, X):
        """Drop rows in which every column is NaN."""
        return X.dropna(how='all')

    def set_index(self, X, date_col) -> pd.DataFrame:
        """Parse ``date_col`` as datetime and set it as a sorted index.

        Raises:
            KeyError: if ``date_col`` is not a column of ``X``.
            ValueError: if the column cannot be parsed as datetimes.
        """
        if date_col not in X.columns:
            raise KeyError("{} does not appear in your data columns".format(date_col))
        # Work on a copy so the caller's DataFrame is not mutated by the
        # datetime conversion below (the original assigned into X in place).
        X = X.copy()
        try:
            # Check if the column can be converted to datetime
            X[date_col] = pd.to_datetime(X[date_col])
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; catch normal errors and chain them.
        except Exception as exc:
            raise ValueError("The expected date time column does not have the correct format. \
                Try to choose different column or change its format.") from exc
        return X.set_index(date_col).sort_index()

    def align_time(self, X, sampling_rule: str, time_lag: str, limit: int = None):
        """Align the index to a regular ``sampling_rule`` grid.

        Each column is re-indexed onto the grid using nearest-neighbour
        matching within ``time_lag``; if ``limit`` is given only the last
        ``limit`` rows are kept.
        """
        # NOTE: the original computed an unused ``base`` variable here; removed.
        index = pd.date_range(
            start=X.index[0],
            end=X.index[-1],
            freq=sampling_rule
        )
        X = X.apply(lambda x: x.reindex(
            index,
            method='nearest',
            tolerance=time_lag
        ))
        if limit is not None:
            X = X[-limit:]
        return X

    def fit(self, X):
        X = self.set_index(X, self.date_col)
        X = self.remove_null_rows(X)
        X = self.align_time(X, self.sampling_rule, self.time_lag, self.limit)
        return X

    def fit_transform(self, X):
        return self.fit(X)

    def transform(self, X):
        return self.fit(X)
/reeco_ml_preprocessing-0.1.5.tar.gz/reeco_ml_preprocessing-0.1.5/reeco_ml_preprocessing/time_series/cleaner.py
0.945121
0.50891
cleaner.py
pypi
from abc import ABC
from typing import (
    Union,
    Optional,
)
from dataclasses import (
    dataclass,
    field,
)

from .meta import (
    TransMessageType,
    NoneType,
)
from .base import (
    IApplySession,
    ValidationFailure,
)
from .valid_base import (
    ValidationBase,
)
from .expressions import (
    DotExpression,
)
from .exceptions import RuleSetupError
from .utils import (
    to_int,
    message_truncate,
)


class FieldValidationBase(ValidationBase, ABC):
    # Marker base class for validations that run on a single field.
    ...


@dataclass
class FieldValidation(FieldValidationBase):
    """ Generic validation run on a field. """
    ensure: DotExpression
    name: Optional[str] = field(default=None)
    error: Optional[TransMessageType] = field(repr=False, default=None)
    available: Optional[Union[bool, DotExpression]] = field(repr=False, default=True)
    title: Optional[TransMessageType] = field(repr=False, default=None)

    def validate(self, apply_session: IApplySession) -> Union[NoneType, ValidationFailure]:
        return self._validate_common_impl(apply_session=apply_session)


@dataclass
class Required(FieldValidationBase):
    """ Fails when the field's current value is None. """
    name: Optional[str] = None
    error: Optional[TransMessageType] = field(repr=False, default=None)
    available: Optional[Union[bool, DotExpression]] = field(repr=False, default=True)
    title: Optional[TransMessageType] = field(repr=False, default=None)

    def __post_init__(self):
        if not self.error:
            self.error = "The value is required"
        super().__post_init__()

    def validate(self, apply_session: IApplySession) -> Optional[ValidationFailure]:
        component = apply_session.current_frame.component
        value = apply_session.get_current_value(component, strict=False)
        if value is None:
            return ValidationFailure(
                component_key_string = apply_session.get_key_string(component),
                error=self.error,
                validation_name=self.name,
                validation_title=self.title,
                details="The value is required."
            )
        return None


@dataclass
class Readonly(FieldValidationBase):
    """
    After being filled with an initial value, the value can not be changed.
    Synonym for: Editable(False) / Frozen.
    """
    # TODO: check if autocomputed = ALWAYS should not be enabled
    value: Optional[Union[bool, DotExpression]] = True
    name: Optional[str] = None
    error: Optional[TransMessageType] = field(repr=False, default=None)
    available: Optional[Union[bool, DotExpression]] = field(repr=False, default=True)
    title: Optional[TransMessageType] = field(repr=False, default=None)

    def __post_init__(self):
        if not isinstance(self.value, (bool, DotExpression)):
            raise RuleSetupError(owner=self, msg=f"ensure must be DotExpression or bool, got: {type(self.value)} / {self.value}")
        if not self.error:
            self.error = "The value is readonly"
        super().__post_init__()

    def validate(self, apply_session: IApplySession) -> Optional[ValidationFailure]:
        component = apply_session.current_frame.component

        # The readonly flag may itself be a DotExpression; evaluate it lazily.
        if isinstance(self.value, DotExpression):
            dexp_result = self.value._evaluator.execute_dexp(apply_session)
            is_readonly = dexp_result.value
        else:
            is_readonly = self.value

        if not is_readonly:
            return None

        key_string = apply_session.get_key_string(component)
        update_history = apply_session.update_history.get(key_string)
        if update_history and len(update_history) > 1:
            initial_value = update_history[0].value
            last_value = update_history[-1].value
            if initial_value != last_value:
                # BUG FIX: the original details strings lacked the ``f``
                # prefix, so the {message_truncate(...)} placeholders were
                # emitted literally instead of being interpolated.
                return ValidationFailure(
                    component_key_string = key_string,
                    error=self.error,
                    validation_name=self.name,
                    validation_title=self.title,
                    details="Readonly value is changed"
                            f" from '{message_truncate(initial_value, 15)}'"
                            f" to '{message_truncate(last_value, 15)}'"
                )
        return None


@dataclass
class ExactLength(FieldValidationBase):
    """ Fails when len(value) differs from the configured length. """
    value: Union[int, DotExpression]
    name: Optional[str] = None
    error: Optional[TransMessageType] = field(repr=False, default=None)
    available: Optional[Union[bool, DotExpression]] = field(repr=False, default=True)
    title: Optional[TransMessageType] = field(repr=False, default=None)

    def __post_init__(self):
        self._check_dot_expression_or_positive_int("value", self.value)
        if not self.error:
            # BUG FIX: default message said "at most" (copy-paste from
            # MaxLength) although the check is for an exact length.
            self.error = f"Provide value with length of exactly {self.value}"
        super().__post_init__()

    def validate(self, apply_session: IApplySession) -> Optional[ValidationFailure]:
        component = apply_session.current_frame.component
        value = apply_session.get_current_value(component, strict=False)
        if value and hasattr(value, "__len__") and len(value) != self.value:
            return ValidationFailure(
                component_key_string = apply_session.get_key_string(component),
                error=self.error,
                validation_name=self.name,
                validation_title=self.title,
                details=f"Value's length of {len(value)} must be exactly {self.value}"
                        f" (value is '{message_truncate(value)}')"
            )
        return None


@dataclass
class MaxLength(FieldValidationBase):
    """ Fails when len(value) exceeds the configured maximum. """
    value: Union[int, DotExpression]
    name: Optional[str] = None
    error: Optional[TransMessageType] = field(repr=False, default=None)
    available: Optional[Union[bool, DotExpression]] = field(repr=False, default=True)
    title: Optional[TransMessageType] = field(repr=False, default=None)

    def __post_init__(self):
        self._check_dot_expression_or_positive_int("value", self.value)
        if not self.error:
            self.error = f"Provide value with length at most {self.value}"
        super().__post_init__()

    def validate(self, apply_session: IApplySession) -> Optional[ValidationFailure]:
        component = apply_session.current_frame.component
        value = apply_session.get_current_value(component, strict=False)
        if value and hasattr(value, "__len__") and len(value) > self.value:
            return ValidationFailure(
                component_key_string = apply_session.get_key_string(component),
                error=self.error,
                validation_name=self.name,
                validation_title=self.title,
                details=f"Value's length of {len(value)} is greater of maximum allowed {self.value}"
                        f" (value is '{message_truncate(value)}')"
            )
        return None


@dataclass
class MinLength(FieldValidationBase):
    """ Fails when len(value) is below the configured minimum. """
    value: Union[int, DotExpression]
    name: Optional[str] = None
    error: Optional[TransMessageType] = field(repr=False, default=None)
    available: Optional[Union[bool, DotExpression]] = field(repr=False, default=True)
    title: Optional[TransMessageType] = field(repr=False, default=None)

    def __post_init__(self):
        self._check_dot_expression_or_positive_int("value", self.value)
        if not self.error:
            self.error = f"Provide value with length at least {self.value}"
        super().__post_init__()

    def validate(self, apply_session: IApplySession) -> Optional[ValidationFailure]:
        component = apply_session.current_frame.component
        value = apply_session.get_current_value(component, strict=False)
        if value and hasattr(value, "__len__") and len(value) < self.value:
            return ValidationFailure(
                component_key_string = apply_session.get_key_string(component),
                error=self.error,
                validation_name=self.name,
                validation_title=self.title,
                details=f"Value's length of {len(value)} is smaller of minimum allowed {self.value}"
                        f"({message_truncate(value)})")
        return None


@dataclass
class RangeLength(FieldValidationBase):
    """ Fails when len(value) falls outside [min, max]; either bound may be omitted. """
    # see MaxLength NOTE:
    min: Union[int, DotExpression]
    max: Union[int, DotExpression]
    name: Optional[str] = None
    error: Optional[TransMessageType] = field(repr=False, default=None)
    available: Optional[Union[bool, DotExpression]] = field(repr=False, default=True)
    title: Optional[TransMessageType] = field(repr=False, default=None)

    def __post_init__(self):
        if self.min is None and self.max is None:
            raise RuleSetupError(owner=self, msg="Please provide min and/or max")
        if self.min is not None:
            self._check_dot_expression_or_positive_int("min", self.min)
        if self.max is not None:
            self._check_dot_expression_or_positive_int("max", self.max)
        # BUG FIX: the original guard ``if to_int(self.min) and self.max < self.min``
        # raised TypeError when only ``min`` was supplied (None comparison) and
        # was skipped for min == 0.  Compare only when both are plain ints
        # (to_int() keeps the DotExpression-skipping behaviour).
        if to_int(self.min) is not None and to_int(self.max) is not None \
                and self.max < self.min:
            raise RuleSetupError(owner=self, msg="Please provide min <= max")

        if not self.error:
            if self.min and self.max:
                self.error = f"Provide value with length between {self.min} and {self.max}"
            elif self.min:
                self.error = f"Provide value with length at least {self.min}"
            elif self.max:
                self.error = f"Provide value with length at most {self.max}"
            else:
                assert False
        super().__post_init__()

    # value: Any, component: "ComponentBase",
    def validate(self, apply_session: IApplySession) -> Optional[ValidationFailure]:
        component = apply_session.current_frame.component
        value = apply_session.get_current_value(component, strict=False)
        if hasattr(value, "__len__"):
            if self.min and value and len(value) < self.min:
                return ValidationFailure(
                    component_key_string = apply_session.get_key_string(component),
                    error=self.error,
                    validation_name=self.name,
                    validation_title=self.title,
                    details=f"Value's length of {len(value)} is smaller of minimum allowed {self.min}"
                            f"({message_truncate(value)})")
            elif self.max and value and len(value) > self.max:
                return ValidationFailure(
                    component_key_string = apply_session.get_key_string(component),
                    error=self.error,
                    validation_name=self.name,
                    validation_title=self.title,
                    details=f"Value's length of {len(value)} is greater of maximum allowed {self.max}"
                            f"({message_truncate(value)})")
        return None
/reedwolf.entities-0.7.0.tar.gz/reedwolf.entities-0.7.0/src/reedwolf/entities/valid_field.py
0.614625
0.171373
valid_field.py
pypi
import warnings
from typing import Optional

import base58
from hashlib import blake2b

from scalecodec.base import ScaleBytes, ScaleDecoder


def ss58_decode(address: str, valid_ss58_format: Optional[int] = None, valid_address_type=None) -> str:
    """
    Decodes given SS58 encoded address to an account ID

    Parameters
    ----------
    address: e.g. EaG2CRhJWPb7qmdcJvy3LiWdh26Jreu9Dx6R1rXxPmYXoDk
    valid_ss58_format: if given, the decoded format prefix must equal this value
    valid_address_type: deprecated alias of valid_ss58_format

    Returns
    -------
    Decoded string AccountId (hex, without '0x' prefix)

    Raises
    ------
    ValueError: on reserved/unexpected format, bad length or bad checksum
    """

    # Check if address is already decoded
    if address.startswith('0x'):
        return address

    if valid_address_type is not None:
        warnings.warn("Keyword 'valid_address_type' will be replaced by 'valid_ss58_format'", DeprecationWarning)
        valid_ss58_format = valid_address_type

    # Checksum is blake2b over this constant prefix + the payload.
    checksum_prefix = b'SS58PRE'

    address_decoded = base58.b58decode(address)

    # Bit 6 of the first byte set => two-byte format prefix (formats 64..16383),
    # stored in a bit-shuffled little-endian-ish layout; otherwise the first
    # byte alone is the format.
    if address_decoded[0] & 0b0100_0000:
        ss58_format_length = 2
        ss58_format = ((address_decoded[0] & 0b0011_1111) << 2) | (address_decoded[1] >> 6) | \
                      ((address_decoded[1] & 0b0011_1111) << 8)
    else:
        ss58_format_length = 1
        ss58_format = address_decoded[0]

    # Formats 46 and 47 are reserved and must never appear in real addresses.
    if ss58_format in [46, 47]:
        raise ValueError(f"{ss58_format} is a reserved SS58 format")

    if valid_ss58_format is not None and ss58_format != valid_ss58_format:
        raise ValueError("Invalid SS58 format")

    # Determine checksum length according to length of address string
    # (each total length maps to exactly one checksum size).
    if len(address_decoded) in [3, 4, 6, 10]:
        checksum_length = 1
    elif len(address_decoded) in [5, 7, 11, 34 + ss58_format_length, 35 + ss58_format_length]:
        checksum_length = 2
    elif len(address_decoded) in [8, 12]:
        checksum_length = 3
    elif len(address_decoded) in [9, 13]:
        checksum_length = 4
    elif len(address_decoded) in [14]:
        checksum_length = 5
    elif len(address_decoded) in [15]:
        checksum_length = 6
    elif len(address_decoded) in [16]:
        checksum_length = 7
    elif len(address_decoded) in [17]:
        checksum_length = 8
    else:
        raise ValueError("Invalid address length")

    # Recompute and compare the trailing checksum bytes.
    checksum = blake2b(checksum_prefix + address_decoded[0:-checksum_length]).digest()

    if checksum[0:checksum_length] != address_decoded[-checksum_length:]:
        raise ValueError("Invalid checksum")

    # Strip format prefix and checksum, return the raw account ID as hex.
    return address_decoded[ss58_format_length:len(address_decoded)-checksum_length].hex()


def ss58_encode(address: str, ss58_format: int = 42, address_type=None) -> str:
    """
    Encodes an account ID to an Substrate address according to provided address_type

    Parameters
    ----------
    address: account ID as hex string (optionally '0x'-prefixed) or raw bytes
    ss58_format: format prefix to embed (0..16383, excluding reserved 46/47)
    address_type: (deprecated) alias of ss58_format

    Returns
    -------
    Base58-encoded SS58 address string
    """
    checksum_prefix = b'SS58PRE'

    if address_type is not None:
        warnings.warn("Keyword 'address_type' will be replaced by 'ss58_format'", DeprecationWarning)
        ss58_format = address_type

    if ss58_format < 0 or ss58_format > 16383 or ss58_format in [46, 47]:
        raise ValueError("Invalid value for ss58_format")

    if type(address) is bytes or type(address) is bytearray:
        address_bytes = address
    else:
        address_bytes = bytes.fromhex(address.replace('0x', ''))

    if len(address_bytes) in [32, 33]:
        # Checksum size is 2 bytes for public key
        checksum_length = 2
    elif len(address_bytes) in [1, 2, 4, 8]:
        # Checksum size is 1 byte for account index
        checksum_length = 1
    else:
        raise ValueError("Invalid length for address")

    if ss58_format < 64:
        # Single-byte format prefix.
        ss58_format_bytes = bytes([ss58_format])
    else:
        # Two-byte prefix: bit 6 of the first byte flags the long form; the
        # 14-bit format value is split across the remaining bits (mirror of
        # the decode logic in ss58_decode above).
        ss58_format_bytes = bytes([
            ((ss58_format & 0b0000_0000_1111_1100) >> 2) | 0b0100_0000,
            (ss58_format >> 8) | ((ss58_format & 0b0000_0000_0000_0011) << 6)
        ])

    input_bytes = ss58_format_bytes + address_bytes
    checksum = blake2b(checksum_prefix + input_bytes).digest()

    return base58.b58encode(input_bytes + checksum[:checksum_length]).decode()


def ss58_encode_account_index(account_index: int, ss58_format: int = 42, address_type=None) -> str:
    """
    Encodes an AccountIndex to an Substrate address according to provided address_type

    Parameters
    ----------
    account_index: integer account index (fits in u8/u16/u32/u64)
    ss58_format
    address_type: (deprecated)

    Returns
    -------
    SS58 address string for the SCALE-encoded index
    """
    if address_type is not None:
        warnings.warn("Keyword 'address_type' will be replaced by 'ss58_format'", DeprecationWarning)
        ss58_format = address_type

    # Pick the smallest SCALE integer type that can hold the index.
    if 0 <= account_index <= 2 ** 8 - 1:
        account_idx_encoder = ScaleDecoder.get_decoder_class('u8')
    elif 2 ** 8 <= account_index <= 2 ** 16 - 1:
        account_idx_encoder = ScaleDecoder.get_decoder_class('u16')
    elif 2 ** 16 <= account_index <= 2 ** 32 - 1:
        account_idx_encoder = ScaleDecoder.get_decoder_class('u32')
    elif 2 ** 32 <= account_index <= 2 ** 64 - 1:
        account_idx_encoder = ScaleDecoder.get_decoder_class('u64')
    else:
        raise ValueError("Value too large for an account index")

    return ss58_encode(account_idx_encoder.encode(account_index).data, ss58_format)


def ss58_decode_account_index(address: str, valid_ss58_format: Optional[int] = None, valid_address_type=None) -> int:
    """
    Decodes given SS58 encoded address to an AccountIndex

    Parameters
    ----------
    address
    valid_ss58_format
    valid_address_type: (deprecated)

    Returns
    -------
    Decoded int AccountIndex
    """
    if valid_address_type is not None:
        warnings.warn("Keyword 'valid_address_type' will be replaced by 'valid_ss58_format'", DeprecationWarning)
        valid_ss58_format = valid_address_type

    account_index_bytes = ss58_decode(address, valid_ss58_format)

    # ss58_decode returns hex, so 2 hex chars per byte: 2/4/8/16 chars map to
    # u8/u16/u32/u64 respectively.
    if len(account_index_bytes) == 2:
        return ScaleDecoder.get_decoder_class('u8', data=ScaleBytes('0x{}'.format(account_index_bytes))).decode()
    if len(account_index_bytes) == 4:
        return ScaleDecoder.get_decoder_class('u16', data=ScaleBytes('0x{}'.format(account_index_bytes))).decode()
    if len(account_index_bytes) == 8:
        return ScaleDecoder.get_decoder_class('u32', data=ScaleBytes('0x{}'.format(account_index_bytes))).decode()
    if len(account_index_bytes) == 16:
        return ScaleDecoder.get_decoder_class('u64', data=ScaleBytes('0x{}'.format(account_index_bytes))).decode()
    else:
        raise ValueError("Invalid account index length")


def is_valid_ss58_address(value: str, valid_ss58_format: Optional[int] = None) -> bool:
    """
    Checks if given value is a valid SS58 formatted address, optionally check if address is valid for specified
    ss58_format

    Parameters
    ----------
    value: value to checked
    valid_ss58_format: if valid_ss58_format is provided the address must be valid for specified ss58_format (network) as well

    Returns
    -------
    bool
    """

    # Return False in case a public key is provided
    if value.startswith('0x'):
        return False

    try:
        ss58_decode(value, valid_ss58_format=valid_ss58_format)
    except ValueError:
        return False

    return True
/reef_interface-1.1.0-py3-none-any.whl/reefinterface/utils/ss58.py
0.892445
0.317453
ss58.py
pypi
import six


class OpenApiException(Exception):
    """The base exception class for all OpenAPIExceptions"""


class ApiTypeError(OpenApiException, TypeError):
    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """Raise a TypeError-flavoured API exception.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): keys and indices leading to the offending
                item; None if unset
            valid_classes (tuple): the primitive classes the current item
                should be an instance of; None if unset
            key_type (bool): True when the failing value is a dict key,
                False when it is a dict value or list item; None if unset
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiTypeError, self).__init__(msg)


class ApiValueError(OpenApiException, ValueError):
    def __init__(self, msg, path_to_item=None):
        """Raise a ValueError-flavoured API exception.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): path to the exception in the received_data
                dict; None if unset
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiValueError, self).__init__(msg)


class ApiKeyError(OpenApiException, KeyError):
    def __init__(self, msg, path_to_item=None):
        """Raise a KeyError-flavoured API exception.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiKeyError, self).__init__(msg)


class ApiException(OpenApiException):

    def __init__(self, status=None, reason=None, http_resp=None):
        # An HTTP response object, when given, overrides the explicit
        # status/reason arguments and also supplies body and headers.
        if http_resp:
            status = http_resp.status
            reason = http_resp.reason
            body = http_resp.data
            headers = http_resp.getheaders()
        else:
            body = None
            headers = None
        self.status = status
        self.reason = reason
        self.body = body
        self.headers = headers

    def __str__(self):
        """Custom error messages for exception"""
        parts = [
            "({0})\n".format(self.status),
            "Reason: {0}\n".format(self.reason),
        ]
        if self.headers:
            parts.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            parts.append("HTTP response body: {0}\n".format(self.body))
        return "".join(parts)


def render_path(path_to_item):
    """Returns a string representation of a path"""
    def render_part(part):
        # Integers render as list indices, everything else as quoted keys.
        if isinstance(part, six.integer_types):
            return "[{0}]".format(part)
        return "['{0}']".format(part)

    return "".join(render_part(part) for part in path_to_item)
/reelase-manager-api-0.2.0.tar.gz/reelase-manager-api-0.2.0/client_sdk/exceptions.py
0.696268
0.26309
exceptions.py
pypi
import datetime

import colorama
import pandas as pd
import usaddress

from reemployct_data_entry.lib import stateDictionary
from reemployct_data_entry.lib import webdriver as m_driver
from reemployct_data_entry.lib.class_enum import ExtendedEnum
from reemployct_data_entry.lib.stateDictionary import States


class Jobs():
    '''
    A collection of all job entry data and metadata.

    Job data consists of three types:
      jobs_excel  : excel job data read in as a raw pandas DataFrame
      jobs        : modified and cleaned up version of jobs_excel that is
                    convenient for high level use
      jobs_portal : the final stage of the job data that is formatted to match
                    entry data for direct use in ReEmployCT
    '''

    def __init__(self, excel_path: str) -> None:
        self.jobs = pd.read_excel(excel_path)  # the job data format for convenient high level use
        self.consistent_rows()
        self.jobs_excel = self.jobs
        self.jobs_portal = None
        self.required_num_of_portal_entries = 3

    def enough_entries_to_meet_minimum(self, existing: list) -> bool:
        '''
        Determine if there are enough new entries to enter into ReEmployCT to
        meet the minimum requirement.

        NOTE(review): despite the name, this returns True when there are FEWER
        new entries than still needed -- callers appear to rely on that
        polarity, so it is preserved here; confirm before renaming.

        Arguments:
          existing : PortalJobEntries
        '''
        return len(self.jobs_portal.entries) < self.required_num_of_portal_entries - len(existing)

    def portal_format(self) -> None:
        '''
        Convert high level job data to ReEmployCT job entry formatted job data.
        '''
        self.jobs_portal = PortalJobEntries(self.jobs)

    def consistent_rows(self) -> None:
        '''
        Clean table - drop rows of bad data. This is mainly to get rid of rows
        that are completely different from all the others/that break dataframe
        consistency (e.g. a row with only a single populated cell of comment
        text). This is not meant to remove rows that contain NaNs.
        '''
        # set bad values to None/NaN/NaT for easy parsing
        self.jobs[Jobs_RequiredData.DATE_OF_WORK_SEARCH.value] = self.jobs[Jobs_RequiredData.DATE_OF_WORK_SEARCH.value].apply(lambda x: pd.to_datetime(x, errors='coerce'))
        # index labels of rows whose date failed to parse (NaT)
        index_NaT = self.jobs.loc[pd.isna(self.jobs[Jobs_RequiredData.DATE_OF_WORK_SEARCH.value]), :].index
        # BUG FIX: DataFrame.drop() returns a new frame; the original discarded
        # the result, so bad rows were never actually removed.
        self.jobs = self.jobs.drop(index_NaT)

    def target_week_has_jobs(self) -> bool:
        '''
        Check if there is any job data for target week.

        NOTE(review): reads self.week, so isolate_week() must be called first.
        '''
        if(len(self.jobs) == 0):
            print(colorama.Fore.RED + f"\n*** You have no days of job data to enter for the target week! ({self.week.start.date()} - {self.week.end.date()}) ***\nQuitting script." + colorama.Style.RESET_ALL)
            return False
        return True

    def isolate_week(self, date_range) -> None:
        '''
        Set the start day and end day of the week (Sun through Sat) that
        contains the desired jobs. Isolate the desired jobs from those days.
        '''
        self.week = JobWeek(date_range)
        target_days = self.jobs[Jobs_RequiredData.DATE_OF_WORK_SEARCH.value].between(self.week.start, self.week.end)  # marks target days as True
        self.jobs = self.jobs.loc[target_days == True].reset_index()  # isolate target week rows

    def sanitize(self) -> None:
        '''
        Drop rows from pandas dataframe of bad data such as NaNs (includes
        empty data).
        '''
        # clean excel jobs rows and convert them to dicts
        jobData_toCompare = []
        for i, jobRow in self.jobs.iterrows():
            jobRow.dropna(inplace=True)
            # .str.strip() turns non-string cells into NaN, so preserve the
            # timestamp column around it
            date_timestamp_col = jobRow[Jobs_RequiredData.DATE_OF_WORK_SEARCH.value]  # save timestamp dtype because .strip() destroys it
            jobRow = jobRow.str.strip()  # strip leading and trailing whitespaces
            jobRow[Jobs_RequiredData.DATE_OF_WORK_SEARCH.value] = date_timestamp_col
            jobRow = jobRow.drop('index')
            jobRow = jobRow.to_dict()
            jobData_toCompare.append(jobRow)
        self.jobs = pd.DataFrame(jobData_toCompare)
        self.jobs.dropna(inplace=True)
        self.jobs.reset_index(drop=True, inplace=True)

    def us_only_addresses(self) -> None:
        '''
        Remove rows from DataFrame that contain non US addresses in Employer
        Address
        '''
        states_full_names = States.value_list()
        states_full_names = [el.lower() for el in states_full_names]
        states_abbrev_names = States.key_list()
        states_abbrev_names = [el.lower() for el in states_abbrev_names]

        initial_n = len(self.jobs)
        indexes_to_drop = []
        for i, row in self.jobs.iterrows():
            address = Address.parse_us_address(row[Jobs_RequiredData.EMPLOYER_ADDRESS.value])
            address[Address_RequiredComponents.ADDRESS_LINE_1.value] = Address.build_street_address_from_cleaned_address_dict(address)
            try:
                # check for US state
                if(not(address[Address_RequiredComponents.STATE_NAME.value].lower() in states_abbrev_names or address[Address_RequiredComponents.STATE_NAME.value].lower() in states_full_names)):
                    indexes_to_drop.append(i)
                else:
                    # check all address components exist
                    for comp in Address_RequiredComponents.value_list():
                        if(len(address[comp]) == 0):
                            indexes_to_drop.append(i)
                            break
            # BUG FIX: was a bare ``except:`` (also caught KeyboardInterrupt /
            # SystemExit); keep the deliberate best-effort behaviour but only
            # for ordinary errors.
            except Exception:
                # exception usually occurs when state name was never entered correctly/at all by user even if it was a US address
                # e.g. user enters US address like "1 W 1st St, New York, 10001, US" which is invalid because "New York" is only parsed as PlaceName here
                # it should instead be "1 W 1st St, New York, New York 10001, US"
                indexes_to_drop.append(i)

        addresses_to_drop = []
        for i in indexes_to_drop:
            addresses_to_drop.append(self.jobs.iloc[i][Jobs_RequiredData.EMPLOYER_ADDRESS.value])

        if(len(addresses_to_drop) > 0):
            m_driver.msg_user_verify_entries(f"{len(addresses_to_drop)} of {initial_n} job rows will be automatically excluded for the target week because they \
contain invalid addresses and/or are non-U.S. addresses.\
\nIf they are supposed to be U.S. addresses, please check they are entered correctly in your Excel data.\
\nIf they are non-U.S. addresses, ReEmployCT won't accept them.", color="yellow")
            print(colorama.Fore.YELLOW, "Excluding jobs with these addresses...")
            for i in range(len(addresses_to_drop)):
                print(colorama.Fore.YELLOW, i + 1, ":", addresses_to_drop[i])
            self.jobs.drop(indexes_to_drop, inplace=True)
            remaining = initial_n - len(addresses_to_drop)
            if(remaining >= 1 and remaining < 3):  # 0 remaining is dealt with later
                print(f"\nThere are only {remaining} valid entries remaining for the target week.\nThere may not be enough to enter to meet the minimum requirement of 3 entries.")
            print(colorama.Style.RESET_ALL)

    def isolate_columns(self, columns: list[str] | ExtendedEnum) -> None:
        '''
        Remove all but the desired columns from a DataFrame.
        Strings must match the names of columns in the data.
        '''
        if(type(columns) is not list):
            columns = [el.value for el in columns]
        self.jobs = self.jobs[columns]


class Jobs_RequiredData(ExtendedEnum):
    '''
    Required column data for a job application ("Employer Contact") in
    ReEmployCT
    '''
    DATE_OF_WORK_SEARCH = 'Date of Work Search'
    EMPLOYER_NAME = 'Employer Name'
    POSITION_APPLIED_FOR = 'Position Applied For'
    WEBSITE_ADDRESS = 'Website Address'
    EMPLOYER_ADDRESS = 'Employer Address'


class JobWeek():
    '''
    A standard calendar week (SUN-SAT) of a start and end day
    '''

    def __init__(self, date_range: datetime.datetime) -> None:
        # convert given date into SUN (0) - SAT (6) index format
        # (FIX: original comment had the mapping backwards)
        day_idx = (date_range.weekday() + 1) % 7  # get day's number: SUN = 0 ... SAT = 6
        self.start = pd.Timestamp(date_range - datetime.timedelta(day_idx))  # sunday
        self.end = pd.Timestamp(self.start + datetime.timedelta(6))  # saturday


class JobType(ExtendedEnum):
    '''
    Types of Work Searches listed in ReEmployCT
    '''
    EMPLOYER_CONTACT = 'Employer Contact'
    JOB_FAIR = 'Attending a job fair'
    INTERVIEW = 'Attending a Job Interview'
    WORKSHOP = 'Attending a work shop at an American Job Center'
    CREATED_USER_PROFILE = 'Creating a personal user profile on a professional networking site'
    CREATED_PLAN = 'Creating a Reemployment Plan'
    RESUME_TO_CTDOL = 'Creating and uploading resume to the CTDOL State Job Bank'
    REEMPLOYMENT_ACTIVITY = 'Participating in reemployment service activities at an American Job Center'


class PortalJobEntries():
    '''
    A collection of job entry data in ReEmployCT job entry format.
    Entries are dicts, all stored in a list.
    '''

    def __init__(self, jobs_high: pd.DataFrame | list[dict]) -> None:
        if((type(jobs_high)) is list):
            jobs_high = pd.DataFrame(jobs_high)
        # (dead ``del entries`` from the original removed)
        self.entries = [PortalJobEntry(JobType.EMPLOYER_CONTACT, row)
                        for _, row in jobs_high.iterrows()]

    def exclude_existing_entries(self, existing) -> None:
        '''
        Remove entries based on the existence of given duplicate entries.
        Duplicates entries are determined by specific values of each entry that
        are compared.
        '''
        if(len(existing.entries) == 0):
            return
        COMPARE = [
            Jobs_RequiredData.EMPLOYER_NAME.value_attrib(),
            Jobs_RequiredData.POSITION_APPLIED_FOR.value_attrib()
        ]
        entries_to_remove = []
        for e_main in self.entries:
            for e_existing in existing.entries:
                if(e_main.__getattribute__(COMPARE[0]) == e_existing.__getattribute__(COMPARE[0])
                        and e_main.__getattribute__(COMPARE[1]) == e_existing.__getattribute__(COMPARE[1])):
                    entries_to_remove.append(e_main)
                    break
        for row in entries_to_remove:
            self.entries.remove(row)


class PortalJobEntry():
    '''
    A job entry that can be entered directly into ReEmployCT.
    '''

    def __init__(self, entry_type: JobType, entry: pd.Series) -> None:
        self.entry_type = entry_type.value
        # isolate only required column data for the given job type
        required_cols = Jobs_RequiredData.value_list()
        entry = entry[required_cols]
        # create job type framework
        self.create_entry_attribs(entry)

    def create_entry_attribs(self, entry: pd.Series) -> None:
        '''
        Create the required attribs for the given job entry.
        '''
        if(self.entry_type == JobType.EMPLOYER_CONTACT.value):
            self.date_of_work_search = entry[Jobs_RequiredData.DATE_OF_WORK_SEARCH.value]
            self.employer_name = entry[Jobs_RequiredData.EMPLOYER_NAME.value]
            self.employer_address = Address(entry[Jobs_RequiredData.EMPLOYER_ADDRESS.value])
            self.position_applied_for = entry[Jobs_RequiredData.POSITION_APPLIED_FOR.value]
            self.website_address = entry[Jobs_RequiredData.WEBSITE_ADDRESS.value]
            self.contact_method = ContactMethod.ONLINE.value
            self.result = ContactResult.FILED_BUT_NOT_HIRED.value


class ContactMethod(ExtendedEnum):
    '''
    Contact method Enums for a job entry
    '''
    NONE = None
    EMAIL = "Email"
    FAX = "Fax"
    IN_PERSON = "In Person"
    ONLINE = "Online"
    PHONE = "Telephone"


class ContactResult(ExtendedEnum):
    '''
    Contact result Enums for a job entry
    '''
    NONE = None
    FILED_BUT_NOT_HIRED = "Application/Resume Filed But Not Hired"
    HIRED = "Hired"
    NOT_ACCEPTING = "Not Accepting Applications/Resumes"


class AddressControl():
    '''
    Methods for controlling an Address object.
    '''

    @classmethod
    def parse_us_address(cls, address) -> dict:
        '''
        Parse address elements from string into dict.
        Only works correctly for U.S. addresses.
        Can be used on any address and examined to see if it's a valid U.S.
        address (i.e. check if interpretation of U.S. state is valid).
        This is a wrapper around usaddress.parse() to fix its bad output.

        Arguments:
          address : str
            Raw address string to parse.

        Returns : dict object
          A cleaned U.S. address
        '''
        address = usaddress.parse(address)
        address = cls._clean_usaddress_parse(address)
        return address

    # FIX: declared @staticmethod -- it takes no cls/self but was called via
    # ``cls.`` from the classmethod above (worked only by accident in py3).
    @staticmethod
    def _clean_usaddress_parse(address_parsed: list[tuple]) -> dict:
        '''
        Create a cleaned up dict of the parsed result of the usaddress package.

        Arguments:
          address_parsed : usaddress obj
            the result of usaddress.parse(some_address)
        '''
        address_dict = {}
        # rebuild similar address components into same dict elements since
        # usaddress breaks them up by char block
        for div in range(0, len(address_parsed)):
            key = address_parsed[div][1]
            if(key not in address_dict):
                address_dict[key] = address_parsed[div][0]
            else:
                address_dict[key] += ' ' + address_parsed[div][0]
            if(address_dict[key][-1] == ','):  # remove trailing commas
                address_dict[key] = address_dict[key][:-1]
        return address_dict

    @classmethod
    def build_street_address_from_cleaned_address_dict(cls, address_dict) -> str:
        '''
        Rebuild street address components of the cleaned address dict from a
        usaddress package parse into a single string
        '''
        SEPARATOR_KEY = 'StreetNamePostDirectional'
        address_line_1 = ''
        # US address components are sorted by a US standard, so loop through
        # them to determine which dict elements to combine
        for key in usaddress.LABELS:
            if key in address_dict:
                address_line_1 += address_dict[key] + ' '
            if(key == SEPARATOR_KEY):
                address_line_1 = address_line_1.rstrip()
                break
        return address_line_1


class Address(AddressControl):
    '''
    A standard U.S. address comprised of separated address components.
    Only contains components that are required for ReEmployCT entries.
    Intakes a raw address string in any format then parses and cleans it.
    '''

    def __init__(self, address_raw: str) -> None:
        super().__init__()
        parsed = self.parse_us_address(address_raw)
        self.full_address = address_raw
        self.address_line_1 = self.build_street_address_from_cleaned_address_dict(parsed)
        # self.address_line_2 = ''
        self.city = parsed[Address_RequiredComponents.PLACE_NAME.value]
        self.state = parsed[Address_RequiredComponents.STATE_NAME.value]
        # normalize: 2-letter abbreviations uppercased, full names title-cased
        if(len(self.state) == 2):
            self.state = self.state.upper()
        else:
            self.state = self.state.title()
        self.zip = parsed[Address_RequiredComponents.ZIP_CODE.value]

    def full_state_name(self) -> str:
        '''
        Return full name of state from its letter abbreviation.
        '''
        if(len(self.state) > 2):
            return self.state
        return stateDictionary.States[self.state].value

    def abbrev_state_name(self) -> str:
        '''
        Return abbreviation letter name of state from its full name.
        '''
        if(len(self.state) == 2):
            return self.state
        return stateDictionary.States(self.state).name


class Address_RequiredComponents(ExtendedEnum):
    '''
    Required address components for entries in ReEmployCT (usaddress package
    format).
    '''
    ADDRESS_LINE_1 = 'AddressLine1'  # assembled from build_street_address_from_cleaned_address_dict()
    PLACE_NAME = 'PlaceName'  # City
    STATE_NAME = 'StateName'
    ZIP_CODE = 'ZipCode'
/reemployct_data_entry-2.0.2-py3-none-any.whl/reemployct_data_entry/lib/job_control.py
0.466603
0.416737
job_control.py
pypi
from typing import List, Dict, Callable, Optional, Tuple
from threading import Thread, Event
from queue import Queue
import json
import re
import requests
import time
import logging
from bs4 import BeautifulSoup
import flask
from common_pyutil.proc import call
from .const import default_headers
from .q_helper import ContentType


def check_proxy_port(proxy_port: int, proxy_name: str, logger: logging.Logger) ->\
        Tuple[bool, str, Dict[str, str]]:
    """Check that a local HTTP proxy listening on ``proxy_port`` works.

    Args:
        proxy_port: Local port on which the proxy listens
        proxy_name: Human readable proxy name used in log messages
        logger: Logger instance

    Returns:
        A tuple of (reachable?, status message, proxies dict usable
        with :mod:`requests`).
    """
    status = False
    proxies = {"http": f"http://127.0.0.1:{proxy_port}",
               "https": f"http://127.0.0.1:{proxy_port}"}
    try:
        # google.com is used only as a well-known, always-up probe endpoint
        response = requests.get("http://google.com", proxies=proxies, timeout=1)
        if response.status_code == 200:
            msg = f"{proxy_name} seems to work"
            logger.info(msg)
            status = True
        else:
            msg = f"{proxy_name} seems reachable but wrong" +\
                f" status_code {response.status_code}"
            logger.info(msg)
    except requests.exceptions.Timeout:
        msg = f"Timeout: Proxy for {proxy_name} not reachable"
        logger.error(msg)
    except requests.exceptions.ProxyError:
        msg = f"ProxyError: Proxy for {proxy_name} not reachable. Will not proxy"
        logger.error(msg)
    return status, msg, proxies


def check_proxy(proxies: Dict[str, str], flag: Event):
    """Poll the proxy every 10 seconds and clear ``flag`` when it breaks.

    Meant to run in a background thread; exits once ``flag`` is cleared,
    either externally or after 3 consecutive timeouts.

    Args:
        proxies: Proxies dict as used by :mod:`requests`
        flag: Event signalling that the proxy is (still) usable
    """
    check_count = 0
    while flag.is_set():
        try:
            response = requests.get("http://google.com", proxies=proxies, timeout=1)
            if response.status_code != 200:
                flag.clear()
            else:
                check_count = 0  # reset the failure streak on success
        except requests.exceptions.Timeout:
            check_count += 1
            print(f"Proxy failed {check_count} times")
            if check_count > 2:  # give up after 3 consecutive timeouts
                flag.clear()
        time.sleep(10)
    print("Proxy failed. Exiting from check.")


def parallel_fetch(urls: List[str], fetch_func: Callable[[str, Queue], None],
                   batch_size: int):
    """Fetch ``urls`` in parallel batches of ``batch_size`` threads each.

    Args:
        urls: List of urls to fetch
        fetch_func: Function fetching a single url; must put
            ``(url, result)`` on the queue it is given
        batch_size: Number of simultaneous requests

    Returns:
        Dict mapping each url to the value produced by ``fetch_func``.
    """
    def drain(q: Queue):
        # Collect the (url, retval) pairs the worker threads enqueued
        responses = {}
        while not q.empty():
            url, retval = q.get()
            responses[url] = retval
        return responses
    j = 0
    content = {}
    while True:
        _urls = urls[(batch_size * j): (batch_size * (j + 1))].copy()
        if not _urls:
            break
        q: Queue = Queue()
        threads = []
        for url in _urls:
            threads.append(Thread(target=fetch_func, args=[url, q]))
            threads[-1].start()
        for t in threads:
            t.join()
        content.update(drain(q))
        j += 1
    # FIX: previously the gathered results were built up and then discarded
    return content


def fetch_url_info(url: str) -> Dict[str, str]:
    """Fetch page title and, for arxiv.org urls, full article metadata.

    Args:
        url: The url to fetch

    Returns:
        On HTTP 200, a dict with keys ``title``, ``authors``, ``date``,
        ``abstract`` and ``pdf_url`` (non-title fields are None for
        non-arxiv urls).  Otherwise ``{"error": "error", "code": status}``.
    """
    response = requests.get(url, headers=default_headers)
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, features="lxml")
        title = soup.find("title").text
        if re.match("https{0,1}://arxiv.org.*", url):
            # arxiv abstract pages render fields as "Label: value",
            # hence the split on ":"
            title = soup.find(None, attrs={"class": "title"}).text.split(":")[1]
            authors = soup.find(None, attrs={"class": "authors"}).text.split(":")[1]
            abstract = soup.find(None, attrs={"class": "abstract"}).text.split(":")[1]
            date = soup.find("div", attrs={"class": "dateline"}).text.lower()
            pdf_url: Optional[str] = url.replace("/abs/", "/pdf/")
            if "last revised" in date:
                date = date.split("last revised")[1].split("(")[0]
            elif "submitted" in date:
                date = date.split("submitted")[1].split("(")[0]
            else:
                date = None
        else:
            # Only the <title> tag is available for arbitrary pages
            authors = None
            abstract = None
            date = None
            pdf_url = None
        retval = {"title": title and title.strip(),
                  "authors": authors and authors.strip(),
                  "date": date and date.strip(),
                  "abstract": abstract and abstract.strip(),
                  "pdf_url": pdf_url and pdf_url.strip()}
    else:
        retval = {"error": "error", "code": response.status_code}
    return retval


def fetch_url_info_parallel(url: str, q: Queue) -> None:
    """Fetch info for ``url`` and put ``(url, info)`` on queue ``q``.

    Thread-target wrapper around :func:`fetch_url_info` for use with
    :func:`parallel_fetch`.
    """
    q.put((url, fetch_url_info(url)))


def post_json_wrapper(request: flask.Request, fetch_func: Callable[[str, Queue], None],
                      helper: Callable[[Queue], ContentType],
                      batch_size: int, host: str, logger: logging.Logger):
    """Helper function to parallelize the requests and gather them.

    Args:
        request: An instance :class:`~Flask.Request`
        fetch_func: :func:`fetch_func` fetches the request from the server
        helper: :func:`helper` validates and collates the results
        batch_size: Number of simultaneous fetch requests
        host: Host name, used only for logging
        logger: Logger instance

    Returns:
        A JSON string of query -> result, or ``"BAD REQUEST"`` if the
        request body could not be parsed.
    """
    if not isinstance(request.json, str):
        data = request.json
    else:
        try:
            data = json.loads(request.json)
        except Exception:
            return json.dumps("BAD REQUEST")
    logger.info(f"Fetching {len(data)} queries from {host}")
    verbose = True
    j = 0
    content: Dict[str, str] = {}
    while True:
        _data = data[(batch_size * j): (batch_size * (j + 1))].copy()
        # Re-queue entries that errored in a previous batch.
        # NOTE(review): an entry that keeps returning ["ERROR"] is re-queued
        # on every iteration, so this loop never terminates for permanently
        # failing queries -- confirm the intended retry policy
        for k, v in content.items():
            if v == ["ERROR"]:
                _data.append(k)
        if not _data:
            break
        q: Queue = Queue()
        threads = []
        for d in _data:
            # FIXME: This should also send the logger instance
            threads.append(Thread(target=fetch_func, args=[d, q],
                                  kwargs={"verbose": verbose}))
            threads[-1].start()
        for t in threads:
            t.join()
        content.update(helper(q))
        j += 1
    return json.dumps(content)


def import_icra22_pdfs(files: List[str]):
    """Extract title and DOI from ICRA 2022 pdfs via the ``pdfinfo`` tool.

    Args:
        files: Paths of the pdf files

    Returns:
        Dict of file path -> ``{"title": ..., "doi": ...}``.
    """
    info: Dict[str, Optional[Dict]] = {}
    for f in files:
        out, err = call(f"pdfinfo {f}")
        # Not sure how to check for other IEEE
        # pdfinfo prints "Title: ..." and "Subject: ..." as the first lines
        title, subject = out.split("\n")[:2]
        title = re.split(r"[ \t]+", title, 1)[1]
        doi = re.split(r"[ \t]+", subject, 1)[1].split(";")[-1]
        info[f"{f}"] = {"title": title, "doi": doi}
    return info


def import_elsevier_pdfs(files: List[str]):
    """Extract the DOI from Elsevier-generated pdfs via ``pdfinfo``.

    Args:
        files: Paths of the pdf files

    Returns:
        Dict of file path -> ``{"doi": ...}``, or None for files not
        recognized as Elsevier pdfs.
    """
    info: Dict[str, Optional[Dict]] = {}
    for f in files:
        out, err = call(f"pdfinfo {f}")
        # Elsevier?
        if re.match(r"[\s\S.]+creator.+elsevier.*", out, flags=re.IGNORECASE):
            subject = out.split("\n")[0]
            match = re.match(r".+doi:(.+)", subject)
            if match is not None:
                doi = match.groups()[0]
                info[f"{f}"] = {"doi": doi}
        else:
            info[f"{f}"] = None
    return info
/ref-man-py-0.7.1.tar.gz/ref-man-py-0.7.1/ref_man_py/util.py
0.475362
0.155591
util.py
pypi
from typing import Dict, Optional
import sys
import glob
import gzip
import json
from collections import defaultdict
import os
import pickle
from pathlib import Path
from common_pyutil.monitor import Timer


__doc__ = """Module to process Semantic Scholar Data."""


timer = Timer()


def parse_citations(root_dir: Path):
    """Parse gzipped Semantic Scholar citation dumps into an adjacency dict.

    Each line of each ``*gz`` file under ``root_dir`` is a JSON object with
    ``citedcorpusid`` and ``citingcorpusid``.  The result maps
    cited corpusId -> set of citing corpusIds and is pickled to
    ``citations.pkl`` in ``root_dir``.

    Args:
        root_dir: Directory containing the gzipped dump files
    """
    citations = defaultdict(set)
    filenames = glob.glob(str(root_dir.joinpath("*gz")))
    for f_num, filename in enumerate(filenames):
        with gzip.open(filename, "rt") as s2_file:
            for i, line in enumerate(s2_file):
                data = json.loads(line)
                if data["citedcorpusid"] and data["citingcorpusid"]:
                    a, b = int(data["citedcorpusid"]), int(data["citingcorpusid"])
                    citations[a].add(b)
                if not (i+1) % 1000000:
                    # NOTE(review): the message literally prints "(unknown)";
                    # it probably meant to interpolate {filename} -- confirm
                    print(f"{i+1} done for file (unknown)")
        print(f"Done file {f_num+1} out of {len(filenames)}")
    out_file = root_dir.joinpath("citations.pkl")
    print(f"Writing file {out_file}")
    with open(out_file, "wb") as f:
        pickle.dump(citations, f)


def save_temp(output_dir: Path, data: Dict, i: int):
    """Dump a temp pickle file of adjacency list

    Args:
        output_dir: Output directory to save the file
        data: The data to dump
        i: The numeric suffix for the file
    """
    with timer:
        with open(output_dir.joinpath(f"temp_{i:010}.pkl"), "wb") as f:
            pickle.dump(data, f)
    print(f"Dumped for {i} in {timer.time} seconds")


def split_and_dump_citations(input_dir: Path, output_dir: Path,
                             citations: Dict, max_key: int):
    """Split the citations into 1M-corpusId chunks and dump each chunk.

    Chunks which already exist in ``input_dir`` are skipped.

    Args:
        input_dir: Input Directory
        output_dir: Output Directory
        citations: Citations loaded from pickle file
        max_key: Max value of all keys
    """
    j = 0
    while True:
        temp = {}
        a = j * 1000000
        b = (j+1) * 1000000
        if os.path.exists(input_dir.joinpath(f"temp_{b:010}.pkl")):
            print(f"skipping for {b:010}")
            j += 1  # FIX: without this increment the skip branch looped forever
            continue
        with timer:
            for i in range(a, b):
                if i in citations:
                    temp[i] = citations[i].copy()
                if i > max_key:
                    # Walked past the last key: dump what we have and stop
                    save_temp(output_dir, temp, b)
                    return
        print(f"Done for {b} in {timer.time} seconds")
        save_temp(output_dir, temp, b)
        j += 1


def split_citations(root_dir: Path):
    """Read the citations.pkl file and split them based on corpus_id

    Args:
        root_dir: Root directory where citations reside
    """
    with timer:
        with open(root_dir.joinpath("citations.pkl"), "rb") as f:
            citations = pickle.load(f)
    print(f"Loaded citations in {timer.time} seconds")
    keys = [*citations.keys()]
    max_key = max(keys)
    split_and_dump_citations(root_dir, root_dir, citations, max_key)


def convert_keys_from_numpy(cache):
    """Convert cache keys from :class:`numpy.int64` to :class:`int`

    Used once when keys were taken from numpy

    Args:
        cache: :class:`RefsCache`
    """
    for i, cf in enumerate(cache.files.values()):
        print(f"Opening {i+1} file")
        with open(cf, "rb") as fp:
            data = pickle.load(fp)
        out_data = defaultdict(set)
        for k, v in data.items():
            out_data[int(k)] = v
        with open(cf, "wb") as fp:
            pickle.dump(out_data, fp)
        print(f"Done {i+1} file")


class CitationsCache:
    """A Semantic Scholar Citations cache.

    Consists of pickle files of :class:`dict` entries with keys as
    :code:`citedPaper` and values of :code:`citingPaper`

    The pickle files are stored such that :code:`corpusId` of a
    :code:`citingPaper` is smaller than :code:`temp_{suffix}` where
    :code:`suffix` is an integer

    Args:
        root_dir: Root directory where cache resides
    """

    def __init__(self, root_dir: Path):
        self._root_dir = root_dir
        _root_dir = str(root_dir).removesuffix("/") + "/"
        files = glob.glob(_root_dir + "*.pkl")
        # The monolithic citations.pkl is not a chunk file; exclude it
        if str(root_dir.joinpath("citations.pkl")) in files:
            files.remove(str(root_dir.joinpath("citations.pkl")))
        files.sort()
        # Map numeric chunk suffix -> chunk file path; zero-padded names
        # make the lexical sort above numeric
        _files: Dict[int, str] = {int(f.replace(_root_dir, "").
                                      replace("temp_", "").
                                      replace(".pkl", "")): f
                                  for f in files}
        self.files = _files
        self._cache: Dict[int, set] = {}

    @property
    def cache(self):
        """Cache to avoid reading files multiple times

        It's dictionary of type Dict[corpusId, set(corpusId)]
        """
        return self._cache

    def get_file(self, ID: int):
        """Get the file corresponding to a corpusId

        Args:
            ID: corpusId of a paper

        Returns:
            The loaded chunk dict, or None if ``ID`` exceeds every
            chunk's upper bound.
        """
        # Chunk suffixes are upper bounds; the first one greater than ID
        # is the chunk that may contain it
        for i, f in enumerate(self.files):
            if ID < f:
                print(f"Looking in file {f}")
                with timer:
                    with open(self.files[f], "rb") as fp:
                        data = pickle.load(fp)
                print(f"Loaded file {self.files[f]} in {timer.time} seconds")
                return data

    def get_citations(self, ID: int) -> Optional[set]:
        """Get all the citing papers for a corpusId

        Args:
            ID: corpusId of a paper

        Returns:
            The set of citing corpusIds, or None if not found.
        """
        print(f"Searching for {ID}")
        if ID in self.cache:
            print(f"Have data for {ID} in cache")
            return self.cache[ID]
        data = self.get_file(ID)
        # FIX: previously data[ID] was indexed unconditionally before the
        # membership check, raising TypeError/KeyError when the chunk was
        # missing or did not contain the ID
        if data and ID in data:
            self.cache[ID] = data[ID].copy()
            return data[ID]
        print(f"Could not find reference data for {ID}")
        return None


if __name__ == '__main__':
    root_dir = Path(sys.argv[1])
    if not root_dir.exists():
        raise ValueError(f"No such directory {root_dir}")
    parse_citations(root_dir)
    split_citations(root_dir)
/ref-man-py-0.7.1.tar.gz/ref-man-py-0.7.1/ref_man_py/data.py
0.590897
0.20468
data.py
pypi
from typing import Dict, List, Optional, Union
import re


def year_filter(entry: Dict, min: Union[int, str, None],
                max: Union[int, str, None]) -> bool:
    """Return True if the entry's "year" lies within ``[min, max]``.

    Either bound may be ``"any"`` or falsy to leave that side open.
    NOTE: the parameter names shadow the builtins ``min``/``max``; they are
    kept for interface compatibility with existing keyword callers.

    Args:
        entry: A paper entry with a numeric "year"
        min: Lower bound year, or "any"/falsy for no lower bound
        max: Upper bound year, or "any"/falsy for no upper bound
    """
    min = -1 if (min == "any" or not min) else min
    max = 10000 if (max == "any" or not max) else max
    return entry["year"] >= min and entry["year"] <= max


def author_filter(entry: Dict, author_names: List[str],
                  author_ids: List[str], exact: bool) -> bool:
    """Return True if any of the given authors by name or id are in the entry.

    Only one of ids or names are checked: ids take precedence, then exact
    name match, then case-insensitive match on individual name tokens
    (the query names are compared as given; lowercase them for a
    case-insensitive match).

    Args:
        entry: A paper entry with an "authors" list of dicts
        author_names: Author names to look for
        author_ids: Author ids to look for (checked first)
        exact: Require exact full-name matches
    """
    if author_ids:
        return any(a == x['authorId'] for a in author_ids for x in entry["authors"])
    elif author_names and exact:
        return any(a == x['name'] for a in author_names for x in entry["authors"])
    elif author_names and not exact:
        names: List[str] = []
        for x in entry["authors"]:
            names.extend(x['name'].split(" "))
        # Lowercase once instead of re-lowering for every query name
        lowered = [n.lower() for n in names]
        return any(a in lowered for a in author_names)
    else:
        return False


def num_citing_filter(entry: Dict, min: int, max: Optional[int] = None) -> bool:
    """Return True if citationCount is in ``[min, max)``.

    Args:
        entry: A paper entry
        min: Minimum citation count (inclusive)
        max: Optional exclusive upper bound
    """
    if max is not None:
        return entry["citationCount"] >= min and entry["citationCount"] < max
    else:
        return entry["citationCount"] >= min


def num_influential_count_filter(entry: Dict, min: int,
                                 max: Optional[int] = None) -> bool:
    """Return True if influentialCitationCount is in ``[min, max)``.

    Args:
        entry: A paper entry
        min: Minimum influential citation count (inclusive)
        max: Optional exclusive upper bound
    """
    if max is not None:
        return entry["influentialCitationCount"] >= min and \
            entry["influentialCitationCount"] < max
    else:
        return entry["influentialCitationCount"] >= min


def venue_filter(entry: Dict, venues: List[str]) -> bool:
    """Return True if any of the given venues by regexp match are in the entry.

    The case in regexp match is ignored.

    Args:
        entry: A paper entry with a "venue" string
        venues: Venue regexps to match against
    """
    return any(re.match(x, entry["venue"], flags=re.IGNORECASE) for x in venues)


def title_filter(entry: Dict, title_re: str, invert: bool) -> bool:
    """Return True if the given regexp matches the entry title.

    The case in regexp match is ignored.

    Args:
        entry: A paper entry
        title_re: title regexp
        invert: Whether to include or exclude matching titles
    """
    match = bool(re.match(title_re, entry["title"], flags=re.IGNORECASE))
    return not match if invert else match
/ref-man-py-0.7.1.tar.gz/ref-man-py-0.7.1/ref_man_py/filters.py
0.922931
0.461138
filters.py
pypi
from typing import Dict, List, Tuple, Optional
import logging
from pathlib import Path
import os
import time
import shutil
from subprocess import Popen, PIPE, TimeoutExpired
from threading import Thread, Event


class CacheHelper:
    """A local and remote (via :code:`rclone`) PDF files manager

    The pdf files are linked to publications and can be stored in any
    `rclone` remote instance.  The local links are stored in the
    :code:`cache_file` and can be updated on command.

    Args:
        local_dir: Local directory where pdf files are stored
        remote_dir: :code:`rclone` remote directory where pdf files are stored
        cache_file: ';' separated file of pdf links
        logger: the logger instance
    """

    def __init__(self, local_dir: Path, remote_dir: Path, cache_file: Path,
                 logger: logging.Logger):
        self.local_dir = local_dir
        self.remote_dir = remote_dir
        self.cache_file = cache_file
        # Events signalling the state of the background update thread
        self.updating_ev = Event()
        self.success_ev = Event()
        self.success_with_errors_ev = Event()
        self.update_thread: Optional[Thread] = None
        self.logger = logger
        self.check_and_fix_cache()

    @property
    def updating(self) -> bool:
        """Whether a cache update is currently in progress."""
        return self.updating_ev.is_set()

    @property
    def finished(self) -> bool:
        """Whether the last update finished without errors."""
        return self.success_ev.is_set()

    @property
    def finished_with_errors(self) -> bool:
        """Whether the last update finished but some links failed."""
        return self.success_with_errors_ev.is_set()

    # TODO: Change to sqlite
    def read_cache(self) -> Tuple[List[str], List[str], List[str]]:
        """Read the cache file and list local pdf files.

        Returns:
            A tuple of (local file paths, raw cache lines, cached file
            paths).  Each cache line has the form
            ``<local path>;<shareable link>``.
        """
        local_files = [os.path.join(self.local_dir, f)
                       for f in os.listdir(self.local_dir)
                       if not f.startswith(".")]
        if self.cache_file.exists():
            with open(self.cache_file) as f:
                cache = [x for x in f.read().split("\n") if len(x)]
            cached_files = [x.rsplit(";")[0] for x in cache]
            return local_files, cache, cached_files
        else:
            print(f"Cache file {self.cache_file} does not exist")
            return local_files, [], []

    @property
    def cache_needs_updating(self) -> set:
        """Local files which have no entry in the cache yet."""
        lf, cache, cf = self.read_cache()
        files = set(lf) - set(cf)
        return files

    def _remote_path(self, fname) -> str:
        """Map a file name to its path on the rclone remote."""
        return os.path.join(self.remote_dir, os.path.basename(fname))

    def _local_path(self, fname: str) -> str:
        """Map a file name to its path in the local directory."""
        return os.path.join(self.local_dir, os.path.basename(fname))

    def stop_update(self) -> None:
        """Signal a running update to stop at the next file boundary."""
        self.updating_ev.clear()

    def shutdown(self) -> None:
        """Stop any running update and wait for the worker thread."""
        self.stop_update()
        if self.update_thread is not None:
            self.update_thread.join()

    def check_and_fix_cache(self) -> None:
        """Drop cache entries for deleted files and refetch broken links."""
        self.logger.debug("Checking existing cache")
        local_files, cache, cached_files = self.read_cache()
        self.logger.debug(f"We have {len(local_files)} pdf files, {len(cache)} entries in cache" +
                          f" and {len(cached_files)} remote files")
        deleted_links = [c for c in cache if not(os.path.exists(c.split(";")[0]))]
        deleted_files = [c.split(";")[0] for c in deleted_links]
        if deleted_links:
            self.logger.info(f"Files {deleted_files} not on disk. Removing from cache.")
            # FIXME: This removes from cache but may still exist on remote
            for dl in deleted_links:
                cache.remove(dl)
            with open(self.cache_file, "w") as f:
                f.write("\n".join(cache))
        else:
            self.logger.debug("No deleted links")
        # NOTE(review): this assumes every cache line contains a ';';
        # a malformed line would raise IndexError here -- confirm input format
        broken_links = [c.split(";")[0] for c in cache if c.split(";")[1] == ""]
        if broken_links:
            self.logger.debug(f"Found {len(broken_links)} broken links. Updating")
            self.update_thread = Thread(target=self.update_cache_helper, args=[broken_links])
            self.update_thread.start()
        else:
            self.logger.debug("No broken links")

    def copy_file(self, fname: str) -> bool:
        """Copy a local file to the rclone remote.

        Args:
            fname: Name of the file to copy

        Returns:
            True if rclone reported the file as copied.
        """
        local_path = self._local_path(fname)
        try:
            p = Popen(f"rclone --no-update-modtime -v copy {local_path} {self.remote_dir}",
                      shell=True, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate(timeout=10)
            err = err.decode("utf-8").lower()  # type: ignore
            # rclone logs progress to stderr; success shows up as
            # "copied" or "transferred" there
            if err and ("copied" in err or "transferred" in err):  # type: ignore
                self.logger.debug(f"Copied file {local_path} to remote")
                status = True
            else:
                status = False
        except TimeoutExpired:
            self.logger.warning(f"Timeout while copying for file {local_path}")
            status = False
        return status

    def try_get_link(self, remote_path: str) -> Tuple[bool, str]:
        """Try and fetch a shareable link for an :code:`rclone` remote_path.

        :code:`rclone` remote paths are prepended with a remote
        :code:`name` so the path is :code:`name:path`.  Depending on the
        remote the status and error messages may differ.  Currently, these
        messages are in :code:`gdrive` format.

        Args:
            remote_path: Remote path for which to fetch the link

        Returns:
            A (status, link) pair; on failure ``link`` holds an error tag.
        """
        # NOTE:
        # I had tried MS's `onedrive` once, but there were far too many errors and
        # timeouts while fetching the links. Perhaps it's a rate limiting issues
        self.logger.debug(f"Fetching link for {remote_path}")
        try:
            p = Popen(f"rclone -v link {remote_path}",
                      shell=True, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate(timeout=10)
            if err:
                if "error 403" in err.decode().lower() or\
                   "object not found" in err.decode().lower():
                    status = False
                    link = "NOT_PRESENT"
                else:
                    status = False
                    # FIX: the f-prefix was missing, so the literal text
                    # "{err.decode('utf-8')}" was stored instead of the error
                    link = f"OTHER_ERROR. {err.decode('utf-8')}"
            else:
                link = out.decode("utf-8").replace("\n", "")
                if link:
                    status = True
                else:
                    status = False
                    link = "EMPTY_RESPONSE"
        except TimeoutExpired:
            self.logger.warning(f"Timeout while getting link for file {remote_path}")
            link = "TIMEOUT"
            status = False
        return status, link

    def get_link(self, fname: str, cache: Dict[str, str], warnings: List[str]) -> None:
        """Get a link for an file name.

        Copy the file to the remote path if it doesn't exist there.

        Args:
            fname: Local filename for which to fetch the link
            cache: Cache where it's checked and stored
            warnings: A shared variable where warnings are appended if any occur
        """
        try:
            start = time.time()
            remote_path = self._remote_path(fname)
            if " " in remote_path:
                # quote paths with spaces for the rclone shell command
                remote_path = f'"{remote_path}"'
            status, link = self.try_get_link(remote_path)
            if not status:
                if link == "NOT_PRESENT":
                    self.logger.warning(f"File {fname} does not exist on remote. Copying")
                    status = self.copy_file(fname)
                    if status:
                        status, link = self.try_get_link(remote_path)
                else:
                    raise ValueError(f"Error {link} for {remote_path}")
            duration = time.time() - start
            if not status:
                warnings.append(f"{fname}")
                self.logger.error(f"Error occurred for file {fname} {link}")
            else:
                self.logger.debug(f"got link {link} for file {fname} in {duration} seconds")
                cache[fname] = link
        except Exception as e:
            self.logger.error(f"Error occured for file {fname} {e}")

    def update_cache(self) -> None:
        """Update the local cache

        For each file on the local machine fetch a shareable link from
        the remote dir.
        """
        if not self.updating:
            self.update_thread = Thread(target=self.update_cache_helper)
            self.update_thread.start()
        else:
            self.logger.error("We are still updating")

    def sync_from_remote(self) -> None:
        """Mirror the remote directory to the local one with ``rclone sync``."""
        self.logger.debug(f"Syncing remote {self.remote_dir} to {self.local_dir}")
        try:
            p = Popen(f"rclone -v sync {self.remote_dir} {self.local_dir}",
                      shell=True, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
        except Exception as e:
            self.logger.error(f"Error occured {e}")

    def copy_from_remote(self) -> None:
        """Copy new files from the remote directory with ``rclone copy``."""
        self.logger.debug(f"Syncing remote {self.remote_dir} to {self.local_dir}")
        try:
            p = Popen(f"rclone -v copy --no-update-modtime {self.remote_dir} {self.local_dir}",
                      shell=True, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
        except Exception as e:
            self.logger.error(f"Error occured {e}")

    def update_cache_helper(self, fix_files: Optional[List[str]] = None) -> None:
        """Worker which fetches links for local files missing from the cache.

        Sets the success events on completion; the cache file is backed up
        to ``<cache_file>.bak`` before being rewritten.

        Args:
            fix_files: Optional list of file names whose links should be
                re-fetched regardless of cache state
        """
        fix_files = fix_files or []  # FIX: was a mutable default argument
        if not self.updating_ev.is_set():
            self.updating_ev.set()
        if self.success_ev.is_set():
            self.success_ev.clear()
        if self.success_with_errors_ev.is_set():
            self.success_with_errors_ev.clear()
        self.logger.info(f"Updating local cache {self.cache_file}")
        try:
            warnings: List[str] = []
            local_files, cache, cached_files = self.read_cache()
            files = list(set(local_files) - set(cached_files))
            if fix_files:
                for f in fix_files:
                    # NOTE(review): cache lines are "path;link" while
                    # fix_files holds bare paths, so this removal may never
                    # match -- confirm intent
                    if f in cache:
                        cache.remove(f)
                files = fix_files
            init_cache_size = len(cache)
            cache_dict: Dict[str, str] = dict(c.split(";") for c in cache)  # type: ignore
            self.logger.info(f"Will try to fetch links for {len(files)} files")
            for f in files:
                if not self.updating_ev.is_set():
                    break  # stop_update() was called
                self.get_link(f, cache_dict, warnings)
            self.logger.info(f"Writing {len(cache_dict) - init_cache_size} links to {self.cache_file}")
            shutil.copyfile(str(self.cache_file), str(self.cache_file) + ".bak")
            with open(self.cache_file, "w") as cf:
                write_list = [";".join(c) for c in cache_dict.items()]
                cf.write("\n".join(write_list))
            self.updating_ev.clear()
            if warnings:
                self.success_with_errors_ev.set()
            else:
                self.success_ev.set()
        except Exception as e:
            self.updating_ev.clear()
            self.logger.error(f"Error {e} while updating cache")
            self.logger.error(f"Overwritten {self.cache_file}.\n" +
                              f"Original file backed up to {self.cache_file}.bak")
/ref-man-py-0.7.1.tar.gz/ref-man-py-0.7.1/ref_man_py/cache.py
0.703957
0.203965
cache.py
pypi
from typing import Dict, List, Optional
import json
import requests
from queue import Queue
from functools import partial
from bs4 import BeautifulSoup
from .q_helper import q_helper


# TODO: There should be a cache of entries
def dict_to_bibtex(bib_dict: Dict[str, str], json_out: bool = False) -> Optional[str]:
    """Convert a bibliography dict to a BibTeX entry string.

    The citation key is ``<last name of first author><year><first title word>``.

    Args:
        bib_dict: Dict with at least "type", "year", "title" and an
            "author"/"authors" field (a string or a list of strings)
        json_out: Return the entry JSON-encoded

    Returns:
        The BibTeX entry, or None if no author field is present.
    """
    temp = bib_dict.copy()
    if "author" in temp:
        k = "author"
    elif "authors" in temp:
        k = "authors"
    else:
        return None
    if isinstance(temp[k], str):
        # FIX: the key was computed here but never assigned, causing a
        # NameError below whenever the author field was a plain string
        key = temp[k].split(" ")[-1].lower() + temp["year"] +\
            temp["title"].split(" ")[0].lower()
    else:
        key = temp[k][0].split(" ")[-1].lower() + temp["year"] +\
            temp["title"].split(" ")[0].lower()
    bib = "@" + temp.pop("type") + "{" + key + "\n"
    for k, v in temp.items():
        if k in {"author", "authors"}:
            # Normalize author names to "Last, First ..." joined with "and"
            if isinstance(v, list):
                authors = [", ".join([_.split(" ")[-1], " ".join(_.split(" ")[:-1])])
                           for _ in v]
                bib += " author" + "={" + " and ".join(authors) + "},\n"
            elif isinstance(v, str):
                bib += " author" + "={" + v + "},\n"
        else:
            bib += " " + k + "={" + v + "},\n"
    bib = bib[:-2]  # drop the trailing ",\n"
    bib += "\n}"
    if json_out:
        return json.dumps(bib)
    else:
        return bib


def arxiv_get(arxiv_id: str) -> str:
    """Fetch details of article with arxiv_id from arxiv api.

    Args:
        arxiv_id: The Arxiv ID of the article

    Returns:
        A JSON-encoded BibTeX entry, or a JSON-encoded error string.
    """
    response = requests.get(f"http://export.arxiv.org/api/query?id_list={arxiv_id}")
    soup = BeautifulSoup(response.content, features="xml")
    entry = soup.find("entry")
    abstract = entry.find("summary").text
    title = entry.find("title").text
    authors = [a.text for a in entry.find_all("author")]
    date = entry.find("published").text
    bib_dict = {"abstract": abstract.replace("\n", " ").strip(),
                "title": title,
                "authors": [a.replace("\n", " ").strip() for a in authors],
                "year": date[:4],
                "url": f"https://arxiv.org/abs/{arxiv_id}",
                "type": "article"}
    if bib_dict:
        return dict_to_bibtex(bib_dict, True) or ""
    else:
        return json.dumps("ERROR RETRIEVING")


def _arxiv_success(query: str, response: requests.Response,
                   content: Dict[str, str]) -> None:
    """Parse a successful arxiv API response into a BibTeX entry in ``content``."""
    soup = BeautifulSoup(response.content, features="xml")
    entry = soup.find("entry")
    abstract = entry.find("summary").text
    title = entry.find("title").text
    authors = [a.text for a in entry.find_all("author")]
    date = entry.find("published").text
    bib_dict = {"abstract": abstract.replace("\n", " ").strip(),
                "title": title,
                "authors": [a.replace("\n", " ").strip() for a in authors],
                "year": date[:4],
                "url": f"https://arxiv.org/abs/{query}",
                "type": "misc"}
    content[query] = dict_to_bibtex(bib_dict) or ""


# FIXME: content has mixed type Dict[str, List[str]] and Dict[str, str]
def _arxiv_no_result(query: str, response: requests.Response,
                     content: Dict[str, List[str]]) -> None:
    """Record a no-result marker for ``query``."""
    content[query] = ["NO_RESULT"]


def _arxiv_error(query: str, response: requests.Response,
                 content: Dict[str, List[str]]) -> None:
    """Record an error marker for ``query``."""
    content[query] = ["ERROR"]


def arxiv_fetch(arxiv_id: str, q: Queue, ret_type: str = "json",
                verbose: bool = False):
    """Fetch the arxiv API response for ``arxiv_id`` and put it on ``q``.

    Args:
        arxiv_id: The Arxiv ID of the article
        q: Queue on which ``(arxiv_id, response_or_error)`` is put
        ret_type: Only "json" is supported; anything else yields "INVALID"
        verbose: Print progress
    """
    if verbose:
        print(f"Fetching for arxiv_id {arxiv_id}\n")
    if ret_type == "json":
        # FIX: the url was missing the f-prefix, so the literal string
        # "{arxiv_id}" was sent to the API instead of the actual id
        response = requests.get(f"http://export.arxiv.org/api/query?id_list={arxiv_id}")
        q.put((arxiv_id, response))
    else:
        q.put((arxiv_id, "INVALID"))


arxiv_helper = partial(q_helper, _arxiv_success, _arxiv_no_result, _arxiv_error)
/ref-man-py-0.7.1.tar.gz/ref-man-py-0.7.1/ref_man_py/arxiv.py
0.494141
0.198316
arxiv.py
pypi
from typing import List, Dict, Optional, Union, Tuple
import os
import re
import operator
from pathlib import Path
import bs4
from bs4 import BeautifulSoup
import requests


class CVF:
    def __init__(self, files_dir, logger):
        """A CVF class to manage CVF links.

        Caches and manages links from open access CVF paper repositories so as
        not to burden it with excessive requests and save time.

        The downloaded html pages are kept in a :code:`files_dir` and pdf links
        extracted for easy search.

        Args:
            files_dir: Files directory. This is where all the files would be kept
            logger: A :class:`logging.Logger` instance
        """
        self.files_dir = files_dir
        self.logger = logger
        self.cvf_url_root = "https://openaccess.thecvf.com"
        # (venue, year) -> parsed conference index page
        self.soups: Dict[Tuple, bs4.Tag] = {}
        # (venue, year) -> pdf hrefs extracted from the index page
        self.cvf_pdf_links: Dict[Tuple, List[str]] = {}
        self._requests_timeout = 5
        # self.load_cvf_files()
        self.load_cvf_pdf_links()

    def _get(self, url: str, **kwargs) -> requests.Response:
        """Get a :code:`url` with sensible defaults

        Args:
            url: The url to fetch
            kwargs: kwargs to pass on to :meth:`requests.get`
        """
        return requests.get(url, timeout=self._requests_timeout, **kwargs)

    def get_pdf_link(self, title: str, venue: str, year: Optional[str]) -> Optional[str]:
        """Get a pdf link if it exists from the CVF files.

        The links are searched with given :code:`title`, :code:`venue`
        and :code:`year`

        Although :code:`year` is optional, it's better to give it for faster
        search.

        Args:
            title: Title of the paper
            venue: Venue where it appeared
            year: Optional year

        Returns:
            A string of title;url if found, otherwise None
        """
        venue = venue.lower()
        if year:
            keys = [(v, y) for v, y in self.cvf_pdf_links
                    if v == venue and y == year]
        else:
            keys = [(v, y) for v, y in self.cvf_pdf_links
                    if v == venue]
            # NOTE(review): when year is not given, all known years for the
            # venue are joined with "," and that combined string is later
            # used to build a single download URL (e.g. "CVPR2018,2019"),
            # which looks wrong -- confirm intended behaviour
            year = ",".join([y for v, y in self.soups if v == venue])
        if not keys and year:
            # Nothing cached: fetch and parse the conference page on demand
            self.logger.debug(f"Fetching page(s) for {venue.upper()}{year}")
            self.download_cvf_page_and_update_soups(venue, year)
            self.save_cvf_pdf_links_and_update(venue, year, self.soups[(venue, year)])
            keys = [(v, y) for v, y in self.soups if v == venue and y == year]
        # maybe_link = self.find_soup(keys, title)
        return self.find_pdf_link(keys, title)

    def read_cvf_pdf_links(self, venue: str, year: str) -> List[str]:
        """Read the saved pdf links file for a (venue, year) pair.

        Args:
            venue: Venue of conference
            year: Year of conference
        """
        fname = self.files_dir.joinpath(f"{venue.upper()}{year}_pdfs")
        with open(fname) as f:
            pdf_links = f.read().split("\n")
        return pdf_links

    def load_cvf_files(self):
        """Load the CVF Soups from HTML files.

        XML parses via :class:`BeautifulSoup` are maintained for easy
        fetching of an article in case it's availble.
        """
        self.cvf_files = [os.path.join(self.files_dir, f)
                          for f in os.listdir(self.files_dir)
                          if re.match(r'^(cvpr|iccv)', f.lower())
                          and not f.endswith("_pdfs")]
        self.logger.debug("Loading CVF soups.")
        for cvf in self.cvf_files:
            if not cvf.endswith("_pdfs"):
                # File names look like CVPR2019 / ICCV2021; split into parts
                match = re.match(r'^(cvpr|iccv)(.*?)([0-9]+)',
                                 Path(cvf).name, flags=re.IGNORECASE)
                if match:
                    venue, _, year = map(str.lower, match.groups())
                    with open(cvf) as f:
                        self.soups[(venue, year)] = BeautifulSoup(f.read(), features="lxml")
                else:
                    self.logger.error(f"Could not load file {cvf}")
        self.logger.debug(f"Loaded conference files {self.soups.keys()}")

    def load_cvf_pdf_links(self):
        """Load the CVF PDF links saved from HTML files.
        """
        self.cvf_pdf_link_files = [os.path.join(self.files_dir, f)
                                   for f in os.listdir(self.files_dir)
                                   if re.match(r'^(cvpr|iccv)', f.lower())
                                   and f.endswith("_pdfs")]
        self.logger.debug("Loading CVF pdf links.")
        for fname in self.cvf_pdf_link_files:
            match = re.match(r'^(cvpr|iccv)(.*?)([0-9]+)',
                             Path(fname).name, flags=re.IGNORECASE)
            if match:
                venue, _, year = map(str.lower, match.groups())
                with open(fname) as f:
                    self.cvf_pdf_links[(venue, year)] = f.read().split("\n")
            else:
                self.logger.error(f"Could not load pdf links from {fname}")
        self.logger.debug(f"Loaded PDF links {self.cvf_pdf_links.keys()}")

    def best_match(self, title: str, matches: List) -> str:
        """Subroutine for finding the best match from regexp matches

        The match with the longest regexp match span is returned

        Args:
            title: Title to match
            matches: List of regexp matches
        """
        if not matches:
            return f"URL Not found for {title}"
        elif len(matches) == 1:
            href = matches[0].group(0)
        else:
            # Sort by span length; the widest match is taken as best
            matches.sort(key=lambda x: operator.abs(operator.sub(*x.span())))
            href = matches[-1].group(0)
        # NOTE(review): root url duplicated here instead of using
        # self.cvf_url_root -- keep the two in sync
        href = "https://openaccess.thecvf.com/" + href.lstrip("/")
        return f"{title};{href}"

    def find_pdf_link_from_soups(self, keys: List[Tuple[str, str]], title: str)\
            -> Optional[str]:
        """Find a possible pdf link from soups for a given title and list of keys

        The keys correspond to the (venue, year) combination.  The match is
        found by a greedy regexp match with first three tokens split on " ".
        The match with the longest span is returned.

        Args:
            keys: A list of (venue, year) tuples
            title: The title to match
        """
        links = []
        for k in keys:
            links.extend(self.soups[k].find_all("a"))
        if links:
            # Greedy match on the first three words of the title
            regexp = ".*" + ".*".join([*filter(None, title.split(" "))][:3]) + ".*\\.pdf$"
            matches = [*filter(None, map(lambda x: re.match(regexp, x["href"],
                                                            flags=re.IGNORECASE)
                                         if "href" in x.attrs else None, links))]
            return self.best_match(title, matches)
        else:
            return None

    def find_pdf_link(self, keys: List[Tuple[str, str]], title: str) -> Optional[str]:
        """Find link from a list of pdf links for given keys and title.

        Similar to :meth:`find_pdf_link_from_soups` but it searches in
        already filtered pdf links.

        Args:
            keys: A list of (venue, year) tuples
            title: The title to match
        """
        links = []
        for k in keys:
            links.extend(self.cvf_pdf_links[k])
        if links:
            regexp = ".*" + ".*".join([*filter(None, title.split(" "))][:3]) + ".*\\.pdf$"
            matches = [*filter(None, map(lambda x: re.match(regexp, x,
                                                            flags=re.IGNORECASE),
                                         links))]
            return self.best_match(title, matches)
        else:
            return None

    def maybe_download_cvf_day_pages(self, response: requests.Response,
                                     venue: str, year: str):
        """Maybe download the pages for each day of the conference

        The CVF links pages are sometimes split into days.  We download all
        of the day pages and concatenate them into one big html for easy
        parsing.

        Args:
            response: An instance of :class:`requests.Response`
            venue: The venue
            year: The year
        """
        soup = BeautifulSoup(response.content, features="lxml")
        links = soup.find_all("a")
        regexp = f"(/)?({venue.upper()}{year}(.py)?).+"
        last_link_attrs = links[-1].attrs
        # NOTE(review): 'href' is indexed here before the "href" in attrs
        # check below -- a last link without href would raise KeyError;
        # confirm against real pages
        venue_match = re.match(regexp, last_link_attrs['href'])
        venue_group = venue_match and venue_match.group(2)
        if "href" in last_link_attrs and venue_group:
            # The page is split into "Day N: <date>" links; fetch each day
            day_links = [*filter(lambda x: re.match(r"Day [0-9]+?: ([0-9-+])", x.text),
                                 soup.find_all("a"))]
            content = []
            for i, dl in enumerate(day_links):
                maybe_matches = re.match(r"Day [0-9]+?: ([0-9-]+)", dl.text)
                if maybe_matches:
                    day = maybe_matches.groups()[0]
                else:
                    raise AttributeError(f"Could not find day {dl.text} in day links {day_links}")
                d_url = f"{self.cvf_url_root}/{venue_group}?day={day}"
                resp = self._get(d_url)
                if not resp.ok:
                    err = f"Status code {response.status_code} for {d_url}"
                    raise requests.HTTPError(err)
                content.append(resp.content)
                self.logger.debug(f"Fetched page {i+1} for {venue.upper()}{year} and {day}")
            # Concatenate all day pages into one soup.
            # NOTE(review): BeautifulSoup("") without a features argument
            # emits a parser warning -- confirm intended parser
            soup_content = BeautifulSoup("")
            for c in content:
                soup_content.extend(BeautifulSoup(c, features="lxml").html)
            return soup_content.decode()
        self.logger.debug(f"Fetched page for {venue.upper()}{year}")
        return response.content.decode()

    def download_cvf_page_and_update_soups(self, venue, year):
        """Download a CVF page and update soups

        If required, pages for each day of the conference are downloaded and
        concatenated.

        Args:
            venue: Venue of conference
            year: Year of conference
        """
        url = f"{self.cvf_url_root}/{venue.upper()}{year}"
        response = self._get(url)
        if response.ok:
            content = self.maybe_download_cvf_day_pages(response, venue, year)
        else:
            err = f"Status code {response.status_code} for {url}"
            raise requests.HTTPError(err)
        # Persist the html so later runs don't refetch it
        fname = self.files_dir.joinpath(f"{venue.upper()}{year}")
        with open(fname, "w") as f:
            f.write(content)
        with open(fname) as f:
            self.soups[(venue.lower(), year)] = BeautifulSoup(content, features="lxml")

    def save_cvf_pdf_links_and_update(self, venue: str, year: str, soup) -> None:
        """Save the pdf links from parsed html and update cvf_pdf_links

        Args:
            venue: Venue of conference
            year: Year of conference
            soup: Html parsed as :class:`BeautifulSoup`
        """
        links = soup.find_all("a")
        pdf_links = [x["href"] for x in links
                     if "href" in x.attrs and x["href"].endswith(".pdf")]
        fname = self.files_dir.joinpath(f"{venue.upper()}{year}_pdfs")
        with open(fname, "w") as f:
            f.write("\n".join(pdf_links))
        self.cvf_pdf_links[(venue, year)] = pdf_links
/ref-man-py-0.7.1.tar.gz/ref-man-py-0.7.1/ref_man_py/cvf.py
0.830181
0.271813
cvf.py
pypi
from typing import List, Dict, Optional, Union, Tuple, Any, Callable import os import json import requests from pathlib import Path Cache = Dict[str, Dict[str, str]] assoc = [(x, i) for i, x in enumerate(["acl", "arxiv", "corpus", "doi"])] class FilesCache: """A files based Cache for Semantic Scholar data. The cache is a Dictionary of type :code:`Cache` where they keys are one of `["acl", "arxiv", "corpus", "doi"]` and values are a dictionary of that id type and the associated ss_id. Each ss_id is stored as a file with the same name as the ss_id and contains the data for the entry in JSON format. NOTE: This class is now deprecated and superceded by :class:`SemanticScholar` which ties includes the Semantic Scholar api and the cache with the new graph API. Args: root: root directory where all the metadata and the files data will be kept """ def __init__(self, root: Path): if not root.exists(): raise FileExistsError(f"{root} doesn't exist") self._root = root self._cache: Cache = {} self._rev_cache: Dict[str, List[str]] = {} self._files: List[str] = [*filter(lambda x: not x.endswith("~") and not x == "metadata", os.listdir(self._root))] def load(self): """Load the Semantic Scholar metadata from the disk. The cache is indexed as a file in :code:`metadata` and the file data itself is named as the Semantic Scholar :code:`corpusId` for the paper. We load metadata on startup and fetch the rest as needed. 
Args: data_dir: Directory where the cache is located """ with open(os.path.join(self._root, "metadata")) as f: _cache = [*filter(None, f.read().split("\n"))] self._cache = {"acl": {}, "doi": {}, "arxiv": {}, "corpus": {}} self._rev_cache = {} dups = False for _ in _cache: c = _.split(",") if c[-1] in self._rev_cache: dups = True self._rev_cache[c[-1]] = [x or y for x, y in zip(self._rev_cache[c[-1]], c[:-1])] else: self._rev_cache[c[-1]] = c[:-1] for key, ind in assoc: if c[ind]: self._cache[key][c[ind]] = c[-1] print(f"Loaded SS cache {len(self._rev_cache)} entries and " + f"{sum(len(x) for x in self._cache.values())} keys.") if dups: print("There were duplicates. Writing new metadata") self._dump_metadata() def _dump_metadata(self): """Dump metadata to disk""" with open(self._root.joinpath("metadata"), "w") as f: f.write("\n".join([",".join([*v, k]) for k, v in self._rev_cache.items()])) print("Dumped metadata") def put(self, acl_id: str, data: Dict[str, str]): """Update entry, save paper data and Semantic Scholar cache to disk. We read and write data for individual papers instead of one big json object. 
Args: data: data for the paper acl_id: Optional ACL Id for the paper """ with open(os.path.join(self._root, data["paperId"]), "w") as f: json.dump(data, f) c = [acl_id if acl_id else "", data["arxivId"] if data["arxivId"] else "", str(data["corpusId"]), data["doi"] if data["doi"] else "", data["paperId"]] for key, ind in assoc: if c[ind]: self._cache[key][c[ind]] = c[-1] existing = self._rev_cache.get(c[-1], None) if existing: self._rev_cache[c[-1]] = [x or y for x, y in zip(self._rev_cache[c[-1]], c[:-1])] else: self._rev_cache[c[-1]] = c[:-1] with open(os.path.join(self._root, "metadata"), "a") as f: f.write("\n" + ",".join([*self._rev_cache[c[-1]], c[-1]])) print("Updated metadata") def get(self, id_type: str, ID: str, force: bool) -> Union[str, bytes]: """Get semantic scholar paper details The Semantic Scholar cache is checked first and if it's a miss then the details are fetched from the server and stored in the cache. Args: id_type: type of the paper identifier one of `['ss', 'doi', 'mag', 'arxiv', 'acl', 'pubmed', 'corpus']` ID: paper identifier force: Force fetch from Semantic Scholar server, ignoring cache """ urls = {"ss": f"https://api.semanticscholar.org/v1/paper/{ID}", "doi": f"https://api.semanticscholar.org/v1/paper/{ID}", "mag": f"https://api.semanticscholar.org/v1/paper/MAG:{ID}", "arxiv": f"https://api.semanticscholar.org/v1/paper/arXiv:{ID}", "acl": f"https://api.semanticscholar.org/v1/paper/ACL:{ID}", "pubmed": f"https://api.semanticscholar.org/v1/paper/PMID:{ID}", "corpus": f"https://api.semanticscholar.org/v1/paper/CorpusID:{ID}"} if id_type not in urls: return json.dumps("INVALID ID TYPE") elif id_type == "ss": ssid: Optional[str] = ID elif id_type in {"doi", "acl", "arxiv", "corpus"}: ssid = self._cache[id_type].get(ID, None) if not ssid or force: fetch_from_disk = False else: fetch_from_disk = True return self.fetch(fetch_from_disk, ssid, urls, id_type, ID, force) def fetch(self, fetch_from_disk: bool, ssid: Optional[str], urls: Dict[str, 
str], id_type: str, ID: str, force: bool): """Subroutine to fetch from either disk or Semantic Scholar. Args: fetch_from_disk: Fetch from disk if True ssid: Optional Semantic Scholar ID urls: A dictionary of urls for each ID type id_type: type of the paper identifier one of `['ss', 'doi', 'mag', 'arxiv', 'acl', 'pubmed', 'corpus']` ID: paper identifier force: Force fetch from Semantic Scholar server if True, ignoring cache """ if fetch_from_disk and ssid: print(f"Fetching {ssid} from disk") data_file = self._root.joinpath(ssid) if data_file.exists(): with open(data_file, "rb") as f: data = f.read() return data else: print(f"File for {ssid} not present on disk. Will fetch.") url = f"https://api.semanticscholar.org/v1/paper/{ssid}" return self.fetch_from_ss(url) else: acl_id = "" if id_type == "acl": acl_id = ID url = urls[id_type] + "?include_unknown_references=true" if force and ssid: print(f"Forced Fetching for {ssid}") else: print(f"Data not in cache for {id_type}, {ID}. Fetching") return self.fetch_from_ss(url, acl_id) def fetch_from_ss(self, url, acl_id=""): """Fetch paper data from SS for url. Args: url: Full url of paper data """ response = requests.get(url) if response.status_code == 200: self.put(acl_id, json.loads(response.content)) return response.content # already JSON else: print(f"Server error. Could not fetch") return json.dumps(None)
/ref-man-py-0.7.1.tar.gz/ref-man-py-0.7.1/ref_man_py/files_cache.py
0.853303
0.423577
files_cache.py
pypi
import abc import numpy as np import tensorflow as tf from refactored_DC.settings import CLASSIFICATION_KEY, REGRESSION_KEY, EPSILON class DataLoader(object, metaclass=abc.ABCMeta): @abc.abstractmethod def next_batch(self, batch_size): raise NotImplementedError('users must define next_batch to create a data loader') @abc.abstractmethod def get_epochs(self, batch_size): raise NotImplementedError('users must define get_epochs to create a data loader') @abc.abstractmethod def reset_cursor(self, batch_size): raise NotImplementedError('users must define reset_cursor to create a data loader') class AugmentedDataLoader(object, metaclass=abc.ABCMeta): @abc.abstractmethod def next_batch(self, batch_size): raise NotImplementedError('users must define next_batch to create an augmented data loader') @abc.abstractmethod def activate_augmentation(self): raise NotImplementedError('users must define activate_augmentation to create an augmented data loader') @abc.abstractmethod def deactivate_augmentation(self): raise NotImplementedError('users must define activate_augmentation to create an augmented data loader') @abc.abstractmethod def get_epochs(self, batch_size): raise NotImplementedError('users must define get_epochs to create a data loader') @abc.abstractmethod def reset_cursor(self, batch_size): raise NotImplementedError('users must define reset_cursor to create a data loader') class DataLoaderFromArrays(DataLoader): def __init__(self, features, targets, homogeneous=True, problem_type=CLASSIFICATION_KEY, shuffle=True, one_hot=True, normalization=True, target_scaling=True, target_range=[0.0,1.0]): self.curr_pos = 0 self.target_divider = None self.rows_count = features.shape[0] self.homogeneous = homogeneous self.to_shuffle = shuffle self.problem_type = problem_type self.normalization = normalization self.target_scaling = target_scaling self.target_range = target_range self.one_hot = one_hot self.features = self.normalize(features) self.targets = self.preprocess(targets) def 
next_batch(self, batch_size): if self.curr_pos+batch_size >= self.rows_count: batch = self.features[self.curr_pos:self.rows_count], self.targets[self.curr_pos:self.rows_count] self.curr_pos = 0 if self.to_shuffle: self.shuffle() return batch batch = self.features[self.curr_pos:self.curr_pos+batch_size], self.targets[self.curr_pos:self.curr_pos+batch_size] self.curr_pos += batch_size return batch def shuffle(self): indices = np.arange(0, self.rows_count) np.random.shuffle(indices) self.features = self.features[indices] self.targets = self.targets[indices] def deprocess(self, perf): if self.problem_type == REGRESSION_KEY and self.target_scaling: deprocessed_perf = (perf/(self.target_range[1]-self.target_range[0]))*self.target_divider return deprocessed_perf else: return perf def preprocess(self, targets): if self.problem_type == REGRESSION_KEY and self.target_scaling: mi = targets.min(axis=0) divider = targets.max(axis=0) - mi if isinstance(divider, np.ndarray): divider[divider==0.0] = EPSILON else: divider = EPSILON if divider==0.0 else divider targets = self.target_range[0] + np.float32((targets-mi)/divider)*(self.target_range[1]-self.target_range[0]) self.target_divider = divider elif self.one_hot: onehot_targets = np.zeros((self.rows_count, targets.max()+1)) onehot_targets[np.arange(self.rows_count),targets] = 1 targets = onehot_targets return targets def normalize(self, data): if not self.normalization: return data if self.homogeneous: mi = data.min() divider = data.max() - mi divider = EPSILON if divider==0.0 else divider else: mi = data.min(axis=0) divider = data.max(axis=0) - mi if isinstance(divider, np.ndarray): divider[divider==0.0] = EPSILON else: divider = EPSILON if divider==0.0 else divider return np.float32((data-mi)/divider) def get_epochs(self, batch_size): if self.rows_count % batch_size != 0: return self.rows_count // batch_size + 1 else: return self.rows_count // batch_size def reset_cursor(self): self.curr_pos = 0
/refactored_DC-0.0.4.tar.gz/refactored_DC-0.0.4/refactored_DC/data.py
0.756717
0.287974
data.py
pypi
import scipy import numpy as np from itertools import groupby from functools import reduce from operator import itemgetter from scipy.stats import mannwhitneyu def are_significantly_different(sample_1, sample_2, alpha = 0.05): stat, p = mannwhitneyu(sample_1, sample_2) return p <= alpha def empirical_f_test(data, ref_std, F_critical = 1.5): var_1 = np.std(data)**2 var_2 = (ref_std)**2 F = var_1 / var_2 if var_1 > var_2 else var_2 / var_1 return F, F <= F_critical def pure_f_test(data, ref_std, alpha=0.1): def _F_critical(alpha): #http://socr.ucla.edu/Applets.dir/F_Table.html if alpha == 0.1: return 2.70554 elif alpha == 0.05: return 3.8415 elif alpha == 0.025: return 5.0239 elif alpha == 0.01: return 6.635 var_1 = np.std(data)**2 var_2 = (ref_std)**2 F = var_1 / var_2 if var_1 > var_2 else var_2 / var_1 return F, F <= _F_critical(alpha) def smoothness(data): data_size = len(data) if data_size < 1: return 1.0 ratios = (data[1:]/data[:-1]) rate_changes = np.abs(np.diff(ratios > 1.)) rate_changes_count = np.count_nonzero(rate_changes) smoothness = (data_size-rate_changes_count)/data_size return smoothness def compute_ro_B(activations, min_out, max_out, bins_count): bin_size = (max_out-min_out)/bins_count bins = np.arange(min_out, max_out, bin_size).tolist() divided_values = np.digitize(activations, bins) data = [(neu_act,bin_v) for neu_act,bin_v in zip(divided_values,activations)] data = list(zip(divided_values,activations)) grouped_data = [list(map(lambda x:x[1], group)) for _, group in groupby(sorted(data), key=itemgetter(0))] f_g = [(len(values),np.mean(values)) for values in grouped_data] f_g_prime = np.array([(f_b,np.abs(2*(g_b-min_out)/(max_out-min_out)-1)*f_b) for f_b,g_b in f_g]) return f_g_prime[:,1].sum() / f_g_prime[:,0].sum() # Copyright 2018 Google Inc. # https://github.com/google-research/google-research/blob/master/representation_similarity/Demo.ipynb def gram_linear(x): """Compute Gram (kernel) matrix for a linear kernel. 
Args: x: A num_examples x num_features matrix of features. Returns: A num_examples x num_examples Gram matrix of examples. """ return x.dot(x.T) def gram_rbf(x, threshold=1.0): """Compute Gram (kernel) matrix for an RBF kernel. Args: x: A num_examples x num_features matrix of features. threshold: Fraction of median Euclidean distance to use as RBF kernel bandwidth. (This is the heuristic we use in the paper. There are other possible ways to set the bandwidth; we didn't try them.) Returns: A num_examples x num_examples Gram matrix of examples. """ dot_products = x.dot(x.T) sq_norms = np.diag(dot_products) sq_distances = -2 * dot_products + sq_norms[:, None] + sq_norms[None, :] sq_median_distance = np.median(sq_distances) return np.exp(-sq_distances / (2 * threshold ** 2 * sq_median_distance)) def center_gram(gram, unbiased=False): """Center a symmetric Gram matrix. This is equvialent to centering the (possibly infinite-dimensional) features induced by the kernel before computing the Gram matrix. Args: gram: A num_examples x num_examples symmetric matrix. unbiased: Whether to adjust the Gram matrix in order to compute an unbiased estimate of HSIC. Note that this estimator may be negative. Returns: A symmetric matrix with centered columns and rows. """ if not np.allclose(gram, gram.T): raise ValueError('Input must be a symmetric matrix.') gram = gram.copy() if unbiased: # This formulation of the U-statistic, from Szekely, G. J., & Rizzo, M. # L. (2014). Partial distance correlation with methods for dissimilarities. # The Annals of Statistics, 42(6), 2382-2412, seems to be more numerically # stable than the alternative from Song et al. (2007). 
n = gram.shape[0] np.fill_diagonal(gram, 0) means = np.sum(gram, 0, dtype=np.float64) / (n - 2) means -= np.sum(means) / (2 * (n - 1)) gram -= means[:, None] gram -= means[None, :] np.fill_diagonal(gram, 0) else: means = np.mean(gram, 0, dtype=np.float64) means -= np.mean(means) / 2 gram -= means[:, None] gram -= means[None, :] return gram def cka(gram_x, gram_y, debiased=False): """Compute CKA. Args: gram_x: A num_examples x num_examples Gram matrix. gram_y: A num_examples x num_examples Gram matrix. debiased: Use unbiased estimator of HSIC. CKA may still be biased. Returns: The value of CKA between X and Y. """ gram_x = center_gram(gram_x, unbiased=debiased) gram_y = center_gram(gram_y, unbiased=debiased) # Note: To obtain HSIC, this should be divided by (n-1)**2 (biased variant) or # n*(n-3) (unbiased variant), but this cancels for CKA. scaled_hsic = gram_x.ravel().dot(gram_y.ravel()) normalization_x = np.linalg.norm(gram_x) normalization_y = np.linalg.norm(gram_y) return scaled_hsic / (normalization_x * normalization_y) def _debiased_dot_product_similarity_helper( xty, sum_squared_rows_x, sum_squared_rows_y, squared_norm_x, squared_norm_y, n): """Helper for computing debiased dot product similarity (i.e. linear HSIC).""" # This formula can be derived by manipulating the unbiased estimator from # Song et al. (2007). return ( xty - n / (n - 2.) * sum_squared_rows_x.dot(sum_squared_rows_y) + squared_norm_x * squared_norm_y / ((n - 1) * (n - 2))) def feature_space_linear_cka(features_x, features_y, debiased=False): """Compute CKA with a linear kernel, in feature space. This is typically faster than computing the Gram matrix when there are fewer features than examples. Args: features_x: A num_examples x num_features matrix of features. features_y: A num_examples x num_features matrix of features. debiased: Use unbiased estimator of dot product similarity. CKA may still be biased. Note that this estimator may be negative. Returns: The value of CKA between X and Y. 
""" features_x = features_x - np.mean(features_x, 0, keepdims=True) features_y = features_y - np.mean(features_y, 0, keepdims=True) dot_product_similarity = np.linalg.norm(features_x.T.dot(features_y)) ** 2 normalization_x = np.linalg.norm(features_x.T.dot(features_x)) normalization_y = np.linalg.norm(features_y.T.dot(features_y)) if debiased: n = features_x.shape[0] # Equivalent to np.sum(features_x ** 2, 1) but avoids an intermediate array. sum_squared_rows_x = np.einsum('ij,ij->i', features_x, features_x) sum_squared_rows_y = np.einsum('ij,ij->i', features_y, features_y) squared_norm_x = np.sum(sum_squared_rows_x) squared_norm_y = np.sum(sum_squared_rows_y) dot_product_similarity = _debiased_dot_product_similarity_helper( dot_product_similarity, sum_squared_rows_x, sum_squared_rows_y, squared_norm_x, squared_norm_y, n) normalization_x = np.sqrt(_debiased_dot_product_similarity_helper( normalization_x ** 2, sum_squared_rows_x, sum_squared_rows_x, squared_norm_x, squared_norm_x, n)) normalization_y = np.sqrt(_debiased_dot_product_similarity_helper( normalization_y ** 2, sum_squared_rows_y, sum_squared_rows_y, squared_norm_y, squared_norm_y, n)) if (normalization_x * normalization_y) == 0.: return 0. return dot_product_similarity / (normalization_x * normalization_y)
/refactored_DC-0.0.4.tar.gz/refactored_DC-0.0.4/refactored_DC/metrics.py
0.837885
0.42662
metrics.py
pypi
import collections import numpy as np import tensorflow as tf import scipy.stats as stats from tensorflow.python.training.basic_session_run_hooks import _as_graph_element from refactored_DC.settings import CLASSIFICATION_KEY, REGRESSION_KEY import refactored_DC.utils as utils class InputData: def __init__(self, data, problem_type): self.homogeneous = data.homogeneous self.n_repeats = 300 self.sample_size = 128 self.sampled_data = self.sampling_data(data.train_loader) self.problem_type = problem_type self.extract_meta_data() def sampling_data(self, data_loader): sampled_data = {} data_features, data_targets = data_loader.next_batch(self.n_repeats * self.sample_size) sampled_data['features'] = data_features sampled_data['targets'] = data_targets return sampled_data def get_sample(self, sample_size): features, targets = self.sampled_data['features'], self.sampled_data['targets'] if self.problem_type == CLASSIFICATION_KEY: N_labels = self.targets_metadata['labels'] if self.targets_metadata['onehot']: labels = np.argmax(targets, axis=1) else: labels = np.squeeze(targets) N_per_class = int(sample_size / N_labels) for label_idx in range(N_labels): lbl_indices = np.argwhere(labels == label_idx).flatten() selected_indices = np.random.choice(lbl_indices, N_per_class) if label_idx == 0: batch_x, batch_y = features[selected_indices], targets[selected_indices] else: batch_x = np.concatenate([batch_x, features[selected_indices]], axis=0) batch_y = np.concatenate([batch_y, targets[selected_indices]], axis=0) if len(batch_y.shape) == 1: batch_y = batch_y.reshape(batch_y.shape[0], 1) elif self.problem_type == REGRESSION_KEY: selected_indices = np.random.choice(np.arange(features.shape[0]), sample_size) batch_x, batch_y = features[selected_indices], targets[selected_indices] return batch_x, batch_y def extract_meta_data(self): if self.homogeneous: self.features_metadata = utils.reduce_data(self.sampled_data['features'], reductions=['mean', 'std', 'max', 'min']) else: 
self.features_metadata = utils.reduce_data(self.sampled_data['features'], reductions=['mean', 'std', 'max', 'min'], axis=0) targets = self.sampled_data['targets'] if self.problem_type == CLASSIFICATION_KEY: self.targets_metadata = {} if targets.shape[1] == 1: self.targets_metadata['count'] = 1 self.targets_metadata['labels'] = 2 self.targets_metadata['onehot'] = False labels_probas = np.zeros(2) labels_probas[0] = np.mean(1.0 - targets) labels_probas[1] = np.mean(targets) else: self.targets_metadata['count'] = targets.shape[1] self.targets_metadata['labels'] = targets.shape[1] self.targets_metadata['onehot'] = True labels_probas = np.zeros(targets.shape[1]) labels_probas = np.mean(targets, axis=0) perplexity = np.exp(stats.entropy(labels_probas)) self.targets_metadata['probas'] = labels_probas self.targets_metadata['balance'] = (perplexity - 1) / (self.targets_metadata['labels'] - 1) elif self.problem_type == REGRESSION_KEY: self.targets_metadata = utils.reduce_data(targets, reductions=['mean', 'std', 'max', 'min'], axis=0) self.targets_metadata['count'] = targets.shape[1] if len(targets.shape) == 2 else 1 self.targets_metadata['labels'] = None self.targets_metadata['max_abs_greater_than_one'] = np.abs(self.targets_metadata['max']) > 1.0 self.targets_metadata['can_be_negative'] = self.targets_metadata['min'] < 0 class DNNState: def __init__(self, model, buff_scale): self.model = model self.buff_scale = buff_scale self.loss_data = [] self.perf_data = [] self.weights_reductions = dict() def init_or_reset(self, batch_size, act_dict): self.acts_data = self.init_acts_tensors_and_data(batch_size, act_dict) self.weights_gradients = self.init_gradients_tensors() self.weights = dict() self.weights_reductions = {weight_name: collections.deque(maxlen=self.buff_scale) \ for weight_name in self.weights.keys()} self.gradients_reductions = {weight_name: collections.deque(maxlen=self.buff_scale) \ for weight_name in self.model.weights.keys()} self.biases_reductions = {} if 
self.model.biases: # In case, there is no bias self.biases_reductions = {bias_name: collections.deque(maxlen=self.buff_scale) \ for bias_name in self.model.biases.keys()} def init_acts_tensors_and_data(self, batch_size, act_dict): acts_data = {} for act_name, act_tensor in act_dict.items(): dims = [int(dim) for dim in act_tensor.get_shape()[1:]] buffer_size = self.buff_scale * batch_size acts_data[act_name] = np.zeros(shape=(buffer_size, *dims)) return acts_data def init_gradients_tensors(self): names = list(self.model.weights.keys()) tensors = list(self.model.weights.values()) gradients = tf.gradients(self.model.loss, tensors) return {wn: wg for wn, wg in list(zip(names, gradients))}
/refactored_DC-0.0.4.tar.gz/refactored_DC-0.0.4/refactored_DC/metadata.py
0.675551
0.293474
metadata.py
pypi
from abc import ABC, abstractmethod from enum import Enum from refal.constants import * class AST(object): def __init__(self, functions, is_file_type=False, default_functions=None): if not is_file_type: # Для доступности default-функций: if default_functions is None: default_functions = [Extern(name) for name in DEFAULT_FUNCTIONS] # default_functions = [] self.functions = [*default_functions, *functions] else: self.functions = functions def __str__(self): return str("\n".join(list(map(str, self.functions)))) def clone(self): return AST([function.clone() for function in self.functions if isinstance(function, Definition)], False, []) class Function(ABC): def __init__(self, name, pos): self.name = name self.pos = pos def __eq__(self, other): return isinstance(other, Function) and self.name == other.name @abstractmethod def __str__(self): return self.name @abstractmethod def clone(self): raise NotImplementedError("Can't clone abstract class") class Extern(Function): def __init__(self, name, pos=None): super(Extern, self).__init__(name, pos) def __eq__(self, other): return isinstance(other, Extern) and self.name == other.name def __str__(self): return "$EXTERN " + self.name def clone(self): return Extern(self.name, self.pos.clone()) class Definition(Function): def __init__(self, name, pos, is_entry=False, sentences=None): super(Definition, self).__init__(name, pos) if sentences is None: sentences = [] self.is_entry = is_entry self.sentences = sentences def __eq__(self, other): return isinstance(other, Definition) and self.name == other.name def __str__(self): return self.name + " {\n" + ";\n".join(list(map(str, self.sentences))) + ";\n}" def clone(self): return Definition(self.name, self.pos.clone(), self.is_entry, [sentences.clone() for sentences in self.sentences]) class DefinitionType(Function): def __init__(self, name, pattern, result, pos): super(DefinitionType, self).__init__(name, pos) self.pattern = pattern self.result = result def __eq__(self, other): return 
isinstance(other, DefinitionType) and self.name == other.name def __str__(self): return self.name + " " + str(self.pattern) + " = " + str(self.result) def clone(self): return DefinitionType(self.name, self.pattern.clone(), self.result.clone(), self.pos.clone()) class Sentence(object): def __init__(self, pattern, conditions, result, block, pos=None): self.pattern = pattern self.conditions = conditions self.result = result self.block = block self.pos = pos self.has_call = False self.no_substitution = False def __eq__(self, other): return isinstance(other, Sentence) \ and self.pattern == other.pattern \ and self.conditions == other.conditions \ and self.result == other.result \ and self.block == other.block def __str__(self): result_str = "\t" + str(self.pattern) if self.conditions: result_str += (", " + ",\t".join(list(map(str, self.conditions[::-1])))) if self.block: result_str += (", " + str(self.result)) if self.block: result_str += (" :\n\t{\n\t\t" + ";\n\t\t".join(list(map(str, self.block))) + ";\n\t}") if self.result is not None and not self.block: result_str += (" = " + str(self.result)) return result_str def clone(self): sentence_copy = Sentence(self.pattern.clone(), [condition.clone() for condition in self.conditions], self.result.clone(), [block.clone() for block in self.block], self.pos.clone()) sentence_copy.has_call = self.has_call sentence_copy.no_substitution = self.no_substitution return sentence_copy class Condition(object): def __init__(self, result, pattern): self.result = result self.pattern = pattern def __str__(self): return str(self.result) + " : " + str(self.pattern) def clone(self): return Condition(self.result.clone(), self.pattern.clone()) class Expression(object): def __init__(self, terms): self.terms = terms def __eq__(self, other): return isinstance(other, Expression) and self.terms == other.terms def __hash__(self): return hash(self.__str__()) def __str__(self): return " ".join(list(map(str, self.terms))) def clone(self): return 
Expression([term.clone() for term in self.terms]) class Term(ABC): def __init__(self, value): self.value = value @abstractmethod def __str__(self): return str(self.value) @abstractmethod def clone(self): raise NotImplementedError("Can't clone abstract class") class Char(Term): def __init__(self, value): super(Char, self).__init__(value) def __eq__(self, other): return isinstance(other, Char) and self.value == other.value def __str__(self): return super(Char, self).__str__() def clone(self): return Char(self.value) class Macrodigit(Term): def __init__(self, value): super(Macrodigit, self).__init__(value) def __eq__(self, other): return isinstance(other, Macrodigit) and self.value == other.value def __str__(self): return super(Macrodigit, self).__str__() def clone(self): return Macrodigit(self.value) class CompoundSymbol(Term): def __init__(self, value): super(CompoundSymbol, self).__init__(value) def __eq__(self, other): return isinstance(other, CompoundSymbol) and self.value == other.value def __str__(self): return super(CompoundSymbol, self).__str__() def clone(self): return CompoundSymbol(self.value) class StructuralBrackets(Term): def __init__(self, value): super(StructuralBrackets, self).__init__(value) def __eq__(self, other): return isinstance(other, StructuralBrackets) and self.value == other.value def __str__(self): return "(" + " ".join(list(map(str, self.value))) + ")" def clone(self): return StructuralBrackets([value.clone() for value in self.value]) class CallBrackets(Term): def __init__(self, func_name, pos, content): super(CallBrackets, self).__init__(func_name) self.pos = pos self.content = content def __str__(self): return "<" + self.value + " " + " ".join(list(map(str, self.content))) + ">" def clone(self): return CallBrackets(self.value, self.pos.clone(), [content.clone() for content in self.content]) class Type(Enum): s = 1 t = 2 e = 3 class Variable(Term): def __init__(self, value, type_variable, pos, index=-1, sentence_index=-1): 
super(Variable, self).__init__(value) self.type_variable = type_variable self.index = index self.pos = pos self.sentence_index = sentence_index def __eq__(self, other): return isinstance(other, Variable) and self.type_variable == other.type_variable and self.index == other.index def __str__(self): if self.type_variable == Type.s: return "s." + str(self.index) elif self.type_variable == Type.t: return "t." + str(self.index) elif self.type_variable == Type.e: return "e." + str(self.index) def clone(self): return Variable(self.value, self.type_variable, None if self.pos is None else self.pos.clone(), self.index, self.sentence_index)
/refalchecker-2.2.0rc1.tar.gz/refalchecker-2.2.0rc1/refal/ast.py
0.785432
0.264376
ast.py
pypi
from abc import ABC, abstractmethod from enum import Enum class DomainTag(Enum): Keyword = 1 Ident = 2 Number = 3 Variable = 4 Composite_symbol = 5 Characters = 6 Mark_sign = 7 Left_bracket = 8 Eop = 9 Unknown = 10 class Token(ABC): def __init__(self, tag, coords, value): self.tag = tag self.coords = coords self.value = value def __eq__(self, other): return self.tag == other.tag and self.value == other.value @abstractmethod def __str__(self): return str(self.coords) class KeywordToken(Token): def __init__(self, value, coords=None): super(KeywordToken, self).__init__(DomainTag.Keyword, coords, value) def __str__(self): return "Extern " + super(KeywordToken, self).__str__() + ": " + self.value class IdentToken(Token): def __init__(self, value, coords=None): super(IdentToken, self).__init__(DomainTag.Ident, coords, value) def __str__(self): return "Ident " + super(IdentToken, self).__str__() + ": " + self.value class VariableToken(Token): def __init__(self, value, coords=None): super(VariableToken, self).__init__(DomainTag.Variable, coords, value) def __str__(self): return "Variable " + super(VariableToken, self).__str__() + ": " + self.value class CompositeSymbolToken(Token): def __init__(self, value, coords=None): super(CompositeSymbolToken, self).__init__(DomainTag.Composite_symbol, coords, value) def __str__(self): return "CompositeSymbol " + super(CompositeSymbolToken, self).__str__() + ": " + self.value class CharacterToken(Token): def __init__(self, value, coords=None): super(CharacterToken, self).__init__(DomainTag.Characters, coords, value) def __str__(self): return "Characters " + super(CharacterToken, self).__str__() + ": " + self.value class MarkSignToken(Token): def __init__(self, value, coords=None): super(MarkSignToken, self).__init__(DomainTag.Mark_sign, coords, value) def __str__(self): return "Sign " + super(MarkSignToken, self).__str__() + ": " + self.value class LeftBracketToken(Token): def __init__(self, value, coords=None): 
super(LeftBracketToken, self).__init__(DomainTag.Left_bracket, coords, value) def __str__(self): return "Left Bracket: " + super(LeftBracketToken, self).__str__() + ": " + self.value class NumberToken(Token): def __init__(self, value, coords=None): super(NumberToken, self).__init__(DomainTag.Number, coords, value) def __str__(self): return "Number " + super(NumberToken, self).__str__() + ": " + str(self.value) class UnknownToken(Token): def __init__(self, value, coords=None): super(UnknownToken, self).__init__(DomainTag.Unknown, coords, value) def __str__(self): return super(UnknownToken, self).__str__() + "= " + self.value class EopToken(Token): def __init__(self, coords=None): super(EopToken, self).__init__(DomainTag.Eop, coords, "") def __str__(self): return ""
/refalchecker-2.2.0rc1.tar.gz/refalchecker-2.2.0rc1/refal/tokens.py
0.881066
0.209712
tokens.py
pypi
class Fragment(object):
    """A span of source text delimited by two Positions."""

    def __init__(self, starting, following):
        self.starting = starting    # Position of the first character
        self.following = following  # Position just past (or at) the end

    def __str__(self):
        return str(self.starting) + "-" + str(self.following)

    def clone(self):
        # Deep copy: clones both Positions so later cursor movement
        # does not mutate this fragment.
        return Fragment(self.starting.clone(), self.following.clone())

    def equals(self, other):
        return isinstance(other, Fragment) \
            and self.starting.equals(other.starting) \
            and self.following.equals(other.following)


class Position(object):
    """A cursor into ``text`` tracking line, column (``pos``) and absolute
    character ``index``.

    The object is its own iterator: ``next(p)`` advances *in place* and
    returns the same object, raising StopIteration at end of text.
    NOTE(review): defining __eq__ without __hash__ makes Position
    unhashable — confirm that is intended.
    """

    def __init__(self, text, line=1, pos=1, index=0):
        self.text = text
        self.line = line    # 1-based line number
        self.pos = pos      # 1-based column within the line
        self.index = index  # 0-based offset into text

    def __eq__(self, other):
        # Positions over the same text compare by absolute offset only.
        return self.index == other.index

    def __lt__(self, other):
        return self.index < other.index

    def __str__(self):
        return "(" + str(self.line) + "," + str(self.pos) + ")"

    def is_eof(self):
        return self.index == len(self.text)

    def cp(self):
        # Code point under the cursor, or -1 at end of text.
        return -1 if self.index == len(self.text) else ord(self.text[self.index])

    def letter(self):
        # Character under the cursor, or "" at/after end of text.
        return "" if self.index >= len(self.text) else self.text[self.index]

    def read(self, n):
        # Peek at the next n characters without moving this cursor:
        # a throwaway copy is advanced instead.
        cur = Position(self.text, self.line, self.pos, self.index)
        result_str = ""
        for i in range(0, n):
            if cur is not None:
                result_str += cur.letter()
                # next(cur, None) yields None once the copy is exhausted.
                cur = next(cur, None)
        return result_str

    def __iter__(self):
        return self

    def is_white_space(self):
        return self.index != len(self.text) and self.letter().isspace()

    def is_letter(self):
        return self.index != len(self.text) and self.letter().isalpha()

    def is_digit(self):
        return self.index != len(self.text) and self.letter().isdigit()

    def is_letter_or_digit(self):
        return self.index != len(self.text) and self.letter().isalnum()

    def is_decimal_digit(self):
        # ASCII 0-9 only, unlike isdigit() which accepts Unicode digits.
        return self.index != len(self.text) and "0" <= self.text[self.index] <= "9"

    def is_latin_letter(self):
        # ASCII a-z / A-Z only, unlike isalpha().
        return self.index != len(self.text) and ("a" <= self.text[self.index] <= "z"
                                                 or "A" <= self.text[self.index] <= "Z")

    def is_new_line(self):
        # End of text counts as a newline (callers guard with has_next()).
        if self.index == len(self.text):
            return True
        # A lone "\r" is a newline only when followed by "\n" (CRLF).
        if self.text[self.index] == "\r" and self.index + 1 < len(self.text):
            return self.text[self.index+1] == "\n"
        return self.text[self.index] == "\n"

    def __next__(self):
        # Advance one character in place, updating line/pos bookkeeping.
        # NOTE(review): nesting reconstructed from whitespace-mangled
        # source — for CRLF the "\r" is skipped here and the trailing
        # index += 1 consumes the "\n"; confirm against upstream.
        if not self.has_next():
            raise StopIteration
        elif self.is_new_line():
            if self.text[self.index] == "\r":
                self.index += 1
            self.line += 1
            self.pos = 1
        else:
            self.pos += 1
        self.index += 1
        return self

    def has_next(self):
        return self.index < len(self.text)

    def clone(self):
        return Position(self.text, self.line, self.pos, self.index)

    def equals(self, other):
        return isinstance(other, Position) and self.index == other.index
/refalchecker-2.2.0rc1.tar.gz/refalchecker-2.2.0rc1/refal/position.py
0.737064
0.327413
position.py
pypi
# RefCliq This package is a **full rewrite** of [Neal Caren's RefCliq](https://github.com/nealcaren/RefCliq). The objective is the same, to analyse clustering of co-cited publications using graph clustering. Note that this package also operates over the **co-citation network, not the citation network**. The main differences are: * More robust article matching, based on all available information (so two articles from the same author/year in the same journal don't get clumped together if they also have the DOI or title) * Robust string matching, to catch spelling errors ([using fuzzywuzzy](https://github.com/seatgeek/fuzzywuzzy)) * Degree centrality instead of betweenness centrality. * Geocoding support, where the affiliation field is used to map the location of the citing authors. **This requires a Google maps API key, which may require payment**. [More information about Google geocoding API](https://developers.google.com/maps/documentation/geocoding/start). The guide on how to get a key [is available here](https://developers.google.com/maps/documentation/geocoding/get-api-key). **Important**: The input bibliography files *must be from Web of Science / Web of Knowledge*, including the *Cited references* field. Otherwise the references section might be missing or with a different format and this will not work. **Really Important**: Most .bib files include information that was manually filled by different people using different ideas/notations/conventions. This package will work for most cases, but not all. Some manual editing of the .bib file might be required. If you run into an error that you believe should be fixed in the code, or if you have a suggestion for a new feature, please [open a new issue here](https://github.com/fabioasdias/RefCliq/issues/new). Be sure to [check the existing issues first](https://github.com/fabioasdias/RefCliq/issues), be as descriptive as possible and include examples of the error and detailed instructions on how to replicate it. 
## Installation: *Only python 3 is supported*, after all python 2 is set to [retire very soon](https://pythonclock.org/). ``` pip install refcliq ``` All the dependencies will be automatically installed. It is a good idea to run a small .bib file to make sure every step of the script works before running large datasets (see the FAQ at the end). ## Usage: This package contains two scripts: * rc_cluster.py: Computes the clustering and saves the result in a json file. * rc_vis.py: Starts the visualization interface for a pre-computed file. ### Generating the results Running rc_cluster.py with a '-h' argument will display the help: ``` $ rc_cluster.py -h usage: rc_cluster.py [-h] [-o OUTPUT_FILE] [--cites CITES] [-k GOOGLE_KEY] [--graphs] files [files ...] positional arguments: files List of .bib files to process optional arguments: -h, --help show this help message and exit -o OUTPUT_FILE Output file to save, defaults to 'clusters.json'. --cites CITES Minimum number of citations for an article to be included, defaults to 2. -k GOOGLE_KEY Google maps API key. Necessary for precise geocoding. --graphs Saves graph drawing information for the cluster. ``` * *files*: The .bib files to be used. It can be one file (`a.bib`), a list of files (`a.bib b.bib`), or wildcards (`*.bib`). * *-o* (output_file): The name to be used for the results file. The 'json' extension is automatically added. If not provided, defaults to `clusters.json`. * *--cites*: Minimum number of citations for an article be included. While this can be changed in the interactive interface, increasing this number speeds up the processing time and reduces the memory requirements. *Increase this parameter if the processing crashes / runs out of memory*. Further, with an argument of `1`, all the works cited by only one article will present as a densely connected cluster, which may hinder a bit the interpretation, so it defaults to `2`. * *--graphs*: Enables the visualization of citation graphs in the interface. 
*Greatly increases the size of the results file*. Only clusters with less than 50 articles will be displayed in the interface. * *-k*: The Google API key. This key is **necessary** for geocoding and **may require payment**. Please check [Google's billing calculator](https://mapsplatformtransition.withgoogle.com/calculator). While this package tries to be smart and reduce the geocoding calls, it is reasonably safe to assume one call for each author of each publication as an approximation of an upper bound on the number of calls. **Monitor your usage carefully**, this package is provided as is and the authors cannot be held responsible for any billing issues with Google. Without the geocoding key the countries are still identified and present in the exported .tsv files, but the map will not be displayed in the interface. ### Visualizing the results Assuming that the results file is named `clusters.json`: ``` $ rc_vis.py clusters.json ``` A new tab will open in the default browser that will look like this (with geocoding enabled): ![Basic interface with the map on the top right and the cluster listing on the left](https://github.com/fabioasdias/RefCliq/raw/master/doc/base.png "") The interface is divided in two panels, the cluster visualisation on the left and the citation details on the right. **Clusters**: Each box on the left represents one cluster found by the louvain method. In its "collapsed" visualisation, it displays the number of articles in this cluster, the *Content keywords* directly computed from the available abstracts of the articles in this cluster, and the *Keywords of citing papers*, representing the keywords computed from the papers that cite the papers in this cluster. The keywords are computed using [sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfTransformer.html), with only tf enabled.
The two sliders on the top left control what is displayed, hiding works with fewer citations than the value of the first slider and clusters with fewer works than the value of the second. This is done without reprocessing the clustering. Clicking on the chevron on the top right part of the cluster box will "expand" that cluster, looking like this (after clicking also on the first citation): ![one cluster on the left side is expanded, showing a node-link plot](https://github.com/fabioasdias/RefCliq/raw/master/doc/graph.png "") The expanded version lists all articles in that cluster, with clickable links that activate the panel on the right of the interface that displays the citing details for that citation, along with the network centrality measure ([degree_centrality](https://en.wikipedia.org/wiki/Centrality#Degree_centrality) implemented using [networkx](https://networkx.github.io/documentation/networkx-1.9/reference/generated/networkx.algorithms.centrality.degree_centrality.html)), citation count, article keywords (when the abstract is available), and the keywords of the citing works. **Centrality**: This image is also showing the network plot, with the first work in the list highlighted. This plot is not included by default (and only available for clusters with fewer than 50 works), but it is helpful to understand the centrality measure. Since we adopted degree centrality, this number is the ratio between the number of existing connections of this work and the number of possible connections, it represents the fraction of the cluster that is cited when this work is cited. A centrality of `1` means that every time any article in this cluster is cited, that work is also cited. In this case, the work "Durandlasserve, A. 2002. Holding Their Ground." has only three citations, but has a centrality measure of `0.81` meaning that it is connected (was cited concomitantly) to `81%` of the works in this cluster. The connections are highlighted in red in the network plot. 
**Citation details**: This panel is divided in two parts: the map on the top (if geocoding is enabled by providing a Google geocoding key) and the list of citing works. This list can be exported as a tab separated values file by clicking on the *Export tsv file* button. The DOI link for each work is also provided, if that information is available. The geocoded information can be displayed as markers or as a heatmap. To reduce the impact of papers with several authors on the heatmap, the log of the number of authors is used. The information on the map can be filtered to include only a single year or all years up to the selected year (using the *cumulative* checkbox). Unchecking the *Fit to markers* box will keep the map from changing the viewport (useful to do comparisons). ## FAQ * I don't need geocoding, should I use the original version? *or* I ran my .bibs in the original version and I got different results! This project started as a fork of the original version, aiming only to add the authors' addresses information, I'm not sure if any lines from the original version are present in this project now. Python 2 and coding methodology aside (and the whole [try/except misuse](https://github.com/nealcaren/RefCliq/blob/f67fef07900e322db90ddd5ce94dc83ca8dcf10c/refcliq.py#L90)), since the original version only considers the [first author, year, and the title/journal](https://github.com/nealcaren/RefCliq/blob/f67fef07900e322db90ddd5ce94dc83ca8dcf10c/refcliq.py#L101), it merges things that should not be merged (two papers of the same author in the same journal and year). Further, the cavalier approach to text processing silently loses works, even whole files (.bib). * Why two scripts? I chose to store the processed information as a json, instead of generating a bunch of static pages, because then I can fully use react and make an interactive visualization interface. 
Further, if/when I decide to change the interface, the results don't need to be processed again (depending on the feature), which is handy if you ran for a few hundred files and have around a million works in there. And if someone wants to use the processed result for something else, the json is easy to load and parse. * I don't want to get a Google key, is there any way to draw a map? The exported .tsv files contain country information as a field. It should be possible to break that field and use Excel or Google docs map drawing features to do a choropleth map. It is a bit tricky to match the country names provided to actual countries, which is why I didn't implement that yet. Pull requests more than welcome for this. * The sliders are too finicky! It's hard to select precisely a number. Once the slider is selected, you can use the left/right arrows on the keyboard to do unitary increments. On mobile, well, that's trickier. * What is this `cache.json` file that appeared in my folder? To minimize the calls to Google's geocoding API, the script caches the results, so no duplicate calls are made, even in different runs. If you don't want to start from zero, [download my current cache.json here](https://www.dropbox.com/s/n5q4ha2vz606mp5/cache.json?dl=0). * The first time I'm running it I get `Can't find model 'en_core_web_sm'.`, then it crashes. Just run the command again and it will work. It needs to download the English language model for SpaCy, but the script doesn't automatically refresh the installed packages. Pull requests welcome. * Why not use nominatim for geocoding? We actually used it at the start of the project because it was free, but it missed several addresses (like `Leics, England`) and it geocoded `Toronto, Ontario, Canada` as a point about 50km north of Victoria, BC, in the middle of the forest. Google geocoding **can get expensive**, but it actually works.
* Why tab separated values (.tsv) instead of comma separated values (.csv) for the exported citations? While the specification of the csv format is rather robust, there are some atrocious implementations of it in the wild. By using a character that is *not* present in the values, the parsing is easier and less error-prone. * Why degree centrality instead of betweenness_centrality as the original RefCliq? Consider the following graph: ![Node link plot of a graph with 9 nodes, two cliques of four in each side connected by a node in the center](https://github.com/fabioasdias/RefCliq/raw/master/doc/centrality.png "") Betweenness centrality measures how many shortest paths of the graph pass through a given node. In this case, all paths connecting nodes from the left side of the red node to nodes on the right side will pass through the red node, so the betweenness centrality of the red node will be rather high (`~0.57`), which is not exactly what we want to measure. The degree centrality for this node is `2/8`, because it is connected to two of the possible eight nodes in the network. This is a rather extreme example, which likely would be cut in two clusters by Louvain (depending on the co-citation count). Further, degree centrality is *much* faster to compute. * The time estimates for the *citation network - Cited-References* part of the processing are wrong/consistently increasing. That is the trickiest part of the code, where given a very incomplete reference (parts of the name of the first author, year, and something related to the title/journal, maybe a DOI), the code has to decide if that work is already on the citation graph or not. Since the graph keeps growing, this search will get progressively slower. Robust string comparison is slow, but it is a somewhat reliable way of properly matching the works, even when DOIs are present, because the same work can be associated with multiple DOIs, or someone might have written the name of the company in the DOI field. 
*Manually filled fields*. And typos. * How do I save a map displayed on the interface to use in my blog/paper/etc ? Print screen. Yes, a rather archaic way, but it works and it doesn't require any complicated implementation on my part. It helps if you "zoom in" / increase the resolution on the browser (Ctrl and + on Chrome) to make the map bigger. Pull requests on that feature are welcome.
/refcliq-0.1.12.tar.gz/refcliq-0.1.12/README.md
0.430506
0.955486
README.md
pypi
# refcount - Python classes for reference counting [![license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/csiro-hydroinformatics/pyrefcount/blob/master/LICENSE.txt) ![status](https://img.shields.io/badge/status-stable-green.svg) [![Documentation Status](https://readthedocs.org/projects/pyrefcount/badge/?version=latest)](https://pyrefcount.readthedocs.io/en/latest/?badge=latest) [![codecov](https://codecov.io/gh/csiro-hydroinformatics/pyrefcount/branch/master/graph/badge.svg?token=ZBBFq3ncAq)](https://codecov.io/gh/csiro-hydroinformatics/pyrefcount) master: [![Python package](https://github.com/csiro-hydroinformatics/pyrefcount/actions/workflows/build-matrix.yml/badge.svg?branch=master)](https://github.com/csiro-hydroinformatics/pyrefcount/actions/workflows/build-matrix.yml) testing: [![Python package](https://github.com/csiro-hydroinformatics/pyrefcount/actions/workflows/build-matrix.yml/badge.svg?branch=testing)](https://github.com/csiro-hydroinformatics/pyrefcount/actions/workflows/build-matrix.yml) ![Reference counted native handles](./docs/img/refcount-principles.png "Reference counted native handles") This package is primarily for managing resources in native libraries, written for instance in C++, from Python. While it boils down to "simply" maintaining a set of counters, **it is deceptively complicated to do so properly** and not end up with memory leaks or crashes. This package offers structured options for reliably managing external native resources. Surprisingly I could not locate an existing package doing just what I needed. Other use cases requiring reference counting, aside from native library resources, may benefit from reusing and extending classes in `refcount`. `refcount` ( >=0.7) includes classes using [cffi](https://cffi.readthedocs.io/). Other low-level interoperability mechanisms may well be added in the future. 
## License MIT (see [License.txt](https://github.com/csiro-hydroinformatics/pyrefcount/blob/master/LICENSE.txt)) ## Documentation Hosted at [refcount via readthedocs.io](https://pyrefcount.readthedocs.io/en/latest/?badge=latest) ## Source code The code repository is on [GitHub](https://github.com/csiro-hydroinformatics/pyrefcount). ## Installation ### conda-forge Using `conda` or `mamba`: ```sh mamba install -c conda-forge refcount ``` ### pypi ```sh pip install refcount ``` ### From source (development) ```sh pip install -r requirements.txt pip install -e . ``` ## Sample use The following example is based on one of the unit tests. Say we have a C++ library with objects and a C API: ```C++ #define TEST_DOG_PTR testnative::dog* #define TEST_OWNER_PTR testnative::owner* #define TEST_COUNTED_PTR testnative::reference_counter* testnative::dog* create_dog(); testnative::owner* create_owner(testnative::dog* d); void say_walk(testnative::owner* owner); void release(testnative::reference_counter* obj); // etc. ``` From the outside of the library the API is exported with opaque pointers `void*` (C structs pointers and native C99 types could be handled too). ```C++ void* create_dog(); void* create_owner(void* d); void say_walk(void* owner); void release(void* obj); // etc. ``` Starting with the end in mind, from Python we want an API hiding the low level details close to the C API, in particular avoiding managing native memory via `release` C API calls, piggybacking the python GC instead. ```python dog = Dog() owner = DogOwner(dog) owner.say_walk() print(dog.position) dog = None # the "native dog" is still alive though, as the owner incremented the ref count owner = None ``` This is doable with `refcount` and the `cffi` package. 
One possible design is: ```python ut_ffi = cffi.FFI() ut_ffi.cdef('extern void* create_dog();') ut_ffi.cdef('extern void* create_owner( void* d);') ut_ffi.cdef('extern void say_walk( void* owner);') ut_ffi.cdef('extern void release( void* obj);') # etc. ut_dll = ut_ffi.dlopen('c:/path/to/test_native_library.dll', 1) # Lazy loading class CustomCffiNativeHandle(CffiNativeHandle): def __init__(self, pointer, prior_ref_count = 0): super(CustomCffiNativeHandle, self).__init__(pointer, type_id='', prior_ref_count = prior_ref_count) def _release_handle(self) -> bool: ut_dll.release(self.get_handle()) return True class Dog(CustomCffiNativeHandle): def __init__(self, pointer = None): if pointer is None: pointer = ut_dll.create_dog() super(Dog, self).__init__(pointer) # etc. class DogOwner(CustomCffiNativeHandle): def __init__(self, dog): super(DogOwner, self).__init__(None) self._set_handle(ut_dll.create_owner(dog.get_handle())) self.dog = dog self.dog.add_ref() # Do note this important reference increment def say_walk(self): ut_dll.say_walk(self.get_handle()) def _release_handle(self) -> bool: super(DogOwner, self)._release_handle() # super(DogOwner, self)._release_handle() self.dog.release() return True ``` ## Related work ### Ancestry, acknowledgements This python package `refcount` actually spawned from prior work for interoperability between C++, R and .NET ([R.NET](https://github.com/rdotnet/rdotnet)) `refcount` features using `cffi` were also significantly informed by Kevin Plastow's [work](https://search.informit.com.au/documentSummary;dn=823898220073899;res=IELENG) while he was at the Australian Bureau of Meteorology; this contribution is gratefully acknowledged. In you have native interop needs you may also want to look at: * the nuget package [dynamic-interop-dll](https://github.com/rdotnet/dynamic-interop-dll) for .NET/native interop. 
* a set of mostly c++ software [tools for interop with C/C++](https://github.com/csiro-hydroinformatics/c-interop) * a C# library for [generating interop glue code on top of C API glue code](https://github.com/csiro-hydroinformatics/c-api-wrapper-generation). ### Other python packages `refcount` was created in part because no existing prior (Python) work could quite fit the need. There are however packages that may better address your particular need: * [infi.pyutils](https://pypi.org/project/infi.pyutils/) contains a reference counting class.
/refcount-1.2.0.zip/refcount-1.2.0/README.md
0.634317
0.92976
README.md
pypi
r"""
A Python package to translate accents and special characters from LaTeX
to UTF-8, and vice versa.

Handles the following:

Accents
^^^^^^^

LaTeX uses commands of the form \\<char>{<letter>}, i.e. \\\\"{u} = ü, where
<char> specifies the type of accent, and <letter> is the letter that has
the accent:

========= ========= ===============
<char>    Example   Accent
========= ========= ===============
"         ö         Diaeresis
'         ó         Acute Accent
.         ȯ         Dot Above
=         ō         Macron
\^        ô         Circumflex Accent
\`        ò         Grave Accent
\|        o̍         Vertical Line Above
~         õ         Tilde
b         o̱         Macron Below
c         ç         Cedilla
C         ȍ         Double Grave Accent
d         ọ         Dot Below
f         ȏ         Inverted Breve
h         ả         Hook Above
H         ő         Double Acute Accent
k         ǫ         Ogonek
r         o̊         Ring Above
t         o͡o        Double Inverted Breve
u         ŏ         Breve
U         o̎         Double Vertical Line Above
v         ǒ         Caron
========= ========= ===============

Special Symbols
^^^^^^^^^^^^^^^

LaTeX uses commands of the form \\<letter>, i.e. \\o = ø, where <letter>
is one of the following:

========= ========= ===================
<letter>  Result    Description
========= ========= ===================
i         ı         Latin Small Letter Dotless I
j         ȷ         Latin Small Letter Dotless J
l         ł         Latin Small Letter L With Stroke
L         Ł         Latin Capital Letter L With Stroke
o         ø         Latin Small Letter O With Stroke
O         Ø         Latin Capital Letter O With Stroke
========= ========= ===================

Dashes
^^^^^^

LaTeX uses multiple dashes to represent different dashes:

========= ========= ===================
<letters> Result    Description
========= ========= ===================
``--``    –         en-dash
``---``   —         em-dash
========= ========= ===================
"""
import re
import typing
import unicodedata

alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'

# LaTeX characters in accent macros of form \<char>{<letter>}, i.e. \"{u}.
# Each maps to the Unicode *combining* character appended after the letter.
accent = {
    '"': '\N{Combining Diaeresis}',
    "'": '\N{Combining Acute Accent}',
    '.': '\N{Combining Dot Above}',
    '=': '\N{Combining Macron}',
    '^': '\N{Combining Circumflex Accent}',
    '`': '\N{Combining Grave Accent}',
    '|': '\N{Combining Vertical Line Above}',
    '~': '\N{Combining Tilde}',
    'b': '\N{Combining Macron Below}',
    'c': '\N{Combining Cedilla}',
    'C': '\N{Combining Double Grave Accent}',
    'd': '\N{Combining Dot Below}',
    'f': '\N{Combining Inverted Breve}',
    'h': '\N{Combining Hook Above}',
    'H': '\N{Combining Double Acute Accent}',
    'k': '\N{Combining Ogonek}',
    'r': '\N{Combining Ring Above}',
    't': '\N{Combining Double Inverted Breve}',
    'u': '\N{Combining Breve}',
    'U': '\N{Combining Double Vertical Line Above}',
    'v': '\N{Combining Caron}'
}

# The regexp to detect the LaTeX accent commands. The two added characters
# are the dotless i and j.
accent_re = re.compile(
    r'\\([' + ''.join(accent.keys()) + r']){([a-zA-Z\u0131\u0237])}'
)

# Invert the accent dictionary for each letter in the alphabet to make
# a dictionary of LaTeX encodings (unicode string -> LaTeX command).
encoding = {}
for key, val in accent.items():
    for char in list(alphabet):
        encoding[char + val] = '\\' + key + '{' + char + '}'
        # handle any precombined versions of the character
        string = unicodedata.normalize('NFC', char + val)
        if len(string) == 1:
            encoding[string] = encoding[char + val]
    for char in [
        '\N{Latin Small Letter Dotless I}',
        '\N{Latin Small Letter Dotless J}'
    ]:
        encoding[char + val] = r'\%s{%s}' % (key, char)
        string = unicodedata.normalize('NFC', char + val)
        # These characters have no precombined versions, but check anyway
        if len(string) == 1:
            encoding[string] = encoding[char + val]


def _decode_latex_accent(match: typing.Match) -> str:
    """Helper function for re.sub for replacing LaTeX accents.

    Parameters:
        match: The match object from re.sub

    Returns:
        The letter followed by the combining accent character.
    """
    return match[2] + accent[match[1]]


# LaTeX single character symbols of form \<letter>, i.e. \o = ø.
symbol = {
    'i': '\N{Latin Small Letter Dotless I}',
    'j': '\N{Latin Small Letter Dotless J}',
    'l': '\N{Latin Small Letter L With Stroke}',
    'L': '\N{Latin Capital Letter L With Stroke}',
    'o': '\N{Latin Small Letter O With Stroke}',
    'O': '\N{Latin Capital Letter O With Stroke}'
}

# The regexp to detect the LaTeX commands for special characters
symbol_re = re.compile(r'\\([' + ''.join(symbol.keys()) + '])')

# Invert the symbol dictionary to make a dictionary of LaTeX encodings.
for key, val in symbol.items():
    encoding[val] = '\\' + key


def _decode_latex_symbol(match: typing.Match) -> str:
    """Helper function for re.sub for replacing LaTeX special characters.

    Parameters:
        match: The match object from re.sub

    Returns:
        The unicode character for the symbol.
    """
    return symbol[match[1]]


# LaTeX dashes
dash = {'--': '\N{EN Dash}', '---': '\N{EM Dash}'}

# The regexp to detect the LaTeX commands for dashes.  The optional
# non-dash captures on either side keep surrounding text intact.
dash_re = re.compile(r'([^-]?)(-{2,3})([^-]?)')

# Invert the dictionary to make a dictionary of LaTeX encodings.
for key, val in dash.items():
    encoding[val] = key


def _decode_latex_dash(match: typing.Match) -> str:
    """Helper function for re.sub for replacing LaTeX dashes.

    Parameters:
        match: The match object from re.sub

    Returns:
        The unicode character for the dash, with its neighbors restored.
    """
    return match[1] + dash[match[2]] + match[3]


# LaTeX braces to protect capitalization.
brace_re = re.compile(r"""{([^}]*)}""")


def decode_latex(text: str) -> str:
    """Replaces all LaTeX accents in the input string with their UTF8
    equivalents.

    Dashes, special symbols and accent macros are decoded in that order,
    then any remaining protective braces are stripped.

    Parameters:
        text: The text to translate.

    Returns:
        The translated string, with LaTeX commands replaced by the
        corresponding Unicode characters.
    """
    return brace_re.sub(
        r'\1',
        accent_re.sub(
            _decode_latex_accent,
            symbol_re.sub(
                _decode_latex_symbol,
                dash_re.sub(_decode_latex_dash, text)
            )
        )
    )


def encode_latex(text: str) -> str:
    """
    Encode the accented and special unicode characters into LaTeX commands.

    Parameters:
        text: The text to translate

    Returns:
        The translated text, using LaTeX commands for accents and special
        characters
    """
    # Fix: the original raised IndexError on an empty string (text[0]).
    if not text:
        return text
    # Map the double character representations (letter + combining mark,
    # or multi-dash sequences) using a two-character sliding window.
    text2 = ''
    char1 = text[0]
    i = 1
    len_text = len(text)
    while i < len_text:
        char2 = text[i]
        if char1 + char2 in encoding:
            text2 += encoding[char1 + char2]
            i += 1
            if i >= len_text:
                # The pair consumed the final character; nothing pending.
                char1 = None
                break
            char1 = text[i]
        else:
            text2 += char1
            char1 = char2
        i += 1
    if char1 is not None:
        text2 += char1
    # Map the single character representations (precomposed letters,
    # special symbols).
    result = ''
    for char in list(text2):
        if char in encoding:
            result += encoding[char]
        else:
            result += char
    return result


if __name__ == '__main__':  # pragma: no cover
    text = r"""
(Vorlova_2015) Barbora Vorlov{\'{a}} and Dana Nachtigallov{\'{a}} and Jana Jir{\'{a}}skov{\'{a}}-Van{\'{\i}}{\v{c}}kov{\'{a}} and Haresh Ajani and Petr Jansa and Jan {\v{R}}ez{\'{a}}{\v{c}} and Jind{\v{r}}ich Fanfrl{\'{\i}}k and Michal Otyepka and Pavel Hobza and Jan Konvalinka and Martin Lep{\v{s}}{\'{\i}}k; Malonate-based inhibitors of mammalian serine racemase: Kinetic characterization and structure-based computational study; European Journal of Medicinal Chemistry; 2015; 89; 189--197; 10.1016/j.ejmech.2014.10.043.
"""
    print(text)

    text2 = decode_latex(text)
    print(text2)

    text3 = encode_latex(text2)
    print(text3)
    print(text3 == text)

    text4 = decode_latex(text3)
    print(text4)
    print(text2 == text4)

    ris = """
TY - JOUR
DO - 10.1016/j.ejmech.2014.10.043
UR - http://dx.doi.org/10.1016/j.ejmech.2014.10.043
TI - Malonate-based inhibitors of mammalian serine racemase: Kinetic characterization and structure-based computational study
T2 - European Journal of Medicinal Chemistry
AU - Vorlová, Barbora
AU - Nachtigallová, Dana
AU - Jirásková-Vaníčková, Jana
AU - Ajani, Haresh
AU - Jansa, Petr
AU - Řezáč, Jan
AU - Fanfrlík, Jindřich
AU - Otyepka, Michal
AU - Hobza, Pavel
AU - Konvalinka, Jan
AU - Lepšík, Martin
PY - 2015
DA - 2015/01
PB - Elsevier BV
SP - 189-197
VL - 89
SN - 0223-5234
ER -
"""  # noqa: E501

    # print(encode_latex('Řezáč'))
/reference_handler-0.9.1.tar.gz/reference_handler-0.9.1/reference_handler/latex_utf8.py
0.828904
0.496033
latex_utf8.py
pypi
import sqlite3
import pprint
import re

import bibtexparser

from .latex_utf8 import decode_latex
from .utils import entry_to_bibtex

# Output formats accepted by dump()/load_bibliography()/cite().
supported_fmts = ['bibtex', 'text']

# '-' must be first for the regex to work.
subscript = {
    '-': '\N{Subscript Minus}',
    '0': '\N{Subscript Zero}', '1': '\N{Subscript One}',
    '2': '\N{Subscript Two}', '3': '\N{Subscript Three}',
    '4': '\N{Subscript Four}', '5': '\N{Subscript Five}',
    '6': '\N{Subscript Six}', '7': '\N{Subscript Seven}',
    '8': '\N{Subscript Eight}', '9': '\N{Subscript Nine}',
    '+': '\N{Subscript Plus Sign}', '=': '\N{Subscript Equals Sign}',
    '(': '\N{Subscript Left Parenthesis}',
    ')': '\N{Subscript Right Parenthesis}',
    'a': '\N{Latin Subscript Small Letter A}',
    'e': '\N{Latin Subscript Small Letter E}',
    'o': '\N{Latin Subscript Small Letter O}',
    'x': '\N{Latin Subscript Small Letter X}',
    'h': '\N{Latin Subscript Small Letter H}',
    'k': '\N{Latin Subscript Small Letter K}',
    'l': '\N{Latin Subscript Small Letter L}',
    'm': '\N{Latin Subscript Small Letter M}',
    'n': '\N{Latin Subscript Small Letter N}',
    'p': '\N{Latin Subscript Small Letter P}',
    's': '\N{Latin Subscript Small Letter S}',
    't': '\N{Latin Subscript Small Letter T}',
    'i': '\N{Latin Subscript Small Letter I}',
    'r': '\N{Latin Subscript Small Letter R}',
    'u': '\N{Latin Subscript Small Letter U}',
    'v': '\N{Latin Subscript Small Letter V}',
    r'.': '.'
}
# Matches e.g. "$_2$" or "$_x$" -- a LaTeX math subscript of mapped chars.
subscript_re = re.compile(r'\$_([' + ''.join(subscript.keys()) + r']+)\$')

# '-' must be first for the regex to work.
superscript = {
    '-': '\N{Superscript Minus}',
    '0': '\N{Superscript Zero}', '1': '\N{Superscript One}',
    '2': '\N{Superscript Two}', '3': '\N{Superscript Three}',
    '4': '\N{Superscript Four}', '5': '\N{Superscript Five}',
    '6': '\N{Superscript Six}', '7': '\N{Superscript Seven}',
    '8': '\N{Superscript Eight}', '9': '\N{Superscript Nine}',
    '+': '\N{Superscript Plus Sign}', '=': '\N{Superscript Equals Sign}',
    '(': '\N{Superscript Left Parenthesis}',
    ')': '\N{Superscript Right Parenthesis}',
    'a': '\N{Feminine Ordinal Indicator}',
    'b': 'ᵇ', 'c': 'ᶜ', 'd': 'ᵈ', 'e': 'ᵉ', 'f': 'ᶠ', 'g': 'ᵍ',
    'h': 'ʰ', 'i': 'ⁱ', 'j': 'ʲ', 'k': 'ᵏ', 'l': 'ˡ', 'm': 'ᵐ',
    'n': 'ⁿ',
    'o': '\N{Masculine Ordinal Indicator}',
    'p': 'ᵖ', 'r': 'ʳ', 's': 'ˢ', 't': 'ᵗ', 'u': 'ᵘ', 'v': 'ᵛ',
    'w': 'ʷ', 'x': 'ˣ', 'y': 'ʸ', 'z': 'ᶻ'
}
superscript_re = re.compile(r'\$\^([' + ''.join(superscript.keys()) + r']+)\$')

greek_symbol = {
    'alpha': '\N{Greek Small Letter Alpha}',
    'beta': '\N{Greek Small Letter Beta}',
    'gamma': '\N{Greek Small Letter Gamma}',
    'delta': '\N{Greek Small Letter Delta}',
    'epsilon': '\N{Greek Small Letter Epsilon}',
    'zeta': '\N{Greek Small Letter Zeta}',
    'eta': '\N{Greek Small Letter Eta}',
    'theta': '\N{Greek Small Letter Theta}',
    'iota': '\N{Greek Small Letter Iota}',
    'kappa': '\N{Greek Small Letter Kappa}',
    'lamda': '\N{Greek Small Letter Lamda}',
    'lambda': '\N{Greek Small Letter Lamda}',
    'mu': '\N{Greek Small Letter Mu}',
    'nu': '\N{Greek Small Letter Nu}',
    'xi': '\N{Greek Small Letter Xi}',
    'omicron': '\N{Greek Small Letter Omicron}',
    'pi': '\N{Greek Small Letter Pi}',
    'rho': '\N{Greek Small Letter Rho}',
    'sigma': '\N{Greek Small Letter Sigma}',
    'tau': '\N{Greek Small Letter Tau}',
    'upsilon': '\N{Greek Small Letter Upsilon}',
    'phi': '\N{Greek Small Letter Phi}',
    'chi': '\N{Greek Small Letter Chi}',
    'psi': '\N{Greek Small Letter Psi}',
    'omega': '\N{Greek Small Letter Omega}',
    'Alpha': '\N{Greek Capital Letter Alpha}',
    'Beta': '\N{Greek Capital Letter Beta}',
    'Gamma': '\N{Greek Capital Letter Gamma}',
    'Delta': '\N{Greek Capital Letter Delta}',
    'Epsilon': '\N{Greek Capital Letter Epsilon}',
    'Zeta': '\N{Greek Capital Letter Zeta}',
    'Eta': '\N{Greek Capital Letter Eta}',
    'Theta': '\N{Greek Capital Letter Theta}',
    'Iota': '\N{Greek Capital Letter Iota}',
    'Kappa': '\N{Greek Capital Letter Kappa}',
    'Lamda': '\N{Greek Capital Letter Lamda}',
    'Lambda': '\N{Greek Capital Letter Lamda}',
    'Mu': '\N{Greek Capital Letter Mu}',
    'Nu': '\N{Greek Capital Letter Nu}',
    'Xi': '\N{Greek Capital Letter Xi}',
    'Omicron': '\N{Greek Capital Letter Omicron}',
    'Pi': '\N{Greek Capital Letter Pi}',
    'Rho': '\N{Greek Capital Letter Rho}',
    'Sigma': '\N{Greek Capital Letter Sigma}',
    'Tau': '\N{Greek Capital Letter Tau}',
    'Upsilon': '\N{Greek Capital Letter Upsilon}',
    'Phi': '\N{Greek Capital Letter Phi}',
    'Chi': '\N{Greek Capital Letter Chi}',
    'Psi': '\N{Greek Capital Letter Psi}',
    'Omega': '\N{Greek Capital Letter Omega}',
}
# Matches e.g. "$\alpha$" -- a LaTeX greek-letter command in math mode.
greek_symbol_re = re.compile(r'\$\\(' + '|'.join(greek_symbol.keys()) + r')\$')


class Reference_Handler(object):

    def __init__(self, database):
        """
        Constructs a reference handler class by connecting to a SQLite
        database and building the two tables within it.
        """
        self.conn = sqlite3.connect(database)
        self.cur = self.conn.cursor()
        self._initialize_tables()

    def __del__(self):
        # Best-effort flush and close; at interpreter shutdown the
        # connection may already be unusable, so swallow any error.
        try:
            self.conn.commit()
            self.conn.close()
        except:  # noqa: E722
            pass

    def dump(self, outfile=None, fmt='bibtex', level=3):
        """
        Retrieves the individual citations that were collected during the
        execution of a program and tallies the number of times each citation
        was referenced.

        Parameters
        ----------
        outfile: str, Optional, default: None
            The file name for the dump, if desired.
        fmt: str, Optional, default: 'bibtex'
            The format of the dump file, if desired.
        level: int, Optional, default: 3
            Only those citations whose level is at most the specified level
            will be output.

        Returns
        -------
        ret: list
            A list whose elements are tuples containing pairs of raw
            citations and their counts.
        """
        if fmt not in supported_fmts:
            raise NameError('Format %s not currently supported.' % (fmt))
        if level not in range(1, 4) and level is not None:
            raise ValueError(
                'Invalid value for level. Please input a value in the range '
                '[1,3]'
            )

        self.cur.execute(
            """
            SELECT t1.id, t1.raw, t2.counts, t2.level
            FROM citation t1
            LEFT JOIN(
                SELECT id, reference_id, level, SUM(count) AS counts
                FROM context
                WHERE level <= ?
                GROUP BY reference_id
            ) t2
            ON t1.id = t2.reference_id
            WHERE counts > 0
            ORDER BY counts DESC
            """, (level,)
        )
        query = self.cur.fetchall()

        if fmt == 'bibtex':
            ret = query
            if outfile is not None:
                if not isinstance(outfile, str):
                    raise TypeError(
                        'The name of the output file must be a string but it '
                        'is %s' % type(outfile)
                    )
                with open(outfile, 'w') as f:
                    for item in query:
                        f.write('TOTAL_MENTIONS: %s \n' % str(item[2]))
                        f.write('LEVEL: %s \n' % str(item[3]))
                        f.write(item[1])
        elif fmt == 'text':
            ret = []
            for item in query:
                parse = bibtexparser.loads(item[1]).entries[0]
                entry_type = parse['ENTRYTYPE']
                if entry_type == 'misc':
                    plain_text = self.format_misc(parse)
                elif entry_type == 'article':
                    plain_text = self.format_article(parse)
                elif entry_type == 'inbook':
                    plain_text = self.format_inbook(parse)
                elif entry_type == 'phdthesis':
                    plain_text = self.format_phdthesis(parse)
                else:
                    plain_text = f"Do not have a handler for '{entry_type}':"
                    plain_text += '\n'
                    plain_text += pprint.pformat(parse)
                plain_text = decode_latex(plain_text)
                plain_text = self.decode_math_symbols(plain_text)
                ret.append((item[0], plain_text, item[2], item[3]))
        return ret

    @staticmethod
    def load_bibliography(bibfile=None, fmt='bibtex'):
        """
        Utility function to read a bibliographic file in common formats.
        The current supported formats are BibTeX.

        Parameters
        ----------
        bibfile: str, default: None
            The file name for the bibliographic file.
        fmt: str, Optional, default: 'bibtex'
            The format of the bibliographic file, if desired.

        Returns
        -------
        ret: dict
            A dictionary whose keys are the identifiers used in the
            bibliographic file (e.g. the first line in a BibTeX entry) and
            values are the raw entries found in such file. Note that the
            values of the dictionary might not be exactly as found in the
            original bibliographic file.
        """
        if bibfile is None:
            raise FileNotFoundError('A bibliography file must be specified.')
        if fmt not in supported_fmts:
            raise NameError('Format %s not currently supported.' % (fmt))

        with open(bibfile, 'r') as f:
            parser = bibtexparser.bparser.BibTexParser(common_strings=True)
            bibliography = bibtexparser.load(f, parser=parser).entries

        ret = {k['ID']: {} for k in bibliography}
        for entry in bibliography:
            ret[entry['ID']] = entry_to_bibtex(entry)
        return ret

    def cite(
        self, raw=None, alias=None, module=None, level=1, note=None,
        fmt='bibtex', doi=None
    ):
        """
        Adds a given reference to the internal database.

        Parameters
        ----------
        alias: str, default: None
            A string ID for the citation.
        raw: str, default: None
            The raw text for a given citation.
        module: str, default: None
            The module or function where this citation was called from.
        level: int, default: 1
            The level of importance for this citation. References with the
            highest priority must have level 1 and references with lowest
            priority must have level 3.
        note: str, default: None
            A note that describes this citation.
        fmt: str, Optional, default: 'bibtex'
            The format of the raw citation text.
        doi: str, Optional, default: None
            The digital object identifier if not provided in the raw
            argument. If provided in raw, the DOI found in raw will be used.

        Returns
        -------
        reference_id: int
            The database id of the citation.
        """
        if alias is None or raw is None or module is None or note is None:
            raise NameError(
                'Need to provide the "alias", "raw", "module" and "note" '
                'arguments'
            )

        # Prefer the DOI embedded in the raw entry; fall back to the
        # explicit argument (previously the argument was always discarded).
        extracted_doi = self._extract_doi(raw, fmt)
        if extracted_doi is not None:
            doi = extracted_doi

        reference_id = self._get_reference_id(raw=raw, alias=alias, doi=doi)
        if reference_id is None:
            self._create_citation(raw=raw, alias=alias, doi=doi)
            reference_id = self.cur.lastrowid
            self._create_context(
                reference_id=reference_id, module=module, note=note,
                level=level
            )
        else:
            context_id = self._get_context_id(
                reference_id=reference_id, module=module, note=note,
                level=level
            )
            if context_id is None:
                self._create_context(
                    reference_id=reference_id, module=module, note=note,
                    level=level
                )
            else:
                self._update_counter(context_id=context_id)

        # Save the changes!
        self.conn.commit()
        return reference_id

    def _update_counter(self, context_id=None):
        """
        Updates the counter for a given context.
        """
        if context_id is None:
            raise NameError("The context ID must be provided")
        self.cur.execute(
            "UPDATE context SET count = count + 1 WHERE id=?;", (context_id,)
        )

    def _extract_doi(self, raw=None, fmt='bibtex'):
        """
        Parses the DOI from a bibliographic entry; returns None if absent.
        """
        if fmt not in supported_fmts:
            raise NameError('Format %s not currently supported.' % (fmt))
        if fmt == 'bibtex':
            ret = bibtexparser.loads(raw)
            ret = ret.entries[0]
            if 'doi' in ret.keys():
                return ret['doi']

    def _initialize_tables(self):
        """
        Initializes the citation and context tables.
        """
        self.cur.execute(
            """CREATE TABLE IF NOT EXISTS "citation" (
                "id" INTEGER PRIMARY KEY AUTOINCREMENT,
                "alias" TEXT NOT NULL UNIQUE,
                "raw" TEXT NOT NULL UNIQUE,
                "doi" TEXT UNIQUE
            );
            """
        )
        self.cur.execute(
            "CREATE INDEX IF NOT EXISTS idx_raw on citation (raw);"
        )
        self.cur.execute(
            "CREATE INDEX IF NOT EXISTS idx_alias on citation (alias);"
        )
        self.cur.execute(
            "CREATE INDEX IF NOT EXISTS idx_doi on citation (doi);"
        )
        self.cur.execute(
            """
            CREATE TABLE IF NOT EXISTS "context" (
                "id" INTEGER PRIMARY KEY AUTOINCREMENT,
                "reference_id" INTEGER NOT NULL,
                "module" TEXT NOT NULL,
                "note" TEXT NOT NULL,
                "count" INTEGER NOT NULL,
                "level" INTEGER NOT NULL,
                FOREIGN KEY(reference_id) REFERENCES Citation(id)
            );
            """
        )
        self.cur.execute(
            "CREATE INDEX IF NOT EXISTS idx_refid on context (reference_id);"
        )
        self.cur.execute(
            "CREATE INDEX IF NOT EXISTS idx_module on context (module);"
        )
        self.cur.execute(
            "CREATE INDEX IF NOT EXISTS idx_count on context (count);"
        )
        self.cur.execute(
            "CREATE INDEX IF NOT EXISTS idx_level on context (level);"
        )
        self.conn.commit()

    def _get_reference_id(self, raw=None, alias=None, doi=None):
        """
        Gets the ID of the given raw, alias or doi if it exists.
        Returns None when no matching citation is stored.
        """
        # NOTE: the original called the SQL string with the parameter tuple
        # (missing comma), raising TypeError on the alias and doi paths.
        if raw is not None:
            self.cur.execute("SELECT id FROM citation WHERE raw=?;", (raw,))
        elif alias is not None:
            self.cur.execute(
                "SELECT id FROM citation WHERE alias=?;", (alias,)
            )
        elif doi is not None:
            self.cur.execute("SELECT id FROM citation WHERE doi=?;", (doi,))
        else:
            raise NameError(
                'Variables "raw" or "alias" or "DOI" must be input.'
            )

        ret = self.cur.fetchall()
        if len(ret) == 0:
            return None
        return ret[0][0]

    def _get_context_id(
        self, reference_id=None, module=None, note=None, level=None
    ):
        """
        Gets the ID of the context if it exists. A context is specified by
        the (reference_id, module, note, level) combination.
        """
        if (
            reference_id is None or module is None or note is None
            or level is None
        ):
            raise NameError(
                'The variables "reference_id" and "module" and "note" and '
                '"level" must be specified'
            )
        self.cur.execute(
            "SELECT id FROM context WHERE reference_id=? AND module=? AND "
            "note=? AND level=?;",
            (reference_id, module, note, level)
        )
        ret = self.cur.fetchall()
        if len(ret) == 0:
            return None
        return ret[0][0]

    def _create_citation(self, raw=None, alias=None, doi=None):
        """
        Adds a new record to the citation table using a raw reference text.
        """
        if raw is None or alias is None:
            raise NameError('The value for raw and alias must be provided')
        else:
            self.cur.execute(
                "INSERT INTO citation (raw, alias, doi) VALUES (?, ?, ?);",
                (raw, alias, doi)
            )
        self.conn.commit()

    def _create_context(
        self, reference_id=None, module=None, note=None, level=None
    ):
        """
        Adds a new record to the context table using the combination of the
        provided arguments. The usage count starts at 1.
        """
        if reference_id is None:
            raise NameError("Variables 'reference_id' or must be specified.")
        self.cur.execute(
            "INSERT INTO context (reference_id, module, note, count, level) "
            "VALUES (?, ?, ?, ?, ?)",
            (reference_id, module, note, 1, level)
        )
        self.conn.commit()

    def total_mentions(self, reference_id=None, alias=None):
        """
        Returns the number of times a given citation has been used.
        """
        if reference_id is None:
            if alias is None:
                raise NameError(
                    "The 'reference_id' or 'alias' must be provided."
                )
            else:
                self.cur.execute(
                    """
                    SELECT t1.alias, t2.counts
                    FROM citation t1
                    INNER JOIN (
                        SELECT reference_id, SUM(count) AS counts
                        FROM context
                        GROUP BY reference_id
                    ) t2
                    ON t1.id = t2.reference_id
                    WHERE alias = ?
                    """, (alias,)
                )
        else:
            self.cur.execute(
                """
                SELECT t1.id, t2.counts
                FROM citation t1
                INNER JOIN (
                    SELECT reference_id, SUM(count) AS counts
                    FROM context
                    GROUP BY reference_id
                ) t2
                ON t1.id = t2.reference_id
                WHERE id = ?
                """, (reference_id,)
            )
        ret = self.cur.fetchall()
        if len(ret) == 0:
            return 0
        return ret[0][1]

    def total_citations(self, reference_id=None, alias=None):
        """
        Returns the total number of citations in the citation table. If a
        reference is provided, returns the total number of citations for
        that reference ID (or alias).
        """
        if reference_id is None:
            if alias is None:
                self.cur.execute("SELECT COUNT(*) FROM citation")
                ret = self.cur.fetchall()[0][0]
                return ret
            else:
                self.cur.execute(
                    "SELECT COUNT(*) FROM citation WHERE alias = ?", (alias,)
                )
        else:
            self.cur.execute(
                "SELECT COUNT(*) FROM citation WHERE id=?;", (reference_id,)
            )
        ret = self.cur.fetchall()
        if len(ret) == 0:
            return 0
        return ret[0][0]

    def total_contexts(self, reference_id=None, alias=None):
        """
        Returns the total number of contexts for a given reference ID.
        """
        if reference_id is None:
            if alias is None:
                raise NameError(
                    "Variables 'reference_id' or 'alias' must be specified."
                )
            else:
                self.cur.execute(
                    """
                    SELECT COUNT(*)
                    FROM citation
                    INNER JOIN (
                        SELECT id, reference_id
                        FROM context
                    ) t2
                    ON citation.id = t2.reference_id
                    WHERE alias=?
                    """, (alias,)
                )
        else:
            self.cur.execute(
                "SELECT COUNT(*) FROM context WHERE reference_id = ?;",
                (reference_id,)
            )
        return self.cur.fetchall()[0][0]

    def __str__(self):
        # Placeholder; intentionally returns None for now.
        pass

    def format_article(self, data):
        """Format an article BibTex record

        ACS style:

        Foster, J. C.; Varlas, S.; Couturaud, B.; Coe, J.; O’Reilly, R. K.
        Getting into Shape: Reflections on a New Generation of Cylindrical
        Nanostructures’ Self-Assembly Using Polymer Building Block. J. Am.
        Chem. Soc. 2019, 141 (7), 2742−2753. DOI: 10.1021/jacs.8b08648
        """
        result = ''
        if 'author' in data:
            result += '; '.join(data['author'].split(' and '))
            if result[-1] != '.':
                result += '.'
        if 'title' in data:
            result += ' ' + data['title'].rstrip('.') + '.'
        if 'journal' in data:
            result += ' ' + data['journal']
        if 'year' in data:
            result += f" {data['year']},"
        if 'volume' in data:
            result += f" {data['volume']},"
        if 'pages' in data:
            result += f" {data['pages']}."
        if 'doi' in data:
            result += f" DOI: {data['doi']}"
        return result

    def format_phdthesis(self, data):
        """Format a PhD Thesis BibTex record

        ACS style:

        Cable, M. L. Life in Extreme Environments: Lanthanide-Based
        Detection of Bacterial Spores and Other Sensor Design Pursuits.
        Ph.D. Dissertation, California Institute of Technology, Pasadena,
        CA, 2010. http://resolver.caltech.edu/CaltechTHESIS:05102010-145436548
        (accessed 2019-09-10).
        """
        result = ''
        if 'author' in data:
            result += '; '.join(data['author'].split(' and '))
            if result[-1] != '.':
                result += '.'
        if 'title' in data:
            result += ' ' + data['title'].rstrip('.') + '.'
        result += " Ph.D. Dissertation"
        if 'school' in data:
            result += f", {data['school']}"
        if 'address' in data:
            result += f", {data['address']}"
        if 'year' in data:
            result += f", {data['year']}."
        if 'url' in data:
            result += f", {data['url']}"
        if 'doi' in data:
            result += f", DOI: {data['doi']}"
        return result

    def format_misc(self, data):
        """Format a misc BibTex record, used for software

        Author 1; Author 2; etc. Program Title, version or edition;
        Publisher: Place of Publication, Year.

        Example:

        Binkley, J. S. GAUSSIAN82; Department of Chemistry, Carnegie Mellon
        University: Pittsburgh, PA, 1982.
        """
        result = ''
        if 'author' in data:
            result += '; '.join(data['author'].split(' and '))
            if result[-1] != '.':
                result += '.'
        if 'title' in data:
            result += ' ' + data['title']
        if 'version' in data:
            result += f", version {data['version']};"
        else:
            result += ';'
        if 'organization' in data:
            result += ' ' + data['organization']
        if 'address' in data:
            result += f": {data['address']}"
        if 'url' in data:
            result += f", {data['url']}"
        if 'doi' in data:
            result += f", DOI: {data['doi']}"
        return result

    def format_inbook(self, data):
        """Format a chapter or part of a book or series BibTex record

        ACS style:

        Bard, A. J.; Faulkner, L. R. Double-Layer Structure and Absorption.
        In Electrochemical Methods: Fundamentals and Applications, 2nd ed.;
        John Wiley & Sons, 2001; pp 534−579.

        for series:

        Gaede, H. C. Professional Development for REU Students. In Best
        Practices for Chemistry REU Programs; Griep, M. A, Watkins, L.,
        Eds.; ACS Symposium Series, Vol. 1295; American Chemical Society,
        2018; pp 33−44. DOI: 10.1021/bk-2018-1295.ch003
        """
        result = ''
        if 'author' in data:
            result += '; '.join(data['author'].split(' and '))
            if result[-1] != '.':
                result += '.'
        if 'title' in data:
            result += ' ' + data['title'].rstrip('.') + '. In'
        if 'booktitle' in data:
            result += f" {data['booktitle']}"
        if 'series' in data:
            result += f"; {data['series']}"
        if 'publisher' in data:
            result += f"; {data['publisher']}"
        if 'place' in data:
            result += f", {data['place']}"
        if 'year' in data:
            result += f", {data['year']}."
        if 'url' in data:
            result += f", {data['url']}"
        if 'doi' in data:
            result += f", DOI: {data['doi']}"
        return result

    def decode_math_symbols(self, text):
        """Clean up math symbols such as subscripts."""
        text = greek_symbol_re.sub(self._decode_greek_symbol, text)
        text = superscript_re.sub(self._decode_superscript, text)
        return subscript_re.sub(self._decode_subscript, text)

    def _decode_subscript(self, match):
        # Translate each captured character to its Unicode subscript form.
        result = ''
        for digit in list(match[1]):
            result += subscript[digit]
        return result

    def _decode_superscript(self, match):
        # Translate each captured character to its Unicode superscript form.
        result = ''
        for digit in list(match[1]):
            result += superscript[digit]
        return result

    def _decode_greek_symbol(self, match):
        return greek_symbol[match[1]]
/reference_handler-0.9.1.tar.gz/reference_handler-0.9.1/reference_handler/reference_handler.py
0.444324
0.203193
reference_handler.py
pypi
from typing import Dict

import click

from .converters.url_converter import convert_urls_to_bibtex
from .parsers import IEEEParser, SsauParser

# Maps the value of the --parser CLI option to a ready-to-use parser.
PARSER_MAPPING = {
    "ssau": SsauParser(),
    "ieee": IEEEParser(),
}


def log_errors(errors: Dict[str, dict]):
    """Print every parsing error to stdout, one line per (entry, error) pair.

    ``errors`` maps a bibtex entry title to a dict of error descriptions;
    ``None`` or an empty mapping produces no output.
    """
    if errors is not None and len(errors.keys()) > 0:
        print("Some errors occurred during parsing:")
        for bibtex_title, error in errors.items():
            for k, v in error.items():
                print(f"ERROR | {bibtex_title}: {k} - {v}")


@click.group()
def cli():
    """Entry-point group for the references-parser command line tool."""
    pass


@cli.command()
@click.argument("path", type=str)
@click.option(
    "-s",
    "--save",
    type=str,
    help="Path to file where to save the result of parsing.",
    default="",
)
@click.option(
    "-p",
    "--parser",
    type=str,
    help="Parser to use. Available parsers: ieee, ssau. Default: ssau.",
    default="ssau",
)
@click.option(
    "-v",
    "--verbose",
    is_flag=True,
    help="Whether to print output to stdout or not",
    default=False,
)
@click.option(
    "-b",
    "--beautify",
    type=int,
    help="Number of line wraps between references. Default: 1",
    default=1,
)
def parse(path: str, save: str, parser: str, verbose: bool, beautify: int):
    """Parse the citations found in PATH; optionally print and/or save them."""
    with open(path, "r", encoding="utf-8") as f:
        citations = f.read()

    parser_type = parser.lower()
    parser = PARSER_MAPPING.get(parser_type)
    if parser is None:
        # The original message concatenated "received" directly against the
        # value ("receivedssau"); a separating space is required.
        raise ValueError(
            "Unknown parser type. Expect one of "
            f"{list(PARSER_MAPPING.keys())}, but received "
            f"{parser_type}"
        )

    result, errors = parser(citations)

    end = "\n" * beautify
    if verbose:
        for entry in result:
            if entry is not None:
                print(entry, end=end)
    if save:
        with open(save, "w", encoding="utf-8") as f:
            for entry in result:
                if entry is not None:
                    f.write(entry + end)
        print(f"Saved result to {save}.")
    log_errors(errors)


@cli.command()
@click.argument("path", type=str)
def prepare_urls(path: str):
    """Replace plain URLs in PATH with bibtex entries, rewriting the file."""
    with open(path, "r", encoding="utf-8") as f:
        citations = f.read()
    citations, errors = convert_urls_to_bibtex(citations)
    with open(path, "w", encoding="utf-8") as f:
        f.write(citations)
    log_errors(errors)


if __name__ == "__main__":
    cli()
/references_parser-1.2.1-py3-none-any.whl/references_parser/cli.py
0.606732
0.195709
cli.py
pypi
from typing import Optional, Tuple

import bibtexparser as p

from .bibtex import Bibtex


class BibtexSsau(Bibtex):
    """Bibtex model with SSAU (GOST-style) author formatting."""

    def get_parsed_authors(
        self, return_all=False, space_between_initials=False
    ) -> Tuple[Optional[str], str]:
        """Format the author list following the SSAU rules.

        Rules, chosen by the number of authors:

        1. Fewer than 4: the first author is rendered separately as
           "Lastname, Initials" and all authors appear in the joint list
           as "Initials Lastname".
        2. Exactly 4: no leading author; all four appear in the joint list.
        3. More than 4: no leading author; only the first three appear,
           followed by "[и др.]" ("et al.").

        Args:
            return_all: always list every author and keep the leading
                author, regardless of count.
            space_between_initials: separate initials with a space
                ("F. M." instead of "F.M.").

        Returns:
            A ``(first_author, others)`` pair; ``first_author`` is ``None``
            for cases 2 and 3 above.
        """
        names = self.author
        initials_sep = " " if space_between_initials else ""

        def surname(parts):
            # Multi-part last names are fused with "-" ("" was used previously).
            return "-".join(parts["last"])

        def initials(parts):
            return initials_sep.join(
                f"{given[0]}." for given in parts["first"]
            )

        def as_initials_surname(raw_name):
            parts = p.customization.splitname(raw_name)
            return f"{initials(parts)} {surname(parts)}"

        def joined(raw_names):
            return ", ".join(as_initials_surname(raw) for raw in raw_names)

        lead_parts = p.customization.splitname(names[0])
        lead = f"{surname(lead_parts)}, {initials(lead_parts)}"

        if len(names) < 4 or return_all:
            return lead, joined(names)
        if len(names) == 4:
            return None, joined(names)
        return None, f"{joined(names[:3])} [и др.]"
/references_parser-1.2.1-py3-none-any.whl/references_parser/models/bibtex_ssau.py
0.704872
0.425307
bibtex_ssau.py
pypi
from typing import List, Optional

from .ssau_parser import SsauParser

SEP = ", "


class IEEEParser(SsauParser):
    """Parses Bibtex into the IEEE citation format.

    Official citation guideline:
    https://ieee-dataport.org/sites/default/files/analysis/27/IEEE%20Citation%20Guidelines.pdf
    """

    def __init__(self):
        super().__init__()

    def parse_article(self, bibtex_entry: dict):
        """
        Render a bibtex entry annotated with @article as an IEEE citation.
        """
        authors = self._parse_authors(bibtex_entry["author"])
        title = bibtex_entry["title"]
        year = bibtex_entry["year"]
        journal = bibtex_entry["journal"]

        result = authors + SEP + '"' + title + '," ' + journal

        # arXiv preprints carry no volume/number/pages information.
        if "arXiv" in journal:
            result += SEP + f"{year}."
            return result

        number = (
            "" if "number" not in bibtex_entry else f"no. {bibtex_entry['number']}"
        )
        volume = (
            "" if "volume" not in bibtex_entry else f"vol. {bibtex_entry['volume']}"
        )
        if len(volume) != 0:
            result += SEP + volume
        # Only append when present; previously an empty number produced a
        # dangling ", " in the citation.
        if len(number) != 0:
            result += SEP + number

        # Guard the pages lookup like parse_proceedings does; previously a
        # missing "pages" field raised KeyError here.
        pages = (
            [] if "pages" not in bibtex_entry
            else bibtex_entry["pages"].split("--")
        )
        if len(pages) == 1:
            result += SEP + f"p. {pages[0]}"
        elif len(pages) == 2:
            result += SEP + f"pp. {pages[0]}-{pages[1]}"

        result += SEP + f"{year}."
        return result

    def parse_proceedings(self, bibtex_entry: dict):
        """
        Render a bibtex entry annotated with @inproceedings as an IEEE
        citation.
        """
        authors = self._parse_authors(bibtex_entry["author"])
        title = bibtex_entry["title"]
        booktitle = bibtex_entry["booktitle"]
        year = bibtex_entry["year"]
        pages = (
            [] if "pages" not in bibtex_entry
            else bibtex_entry["pages"].split("--")
        )

        result = authors + SEP + '"' + title + '," In ' + booktitle + SEP + year
        if len(pages) == 1:
            result += SEP + f"p. {pages[0]}"
        elif len(pages) == 2:
            result += SEP + f"pp. {pages[0]}-{pages[1]}"
        return result + "."

    def _parse_authors(self, authors):
        """Join authors IEEE-style: "A and B" or "A, B, and C".

        NOTE(review): relies on ``SsauParser._parse_authors`` — confirm the
        base class actually provides it in the deployed version.
        """
        first_author, authors = super()._parse_authors(authors, return_all=True)
        authors_list = authors.split(", ")
        if len(authors_list) == 1:
            return authors
        if len(authors_list) == 2:
            return authors_list[0] + " and " + authors_list[1]
        # Add `and` before the last author (Oxford comma kept).
        return ", ".join(authors_list[:-1]) + ", and " + authors_list[-1]

    def __call__(self, bibtex: str):
        # Deliberate guard: the IEEE pipeline has not been validated yet,
        # so the delegation below is intentionally unreachable.
        raise Exception("Hasn't been tested yet")
        return super().__call__(bibtex)
/references_parser-1.2.1-py3-none-any.whl/references_parser/parsers/ieee_parser.py
0.836488
0.252885
ieee_parser.py
pypi
from datetime import datetime
from typing import List, Optional, Tuple, Dict

import bibtexparser as p
from duckpy import Client
from tqdm import tqdm

from references_parser.models import Bibtex, BibtexSsau

# Constant which stands for the following template: ". (long dash) " or ". -- "
SEP_DASH = ". \u2012 "


class SsauParser:
    """Renders bibtex entries as SSAU (GOST-style) citation strings."""

    def __init__(self):
        self.search_client = Client()
        # Maps a bibtex title to a dict describing what went wrong with it.
        self.errors = {}

    def parse_website(self, bibtex: BibtexSsau):
        """Render a @website entry."""
        result = bibtex.title
        if bibtex.base is not None and len(bibtex.base) > 0:
            result += " // " + bibtex.base
        result += " / " + bibtex.origin
        if bibtex.address is None:
            result += SEP_DASH + "[Б.м.], " + bibtex.year
        else:
            result += SEP_DASH + f"{bibtex.address}, " + bibtex.year
        result += SEP_DASH + "URL: "
        result += bibtex.url
        result += f" (дата обращения: {bibtex.date})."
        return result

    def parse_arxiv(self, bibtex: BibtexSsau):
        """
        In case of reference on arxiv.org

        Le, Q. Distributed Representations of Sentences and Documents /
        Q. Le, T. Mikolov // ArXiv / Cornell University. – 2014. – URL:
        https://arxiv.org/abs/1405.4053 (дата обращения: 03.11.2020)
        """
        first_author, authors = bibtex.get_parsed_authors()
        today = datetime.now().strftime("%d.%m.%Y")

        if bibtex.url is not None:
            url = bibtex.url
        else:
            # BUG FIX: previously `url` was only assigned inside this search
            # branch, so entries that *did* carry a URL hit a NameError in
            # the f-string below.
            try:
                results = self.search_client.search(f"arxiv {bibtex.title}")
                url = results[0]["url"]
            except Exception as e:
                self.errors[bibtex.title] = {
                    "Error occured while searching": str(e)
                }
                return None

        result = ""
        if first_author is not None:
            result += first_author + " "
        return (
            result
            + bibtex.title
            + " / "
            + authors
            + " // ArXiv / Cornell University"
            + SEP_DASH
            + bibtex.year
            + SEP_DASH
            + f"URL: {url} (дата обращения: {today})."
        )

    def _volume_pages_suffix(self, bibtex: BibtexSsau) -> str:
        """Shared citation tail: optional "Vol. V(N)" plus page range."""
        suffix = ""
        number = f"({bibtex.number})" if bibtex.number is not None else ""
        volume = (
            f"Vol. {bibtex.volume}{number}" if bibtex.volume is not None else ""
        )
        if len(volume) != 0:
            suffix += SEP_DASH + volume

        pages = bibtex.pages_list
        if len(pages) == 1:
            suffix += SEP_DASH + f"P. {pages[0]}"
        elif len(pages) == 2:
            suffix += SEP_DASH + f"P. {pages[0]}-{pages[1]}"
        return suffix

    def parse_article(self, bibtex: BibtexSsau):
        """
        Parse bibtex annotated with @article.
        """
        # arXiv articles have their own citation layout.
        if bibtex.journal is not None and "arXiv" in bibtex.journal:
            return self.parse_arxiv(bibtex)

        first_author, authors = bibtex.get_parsed_authors()
        result = ""
        if first_author is not None:
            result += first_author + " "
        result = (
            result
            + bibtex.title
            + " / "
            + authors
            + " // "
            + bibtex.journal
            + SEP_DASH
            + bibtex.year
        )
        result += self._volume_pages_suffix(bibtex)
        return result + "."

    def parse_proceedings(self, bibtex: BibtexSsau):
        """
        Parse bibtex annotated with @inproceedings.
        """
        first_author, authors = bibtex.get_parsed_authors()
        org_year = (
            f"{bibtex.organization}, {bibtex.year}"
            if bibtex.organization is not None
            else bibtex.year
        )
        result = ""
        if first_author is not None:
            result += first_author + " "
        result = (
            result
            + bibtex.title
            + " / "
            + authors
            + " // "
            + bibtex.booktitle
            + SEP_DASH
            + org_year
        )
        result += self._volume_pages_suffix(bibtex)
        return result + "."

    def __call__(self, bibtex: str) -> Tuple[List[Optional[str]], Dict[str, dict]]:
        """Parse a bibtex string (or a path to a ``.txt`` file holding one).

        Returns a (citations, errors) pair; individual citations may be
        ``None`` when a lookup failed (see ``self.errors``).
        """
        if bibtex.endswith(".txt"):
            with open(bibtex, "r") as f:
                bibtex_dict = p.load(
                    f, p.bparser.BibTexParser(ignore_nonstandard_types=False)
                )
        else:
            bibtex_dict = p.loads(
                bibtex, p.bparser.BibTexParser(ignore_nonstandard_types=False)
            )

        result = []
        for bibtex_entry in tqdm(bibtex_dict.entries):
            # The following line will parse the author string into a list
            # of authors.
            bibtex_entry = p.customization.author(bibtex_entry)
            bibtex_entry = p.customization.convert_to_unicode(bibtex_entry)
            bibtex_model = BibtexSsau(**bibtex_entry)
            if bibtex_model.ENTRYTYPE == Bibtex.Types.Article:
                result.append(self.parse_article(bibtex_model))
            elif bibtex_model.ENTRYTYPE == Bibtex.Types.Proceedings:
                result.append(self.parse_proceedings(bibtex_model))
            elif bibtex_model.ENTRYTYPE == Bibtex.Types.Website:
                result.append(self.parse_website(bibtex_model))
            else:
                raise Exception(
                    f"Unsupported bibtex type: {bibtex_model.ENTRYTYPE}"
                )
        return result, self.errors
/references_parser-1.2.1-py3-none-any.whl/references_parser/parsers/ssau_parser.py
0.751557
0.215371
ssau_parser.py
pypi
[![DOI](https://joss.theoj.org/papers/10.21105/joss.01994/status.svg)](https://doi.org/10.21105/joss.01994) [![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-brightgreen.svg)](https://github.com/oschwengers/referenceseeker/blob/master/LICENSE) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/referenceseeker.svg) ![GitHub release](https://img.shields.io/github/release/oschwengers/referenceseeker.svg) ![PyPI](https://img.shields.io/pypi/v/referenceseeker.svg) ![PyPI - Status](https://img.shields.io/pypi/status/referenceseeker.svg) [![Conda](https://img.shields.io/conda/v/bioconda/referenceseeker.svg)](http://bioconda.github.io/recipes/referenceseeker/README.html) ![Python package](https://github.com/oschwengers/referenceseeker/workflows/Python%20package/badge.svg?branch=master) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4415843.svg)](https://doi.org/10.5281/zenodo.4415843) # ReferenceSeeker: rapid determination of appropriate reference genomes ## Contents - [Description](#description) - [Input & Output](#input-output) - [Installation](#installation) - [BioConda](#bioconda) - [GitHub](#github) - [Usage](#usage) - [Examples](#examples) - [Databases](#databases) - [RefSeq](#refseq-based) - [Custom](#custom-database) - [Dependencies](#dependencies) - [Citation](#citation) ## Description ReferenceSeeker determines closely related reference genomes following a scalable hierarchical approach combining a fast kmer profile-based database lookup of candidate reference genomes and subsequent computation of specific average nucleotide identity (ANI) values for the rapid determination of suitable reference genomes. ReferenceSeeker computes kmer-based genome distances between a query genome and potential reference genome candidates via Mash (Ondov et al. 2016). 
For resulting candidates ReferenceSeeker subsequently computes (bidirectional) ANI values picking genomes meeting community standard thresholds by default (ANI >= 95 % & conserved DNA >= 69 %) (Goris, Konstantinos et al. 2007) ranked by the product of ANI and conserved DNA values to take into account both genome coverage and identity. Custom databases can be built with local genomes. For further convenience, we provide pre-built databases with sequences from RefSeq (<https://www.ncbi.nlm.nih.gov/refseq>), GTDB and PLSDB comprising the following taxa: - bacteria - archaea - fungi - protozoa - viruses as well as *plasmids*. The reasoning for subsequent calculations of both ANI and conserved DNA values is that Mash distance values correlate well with ANI values for closely related genomes, however the same is not true for conserved DNA values. A kmer fingerprint-based comparison alone cannot distinguish if a kmer is missing due to a SNP, for instance, or a lack of the kmer-comprising subsequence. As DNA conservation (next to DNA identity) is very important for many kinds of analyses, *e.g.* reference based SNP detections, ranking potential reference genomes based on a mash distance alone is often not sufficient in order to select the most appropriate reference genomes. If desired, ANI and conserved DNA values can be computed bidirectionally. ![Mash D vs. ANI / conDNA](mash-ani-cdna.mini.png?raw=true) ## Input & Output ### Input Path to a taxon database and a draft or finished genome in (zipped) fasta format: ```bash $ referenceseeker ~/bacteria GCF_000013425.1.fna ``` ### Output Tab separated lines to STDOUT comprising the following columns: Unidirectionally (query -> references): - RefSeq Assembly ID - Mash Distance - ANI - Conserved DNA - NCBI Taxonomy ID - Assembly Status - Organism ```bash #ID Mash Distance ANI Con. DNA Taxonomy ID Assembly Status Organism GCF_000013425.1 0.00000 100.00 100.00 93061 complete Staphylococcus aureus subsp. 
aureus NCTC 8325 GCF_001900185.1 0.00002 100.00 99.89 46170 complete Staphylococcus aureus subsp. aureus HG001 GCF_900475245.1 0.00004 100.00 99.57 93061 complete Staphylococcus aureus subsp. aureus NCTC 8325 NCTC8325 GCF_001018725.2 0.00016 100.00 99.28 1280 complete Staphylococcus aureus FDAARGOS_10 GCF_003595465.1 0.00185 99.86 96.81 1280 complete Staphylococcus aureus USA300-SUR6 GCF_003595385.1 0.00180 99.87 96.80 1280 complete Staphylococcus aureus USA300-SUR2 GCF_003595365.1 0.00180 99.87 96.80 1280 complete Staphylococcus aureus USA300-SUR1 GCF_001956815.1 0.00180 99.87 96.80 46170 complete Staphylococcus aureus subsp. aureus USA300_SUR1 ... ``` Bidirectionally (query -> references [QR] & references -> query [RQ]): - RefSeq Assembly ID - Mash Distance - QR ANI - QR Conserved DNA - RQ ANI - RQ Conserved DNA - NCBI Taxonomy ID - Assembly Status - Organism ```bash #ID Mash Distance QR ANI QR Con. DNA RQ ANI RQ Con. DNA Taxonomy ID Assembly Status Organism GCF_000013425.1 0.00000 100.00 100.00 100.00 100.00 93061 complete Staphylococcus aureus subsp. aureus NCTC 8325 GCF_001900185.1 0.00002 100.00 99.89 100.00 99.89 46170 complete Staphylococcus aureus subsp. aureus HG001 GCF_900475245.1 0.00004 100.00 99.57 99.99 99.67 93061 complete Staphylococcus aureus subsp. aureus NCTC 8325 NCTC8325 GCF_001018725.2 0.00016 100.00 99.28 99.95 98.88 1280 complete Staphylococcus aureus FDAARGOS_10 GCF_001018915.2 0.00056 99.99 96.35 99.98 99.55 1280 complete Staphylococcus aureus NRS133 GCF_001019415.2 0.00081 99.99 94.47 99.98 99.36 1280 complete Staphylococcus aureus NRS146 GCF_001018735.2 0.00096 100.00 94.76 99.98 98.58 1280 complete Staphylococcus aureus NRS137 GCF_003354885.1 0.00103 99.93 96.63 99.93 96.66 1280 complete Staphylococcus aureus 164 ... ``` ## Installation ReferenceSeeker can be installed via Conda and Git(Hub). 
In either case, a taxon database must be downloaded which we provide for download at Zenodo: [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3562004.svg)](https://doi.org/10.5281/zenodo.3562004) For more information have a look at [Databases](#databases). ### BioConda The preferred way to install and run ReferenceSeeker is [Conda](https://conda.io/docs/install/quick.html) using the [Bioconda](https://bioconda.github.io/) channel: ```bash $ conda install -c bioconda referenceseeker $ referenceseeker --help ``` ### GitHub Alternatively, you can use this raw GitHub repository: 1. install necessary Python dependencies (if necessary) 2. clone the latest version of the repository 3. install necessary 3rd party executables (Mash, MUMmer4) ```bash $ pip3 install --user biopython xopen $ git clone https://github.com/oschwengers/referenceseeker.git $ # install Mash & MUMmer $ ./referenceseeker/bin/referenceseeker --help ``` ### Test To test your installation we prepared a tiny mock database comprising 4 `Salmonella spp` genomes and a query assembly (SRA: SRR498276) in the `tests` directory: ```bash $ git clone https://github.com/oschwengers/referenceseeker.git # GitHub installation $ ./referenceseeker/bin/referenceseeker referenceseeker/test/db referenceseeker/test/data/Salmonella_enterica_CFSAN000189.fasta # BioConda installation $ referenceseeker referenceseeker/test/db referenceseeker/test/data/Salmonella_enterica_CFSAN000189.fasta ``` Expected output: ```bash #ID Mash Distance ANI Con. DNA Taxonomy ID Assembly Status Organism GCF_000439415.1 0.00003 100.00 99.55 1173427 complete Salmonella enterica subsp. enterica serovar Bareilly str. CFSAN000189 GCF_900205275.1 0.01522 98.61 83.13 90370 complete Salmonella enterica subsp. 
enterica serovar Typhi ``` ## Usage Usage: ```bash usage: referenceseeker [--crg CRG] [--ani ANI] [--conserved-dna CONSERVED_DNA] [--unfiltered] [--bidirectional] [--help] [--version] [--verbose] [--threads THREADS] <database> <genome> Rapid determination of appropriate reference genomes. positional arguments: <database> ReferenceSeeker database path <genome> target draft genome in fasta format Filter options / thresholds: These options control the filtering and alignment workflow. --crg CRG, -r CRG Max number of candidate reference genomes to pass kmer prefilter (default = 100) --ani ANI, -a ANI ANI threshold (default = 0.95) --conserved-dna CONSERVED_DNA, -c CONSERVED_DNA Conserved DNA threshold (default = 0.69) --unfiltered, -u Set kmer prefilter to extremely conservative values and skip species level ANI cutoffs (ANI >= 0.95 and conserved DNA >= 0.69 --bidirectional, -b Compute bidirectional ANI/conserved DNA values (default = False) Runtime & auxiliary options: --help, -h Show this help message and exit --version, -V show program's version number and exit --verbose, -v Print verbose information --threads THREADS, -t THREADS Number of used threads (default = number of available CPU cores) ``` ## Examples Installation: ```bash $ conda install -c bioconda referenceseeker $ wget https://zenodo.org/record/4415843/files/bacteria-refseq.tar.gz $ tar -xzf bacteria-refseq.tar.gz $ rm bacteria-refseq.tar.gz ``` Simple: ```bash $ # referenceseeker <REFERENCE_SEEKER_DB> <GENOME> $ referenceseeker bacteria-refseq/ genome.fasta ``` Expert: verbose output and increased output of candidate reference genomes using a defined number of threads: ```bash $ # referenceseeker --crg 500 --verbose --threads 8 <REFERENCE_SEEKER_DB> <GENOME> $ referenceseeker --crg 500 --verbose --threads 8 bacteria-refseq/ genome.fasta ``` ## Databases ReferenceSeeker depends on databases comprising taxonomic genome informations as well as kmer hash profiles for each entry. 
### Pre-built We provide pre-built databases based on public genome data hosted at Zenodo: [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4415843.svg)](https://doi.org/10.5281/zenodo.4415843) : #### RefSeq release: 205 (2021-04-01) | Taxon | URL | # Genomes | Size | | :---: | --- | ---: | :---: | | bacteria | <https://zenodo.org/record/4415843/files/bacteria-refseq.tar.gz> | 30,941 | 40 Gb | | archaea | <https://zenodo.org/record/4415843/files/archaea-refseq.tar.gz> | 606 | 553 Mb | | fungi | <https://zenodo.org/record/4415843/files/fungi-refseq.tar.gz> | 347 | 3.3 Gb | | protozoa | <https://zenodo.org/record/4415843/files/protozoa-refseq.tar.gz> | 88 | 1.1 Gb | | viruses | <https://zenodo.org/record/4415843/files/viral-refseq.tar.gz> | 10,339 | 730 Mb | #### GTDB release: v95 (2021-01-06) | Taxon | URL | # Genomes | Size | | :---: | --- | ---: | :---: | | bacteria | <https://zenodo.org/record/4415843/files/bacteria-gtdb.tar.gz> | 30,238 | 34 Gb | | archaea | <https://zenodo.org/record/4415843/files/archaea-gtdb.tar.gz> | 1,672 | 1.1 Gb | #### Plasmids In addition to the genome based databases, we provide the following plasmid databases based on RefSeq and PLSDB: | DB | URL | # Plasmids | Size | | :---: | --- | ---: | :---: | | RefSeq | <https://zenodo.org/record/4415843/files/plasmids-refseq.tar.gz> | 32,611 | 1.1 Gb | | PLSDB | <https://zenodo.org/record/4415843/files/plasmids-plsdb.tar.gz> | 27,393 | 1.1 Gb | ### Custom database If the above-mentioned RefSeq-based databases do not contain sufficiently closely related genomes or are just too large, ReferenceSeeker provides auxiliary commands in order to either create databases from scratch or to expand existing ones. Therefore, a second executable `referenceseeker_db` accepts `init` and `import` subcommands: Usage: ```bash usage: referenceseeker_db [--help] [--version] {init,import} ... Rapid determination of appropriate reference genomes. 
positional arguments: {init,import} sub-command help init Initialize a new database import Add a new genome to database Runtime & auxiliary options: --help, -h Show this help message and exit --version, -V show program's version number and exit ``` If a new database should be created, use `referenceseeker_db init`: ```bash usage: referenceseeker_db init [-h] [--output OUTPUT] --db DB optional arguments: -h, --help show this help message and exit --output OUTPUT, -o OUTPUT output directory (default = current working directory) --db DB, -d DB Name of the new ReferenceSeeker database ``` This new database or an existing one can be used to import genomes in Fasta, GenBank or EMBL format: ```bash usage: referenceseeker_db import [-h] --db DB --genome GENOME [--id ID] [--taxonomy TAXONOMY] [--status {complete,chromosome,scaffold,contig}] [--organism ORGANISM] optional arguments: -h, --help show this help message and exit --db DB, -d DB ReferenceSeeker database path --genome GENOME, -g GENOME Genome path [Fasta, GenBank, EMBL] --id ID, -i ID Unique genome identifier (default sequence id of first record) --taxonomy TAXONOMY, -t TAXONOMY Taxonomy ID (default = 12908 [unclassified sequences]) --status {complete,chromosome,scaffold,contig}, -s {complete,chromosome,scaffold,contig} Assembly level (default = contig) --organism ORGANISM, -o ORGANISM Organism name (default = "NA") ``` ## Dependencies ReferenceSeeker needs the following dependencies: - Python (3.8, 3.9), Biopython (>=1.78), xopen(>=1.1.0) - Mash (2.3) <https://github.com/marbl/Mash> - MUMmer (4.0.0-beta2) <https://github.com/gmarcais/mummer> ReferenceSeeker has been tested against aforementioned versions. ## Citation > Schwengers et al., (2020). ReferenceSeeker: rapid determination of appropriate reference genomes. Journal of Open Source Software, 5(46), 1994, https://doi.org/10.21105/joss.01994
/referenceseeker-1.7.3.tar.gz/referenceseeker-1.7.3/README.md
0.414425
0.927363
README.md
pypi
from pathlib import Path
from tempfile import TemporaryDirectory
import os

import nox

# Repository layout anchors used by the sessions below.
ROOT = Path(__file__).parent
PYPROJECT = ROOT / "pyproject.toml"
DOCS = ROOT / "docs"
REFERENCING = ROOT / "referencing"

# Populated by the @session decorator; only sessions registered with
# default=True run when nox is invoked without an explicit -s argument.
nox.options.sessions = []


def session(default=True, **kwargs):
    """Wrap nox.session, optionally registering the session as a default one."""
    def _session(fn):
        if default:
            # Register under the explicit name= if given, else the function name.
            nox.options.sessions.append(kwargs.get("name", fn.__name__))
        return nox.session(**kwargs)(fn)

    return _session


@session(python=["3.8", "3.9", "3.10", "3.11", "3.12", "pypy3"])
def tests(session):
    """Run the test suite; posargs 'coverage' ['github'] enable coverage reporting."""
    session.install("-r", ROOT / "test-requirements.txt")
    if session.posargs and session.posargs[0] == "coverage":
        if len(session.posargs) > 1 and session.posargs[1] == "github":
            # Emit the coverage table into the GitHub Actions step summary file.
            github = os.environ["GITHUB_STEP_SUMMARY"]
        else:
            github = None
        session.install("coverage[toml]")
        session.run("coverage", "run", "-m", "pytest", REFERENCING)
        if github is None:
            session.run("coverage", "report")
        else:
            with open(github, "a") as summary:
                summary.write("### Coverage\n\n")
                summary.flush()  # without a flush, output seems out of order.
                session.run(
                    "coverage",
                    "report",
                    "--format=markdown",
                    stdout=summary,
                )
    else:
        session.run("pytest", *session.posargs, REFERENCING)


@session()
def audit(session):
    """Audit the installed dependency tree for known vulnerabilities."""
    session.install("pip-audit", ROOT)
    session.run("python", "-m", "pip_audit")


@session(tags=["build"])
def build(session):
    """Build sdist/wheel into a temp dir and validate them with twine."""
    session.install("build", "twine")
    with TemporaryDirectory() as tmpdir:
        session.run("python", "-m", "build", ROOT, "--outdir", tmpdir)
        session.run("twine", "check", "--strict", tmpdir + "/*")


@session(tags=["style"])
def style(session):
    """Lint the whole repository with ruff."""
    session.install("ruff")
    session.run("ruff", "check", ROOT)


@session()
def typing(session):
    """Type-check the package with a pinned pyright."""
    session.install("pyright==1.1.307", ROOT)
    session.run("pyright", REFERENCING)


@session(tags=["docs"])
@nox.parametrize(
    "builder",
    [
        nox.param(name, id=name)
        for name in [
            "dirhtml",
            "doctest",
            "linkcheck",
            "man",
            "spelling",
        ]
    ],
)
def docs(session, builder):
    """Build the documentation once per parametrized sphinx builder."""
    session.install("-r", DOCS / "requirements.txt")
    with TemporaryDirectory() as tmpdir_str:
        tmpdir = Path(tmpdir_str)
        argv = ["-n", "-T", "-W"]
        if builder != "spelling":
            # Quiet output, except for the spelling builder whose findings we want.
            argv += ["-q"]
        posargs = session.posargs or [tmpdir / builder]
        session.run(
            "python",
            "-m",
            "sphinx",
            "-b",
            builder,
            DOCS,
            *argv,
            *posargs,
        )


@session(tags=["docs", "style"], name="docs(style)")
def docs_style(session):
    """Check documentation style with doc8, configured via pyproject.toml."""
    session.install(
        "doc8",
        "pygments",
        "pygments-github-lexers",
    )
    session.run("python", "-m", "doc8", "--config", PYPROJECT, DOCS)


@session(default=False)
def requirements(session):
    """Re-pin the requirements files with pip-compile (not run by default)."""
    session.install("pip-tools")
    for each in [DOCS / "requirements.in", ROOT / "test-requirements.in"]:
        session.run(
            "pip-compile",
            "--resolver",
            "backtracking",
            "-U",
            each.relative_to(ROOT),
        )
/referencing-0.30.2.tar.gz/referencing-0.30.2/noxfile.py
0.478529
0.20201
noxfile.py
pypi
============ Introduction ============ When authoring JSON documents, it is often useful to be able to reference other JSON documents, or to reference subsections of other JSON documents. This kind of JSON referencing has historically been defined by various specifications, with slightly differing behavior. The JSON Schema specifications, for instance, define :kw:`$ref` and :kw:`$dynamicRef` keywords to allow schema authors to combine multiple schemas together for reuse or deduplication as part of authoring JSON schemas. The `referencing <index>` library was written in order to provide a simple, well-behaved and well-tested implementation of JSON reference resolution in a way which can be used across multiple specifications or specification versions. Core Concepts ------------- There are 3 main objects to be aware of: * `referencing.Registry`, which represents a specific immutable set of resources (either in-memory or retrievable) * `referencing.Specification`, which represents a specific specification, such as JSON Schema Draft 7, which can have differing referencing behavior from other specifications or even versions of JSON Schema. JSON Schema-specific specifications live in the `referencing.jsonschema` module and are named like `referencing.jsonschema.DRAFT202012`. * `referencing.Resource`, which represents a specific resource (often a Python `dict`) *along* with a specific `referencing.Specification` it is to be interpreted under. As a concrete example, the simple JSON Schema ``{"type": "integer"}`` may be interpreted as a schema under either Draft 2020-12 or Draft 4 of the JSON Schema specification (amongst others); in draft 2020-12, the float ``2.0`` must be considered an integer, whereas in draft 4, it potentially is not. If you mean the former (i.e. 
to associate this schema with draft 2020-12), you'd use ``referencing.Resource(contents={"type": "integer"}, specification=referencing.jsonschema.DRAFT202012)``, whereas for the latter you'd use `referencing.jsonschema.DRAFT4`. A resource may be identified via one or more URIs, either because they identify themselves in a way prescribed by their specification (e.g. an :kw:`$id` keyword in suitable versions of the JSON Schema specification), or simply because you wish to externally associate a URI with the resource, regardless of a specification-specific way to refer to itself. You could add the aforementioned simple JSON Schema resource to a `referencing.Registry` by creating an empty registry and then identifying it via some URI: .. testcode:: from referencing import Registry, Resource from referencing.jsonschema import DRAFT202012 resource = Resource(contents={"type": "integer"}, specification=DRAFT202012) registry = Registry().with_resource(uri="http://example.com/my/resource", resource=resource) print(registry) .. testoutput:: <Registry (1 uncrawled resource)> .. note:: `referencing.Registry` is an entirely immutable object. All of its methods which add resources to itself return *new* registry objects containing the added resource. You could also confirm your resource is in the registry if you'd like, via `referencing.Registry.contents`, which will show you the contents of a resource at a given URI: .. testcode:: print(registry.contents("http://example.com/my/resource")) .. testoutput:: {'type': 'integer'} 
* does your resource have an internal ID (e.g. the JSON Schema :kw:`$id` keyword)? * do you have additional (external) URIs you want to refer to the same resource as well? * do you have one resource to add or many? We'll assume for example's sake that we're dealing with JSON Schema resources for the following examples, and we'll furthermore assume you have some initial `referencing.Registry` to add them to, perhaps an empty one: .. testcode:: from referencing import Registry initial_registry = Registry() Recall that registries are immutable, so we'll be "adding" our resources by creating new registries containing the additional resource(s) we add. In the ideal case, you have a JSON Schema with an internal ID, and which also identifies itself for a specific version of JSON Schema e.g.: .. code:: json { "$id": "urn:example:my-schema", "$schema": "https://json-schema.org/draft/2020-12/schema", "type": "integer" } If you have such a schema in some JSON text, and wish to add a resource to our registry and be able to identify it using its internal ID (``urn:example:my-schema``) you can simply use: .. testcode:: import json loaded = json.loads( """ { "$id": "urn:example:my-schema", "$schema": "https://json-schema.org/draft/2020-12/schema", "type": "integer" } """, ) resource = Resource.from_contents(loaded) registry = resource @ initial_registry which will give you a registry with our resource added to it. Let's check by using `Registry.contents`, which takes a URI and should show us the contents of our resource: .. testcode:: print(registry.contents("urn:example:my-schema")) .. testoutput:: {'$id': 'urn:example:my-schema', '$schema': 'https://json-schema.org/draft/2020-12/schema', 'type': 'integer'} If your schema did *not* have a :kw:`$schema` keyword, you'd get an error: .. testcode:: another = json.loads( """ { "$id": "urn:example:my-second-schema", "type": "integer" } """, ) print(Resource.from_contents(another)) .. testoutput:: Traceback (most recent call last): ... 
referencing.exceptions.CannotDetermineSpecification: {'$id': 'urn:example:my-second-schema', 'type': 'integer'} which is telling you that the resource you've tried to create is ambiguous -- there's no way to know which version of JSON Schema you intend it to be written for. You can of course instead directly create a `Resource`, instead of using `Resource.from_contents`, which will allow you to specify which version of JSON Schema you're intending your schema to be written for: .. testcode:: import referencing.jsonschema second = Resource(contents=another, specification=referencing.jsonschema.DRAFT202012) and now of course can add it as above: .. testcode:: registry = second @ registry print(registry.contents("urn:example:my-second-schema")) .. testoutput:: {'$id': 'urn:example:my-second-schema', 'type': 'integer'} As a shorthand, you can also use `Specification.create_resource` to create a `Resource` slightly more tersely. E.g., an equivalent way to create the above resource is: .. testcode:: second_again = referencing.jsonschema.DRAFT202012.create_resource(another) print(second_again == second) .. testoutput:: True If your resource doesn't contain an :kw:`$id` keyword, you'll get a different error if you attempt to add it to a registry: .. testcode:: third = Resource( contents=json.loads("""{"type": "integer"}"""), specification=referencing.jsonschema.DRAFT202012, ) registry = third @ registry .. testoutput:: Traceback (most recent call last): ... referencing.exceptions.NoInternalID: Resource(contents={'type': 'integer'}, _specification=<Specification name='draft2020-12'>) which is now saying that there's no way to add this resource to a registry directly, as it has no ``$id`` -- you must provide whatever URI you intend to use to refer to this resource to be able to add it. 
You can do so using `referencing.Registry.with_resource` instead of the `@ operator <referencing.Registry.__rmatmul__>` which we have used thus far, and which takes the explicit URI you wish to use as an argument: .. testcode:: registry = registry.with_resource(uri="urn:example:my-third-schema", resource=third) which now allows us to use the URI we associated with our third resource to retrieve it: .. testcode:: print(registry.contents("urn:example:my-third-schema")) .. testoutput:: {'type': 'integer'} If you have more than one resource to add, you can use `Registry.with_resources` (with an ``s``) to add many at once, or, if they meet the criteria to use ``@``, you can use ``[one, two, three] @ registry`` to add all three resources at once. You may also want to have a look at `Registry.with_contents` for a further method to add resources to a registry without constructing a `Resource` object yourself. Dynamically Retrieving Resources -------------------------------- Sometimes one wishes to dynamically retrieve or construct `Resource`\ s which *don't* already live in-memory within a `Registry`. This might be resources retrieved dynamically from a database, from files somewhere on disk, from some arbitrary place over the internet, or from the like. We'll refer to such resources not present in-memory as *external resources*. The ``retrieve`` argument to ``Registry`` objects can be used to configure a callable which will be used anytime a requested URI is *not* present in the registry, thereby allowing you to retrieve it from whichever location it lives in. Here's an example of automatically retrieving external references by downloading them via :httpx:`httpx </>`, illustrated by then automatically retrieving one of the JSON Schema metaschemas from the network: .. 
code:: python from referencing import Registry, Resource import httpx def retrieve_via_httpx(uri): response = httpx.get(uri) return Resource.from_contents(response.json()) registry = Registry(retrieve=retrieve_via_httpx) resolver = registry.resolver() print(resolver.lookup("https://json-schema.org/draft/2020-12/schema")) .. note:: In the case of JSON Schema, the specifications generally discourage implementations from automatically retrieving these sorts of external resources over the network due to potential security implications. See :kw:`schema-references` in particular. `referencing` will of course therefore not do any such thing automatically, and this section generally assumes that you have personally considered the security implications for your own use case. Caching ^^^^^^^ A common concern in these situations is also to *cache* the resulting resource such that repeated lookups of the same URI do not repeatedly call your retrieval function and thereby make network calls, hit the filesystem, etc. You are of course free to use whatever caching mechanism is convenient even if it uses caching functionality entirely unrelated to this library (e.g. one specific to ``httpx`` in the above example, or one using `functools.lru_cache` internally). Nonetheless, because it is so common to retrieve a JSON string and construct a resource from it, `referencing.retrieval.to_cached_resource` is a decorator which can help. If you use it, your retrieval callable should return a `str`, not a `Resource`, as the decorator will handle deserializing your response and constructing a `Resource` from it (this is mostly because otherwise, deserialized JSON is generally not hashable if it ends up being a Python `dict`). The above example would be written: .. 
code:: python from referencing import Registry, Resource import httpx import referencing.retrieval @referencing.retrieval.to_cached_resource() def cached_retrieve_via_httpx(uri): return httpx.get(uri).text registry = Registry(retrieve=cached_retrieve_via_httpx) resolver = registry.resolver() print(resolver.lookup("https://json-schema.org/draft/2020-12/schema")) It is otherwise functionally equivalent to the above, other than that retrieval will not repeatedly make a web request.
/referencing-0.30.2.tar.gz/referencing-0.30.2/docs/intro.rst
0.933529
0.717612
intro.rst
pypi
import re
from difflib import unified_diff
from typing import Optional, Callable, TypeVar, Type, Iterable, IO

from colorama.ansi import AnsiCodes, Fore, Style

# Matches ANSI/CSI escape sequences (both the single-byte 0x9B CSI and the
# two-byte ESC-[ form) so they can be stripped from decorated strings.
_ANSI_decorations = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')


def __get_diff_color(line: str) -> Optional[str]:
    """Map a unified-diff line to a colorama foreground color.

    Additions ('+') are green, removals ('-') red, hunk headers ('@') blue;
    context lines get no color (None).
    """
    if line.startswith('+'):
        return Fore.GREEN
    elif line.startswith('-'):
        return Fore.RED
    elif line.startswith('@'):
        return Fore.BLUE
    else:
        return None


def decorate(input: str, *decorations: Optional[Iterable[AnsiCodes]]) -> str:
    """Wrap `input` in the given ANSI decorations and reset codes.

    `None` entries among `decorations` are silently skipped.
    NOTE(review): `decorations is None` can never be true for a *args tuple;
    only the emptiness check is effective.
    """
    if decorations is None or len(decorations) == 0:
        return input
    prefix = ''.join(map(str, filter(lambda dec: dec is not None, decorations)))
    # Reset both foreground color and style so decorations do not leak.
    return f'{prefix}{input}{Fore.RESET}{Style.RESET_ALL}'


def remove_decorations(input: str) -> str:
    """Strip all ANSI escape sequences from `input`."""
    return re.sub(_ANSI_decorations, '', input)


def pretty_diff(actual: str, expected: str) -> str:
    """Return a colorized unified diff between `expected` and `actual`.

    Newlines are made visible as a '↵' marker before splitting so that
    trailing-newline differences show up in the diff.
    NOTE(review): `expected` is passed as the *from* side but labeled
    'got', and `actual` as the *to* side labeled 'expected' — the labels
    look swapped relative to the argument order; confirm intent.
    """
    actual_lines = actual.replace('\n', '↵\n').splitlines()
    expected_lines = expected.replace('\n', '↵\n').splitlines()
    diff_lines = unified_diff(
        expected_lines,
        actual_lines,
        fromfile='got',
        tofile='expected',
        lineterm='',
    )
    colored_lines = [
        decorate(line, __get_diff_color(line)) for line in diff_lines
    ]
    return '\n'.join(colored_lines)


T = TypeVar('T')


def pretty_assert(name: str,
                  actual: T,
                  expected: T,
                  compare: Callable[[T, T], Optional[str]],
                  type: Type = str) -> bool:
    """
    Execute the `compare` function on `actual` and `expected` and pretty-print a report.

    `compare` returns None on success, or a human-readable message
    describing the difference on failure.

    :param name: The name of the assertion
    :param actual: The actual value
    :param expected: The expected value
    :param compare: The comparison function. Takes the actual value and the expected value as parameters
    :param type: The type of the arguments - defaults to `str`
    :return: The value of the compare function applied to actual and expected
    """
    msg = compare(actual, expected)
    if msg is None:
        return True
    # This resolves to the module-level `print` override defined below
    # (name lookup happens at call time), so decorations are honored.
    print(f'Different {decorate(name, Style.BRIGHT, Fore.BLUE)}: \n{msg}')
    if type is str:
        # String comparisons already embed the diff in `msg`; just space out.
        print()
    else:
        print(f'expected {decorate(expected, Fore.GREEN)}'
              f', got {decorate(actual, Fore.RED)}')
    return False


# Keep a handle to the builtin before shadowing it below.
__print = print


def print(*args,
          sep: Optional[str] = ' ',
          end: Optional[str] = '\n',
          file: Optional[IO] = None,
          flush: bool = False,
          decorations: Iterable[str] = ()):
    """Module-level `print` override that decorates each argument.

    Deliberately shadows the builtin within this module; extra keyword
    `decorations` applies ANSI codes to every positional argument.
    """
    __print(*map(lambda arg: decorate(arg, *decorations), args),
            sep=sep, end=end, file=file, flush=flush)
/refery-2.0.1-py3-none-any.whl/src/prettify.py
0.77193
0.328853
prettify.py
pypi
# reffix: Fixing BibTeX reference list with DBLP API 🔧 [![reffix](https://github.com/kasnerz/reffix/actions/workflows/python-package.yml/badge.svg)](https://github.com/kasnerz/reffix/actions/workflows/python-package.yml) ![GitHub](https://img.shields.io/github/license/kasnerz/reffix) ![GitHub issues](https://img.shields.io/github/issues/kasnerz/reffix) ![PyPI](https://img.shields.io/pypi/v/reffix) ![PyPI downloads](https://img.shields.io/pypi/dm/reffix) ![Github stars](https://img.shields.io/github/stars/kasnerz/reffix?style=social) ➡️ *Reffix* is a simple tool for improving the BibTeX list of references in your paper. It can fix several common errors such as incorrect capitalization, missing URLs, or using arXiv pre-prints instead of published version. ➡️ *Reffix* queries the **[DBLP API](https://dblp.org/faq/How+to+use+the+dblp+search+API.html)**, so it does not require any local database of papers. ➡️ *Reffix* uses a conservative approach to keep your bibliography valid. ➡️ The tool is developed with NLP papers in mind, but it can be used on any BibTeX list of references containing computer science papers present on [DBLP](https://dblp.org). ## Quickstart 👉️ You can now install `reffix` from [PyPI](https://pypi.org/project/reffix/): ``` pip install -U reffix reffix [BIB_FILE] ``` See the Installation and Usage section below for more details. 
## Example **Before the update (Google Scholar):** - ❎ arXiv version - ❎ no URL - ❎ capitalization lost ``` { 'ENTRYTYPE': 'article', 'ID': 'duvsek2020evaluating', 'author': 'Du{\\v{s}}ek, Ond{\\v{r}}ej and Kasner, Zden{\\v{e}}k', 'journal': 'arXiv preprint arXiv:2011.10819', 'title': 'Evaluating semantic accuracy of data-to-text generation with ' 'natural language inference', 'year': '2020' } ``` **After the update (DBLP + preserving capitalization):** - ✔️ ACL version - ✔️ URL included - ✔️ capitalization preserved ``` { 'ENTRYTYPE': 'inproceedings', 'ID': 'duvsek2020evaluating', 'author': 'Ondrej Dusek and\nZdenek Kasner', 'bibsource': 'dblp computer science bibliography, https://dblp.org', 'biburl': 'https://dblp.org/rec/conf/inlg/DusekK20.bib', 'booktitle': 'Proceedings of the 13th International Conference on Natural ' 'Language\n' 'Generation, {INLG} 2020, Dublin, Ireland, December 15-18, ' '2020', 'editor': 'Brian Davis and\n' 'Yvette Graham and\n' 'John D. Kelleher and\n' 'Yaji Sripada', 'pages': '131--137', 'publisher': 'Association for Computational Linguistics', 'timestamp': 'Mon, 03 Jan 2022 00:00:00 +0100', 'title': '{Evaluating} {Semantic} {Accuracy} of {Data-to-Text} ' '{Generation} with {Natural} {Language} {Inference}', 'url': 'https://aclanthology.org/2020.inlg-1.19/', 'year': '2020' } ``` ## Main features - **Completing references** – *reffix* queries the DBLP API with the paper title and the first author's name to find a complete reference for each entry in the BibTeX file. - **Replacing arXiv preprints** – *reffix* can try to replace arXiv pre-prints with the version published at a conference or in a journal whenever possible. - **Preserving titlecase** – in order to [preserve correct casing](https://tex.stackexchange.com/questions/10772/bibtex-loses-capitals-when-creating-bbl-file), *reffix* wraps individual uppercased words in the paper title in curly brackets. 
- **Conservative approach**: + the original .bib file is preserved + no references are deleted + papers are updated only if the title and at least one of the authors match + the version of the paper corresponding to the original entry should be selected first - **Interactive mode** – you can confirm every change manually. The package uses [bibtexparser](https://github.com/sciunto-org/python-bibtexparser) for parsing the BibTex files, [DBLP API](https://dblp.org/faq/How+to+use+the+dblp+search+API.html) for updating the references, and the [titlecase](https://github.com/ppannuto/python-titlecase) package for optional extra titlecasing. ## Installation You can install `reffix` from [PyPI](https://pypi.org/project/reffix/): ``` pip install reffix ``` For development, you can install the package in the editable mode: ``` pip install -e .[dev] ``` ## Usage Run the script with the .bib file as the first argument: ``` reffix [IN_BIB_FILE] ``` By default, the program will run in batch mode, save the outputs in the file with an extra ".fixed" suffix, and keep the arXiv versions. The following command will run reffix in interactive mode, save the outputs to a custom file, and replace arXiv versions: ``` reffix [IN_BIB_FILE] -o [OUT_BIB_FILE] -i -a ``` ### Flags | short | long | description | | ----- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | `-o` | `--out` | Output filename. If not specified, the default filename `<original_name>.fixed.bib` is used. | | `-i` | `--interact` | Interactive mode. Every replacement of an entry with DBLP result has to be confirmed manually. 
|
| `-a`  | `--replace_arxiv`   | Replace arXiv versions. If a non-arXiv version (e.g. published at a conference or in a journal) is found at DBLP, it is preferred to the arXiv version. |
| `-t`  | `--force_titlecase` | Force titlecase for all entries. The `titlecase` package is used to fix casing of titles which are not titlecased. (Note that the capitalization rules used by the package may be a bit different.) |
| `-s`  | `--sort_by`         | Multiple sort conditions compatible with [bibtexparser.BibTexWriter](https://bibtexparser.readthedocs.io/en/master/_modules/bibtexparser/bwriter.html) applied in the provided order. Example: `-s ENTRYTYPE year` sorts the list by the entry type as its primary key and year as its secondary key. `ID` can be used to refer to the Bibtex key. The default None value keeps the original order of Bib entries. |

## Notes
Although *reffix* uses a conservative approach, it provides no guarantees that the output references are actually correct. If you want to make sure that *reffix* does not introduce any unwanted changes, please use the interactive mode (flag `-i`).

The tool depends on **DBLP API** which may change any time in the future. I will try to update the script if necessary, but it may still occasionally break. I welcome any pull requests with improvements.

Please be considerate regarding the DBLP API and do not generate high traffic for their servers :-)

## Contact
For any questions or suggestions, send an e-mail to kasner@ufal.mff.cuni.cz.
/reffix-1.1.0.tar.gz/reffix-1.1.0/README.md
0.555918
0.84759
README.md
pypi
# endregion
from pathlib import Path
from typing import Callable, Set, Tuple, Union

from refind_btrfs.common import BtrfsLogo, constants
from refind_btrfs.common.abc.commands import IconCommand
from refind_btrfs.common.abc.factories import BaseLoggerFactory
from refind_btrfs.common.enums import (
    BtrfsLogoHorizontalAlignment,
    BtrfsLogoVerticalAlignment,
)
from refind_btrfs.common.exceptions import RefindConfigError


class PillowCommand(IconCommand):
    """Icon manipulation backed by Pillow (PIL).

    Validates user-supplied custom icons and composites a Btrfs logo onto
    a boot-stanza icon.  Pillow is imported lazily inside the methods that
    need it (see the pylint disables) — presumably to keep Pillow an
    optional dependency; confirm against the package metadata.
    """

    def __init__(
        self,
        logger_factory: BaseLoggerFactory,
    ) -> None:
        # Offset strategies used when positioning the logo inside the icon:
        # flush to the start, centered, or flush to the end of the free span.
        minimum_offset: Callable[[int], int] = lambda _: 0
        medium_offset: Callable[[int], int] = lambda delta: delta // 2
        maximum_offset: Callable[[int], int] = lambda delta: delta

        self._logger = logger_factory.logger(__name__)
        # Icons already validated in this run, to avoid re-opening them.
        self._validated_icons: Set[Path] = set()
        # Maps each alignment enum member to the offset strategy applied to
        # the free space (icon extent minus logo extent) along that axis.
        self._embed_offset_initializers: dict[
            Union[BtrfsLogoHorizontalAlignment, BtrfsLogoVerticalAlignment],
            Callable[[int], int],
        ] = {
            BtrfsLogoHorizontalAlignment.LEFT: minimum_offset,
            BtrfsLogoHorizontalAlignment.CENTER: medium_offset,
            BtrfsLogoHorizontalAlignment.RIGHT: maximum_offset,
            BtrfsLogoVerticalAlignment.TOP: minimum_offset,
            BtrfsLogoVerticalAlignment.CENTER: medium_offset,
            BtrfsLogoVerticalAlignment.BOTTOM: maximum_offset,
        }

    def validate_custom_icon(
        self, refind_config_path: Path, source_icon_path: Path, custom_icon_path: Path
    ) -> Path:
        """Validate a custom icon's existence and format; return its path
        relative to the boot partition.

        Raises RefindConfigError when the file is missing, unreadable, or
        not one of the accepted image formats.  Results are memoized in
        ``self._validated_icons`` so each icon is only opened once.
        """
        custom_icon_absolute_path = refind_config_path.parent / custom_icon_path

        if not custom_icon_absolute_path.exists():
            raise RefindConfigError(
                f"The '{custom_icon_absolute_path}' path does not exist!"
            )

        validated_icons = self._validated_icons

        if custom_icon_absolute_path not in validated_icons:
            refind_directory = refind_config_path.parent
            logger = self._logger

            try:
                # pylint: disable=import-outside-toplevel
                from PIL import Image

                logger.info(
                    "Validating the "
                    f"'{custom_icon_absolute_path.relative_to(refind_directory)}' file."
                )

                with Image.open(custom_icon_absolute_path, "r") as custom_icon_image:
                    expected_formats = ["PNG", "JPEG", "BMP", "ICNS"]
                    custom_icon_image_format = custom_icon_image.format

                    if custom_icon_image_format not in expected_formats:
                        raise RefindConfigError(
                            f"The '{custom_icon_absolute_path.name}' image's "
                            f"format ('{custom_icon_image_format}') is not supported!"
                        )
            except OSError as e:
                # Image.open raises OSError for unreadable/corrupt files;
                # re-raise as a config error with the original as cause.
                logger.exception("Image.open('r') call failed!")
                raise RefindConfigError(
                    f"Could not read the '{custom_icon_absolute_path}' file!"
                ) from e

            validated_icons.add(custom_icon_absolute_path)

        return PillowCommand._discern_destination_icon_relative_path(
            refind_config_path, source_icon_path, custom_icon_absolute_path
        )

    def embed_btrfs_logo_into_source_icon(
        self, refind_config_path: Path, source_icon_path: Path, btrfs_logo: BtrfsLogo
    ) -> Path:
        """Composite the configured Btrfs logo onto the source icon and
        return the resulting icon's path relative to the boot partition.

        The composited icon is cached on disk under the snapshot-stanzas
        icons directory; if it already exists, nothing is regenerated.
        Raises RefindConfigError on unsupported format, a logo larger than
        the icon, or any I/O failure.
        """
        source_icon_absolute_path = PillowCommand._discern_source_icon_absolute_path(
            refind_config_path, source_icon_path
        )
        absolute_paths = PillowCommand._discern_absolute_paths_for_btrfs_logo_embedding(
            refind_config_path, source_icon_absolute_path, btrfs_logo
        )
        btrfs_logo_absolute_path = absolute_paths[0]
        destination_icon_absolute_path = absolute_paths[1]

        if not destination_icon_absolute_path.exists():
            logger = self._logger
            refind_directory = refind_config_path.parent

            try:
                # pylint: disable=import-outside-toplevel
                from PIL import Image

                logger.info(
                    "Embedding "
                    f"the '{btrfs_logo_absolute_path.name}' "
                    "logo into "
                    f"the '{source_icon_absolute_path.relative_to(refind_directory)}' icon."
                )

                with Image.open(
                    btrfs_logo_absolute_path
                ) as btrfs_logo_image, Image.open(
                    source_icon_absolute_path
                ) as source_icon_image:
                    # alpha_composite below requires RGBA inputs; only PNG
                    # sources are accepted here.
                    expected_format = "PNG"
                    source_icon_image_format = source_icon_image.format

                    if source_icon_image_format != expected_format:
                        raise RefindConfigError(
                            f"The '{source_icon_absolute_path.name}' image's "
                            f"format ('{source_icon_image_format}') is not supported!"
                        )

                    btrfs_logo_image_width = btrfs_logo_image.width
                    source_icon_image_width = source_icon_image.width

                    if source_icon_image_width < btrfs_logo_image_width:
                        raise RefindConfigError(
                            f"The '{source_icon_absolute_path.name}' image's width "
                            f"({source_icon_image_width} px) is less than "
                            "the selected Btrfs logo's width!"
                        )

                    btrfs_logo_image_height = btrfs_logo_image.height
                    source_icon_image_height = source_icon_image.height

                    if source_icon_image_height < btrfs_logo_image_height:
                        raise RefindConfigError(
                            f"The '{source_icon_absolute_path.name}' image's height "
                            f"({source_icon_image_height} px) is less than "
                            "the selected Btrfs logo's height!"
                        )

                    try:
                        # Position the logo within the icon: the offset
                        # strategy for each axis is selected by alignment
                        # and applied to the leftover space (delta).
                        horizontal_alignment = btrfs_logo.horizontal_alignment
                        x_delta = source_icon_image_width - btrfs_logo_image_width
                        x_offset = self._embed_offset_initializers[
                            horizontal_alignment
                        ](x_delta)
                        vertical_alignment = btrfs_logo.vertical_alignment
                        y_delta = source_icon_image_height - btrfs_logo_image_height
                        y_offset = self._embed_offset_initializers[vertical_alignment](
                            y_delta
                        )
                        # Paste the logo into a transparent canvas of the
                        # icon's size so alpha_composite sees equal sizes.
                        resized_btrfs_logo_image = Image.new(
                            btrfs_logo_image.mode, source_icon_image.size
                        )

                        resized_btrfs_logo_image.paste(
                            btrfs_logo_image,
                            (
                                x_offset,
                                y_offset,
                            ),
                        )

                        destination_icon_image = Image.alpha_composite(
                            source_icon_image, resized_btrfs_logo_image
                        )
                        destination_directory = (
                            refind_directory
                            / constants.SNAPSHOT_STANZAS_DIR_NAME
                            / constants.ICONS_DIR
                        )

                        if not destination_directory.exists():
                            logger.info(
                                "Creating the "
                                f"'{destination_directory.relative_to(refind_directory)}' "
                                "destination directory."
                            )

                            destination_directory.mkdir(parents=True)

                        logger.info(
                            "Saving the "
                            f"'{destination_icon_absolute_path.relative_to(refind_directory)}' "
                            "file."
                        )

                        destination_icon_image.save(destination_icon_absolute_path)
                    except OSError as e:
                        logger.exception("Image.save() call failed!")
                        raise RefindConfigError(
                            f"Could not save the '{e.filename}' file!"
                        ) from e
            except OSError as e:
                logger.exception("Image.open('r') call failed!")
                raise RefindConfigError(
                    f"Could not read the '{e.filename}' file!"
                ) from e

        return PillowCommand._discern_destination_icon_relative_path(
            refind_config_path, source_icon_path, destination_icon_absolute_path
        )

    @staticmethod
    def _discern_source_icon_absolute_path(
        refind_config_path: Path, icon_path: Path
    ) -> Path:
        """Resolve a config-relative icon path to an absolute path.

        Walks up the config file's ancestors and anchors the icon path at
        the first ancestor whose name does not already appear in the icon
        path's parts.
        """
        refind_config_path_parents = refind_config_path.parents
        icon_path_parts = icon_path.parts

        for parent in refind_config_path_parents:
            if parent.name not in icon_path_parts:
                return parent / str(icon_path).removeprefix(constants.FORWARD_SLASH)

        raise RefindConfigError(
            f"Could not discern the '{icon_path.name}' file's absolute path!"
        )

    @staticmethod
    def _discern_destination_icon_relative_path(
        refind_config_path: Path,
        source_icon_path: Path,
        destination_icon_path: Path,
    ) -> Path:
        """Inverse of the resolution above: express an absolute destination
        icon path relative to the partition root (rooted at ROOT_DIR).
        """
        source_icon_path_parts = source_icon_path.parts
        refind_config_path_parents = refind_config_path.parents

        for parent in refind_config_path_parents:
            if parent.name not in source_icon_path_parts:
                return constants.ROOT_DIR / destination_icon_path.relative_to(parent)

        raise RefindConfigError(
            f"Could not discern the '{destination_icon_path.name}' file's relative path!"
        )

    @staticmethod
    def _discern_absolute_paths_for_btrfs_logo_embedding(
        refind_config_path: Path, source_icon_absolute_path: Path, btrfs_logo: BtrfsLogo
    ) -> Tuple[Path, Path]:
        """Return (logo absolute path, destination icon absolute path).

        The destination name encodes the source icon, logo variant/size and
        both alignments, so distinct configurations get distinct files.
        """
        variant = btrfs_logo.variant
        size = btrfs_logo.size
        horizontal_alignment = btrfs_logo.horizontal_alignment
        vertical_alignment = btrfs_logo.vertical_alignment
        btrfs_logos_directory = constants.BTRFS_LOGOS_DIR
        btrfs_logo_name = f"{variant.value}_{size.value}.png"
        btrfs_logo_absolute_path = btrfs_logos_directory / btrfs_logo_name
        refind_directory = refind_config_path.parent
        destination_directory = (
            refind_directory
            / constants.SNAPSHOT_STANZAS_DIR_NAME
            / constants.ICONS_DIR
        )
        destination_icon_name = (
            f"{source_icon_absolute_path.stem}_{btrfs_logo_absolute_path.stem}_"
            f"h-{horizontal_alignment.value}_v-{vertical_alignment.value}.png"
        )
        destination_icon_absolute_path = destination_directory / destination_icon_name

        return (btrfs_logo_absolute_path, destination_icon_absolute_path)
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/system/pillow_command.py
0.778397
0.177722
pillow_command.py
pypi
# endregion
from __future__ import annotations

from pathlib import Path
from typing import Optional, Self
from uuid import UUID

from refind_btrfs.common import constants
from refind_btrfs.utility.helpers import (
    find_all_matched_files_in,
    is_none_or_whitespace,
    none_throws,
    try_parse_int,
    try_parse_uuid,
)

from .filesystem import Filesystem


class Partition:
    """A single partition of a block device.

    Identity (equality and hashing) is based solely on the partition's
    ``uuid``.  Part-type information and the attached filesystem are set
    after construction via the fluent ``with_*`` builders.
    """

    def __init__(self, uuid: str, name: str, label: str) -> None:
        self._uuid = uuid
        self._name = name
        self._label = label
        self._part_type_code: Optional[int] = None
        self._part_type_uuid: Optional[UUID] = None
        self._filesystem: Optional[Filesystem] = None

    def __eq__(self, other: object) -> bool:
        if self is other:
            return True

        return isinstance(other, Partition) and self.uuid == other.uuid

    def __hash__(self) -> int:
        return hash(self.uuid)

    def with_part_type(self, part_type: str) -> Self:
        """Record the raw part type, parsed both as a hex code and as a UUID.

        Whichever interpretation does not apply simply stays None.
        """
        self._part_type_code = try_parse_int(part_type, base=16)
        self._part_type_uuid = try_parse_uuid(part_type)

        return self

    def with_filesystem(self, filesystem: Filesystem) -> Self:
        """Attach the filesystem residing on this partition."""
        self._filesystem = filesystem

        return self

    def is_esp(self, uuid: UUID) -> bool:
        """Check whether this partition is the EFI system partition.

        With an empty ``uuid`` the match is made by part-type (code or
        GUID); otherwise by comparing against this partition's own UUID.
        Either way the filesystem must be mounted and of the ESP type.
        """
        fs = self.filesystem

        if fs is None:
            return False

        if uuid == constants.EMPTY_UUID:
            matched = (
                self.part_type_code == constants.ESP_PART_TYPE_CODE
                or self.part_type_uuid == constants.ESP_PART_TYPE_UUID
            )
        else:
            matched = uuid == try_parse_uuid(self.uuid)

        return matched and fs.is_mounted() and fs.is_of_type(constants.ESP_FS_TYPE)

    def is_root(self) -> bool:
        """Check whether this partition's filesystem is mounted at '/'."""
        fs = self.filesystem

        return fs is not None and fs.is_mounted_at(constants.ROOT_DIR)

    def is_boot(self) -> bool:
        """Check whether this partition's filesystem is mounted at '/boot'."""
        fs = self.filesystem

        return fs is not None and fs.is_mounted_at(
            constants.ROOT_DIR / constants.BOOT_DIR
        )

    def search_paths_for(self, filename: str) -> Optional[list[Path]]:
        """Find all files named ``filename`` under this partition's mount point.

        Returns None when the filesystem is not mounted; raises ValueError
        for a blank filename.
        """
        if is_none_or_whitespace(filename):
            raise ValueError("The 'filename' parameter must be initialized!")

        fs = none_throws(self.filesystem)

        if not fs.is_mounted():
            return None

        search_root = Path(fs.mount_point)

        return list(find_all_matched_files_in(search_root, filename))

    @property
    def uuid(self) -> str:
        return self._uuid

    @property
    def name(self) -> str:
        return self._name

    @property
    def label(self) -> str:
        return self._label

    @property
    def part_type_code(self) -> Optional[int]:
        return self._part_type_code

    @property
    def part_type_uuid(self) -> Optional[UUID]:
        return self._part_type_uuid

    @property
    def filesystem(self) -> Optional[Filesystem]:
        return self._filesystem
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/device/partition.py
0.823648
0.154695
partition.py
pypi
# endregion
import re

from refind_btrfs.common import constants
from refind_btrfs.common.exceptions import PartitionError
from refind_btrfs.utility.helpers import (
    checked_cast,
    has_items,
    is_none_or_whitespace,
    try_parse_int,
)

from .subvolume import Subvolume


class MountOptions:
    """Parsed representation of a comma-separated mount-options string.

    Options are split into "simple" flags (e.g. 'noatime') and
    "parameterized" options (e.g. 'subvol=@'); each retains its original
    position so that ``__str__`` can reproduce the original ordering.
    """

    def __init__(self, raw_mount_options: str) -> None:
        split_mount_options = [
            option.strip()
            for option in raw_mount_options.split(constants.COLUMN_SEPARATOR)
        ]
        # Both collections store (original position, value) so the string
        # form can be reassembled in the source order.
        simple_options: list[tuple[int, str]] = []
        parameterized_options: dict[str, tuple[int, str]] = {}
        parameterized_option_prefix_pattern = re.compile(
            constants.PARAMETERIZED_OPTION_PREFIX_PATTERN
        )

        for position, option in enumerate(split_mount_options):
            if not is_none_or_whitespace(option):
                if parameterized_option_prefix_pattern.match(option):
                    split_parameterized_option = option.split(
                        constants.PARAMETERIZED_OPTION_SEPARATOR
                    )
                    option_name = checked_cast(str, split_parameterized_option[0])
                    option_value = checked_cast(str, split_parameterized_option[1])

                    # Duplicate parameterized options are ambiguous — refuse.
                    if option_name in parameterized_options:
                        raise PartitionError(
                            f"The '{option_name}' mount option "
                            f"cannot be defined multiple times!"
                        )

                    parameterized_options[option_name] = (position, option_value)
                else:
                    simple_options.append((position, option))

        self._simple_options = simple_options
        self._parameterized_options = parameterized_options

    def __str__(self) -> str:
        """Rebuild the comma-separated options string in original order."""
        simple_options = self._simple_options
        parameterized_options = self._parameterized_options
        # NOTE(review): `result` is sized by the number of *kept* options,
        # but positions come from the original enumeration which included
        # blank segments — a raw string with empty segments could index
        # past the end here; confirm inputs never contain them.
        result: list[str] = [constants.EMPTY_STR] * sum(
            (len(simple_options), len(parameterized_options))
        )

        if has_items(simple_options):
            for simple_option in simple_options:
                result[simple_option[0]] = simple_option[1]

        if has_items(parameterized_options):
            for option_name, option_value in parameterized_options.items():
                result[option_value[0]] = constants.PARAMETERIZED_OPTION_SEPARATOR.join(
                    (option_name, option_value[1])
                )

        if has_items(result):
            return constants.COLUMN_SEPARATOR.join(result)

        return constants.EMPTY_STR

    def is_matched_with(self, subvolume: Subvolume) -> bool:
        """Check whether these options reference ``subvolume``.

        A match is made through either the 'subvol' option (compared by
        logical path, ignoring a leading directory separator) or the
        'subvolid' option (compared by numeric id).
        """
        parameterized_options = self._parameterized_options
        subvol_tuple = parameterized_options.get(constants.SUBVOL_OPTION)
        subvolid_tuple = parameterized_options.get(constants.SUBVOLID_OPTION)
        subvol_matched = False
        subvolid_matched = False

        if subvol_tuple is not None:
            subvol_value = subvol_tuple[1]
            logical_path = subvolume.logical_path
            # Strip a leading '/' from both sides so '/@' matches '@'.
            subvol_prefix_pattern = re.compile(f"^{constants.DIR_SEPARATOR_PATTERN}")
            subvol_matched = subvol_prefix_pattern.sub(
                constants.EMPTY_STR, subvol_value
            ) == subvol_prefix_pattern.sub(constants.EMPTY_STR, logical_path)

        if subvolid_tuple is not None:
            subvolid_value = subvolid_tuple[1]
            num_id = subvolume.num_id
            subvolid_matched = try_parse_int(subvolid_value) == num_id

        return subvol_matched or subvolid_matched

    def migrate_from_to(
        self, source_subvolume: Subvolume, destination_subvolume: Subvolume
    ) -> None:
        """Rewrite 'subvol'/'subvolid' to point at ``destination_subvolume``.

        Raises PartitionError if these options do not currently reference
        ``source_subvolume``.  A leading '/' in the 'subvol' value is
        preserved by the regex's 'prefix' capture group.
        """
        if not self.is_matched_with(source_subvolume):
            raise PartitionError(
                "The mount options are not matched with the "
                "'source_subvolume' parameter (by 'subvol' or 'subvolid')!"
            )

        parameterized_options = self._parameterized_options
        subvol_tuple = parameterized_options.get(constants.SUBVOL_OPTION)
        subvolid_tuple = parameterized_options.get(constants.SUBVOLID_OPTION)

        if subvol_tuple is not None:
            subvol_value = subvol_tuple[1]
            source_logical_path = source_subvolume.logical_path
            destination_logical_path = destination_subvolume.logical_path
            subvol_pattern = re.compile(
                rf"(?P<prefix>^{constants.DIR_SEPARATOR_PATTERN}?){source_logical_path}$"
            )

            parameterized_options[constants.SUBVOL_OPTION] = (
                subvol_tuple[0],
                subvol_pattern.sub(
                    rf"\g<prefix>{destination_logical_path}", subvol_value
                ),
            )

        if subvolid_tuple is not None:
            num_id = destination_subvolume.num_id

            parameterized_options[constants.SUBVOLID_OPTION] = (
                subvolid_tuple[0],
                str(num_id),
            )

    @property
    def simple_options(self) -> list[str]:
        # Values only; positions are an internal detail.
        return [simple_option[1] for simple_option in self._simple_options]

    @property
    def parameterized_options(self) -> dict[str, str]:
        # Name -> value; positions are an internal detail.
        return {
            option_name: option_value[1]
            for option_name, option_value in self._parameterized_options.items()
        }
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/device/mount_options.py
0.533397
0.183045
mount_options.py
pypi
# endregion
from __future__ import annotations

from copy import deepcopy
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Iterable, NamedTuple, Optional, Self, Set
from uuid import UUID

from more_itertools import take

from refind_btrfs.common import BootFilesCheckResult, constants
from refind_btrfs.common.abc.factories import BaseDeviceCommandFactory
from refind_btrfs.common.enums import PathRelation
from refind_btrfs.common.exceptions import SubvolumeError
from refind_btrfs.utility.helpers import (
    discern_path_relation_of,
    has_items,
    is_none_or_whitespace,
    none_throws,
    replace_root_part_in,
)

if TYPE_CHECKING:
    from refind_btrfs.boot import BootStanza

    from .partition_table import PartitionTable


class NumIdRelation(NamedTuple):
    """A subvolume's numeric id paired with its parent's numeric id."""

    self_id: int
    parent_id: int


class UuidRelation(NamedTuple):
    """A subvolume's UUID paired with its parent's UUID."""

    self_uuid: UUID
    parent_uuid: UUID


class Subvolume:
    """A Btrfs subvolume (or snapshot) with fluent builder methods.

    Identity (equality and hashing) is based solely on ``uuid``.  A
    "newly created" subvolume is one produced via
    ``as_newly_created_from`` / ``to_destination`` that mirrors an
    existing snapshot; for such instances, ordering and naming fall back
    to the originating subvolume's attributes.
    """

    def __init__(
        self,
        filesystem_path: Path,
        logical_path: str,
        time_created: datetime,
        uuid_relation: UuidRelation,
        num_id_relation: NumIdRelation,
        is_read_only: bool,
    ) -> None:
        self._name: Optional[str] = None
        self._filesystem_path = filesystem_path
        self._logical_path = logical_path
        self._time_created = time_created
        self._uuid = uuid_relation.self_uuid
        self._parent_uuid = uuid_relation.parent_uuid
        self._num_id = num_id_relation.self_id
        self._parent_num_id = num_id_relation.parent_id
        self._is_read_only = is_read_only
        self._created_from: Optional[Subvolume] = None
        self._static_partition_table: Optional[PartitionTable] = None
        self._boot_files_check_result: Optional[BootFilesCheckResult] = None
        self._snapshots: Optional[Set[Subvolume]] = None

    def __eq__(self, other: object) -> bool:
        if self is other:
            return True

        if isinstance(other, Subvolume):
            return self.uuid == other.uuid

        return False

    def __hash__(self) -> int:
        return hash(self.uuid)

    def __lt__(self, other: object) -> bool:
        """Order by creation time; a newly created subvolume is ordered by
        the creation time of the subvolume it was created from."""
        if isinstance(other, Subvolume):
            attributes_for_comparison = [
                none_throws(subvolume.created_from).time_created
                if subvolume.is_newly_created()
                else subvolume.time_created
                for subvolume in (self, other)
            ]

            return attributes_for_comparison[0] < attributes_for_comparison[1]

        return False

    def with_boot_files_check_result(self, boot_stanza: BootStanza) -> Self:
        """Re-check the stanza's required boot files against this subvolume.

        Each path in the stanza's check result is re-rooted from the
        expected logical path into this subvolume's filesystem path; files
        are partitioned by on-disk existence into matched/unmatched.
        """
        boot_stanza_check_result = boot_stanza.boot_files_check_result

        if boot_stanza_check_result is not None:
            self_filesystem_path_str = str(self.filesystem_path)
            self_logical_path = self.logical_path
            boot_stanza_name = boot_stanza_check_result.required_by_boot_stanza_name
            expected_logical_path = boot_stanza_check_result.expected_logical_path
            required_file_paths = boot_stanza_check_result.matched_boot_files
            matched_boot_files: list[str] = []
            unmatched_boot_files: list[str] = []

            for file_path in required_file_paths:
                replaced_file_path = Path(
                    replace_root_part_in(
                        file_path, expected_logical_path, self_filesystem_path_str
                    )
                )
                append_func = (
                    matched_boot_files.append
                    if replaced_file_path.exists()
                    else unmatched_boot_files.append
                )

                append_func(replaced_file_path.name)

            self._boot_files_check_result = BootFilesCheckResult(
                boot_stanza_name,
                self_logical_path,
                matched_boot_files,
                unmatched_boot_files,
            )

        return self

    def with_snapshots(self, snapshots: Iterable[Subvolume]) -> Self:
        """Attach this subvolume's set of snapshots."""
        self._snapshots = set(snapshots)

        return self

    def as_named(self) -> Self:
        """Derive and assign a display name.

        Format: '<ro|rw><snap|subvol>_<YYYY-mm-dd_HH-MM-SS>_ID<num_id>',
        using the originating subvolume's time/id when newly created.
        """
        type_prefix = "ro" if self.is_read_only else "rw"
        type_prefix += "snap" if self.is_snapshot() else "subvol"

        if self.is_newly_created():
            created_from = none_throws(self.created_from)
            time_created = created_from.time_created
            num_id = created_from.num_id
        else:
            time_created = self.time_created
            num_id = self.num_id

        formatted_time_created = time_created.strftime("%Y-%m-%d_%H-%M-%S")

        self._name = f"{type_prefix}_{formatted_time_created}_ID{num_id}"

        return self

    def as_located_in(self, parent_directory: Path) -> Self:
        """Place this (already named) subvolume under ``parent_directory``."""
        if not self.is_named():
            raise ValueError("The '_name' attribute must be initialized!")

        name = none_throws(self.name)

        self._filesystem_path = parent_directory / name

        return self

    def as_writable(self) -> Self:
        """Mark this subvolume as read-write."""
        self._is_read_only = False

        return self

    def as_newly_created_from(self, other: Subvolume) -> Self:
        """Mark this subvolume as created from ``other``, copying its
        static partition table (deep copy) when present."""
        self._created_from = other

        if other.has_static_partition_table():
            self._static_partition_table = deepcopy(
                none_throws(other.static_partition_table)
            )

        return self

    def to_destination(self, directory: Path) -> Self:
        """Build the writable destination counterpart of this snapshot,
        named after this subvolume and located under ``directory``."""
        return (
            Subvolume(
                constants.EMPTY_PATH,
                constants.EMPTY_STR,
                datetime.min,
                UuidRelation(constants.EMPTY_UUID, self.uuid),
                NumIdRelation(0, self.num_id),
                False,
            )
            .as_newly_created_from(self)
            .as_named()
            .as_located_in(directory)
        )

    def initialize_partition_table_using(
        self, device_command_factory: BaseDeviceCommandFactory
    ) -> None:
        """Lazily load the static partition table for this subvolume."""
        if not self.has_static_partition_table():
            static_device_command = device_command_factory.static_device_command()
            self._static_partition_table = (
                static_device_command.get_partition_table_for(self)
            )

    def is_named(self) -> bool:
        return not is_none_or_whitespace(self.name)

    def is_snapshot(self) -> bool:
        # A snapshot has a non-empty parent UUID.
        return self.parent_uuid != constants.EMPTY_UUID

    def is_snapshot_of(self, subvolume: Subvolume) -> bool:
        return self.is_snapshot() and self.parent_uuid == subvolume.uuid

    def is_located_in(self, parent_directory: Path) -> bool:
        """Check whether this subvolume's filesystem path equals or is
        nested inside ``parent_directory`` (using the originating
        subvolume's path when newly created)."""
        if self.is_newly_created():
            created_from = none_throws(self.created_from)
            filesystem_path = created_from.filesystem_path
        else:
            filesystem_path = self.filesystem_path

        path_relation = discern_path_relation_of((parent_directory, filesystem_path))
        expected_results: list[PathRelation] = [
            PathRelation.SAME,
            PathRelation.SECOND_NESTED_IN_FIRST,
        ]

        return path_relation in expected_results

    def is_newly_created(self) -> bool:
        return self.created_from is not None

    def is_static_partition_table_matched_with(self, subvolume: Subvolume) -> bool:
        if self.has_static_partition_table():
            static_partition_table = none_throws(self.static_partition_table)

            return static_partition_table.is_matched_with(subvolume)

        return False

    def has_static_partition_table(self) -> bool:
        return self.static_partition_table is not None

    def has_unmatched_boot_files(self) -> bool:
        boot_files_check_result = self.boot_files_check_result

        if boot_files_check_result is not None:
            return boot_files_check_result.has_unmatched_boot_files()

        return False

    def has_snapshots(self) -> bool:
        return has_items(self.snapshots)

    def can_be_added(self, comparison_iterable: Iterable[Subvolume]) -> bool:
        """True when this subvolume is absent from the iterable and no
        member of it is a newly created snapshot of this subvolume."""
        if self not in comparison_iterable:
            return not any(
                subvolume.is_newly_created() and subvolume.is_snapshot_of(self)
                for subvolume in comparison_iterable
            )

        return False

    def can_be_removed(
        self, parent_directory: Path, comparison_iterable: Iterable[Subvolume]
    ) -> bool:
        """True when this subvolume is absent from the iterable and, if it
        lives under ``parent_directory`` (or is newly created), it is not a
        snapshot of any member of the iterable."""
        if self not in comparison_iterable:
            if self.is_newly_created() or self.is_located_in(parent_directory):
                return not any(
                    self.is_snapshot_of(subvolume)
                    for subvolume in comparison_iterable
                )

            return True

        return False

    def select_snapshots(self, count: int) -> Optional[list[Subvolume]]:
        """Return up to ``count`` snapshots, newest first (per __lt__),
        or None when there are no snapshots."""
        if self.has_snapshots():
            snapshots = none_throws(self.snapshots)

            return take(count, sorted(snapshots, reverse=True))

        return None

    def modify_partition_table_using(
        self,
        source_subvolume: Subvolume,
        device_command_factory: BaseDeviceCommandFactory,
    ) -> None:
        """Rewrite and persist this subvolume's static partition table so
        that it references this subvolume instead of ``source_subvolume``
        (no-op when it already matches)."""
        self.initialize_partition_table_using(device_command_factory)

        static_partition_table = none_throws(self.static_partition_table)

        if not static_partition_table.is_matched_with(self):
            static_device_command = device_command_factory.static_device_command()

            static_partition_table.migrate_from_to(source_subvolume, self)
            static_device_command.save_partition_table(static_partition_table)

    def validate_static_partition_table(self, subvolume: Subvolume) -> None:
        """Raise SubvolumeError unless the static partition table exists,
        has a root partition, and is matched with ``subvolume``."""
        logical_path = self.logical_path

        if not self.has_static_partition_table():
            raise SubvolumeError(
                f"The '{logical_path}' subvolume's static "
                "partition table is not initialized!"
            )

        static_partition_table = none_throws(self.static_partition_table)
        root = static_partition_table.root

        if root is None:
            raise SubvolumeError(
                f"Could not find the root partition in the '{logical_path}' "
                "subvolume's static partition table!"
            )

        if not static_partition_table.is_matched_with(subvolume):
            raise SubvolumeError(
                f"The '{logical_path}' subvolume's static partition table is not "
                "matched with the root subvolume (by 'subvol' or 'subvolid')!"
            )

    def validate_boot_files_check_result(self) -> None:
        """Raise SubvolumeError when the boot-files check recorded any
        required files missing from this subvolume."""
        if self.has_unmatched_boot_files():
            boot_files_check_result = none_throws(self.boot_files_check_result)
            boot_stanza_name = boot_files_check_result.required_by_boot_stanza_name
            logical_path = boot_files_check_result.expected_logical_path
            unmatched_boot_files = boot_files_check_result.unmatched_boot_files

            raise SubvolumeError(
                f"Detected boot files required by the '{boot_stanza_name}' boot "
                f"stanza which do not exist in the '{logical_path}' subvolume: "
                f"{constants.DEFAULT_ITEMS_SEPARATOR.join(unmatched_boot_files)}!"
            )

    @property
    def name(self) -> Optional[str]:
        return self._name

    @property
    def filesystem_path(self) -> Path:
        return self._filesystem_path

    @property
    def logical_path(self) -> str:
        return self._logical_path

    @property
    def time_created(self) -> datetime:
        return self._time_created

    @property
    def uuid(self) -> UUID:
        return self._uuid

    @property
    def parent_uuid(self) -> UUID:
        return self._parent_uuid

    @property
    def num_id(self) -> int:
        return self._num_id

    @property
    def parent_num_id(self) -> int:
        return self._parent_num_id

    @property
    def is_read_only(self) -> bool:
        return self._is_read_only

    @property
    def created_from(self) -> Optional[Subvolume]:
        return self._created_from

    @property
    def static_partition_table(self) -> Optional[PartitionTable]:
        return self._static_partition_table

    @property
    def boot_files_check_result(self) -> Optional[BootFilesCheckResult]:
        return self._boot_files_check_result

    @property
    def snapshots(self) -> Optional[Set[Subvolume]]:
        return self._snapshots
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/device/subvolume.py
0.770422
0.155078
subvolume.py
pypi
# endregion from __future__ import annotations import re from functools import cached_property from pathlib import Path from typing import Iterable, Optional, Self from uuid import UUID from more_itertools import only, take from refind_btrfs.common import constants from refind_btrfs.common.enums import FstabColumn from refind_btrfs.utility.helpers import has_items, is_none_or_whitespace, none_throws from .partition import Partition from .subvolume import Subvolume class PartitionTable: def __init__(self, uuid: str, pt_type: str) -> None: self._uuid = uuid self._pt_type = pt_type self._esp_uuid = constants.EMPTY_UUID self._fstab_file_path: Optional[Path] = None self._partitions: Optional[list[Partition]] = None def __eq__(self, other: object) -> bool: if self is other: return True if isinstance(other, PartitionTable): return self.uuid == other.uuid return False def __hash__(self) -> int: return hash(self.uuid) def with_esp_uuid(self, esp_uuid: UUID) -> Self: self._esp_uuid = esp_uuid return self def with_fstab_file_path(self, fstab_file_path: Path) -> Self: self._fstab_file_path = fstab_file_path return self def with_partitions(self, partitions: Iterable[Partition]) -> Self: self._partitions = list(partitions) return self def is_matched_with(self, subvolume: Subvolume) -> bool: root = self.root if root is not None: filesystem = none_throws(root.filesystem) mount_options = none_throws(filesystem.mount_options) return mount_options.is_matched_with(subvolume) return False def has_partitions(self) -> bool: return has_items(self.partitions) def migrate_from_to( self, source_subvolume: Subvolume, destination_subvolume: Subvolume ) -> None: root = none_throws(self.root) filesystem = none_throws(root.filesystem) mount_options = none_throws(filesystem.mount_options) destination_filesystem_path = destination_subvolume.filesystem_path mount_options.migrate_from_to(source_subvolume, destination_subvolume) self._fstab_file_path = destination_filesystem_path / 
constants.FSTAB_FILE def transform_fstab_line(self, fstab_line: str) -> str: if PartitionTable.is_valid_fstab_entry(fstab_line): root = none_throws(self.root) filesystem = none_throws(root.filesystem) root_mount_point = filesystem.mount_point split_fstab_entry = fstab_line.split() fstab_mount_point = split_fstab_entry[FstabColumn.FS_MOUNT_POINT.value] if root_mount_point == fstab_mount_point: fstab_mount_options = split_fstab_entry[ FstabColumn.FS_MOUNT_OPTIONS.value ] pattern = re.compile( r"(?P<whitespace_before>\s+)" f"{fstab_mount_options}" r"(?P<whitespace_after>\s+)" ) root_mount_options = str(filesystem.mount_options) return pattern.sub( r"\g<whitespace_before>" f"{root_mount_options}" r"\g<whitespace_after>", fstab_line, ) return fstab_line @staticmethod def is_valid_fstab_entry(value: Optional[str]) -> bool: if is_none_or_whitespace(value): return False fstab_line = none_throws(value) comment_pattern = re.compile(r"^\s*#.*") if not comment_pattern.match(fstab_line): columns_count = len(FstabColumn) split_fstab_entry = take(columns_count, fstab_line.split()) return ( has_items(split_fstab_entry) and len(split_fstab_entry) == columns_count ) return False @property def uuid(self) -> str: return self._uuid @property def pt_type(self) -> str: return self._pt_type @property def esp_uuid(self) -> UUID: return self._esp_uuid @property def fstab_file_path(self) -> Optional[Path]: return self._fstab_file_path @property def partitions(self) -> Optional[list[Partition]]: return self._partitions @cached_property def esp(self) -> Optional[Partition]: if self.has_partitions(): return only( partition for partition in none_throws(self.partitions) if partition.is_esp(self.esp_uuid) ) return None @cached_property def root(self) -> Optional[Partition]: if self.has_partitions(): return only( partition for partition in none_throws(self.partitions) if partition.is_root() ) return None @cached_property def boot(self) -> Optional[Partition]: if self.has_partitions(): return only( 
partition for partition in none_throws(self.partitions) if partition.is_boot() ) return None
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/device/partition_table.py
0.826046
0.185984
partition_table.py
pypi
# endregion from __future__ import annotations import re from typing import Iterable, Optional, Self, Union from refind_btrfs.common.abc.factories import BaseDeviceCommandFactory from refind_btrfs.utility.helpers import has_items, none_throws from .partition import Partition from .partition_table import PartitionTable class BlockDevice: def __init__(self, name: str, d_type: str, major_minor: str) -> None: self._name = name self._d_type = d_type major_minor_parsed = BlockDevice.try_parse_major_minor(major_minor) self._major_number = major_minor_parsed[0] self._minor_number = major_minor_parsed[1] self._physical_partition_table: Optional[PartitionTable] = None self._live_partition_table: Optional[PartitionTable] = None self._dependencies: Optional[list[BlockDevice]] = None def with_dependencies(self, dependencies: Iterable[BlockDevice]) -> Self: self._dependencies = list(dependencies) return self def initialize_partition_tables_using( self, device_command_factory: BaseDeviceCommandFactory, ) -> None: if not self.has_physical_partition_table(): physical_device_command = device_command_factory.physical_device_command() self._physical_partition_table = ( physical_device_command.get_partition_table_for(self) ) if not self.has_live_partition_table(): live_device_command = device_command_factory.live_device_command() self._live_partition_table = live_device_command.get_partition_table_for( self ) def is_matched_with(self, name: str) -> bool: if self.name == name: return True else: dependencies = self.dependencies if has_items(dependencies): return any( dependency.is_matched_with(name) for dependency in none_throws(dependencies) ) return False def has_physical_partition_table(self) -> bool: return self.physical_partition_table is not None def has_live_partition_table(self) -> bool: return self.live_partition_table is not None def has_esp(self) -> bool: return self.esp is not None def has_root(self) -> bool: return self.root is not None def has_boot(self) -> bool: return 
self.boot is not None @staticmethod def try_parse_major_minor(value: str) -> Union[list[int], list[None]]: match = re.fullmatch(r"\d+:\d+", value) if match: return [int(split_number) for split_number in match.group().split(":")] return [None, None] @property def name(self) -> str: return self._name @property def d_type(self) -> str: return self._d_type @property def major_number(self) -> Optional[int]: return self._major_number @property def minor_number(self) -> Optional[int]: return self._minor_number @property def physical_partition_table( self, ) -> Optional[PartitionTable]: return self._physical_partition_table @property def live_partition_table( self, ) -> Optional[PartitionTable]: return self._live_partition_table @property def dependencies(self) -> Optional[list[BlockDevice]]: return self._dependencies @property def esp(self) -> Optional[Partition]: if self.has_physical_partition_table(): return none_throws(self.physical_partition_table).esp return None @property def root(self) -> Optional[Partition]: if self.has_live_partition_table(): return none_throws(self.live_partition_table).root return None @property def boot(self) -> Optional[Partition]: if self.has_live_partition_table(): return none_throws(self.live_partition_table).boot return None
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/device/block_device.py
0.852537
0.183923
block_device.py
pypi
# endregion from __future__ import annotations from pathlib import Path from typing import Optional, Self from refind_btrfs.common.abc.factories import BaseSubvolumeCommandFactory from refind_btrfs.utility.helpers import is_none_or_whitespace from .mount_options import MountOptions from .subvolume import Subvolume class Filesystem: def __init__(self, uuid: str, label: str, fs_type: str, mount_point: str) -> None: self._uuid = uuid self._label = label self._fs_type = fs_type self._mount_point = mount_point self._dump: Optional[int] = None self._fsck: Optional[int] = None self._mount_options: Optional[MountOptions] = None self._subvolume: Optional[Subvolume] = None def with_dump_and_fsck(self, dump: int, fsck: int) -> Self: self._dump = dump self._fsck = fsck return self def with_mount_options(self, raw_mount_options: str) -> Self: self._mount_options = ( MountOptions(raw_mount_options) if not is_none_or_whitespace(raw_mount_options) else None ) return self def initialize_subvolume_using( self, subvolume_command_factory: BaseSubvolumeCommandFactory ) -> None: if not self.has_subvolume(): filesystem_path = Path(self.mount_point) subvolume_command = subvolume_command_factory.subvolume_command() subvolume = subvolume_command.get_subvolume_from(filesystem_path) if subvolume is not None: snapshots = subvolume_command.get_all_source_snapshots_for(subvolume) self._subvolume = subvolume.with_snapshots(snapshots) def is_of_type(self, fs_type: str) -> bool: return self.fs_type == fs_type def is_mounted(self) -> bool: return not is_none_or_whitespace(self.mount_point) def is_mounted_at(self, path: Path) -> bool: return self.is_mounted() and Path(self.mount_point) == path def has_subvolume(self) -> bool: return self.subvolume is not None @property def uuid(self) -> str: return self._uuid @property def label(self) -> str: return self._label @property def fs_type(self) -> str: return self._fs_type @property def mount_point(self) -> str: return self._mount_point @property def 
dump(self) -> Optional[int]: return self._dump @property def fsck(self) -> Optional[int]: return self._fsck @property def mount_options(self) -> Optional[MountOptions]: return self._mount_options @property def subvolume(self) -> Optional[Subvolume]: return self._subvolume
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/device/filesystem.py
0.874158
0.180395
filesystem.py
pypi
# endregion import errno import os import re from enum import Enum from inspect import ismethod from pathlib import Path from typing import Any, Iterator, Optional, Sized, Type, TypeVar, cast from uuid import UUID from more_itertools import first from typeguard import check_type from refind_btrfs.common import constants from refind_btrfs.common.enums import PathRelation TParam = TypeVar("TParam") def check_access_rights() -> None: if os.getuid() != constants.ROOT_UID: error_code = errno.EPERM raise PermissionError(error_code, os.strerror(error_code)) def try_parse_int(value: str, base: int = 10) -> Optional[int]: try: return int(value, base) except ValueError: return None def try_parse_uuid(value: str) -> Optional[UUID]: try: return UUID(hex=value) except ValueError: return None def try_convert_str_to_enum(value: str, enum_type: Type[Enum]) -> Optional[Enum]: upper_value = value.upper() member_names = [member.name for member in enum_type] if upper_value in member_names: return enum_type[upper_value] return None def try_convert_bytes_to_uuid(value: bytes) -> Optional[UUID]: try: return UUID(bytes=value) except ValueError: return None def is_empty(value: Optional[str]) -> bool: if value is None: return False return value == constants.EMPTY_STR def is_none_or_whitespace(value: Optional[str]) -> bool: if value is None: return True return is_empty(value) or value.isspace() def strip_quotes(value: Optional[str]) -> str: if not is_none_or_whitespace(value): return ( none_throws(value) .strip(constants.SINGLE_QUOTE) .strip(constants.DOUBLE_QUOTE) ) return constants.EMPTY_STR def has_method(obj: Any, method_name: str) -> bool: if hasattr(obj, method_name): attr = getattr(obj, method_name) return ismethod(attr) return False def has_items(value: Optional[Sized]) -> bool: return value is not None and len(value) > 0 def is_singleton(value: Optional[Sized]) -> bool: return value is not None and len(value) == 1 def item_count_suffix(value: Sized) -> str: assert has_items( value 
), "The 'value' parameter must be initialized and contain least one item!" return constants.EMPTY_STR if is_singleton(value) else "s" def find_all_matched_files_in(root_directory: Path, filename: str) -> Iterator[Path]: if root_directory.exists() and root_directory.is_dir(): children = root_directory.iterdir() for child in children: if child.is_file(): if child.name == filename: yield child elif child.is_dir(): yield from find_all_matched_files_in(child, filename) def find_all_directories_in( root_directory: Path, max_depth: int, current_depth: int = 0 ) -> Iterator[Path]: if current_depth > max_depth: return if root_directory.exists() and root_directory.is_dir(): yield root_directory.resolve() subdirectories = (child for child in root_directory.iterdir() if child.is_dir()) for subdirectory in subdirectories: yield from find_all_directories_in( subdirectory, max_depth, current_depth + 1 ) def discern_path_relation_of(path_pair: tuple[Path, Path]) -> PathRelation: first_resolved = path_pair[0].resolve() second_resolved = path_pair[1].resolve() if first_resolved == second_resolved: return PathRelation.SAME first_parents = first_resolved.parents if second_resolved in first_parents: return PathRelation.FIRST_NESTED_IN_SECOND second_parents = second_resolved.parents if first_resolved in second_parents: return PathRelation.SECOND_NESTED_IN_FIRST return PathRelation.UNRELATED def discern_distance_between(path_pair: tuple[Path, Path]) -> Optional[int]: path_relation = discern_path_relation_of(path_pair) if path_relation != PathRelation.UNRELATED: distance = 0 if path_relation != PathRelation.SAME: if path_relation == PathRelation.FIRST_NESTED_IN_SECOND: first_parts = path_pair[0].parts second_stem = path_pair[1].stem for part in reversed(first_parts): if part != second_stem: distance += 1 else: break elif path_relation == PathRelation.SECOND_NESTED_IN_FIRST: first_stem = path_pair[0].stem second_parts = path_pair[1].parts for part in reversed(second_parts): if part != 
first_stem: distance += 1 else: break return distance return None def normalize_dir_separators_in( path: str, separator_replacement: tuple[ str, str ] = constants.DEFAULT_DIR_SEPARATOR_REPLACEMENT, ) -> str: path_with_replaced_separators = path.replace(*separator_replacement) pattern = re.compile(rf"(?P<prefix>^({constants.DIR_SEPARATOR_PATTERN}){{2,}})") match = pattern.match(path_with_replaced_separators) if match: prefix = match.group("prefix") path_with_replaced_separators = path_with_replaced_separators.removeprefix( first(prefix) * (len(prefix) - 1) ) return path_with_replaced_separators def replace_root_part_in( full_path: str, current_root_part: str, replacement_root_part: str, separator_replacement: tuple[ str, str ] = constants.DEFAULT_DIR_SEPARATOR_REPLACEMENT, ) -> str: pattern = re.compile( rf"(?P<prefix>^{constants.DIR_SEPARATOR_PATTERN}?)" f"{current_root_part}" rf"(?P<suffix>{constants.DIR_SEPARATOR_PATTERN})" ) substituted_full_path = pattern.sub( rf"\g<prefix>{replacement_root_part}\g<suffix>", full_path ) return normalize_dir_separators_in(substituted_full_path, separator_replacement) def replace_item_in( items_list: list[TParam], current: TParam, replacement: Optional[TParam] = None ) -> None: if not has_items(items_list): return if current in items_list: index = items_list.index(current) if replacement is None: replacement = current items_list[index] = replacement def none_throws(value: Optional[TParam], message: str = "Unexpected 'None'") -> TParam: if value is None: raise AssertionError(message) return value def default_if_none(value: Optional[TParam], default: TParam) -> TParam: if value is None: return default return value def checked_cast(destination_type: Type[TParam], value: Any) -> TParam: check_type(value, destination_type) return cast(TParam, value)
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/utility/helpers.py
0.627723
0.287518
helpers.py
pypi
# endregion from __future__ import annotations from typing import Iterable, Optional, Self from more_itertools import last from refind_btrfs.common import constants from refind_btrfs.common.exceptions import RefindConfigError from refind_btrfs.device import BlockDevice, MountOptions, Subvolume from refind_btrfs.utility.helpers import ( has_items, is_none_or_whitespace, none_throws, replace_root_part_in, strip_quotes, ) class BootOptions: def __init__(self, raw_options: Optional[str]) -> None: root_location: Optional[tuple[int, str]] = None root_mount_options: Optional[tuple[int, MountOptions]] = None initrd_options: list[tuple[int, str]] = [] other_options: list[tuple[int, str]] = [] if not is_none_or_whitespace(raw_options): split_options = strip_quotes(raw_options).split() for position, option in enumerate(split_options): if not is_none_or_whitespace(option): if option.startswith(constants.ROOT_PREFIX): normalized_option = option.removeprefix(constants.ROOT_PREFIX) if root_location is not None: root_option = constants.ROOT_PREFIX.rstrip( constants.PARAMETERIZED_OPTION_SEPARATOR ) raise RefindConfigError( f"The '{root_option}' boot option " f"cannot be defined multiple times!" ) root_location = (position, normalized_option) elif option.startswith(constants.ROOTFLAGS_PREFIX): normalized_option = option.removeprefix( constants.ROOTFLAGS_PREFIX ) if root_mount_options is not None: rootflags_option = constants.ROOTFLAGS_PREFIX.rstrip( constants.PARAMETERIZED_OPTION_SEPARATOR ) raise RefindConfigError( f"The '{rootflags_option}' boot option " f"cannot be defined multiple times!" 
) root_mount_options = (position, MountOptions(normalized_option)) elif option.startswith(constants.INITRD_PREFIX): normalized_option = option.removeprefix(constants.INITRD_PREFIX) initrd_options.append((position, normalized_option)) else: other_options.append((position, option)) self._root_location = root_location self._root_mount_options = root_mount_options self._initrd_options = initrd_options self._other_options = other_options def __str__(self) -> str: root_location = self._root_location root_mount_options = self._root_mount_options initrd_options = self._initrd_options other_options = self._other_options result: list[str] = [constants.EMPTY_STR] * ( sum((len(initrd_options), len(other_options))) + (1 if root_location is not None else 0) + (1 if root_mount_options is not None else 0) ) if root_location is not None: result[root_location[0]] = constants.ROOT_PREFIX + root_location[1] if root_mount_options is not None: result[root_mount_options[0]] = constants.ROOTFLAGS_PREFIX + str( root_mount_options[1] ) if has_items(initrd_options): for initrd_option in initrd_options: result[initrd_option[0]] = constants.INITRD_PREFIX + initrd_option[1] if has_items(other_options): for other_option in other_options: result[other_option[0]] = other_option[1] if has_items(result): joined_options = constants.BOOT_OPTION_SEPARATOR.join(result) return constants.DOUBLE_QUOTE + joined_options + constants.DOUBLE_QUOTE return constants.EMPTY_STR def is_matched_with(self, block_device: BlockDevice) -> bool: if block_device.has_root(): root_location = self.root_location if root_location is not None: root_partition = none_throws(block_device.root) filesystem = none_throws(root_partition.filesystem) normalized_root_location = last( strip_quotes(root_location).split( constants.PARAMETERIZED_OPTION_SEPARATOR ) ) root_location_comparers = [ root_partition.label, root_partition.uuid, filesystem.label, filesystem.uuid, ] if ( normalized_root_location in root_location_comparers or 
block_device.is_matched_with(normalized_root_location) ): root_mount_options = self.root_mount_options subvolume = none_throws(filesystem.subvolume) return ( root_mount_options.is_matched_with(subvolume) if root_mount_options is not None else False ) return False def migrate_from_to( self, source_subvolume: Subvolume, destination_subvolume: Subvolume, include_paths: bool, ) -> None: root_mount_options = self.root_mount_options if root_mount_options is not None: root_mount_options.migrate_from_to(source_subvolume, destination_subvolume) if include_paths: initrd_options = self._initrd_options if has_items(initrd_options): source_logical_path = source_subvolume.logical_path destination_logical_path = destination_subvolume.logical_path self._initrd_options = [ ( initrd_option[0], replace_root_part_in( initrd_option[1], source_logical_path, destination_logical_path, ( constants.FORWARD_SLASH, constants.BACKSLASH, ), ), ) for initrd_option in initrd_options ] @classmethod def merge(cls, all_boot_options: Iterable[BootOptions]) -> Self: all_boot_options_str = [ strip_quotes(str(boot_options)) for boot_options in all_boot_options ] return cls(constants.SPACE.join(all_boot_options_str).strip()) @property def root_location(self) -> Optional[str]: root_location = self._root_location if root_location is not None: return root_location[1] return None @property def root_mount_options(self) -> Optional[MountOptions]: root_mount_options = self._root_mount_options if root_mount_options is not None: return root_mount_options[1] return None @property def initrd_options(self) -> list[str]: return [initrd_option[1] for initrd_option in self._initrd_options] @property def other_options(self) -> list[str]: return [other_option[1] for other_option in self._other_options]
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/boot/boot_options.py
0.753194
0.156685
boot_options.py
pypi
# endregion from typing import Collection from injector import inject from more_itertools import first, last from transitions import Machine, State from refind_btrfs.common.abc.factories import BaseLoggerFactory from refind_btrfs.common.enums import StateNames from refind_btrfs.common.exceptions import ( NoChangesDetectedError, PartitionError, RefindConfigError, SubvolumeError, ) from refind_btrfs.utility.helpers import checked_cast, has_items, is_singleton from .model import Model States = Collection[State] class RefindBtrfsMachine(Machine): @inject def __init__( self, logger_factory: BaseLoggerFactory, model: Model, states: States, ): self._logger = logger_factory.logger(__name__) if not has_items(states) or is_singleton(states): raise ValueError( "The 'states' collection must be initialized and contain at least two items!" ) initial = checked_cast(State, first(states)) expected_initial_name = StateNames.INITIAL.value if initial.name != expected_initial_name: raise ValueError( "The first item of the 'states' collection must " f"be a state named '{expected_initial_name}'!" ) final = checked_cast(State, last(states)) expected_final_name = StateNames.FINAL.value if final.name != expected_final_name: raise ValueError( "The last item of the 'states' collection must " f"be a state named '{expected_final_name}'!" ) conditions = model.conditions super().__init__( model=model, states=list(states), initial=initial, auto_transitions=False, name=__name__, ) self.add_ordered_transitions( loop=False, conditions=conditions, ) self._initial_state = initial def run(self) -> bool: logger = self._logger model = self.model initial_state = self._initial_state self.set_state(initial_state) try: while model.next_state(): if model.is_final(): return True except NoChangesDetectedError as e: logger.warning(e.formatted_message) return True except ( PartitionError, SubvolumeError, RefindConfigError, ) as e: logger.error(e.formatted_message) return False
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/state_management/refind_btrfs_machine.py
0.797083
0.251363
refind_btrfs_machine.py
pypi
# endregion from __future__ import annotations from functools import cached_property from pathlib import Path from typing import Iterable, Iterator, NamedTuple, Optional, Self, Set from uuid import UUID from refind_btrfs.common import constants from refind_btrfs.common.abc import BaseConfig from refind_btrfs.common.enums import ( BootStanzaIconGenerationMode, BtrfsLogoHorizontalAlignment, BtrfsLogoSize, BtrfsLogoVariant, BtrfsLogoVerticalAlignment, ) from refind_btrfs.device import BlockDevice, Subvolume from refind_btrfs.utility.helpers import find_all_directories_in, has_items class SnapshotSearch(NamedTuple): directory: Path is_nested: bool max_depth: int def __eq__(self, other: object) -> bool: if self is other: return True if isinstance(other, SnapshotSearch): self_directory_resolved = self.directory.resolve() other_directory_resolved = other.directory.resolve() return self_directory_resolved == other_directory_resolved return False class SnapshotManipulation(NamedTuple): selection_count: int modify_read_only_flag: bool destination_directory: Path cleanup_exclusion: Set[Subvolume] class BtrfsLogo(NamedTuple): variant: BtrfsLogoVariant size: BtrfsLogoSize horizontal_alignment: BtrfsLogoHorizontalAlignment vertical_alignment: BtrfsLogoVerticalAlignment class Icon(NamedTuple): mode: BootStanzaIconGenerationMode path: Path btrfs_logo: BtrfsLogo class BootStanzaGeneration(NamedTuple): refind_config: str include_paths: bool include_sub_menus: bool source_exclusion: Set[str] icon: Icon def with_include_paths(self, boot_device: Optional[BlockDevice]) -> Self: include_paths = self.include_paths if include_paths: include_paths = boot_device is None return BootStanzaGeneration( self.refind_config, include_paths, self.include_sub_menus, self.source_exclusion, self.icon, ) class PackageConfig(BaseConfig): def __init__( self, esp_uuid: UUID, exit_if_root_is_snapshot: bool, exit_if_no_changes_are_detected: bool, snapshot_searches: Iterable[SnapshotSearch], 
snapshot_manipulation: SnapshotManipulation, boot_stanza_generation: BootStanzaGeneration, ) -> None: super().__init__(constants.PACKAGE_CONFIG_FILE) self._esp_uuid = esp_uuid self._exit_if_root_is_snapshot = exit_if_root_is_snapshot self._exit_if_no_changes_are_detected = exit_if_no_changes_are_detected self._snapshot_searches = list(snapshot_searches) self._snapshot_manipulation = snapshot_manipulation self._boot_stanza_generation = boot_stanza_generation def _get_directories_for_watch(self) -> Iterator[Path]: snapshot_searches = self.snapshot_searches if has_items(snapshot_searches): for snapshot_search in snapshot_searches: directory = snapshot_search.directory max_depth = snapshot_search.max_depth - 1 yield from find_all_directories_in(directory, max_depth) @property def esp_uuid(self) -> UUID: return self._esp_uuid @property def exit_if_root_is_snapshot(self) -> bool: return self._exit_if_root_is_snapshot @property def exit_if_no_changes_are_detected(self) -> bool: return self._exit_if_no_changes_are_detected @property def snapshot_searches(self) -> list[SnapshotSearch]: return self._snapshot_searches @property def snapshot_manipulation(self) -> SnapshotManipulation: return self._snapshot_manipulation @property def boot_stanza_generation(self) -> BootStanzaGeneration: return self._boot_stanza_generation @cached_property def directories_for_watch(self) -> Set[Path]: return set(self._get_directories_for_watch())
/refind_btrfs-0.6.0-py3-none-any.whl/refind_btrfs/common/package_config.py
0.777891
0.171304
package_config.py
pypi
# Copyright (c) 2011 Paul Makepeace, Real Programmers. All rights reserved. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> import json import re def to_camel(attr): """convert this_attr_name to thisAttrName.""" # Do lower case first letter return (attr[0].lower() + re.sub(r'_(.)', lambda x: x.group(1).upper(), attr[1:])) def from_camel(attr): """convert thisAttrName to this_attr_name.""" # Don't add an underscore for capitalized first letter return re.sub(r'(?<=.)([A-Z])', lambda x: '_' + x.group(1), attr).lower() class Facet(object): def __init__(self, column, type, **options): self.type = type self.name = column self.column_name = column for k, v in options.items(): setattr(self, k, v) def as_dict(self): return dict([(to_camel(k), v) for k, v in self.__dict__.items() if v is not None]) class TextFilterFacet(Facet): def __init__(self, column, query, **options): super(TextFilterFacet, self).__init__( column, query=query, case_sensitive=False, type='text', mode='text', **options) class TextFacet(Facet): def __init__(self, column, selection=None, expression='value', omit_blank=False, omit_error=False, select_blank=False, select_error=False, invert=False, **options): super(TextFacet, self).__init__( column, type='list', omit_blank=omit_blank, omit_error=omit_error, select_blank=select_blank, select_error=select_error, invert=invert, **options) self.expression = expression self.selection = [] if 
selection is None: selection = [] elif not isinstance(selection, list): selection = [selection] for value in selection: self.include(value) def include(self, value): for s in self.selection: if s['v']['v'] == value: return self.selection.append({'v': {'v': value, 'l': value}}) return self def exclude(self, value): self.selection = [s for s in self.selection if s['v']['v'] != value] return self def reset(self): self.selection = [] return self class BoolFacet(TextFacet): def __init__(self, column, expression=None, selection=None): if selection is not None and not isinstance(selection, bool): raise ValueError('selection must be True or False.') if expression is None: raise ValueError('Missing expression') super(BoolFacet, self).__init__(column, expression=expression, selection=selection) class StarredFacet(BoolFacet): def __init__(self, selection=None): super(StarredFacet, self).__init__('', expression='row.starred', selection=selection) class FlaggedFacet(BoolFacet): def __init__(self, selection=None): super(FlaggedFacet, self).__init__('', expression='row.flagged', selection=selection) class BlankFacet(BoolFacet): def __init__(self, column, selection=None): super(BlankFacet, self).__init__(column, expression='isBlank(value)', selection=selection) class ReconJudgmentFacet(TextFacet): def __init__(self, column, **options): super(ReconJudgmentFacet, self).__init__(column, expression=('forNonBlank(cell.recon.judgment, v, v, ' 'if(isNonBlank(value), "(unreconciled)", "(blank)"))'), **options) # Capitalize 'From' to get around python's reserved word. 
# Python 2/3 compatibility: ``basestring`` only exists on Python 2. Using it
# bare raises NameError on Python 3; using plain ``str`` would miss py2
# unicode strings (JSON responses decode to unicode on py2).
try:
    string_types = basestring  # noqa: F821
except NameError:  # Python 3
    string_types = str


class NumericFacet(Facet):
    """A 'range' facet over a numeric column.

    The ``From`` parameter is capitalized to get around Python's reserved
    word ``from``; ``as_dict`` lower-cases it back for the server.
    """

    def __init__(self, column, From=None, to=None, expression='value',
                 select_blank=True, select_error=True, select_non_numeric=True,
                 select_numeric=True, **options):
        super(NumericFacet, self).__init__(
            column,
            From=From,
            to=to,
            expression=expression,
            type='range',
            select_blank=select_blank,
            select_error=select_error,
            select_non_numeric=select_non_numeric,
            select_numeric=select_numeric,
            **options)

    def reset(self):
        """Clear the range bounds; returns self for chaining."""
        self.From = None
        self.to = None
        return self


class FacetResponse(object):
    """Class for unpacking an individual facet response."""

    def __init__(self, facet):
        # Copy scalar fields over, converting camelCase keys to snake_case.
        # NOTE(review): the bool check looks odd for JSON keys (from_camel
        # would fail on a real bool) -- confirm the server never sends
        # non-string keys.
        for k, v in facet.items():
            if isinstance(k, bool) or isinstance(k, string_types):
                setattr(self, from_camel(k), v)
        self.choices = {}

        class FacetChoice(object):
            # Lightweight holder for a choice's count and selected flag.
            def __init__(self, c):
                self.count = c['c']
                self.selected = c['s']

        if 'choices' in facet:
            for choice in facet['choices']:
                self.choices[choice['v']['v']] = FacetChoice(choice)
        if 'blankChoice' in facet:
            self.blank_choice = FacetChoice(facet['blankChoice'])
        else:
            self.blank_choice = None
        if 'bins' in facet:
            # Only numeric (range) facets carry bins.
            self.bins = facet['bins']
            self.base_bins = facet['baseBins']


class FacetsResponse(object):
    """FacetsResponse unpacking the compute-facets response.

    It has two attributes: facets & mode. Mode is either 'row-based' or
    'record-based'. facets is a list of facets produced by compute-facets, in
    the same order as they were specified in the Engine. By coupling the
    engine object with a custom container it's possible to look up the
    computed facet by the original facet's object.
    """

    def __init__(self, engine, facets):
        class FacetResponseContainer(object):
            facets = None

            def __init__(self, facet_responses):
                self.facets = [FacetResponse(fr) for fr in facet_responses]

            def __iter__(self):
                for facet in self.facets:
                    yield facet

            def __getitem__(self, index):
                # Allow lookup by the original Facet object as well as by
                # integer position; non-int indexes are resolved via the
                # engine's id() registry.
                if not isinstance(index, int):
                    index = engine.facet_index_by_id[id(index)]
                assert self.facets[index].name == engine.facets[index].name
                return self.facets[index]

        self.facets = FacetResponseContainer(facets['facets'])
        self.mode = facets['mode']


class Engine(object):
    """An Engine keeps track of Facets, and responses to facet computation."""

    # NOTE(review): mutable class attributes; harmless in practice because
    # __init__ always rebinds instance attributes via remove_all(), but do
    # not mutate these at class level.
    facets = []
    facet_index_by_id = {}  # dict of facets by Facet object id

    def __init__(self, *facets, **kwargs):
        self.set_facets(*facets)
        self.mode = kwargs.get('mode', 'row-based')

    def set_facets(self, *facets):
        """facets may be a Facet or list of Facets."""
        self.remove_all()
        for facet in facets:
            self.add_facet(facet)

    def facets_response(self, response):
        """Unpack a compute-facets response."""
        return FacetsResponse(self, response)

    def __len__(self):
        return len(self.facets)

    def as_json(self):
        """Return a JSON string suitable for use as a POST parameter."""
        return json.dumps({
            'facets': [f.as_dict() for f in self.facets],  # XXX how with json?
            'mode': self.mode,
        })

    def add_facet(self, facet):
        """Append a facet, recording its object id for response lookup."""
        # Record the facet's object id so facet response can be looked up by id
        self.facet_index_by_id[id(facet)] = len(self.facets)
        self.facets.append(facet)

    def remove_all(self):
        """Remove all facets."""
        self.facet_index_by_id = {}
        self.facets = []

    def reset_all(self):
        """Reset all facets."""
        for facet in self.facets:
            facet.reset()


class Sorting(object):
    """Class representing the current sorting order for a project.

    Used in RefineProject.get_rows()"""

    def __init__(self, criteria=None):
        self.criteria = []
        if criteria is None:
            criteria = []
        if not isinstance(criteria, list):
            criteria = [criteria]
        for criterion in criteria:
            # A string criterion defaults to a string sort on that column
            if isinstance(criterion, string_types):
                criterion = {
                    'column': criterion,
                    'valueType': 'string',
                    'caseSensitive': False,
                }
            criterion.setdefault('reverse', False)
            criterion.setdefault('errorPosition', 1)
            criterion.setdefault('blankPosition', 2)
            self.criteria.append(criterion)

    def as_json(self):
        return json.dumps({'criteria': self.criteria})

    def __len__(self):
        return len(self.criteria)
/refine-client-0.2.1.tar.gz/refine-client-0.2.1/google/refine/facet.py
0.831998
0.162413
facet.py
pypi
# refineC [![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/genomewalker/refine-contigs?include_prereleases&label=version)](https://github.com/genomewalker/refine-contigs/releases) [![refine-contigs](https://github.com/genomewalker/refine-contigs/workflows/refineC_ci/badge.svg)](https://github.com/genomewalker/refine-contigs/actions) [![PyPI](https://img.shields.io/pypi/v/refine-contigs)](https://pypi.org/project/refine-contigs/) [![Conda](https://img.shields.io/conda/v/genomewalker/refine-contigs)](https://anaconda.org/genomewalker/refine-contigs) refineC is a simple tool to identify potential misassemblies in contigs recovered from ancient metagenomes. The assembly of ancient metagenomics data is challenging due to the short length and post-mortem damage in reads. The short length of the reads pushes the limits of the assemblers, and the recovered contigs might contain misassemblies that can, for example, lead to misleading ecological insights, spurious pangenomic analyses, erroneous prediction of the functional potential, or impact the outcome of the binning process by mixing distantly related or unrelated phylogenetic gene markers. With the assembly of ancient metagenomics data, we can face different problems: - **Inter-genomic mosaic**: Chimeric contigs containing a mixture of sequence from multiple organisms - **Intra-genomic mosaic**: Chimeric contigs mixing different genomic regions from the same organism - **Temporal mosaic**: Chimeric contigs containing a mixture of sequence from different times (and organisms) At the moment, refineC can mitigate the effects of the first and second type of problems by exploiting the information of de Bruijn (Megahit/Spades) and overlap-based assemblers (PLASS/PenguiN). While the de Bruijn assemblers will assemble longer contigs, the overlap-based assemblers will recover more of the ancient sequence space present in the ancient sample. 
In any case, both types of assemblers will end up generating misassembled contigs, especially when we fine-tune the assemblers to recover as much as possible. RefineC follows a simple approach to identify the misassemblies:

- Perform an all-vs-all contig comparison
- Identify groups of contigs that share a certain amount of sequence identity and coverage
- Find overlapping regions for each contig supported by other contigs and extract the longest one. Keep the leftover parts of the contigs if they are longer than a certain threshold
- Remove redundancy of the overlapping fragments by sequence clustering
- Add the new contig set to the original set of contigs without hits in the all-vs-all comparison
- Remove redundancy by sequence clustering

![assets/images/refineC-wf.png](assets/images/refineC-wf.png#center)

In addition, `refineC` has a **merge** option where it tries to find contigs that might be merged using Minimus2. On some occasions, there are overlaps between contigs that are very well supported by other contigs in the sample, and refineC cannot collapse them. This usually happens when terminal regions of the contigs overlap other terminal regions. For this reason, although not usually recommended, we use Minimus2 to merge overlapping contigs. We apply a conservative approach to the already refined contigs, where we find overlaps in the same manner as in the `split` subcommand, but in this case, we select the maximum clique in a component to be merged by Minimus2.
# Installation We recommend having [**conda**](https://docs.conda.io/en/latest/) installed to manage the virtual environments ### Using pip First, we create a conda virtual environment with: ```bash wget https://raw.githubusercontent.com/genomewalker/refine-contigs/master/environment.yml conda env create -f environment.yml ``` Then we proceed to install using pip: ```bash pip install refine-contigs ``` ### Using conda ```bash conda install -c conda-forge -c bioconda -c genomewalker refine-contigs ``` ### Install from source to use the development version Using pip ```bash pip install git+ssh://git@github.com/genomewalker/refine-contigs.git ``` By cloning in a dedicated conda environment ```bash git clone git@github.com:genomewalker/refine-contigs.git cd refine-contigs conda env create -f environment.yml conda activate refine-contigs pip install -e . ``` # Usage refineC only needs a contig file. For a complete list of option ``` $ refineC --help usage: refineC [-h] [--version] [--debug] {split,merge} ... 
Finds misassemblies in ancient data positional arguments: {split,merge} positional arguments split Find misassemblies merge Merge potential overlaps optional arguments: -h, --help show this help message and exit --version Print program version --debug Print debug messages (default: False) ``` For the split mode: ``` $refineC split --help usage: refineC split [-h] [--tmp DIR] [--threads INT] [--keep-files] [--output OUT] [--prefix PREFIX] --contigs FILE [--min-id FLOAT] [--min-cov FLOAT] [--glob-cls-id FLOAT] [--glob-cls-cov FLOAT] [--frag-min-len INT] [--frag-cls-id FLOAT] [--frag-cls-cov FLOAT] optional arguments: -h, --help show this help message and exit --tmp DIR Temporary directory (default:./tmp) --threads INT Number of threads (default: 2) --keep-files Keep temporary data (default: False) --output OUT Fasta file name to save the merged contigs (default: contigs) --prefix PREFIX Prefix for contigs name (default: contig) required arguments: --contigs FILE Contig file to check for misassemblies overlap identification arguments: --min-id FLOAT Minimun id to use for the overlap (default: 0.9) --min-cov FLOAT Minimun percentage of the coverage for the overlap (default: 0.25) global clustering arguments: --glob-cls-id FLOAT Minimum identity to cluster the refined dataset (default: 0.99) --glob-cls-cov FLOAT Minimum coverage to cluster the refined dataset (default: 0.9) fragment refinement arguments: --frag-min-len INT Minimum fragment length to keep (default: 500) --frag-cls-id FLOAT Minimum identity to cluster the fragments (default: 0.9) --frag-cls-cov FLOAT Minimum coverage to cluster the fragments (default: 0.6) ``` And for the `merge` mode: ``` $refineC merge --help usage: refineC merge [-h] [--tmp DIR] [--threads INT] [--keep-files] [--output OUT] [--prefix PREFIX] --contigs FILE [--min-id FLOAT] [--min-cov FLOAT] [--glob-cls-id FLOAT] [--glob-cls-cov FLOAT] [--mnm2-threads INT] [--mnm2-overlap INT] [--mnm2-minid FLOAT] [--mnm2-maxtrim INT] [--mnm2-conserr 
FLOAT] optional arguments: -h, --help show this help message and exit --tmp DIR Temporary directory (default:./tmp) --threads INT Number of threads (default: 2) --keep-files Keep temporary data (default: False) --output OUT Fasta file name to save the merged contigs (default: contigs) --prefix PREFIX Prefix for contigs name (default: contig) required arguments: --contigs FILE Contig file to check for misassemblies overlap identification arguments: --min-id FLOAT Minimun id to use for the overlap (default: 0.9) --min-cov FLOAT Minimun percentage of the coverage for the overlap (default: 0.25) global clustering arguments: --glob-cls-id FLOAT Minimum identity to cluster the refined dataset (default: 0.99) --glob-cls-cov FLOAT Minimum coverage to cluster the refined dataset (default: 0.9) minimus2 arguments: --mnm2-threads INT Number of threads used by minimus2 (default: 1) --mnm2-overlap INT Assembly 1 vs 2 minimum overlap (default: 500) --mnm2-minid FLOAT Minimum overlap percent identity for alignments (default: 95.0) --mnm2-maxtrim INT Maximum sequence trimming length (default: 1000) --mnm2-conserr FLOAT Maximum consensus error (default: 0.06) ``` One would run refineC `split` mode as: ```bash refineC split --contigs b40d22c9e7.assm.combined.fasta --min-id 95.0 --min-cov 0.25 --prefix ctg --output contigs-merged --threads 32 ``` *--contigs*: Here, we specify the location of the contigs to process *--min-id*: Minimum identity for the overlaps between contig pairs *--min-cov*: Minimum coverage between contig pairs *--prefix*: Prefix for the contig name *--output*: Name for the output file *--threads*: Number of threads # Misassemblies in ancient metagenomic data As a proof of concept, we generated synthetic ancient data with [aMGSIM](https://github.com/genomewalker/aMGSIM) from _Methanosarcina bakeri_ MS (4,533,209 nt). 
In total, we generated 625,270 PE reads (20X) with a modal read length of 63nt and the following parameters for the Briggs model [0.03, 0.1, 0.01, 0.2]. We performed three different assemblies: - Megahit with default parameters - Megahit with fine-tuned parameters - Experimental assembly workflow for ancient metagenomic data (Will be public soon) The statistics of the **Megahit with default parameters** for the contigs longer than 1000nt are: - Number of contigs: 1,339 - Assembly length: 3,855,942 - Average contig length: 2,879 - Maximum contig length: 16,423 The following table shows the anvi'o estimates for this assembly: ``` ╒════════════╤══════════╤══════════════╤════════════════╤════════════════╤══════════════╤════════════════╕ │ bin name │ domain │ confidence │ % completion │ % redundancy │ num_splits │ total length │ ╞════════════╪══════════╪══════════════╪════════════════╪════════════════╪══════════════╪════════════════╡ │ bin1 │ ARCHAEA │ 0.8 │ 90.79 │ 6.58 │ 1339 │ 3855942 │ ╘════════════╧══════════╧══════════════╧════════════════╧════════════════╧══════════════╧════════════════╛ ``` As you can see, this would be a _medium-quality_ recovered MAG. The statistics are not so bad, and we recover ~85% of the size of the genome. But when we look in detail, we find misassembled contigs that contain fragments from distant regions of the genome: ![assets/images/refineC-methano-basicMH.png](assets/images/refineC-methano-basicMH.png#center) This is not exclusive of Megahit; with SPAdes, we have similar situations. 
Here the misassembly cover different regions and different strand: ![assets/images/refineC-methano-SPAdes.png](assets/images/refineC-methano-SPAdes.png#center) If we push the limits from Megahit and we fine-tune some of its options, we can get an assembly with better statistics: - Number of contigs: 934 - Assembly length: 4,206,221 - Average contig length: 4,503 - Maximum contig length: 26,014 with same values of redundancy but larger completion, and we recover a larger potential fraction of the genome (~92%): ``` ╒════════════╤══════════╤══════════════╤════════════════╤════════════════╤══════════════╤════════════════╕ │ bin name │ domain │ confidence │ % completion │ % redundancy │ num_splits │ total length │ ╞════════════╪══════════╪══════════════╪════════════════╪════════════════╪══════════════╪════════════════╡ │ bin1 │ ARCHAEA │ 0.9 │ 94.74 │ 6.58 │ 934 │ 4206221 │ ╘════════════╧══════════╧══════════════╧════════════════╧════════════════╧══════════════╧════════════════╛ ``` But of course this also translates in potentially having more misassemblies. 
As an example: <p align="center"> <img src="assets/images/refineC-methano-wfMH.png" width=50% align=center> <p /> Finally, as an example of using an assembly workflow specially designed for ancient metagenomics data that combines de Bruijn and overlap-based methods in combination with refineC, we obtain an assembly like: - Number of contigs: 1,175 - Assembly length: 4,419,745 - Average contig length: 3,761 - Maximum contig length: 17,453 In this case, the estimates from anvi'o show lower redundancy values, with the same completion, and it potentially recovered 97% of the genome length ``` ╒════════════╤══════════╤══════════════╤════════════════╤════════════════╤══════════════╤════════════════╕ │ bin name │ domain │ confidence │ % completion │ % redundancy │ num_splits │ total length │ ╞════════════╪══════════╪══════════════╪════════════════╪════════════════╪══════════════╪════════════════╡ │ bin1 │ ARCHAEA │ 0.9 │ 94.74 │ 3.95 │ 1175 │ 4419745 │ ╘════════════╧══════════╧══════════════╧════════════════╧════════════════╧══════════════╧════════════════╛ ``` In this case, we follow a conservative approach to minimize the risk of misassemblies. This translates in a slightly smaller contig average size as refineC will fragment the contigs not well supported. For example, taking the contig `mh_000000000843` shown before to be misassembled in the **Megahit with fine-tuned parameters** assembly, the contig should be split into two parts, from 1-9757 and from 9536-14953. 
As refineC is a reference-free method, it uses the available information within the de Bruijn and overlap-based assemblies and produces the following splits: | Contig | Start | End | Class | Length | |:--------------------------:|:-----:|:-----:|:-----------:|:------:| | 98f2aa9f20_mh_000000000843 | 1 | 2179 | overlap | 2178 | | 98f2aa9f20_mh_000000000843 | 5010 | 9757 | overlap | 4747 | | 98f2aa9f20_mh_000000000843 | 2179 | 5010 | non-overlap | 2831 | | 98f2aa9f20_mh_000000000843 | 9757 | 14953 | non-overlap | 5196 | RefineC breaks the contig `98f2aa9f20_mh_000000000843` into four fragments, two of them are supported by other contigs, and two of them are unique to this contig. Increasing the fragmentation is a small price to pay to have higher quality contigs.
/refine-contigs-1.0.4.tar.gz/refine-contigs-1.0.4/README.md
0.571049
0.940408
README.md
pypi
import pandas as pd
from bioservices.kegg import KEGG
import io
import re
from libsbml import Model as libModel
from refinegems.io import parse_gff_for_gp_info
from refinegems.entities import get_model_genes, compare_gene_lists, get_model_reacs_or_metabs
from refinegems.analysis_db import get_bigg2other_db, compare_bigg_model

__author__ = "Famke Baeuerle"


def get_kegg_genes(organismid: str) -> pd.DataFrame:
    """Extracts list of genes from KEGG given an organism

    Args:
        - organismid (str): KEGG ID of organism which the model is based on

    Returns:
        pd.DataFrame: Table of all genes denoted in KEGG for the organism
    """
    k = KEGG()
    gene_list = k.list(organismid)
    return pd.read_table(io.StringIO(gene_list), header=None)


def get_locus_ec(genes_kegg_notmodel: pd.DataFrame) -> pd.DataFrame:
    """Creates columns with EC numbers for the locus tags of the genes

    Args:
        - genes_kegg_notmodel (pd.DataFrame): Genes present in KEGG but not in the model

    Returns:
        pd.DataFrame: Table of genes with locus tag and EC number
    """
    k = KEGG()
    ec_dict = {}
    # NOTE(review): iterating a DataFrame yields its column labels, not rows --
    # this relies on the caller passing an iterable of gene identifiers;
    # confirm against compare_gene_lists().
    for gene in genes_kegg_notmodel:
        entry = k.parse(k.get(gene))
        try:
            ec_dict[entry['ENTRY']] = entry['ORTHOLOGY']
        except KeyError:
            # Gene has no orthology annotation in KEGG -> no EC number.
            pass
    real_ec = {}
    for entry, ortho in ec_dict.items():
        for value in ortho.values():
            m = re.search(r'(?:EC).*', value)
            if m:
                # Stored as '[EC:x.y.z.w]'; brackets/prefix stripped below.
                real_ec[entry[:12]] = '[' + m.group(0)
    locus_ec = pd.DataFrame.from_dict(
        real_ec, orient='index').reset_index().rename(
        columns={
            'index': 'locus_tag',
            0: 'EC-number'})
    # '[EC:1.1.1.1]' -> '1.1.1.1'
    locus_ec['EC'] = locus_ec['EC-number'].str[4:-1]
    locus_ec = locus_ec.drop('EC-number', axis=1)
    return locus_ec


def get_locus_ec_kegg(locus_ec: pd.DataFrame) -> pd.DataFrame:
    """Searches for KEGG reactions based on EC numbers

    Args:
        - locus_ec (pd.DataFrame): Genes with locus tag and EC number

    Returns:
        pd.DataFrame: Table of genes with locus tag, EC number and KEGG Id
    """
    # One shared client: the original instantiated KEGG() inside the per-row
    # helper, creating a fresh client for every EC number.
    k = KEGG()

    def get_kegg_reaction(ec_number):
        # Return the last reaction id listed for the EC entry, or None.
        gene = k.parse(k.get(ec_number))
        try:
            return gene['REACTION'][-1]
        except KeyError:
            return None

    def drop_nonreac(kegg_id):
        # Keep only 11-character entries -- presumably '[RN:Rxxxxx]'; TODO
        # confirm the KEGG REACTION field format.
        return kegg_id if len(kegg_id) == 11 else None

    locus_ec['KEGG_Ids'] = locus_ec['EC'].apply(get_kegg_reaction)
    locus_ec = locus_ec.dropna()
    locus_ec.loc[:, 'KEGG_Ids2'] = locus_ec['KEGG_Ids'].apply(drop_nonreac)
    locus_ec = locus_ec.dropna()
    # Strip the 4-char prefix and closing bracket to leave the bare id.
    locus_ec['KEGG'] = locus_ec['KEGG_Ids2'].str[4:-1]
    locus_ec_kegg = locus_ec.dropna().drop(
        'KEGG_Ids', axis=1).drop(
        'KEGG_Ids2', axis=1)
    return locus_ec_kegg


def get_locus_ec_kegg_bigg(locus_ec_kegg: pd.DataFrame, bigg_kegg: pd.DataFrame) -> pd.DataFrame:
    """Merges table with genes from model with BiGG / KEGG mapping to add BiGG Ids

    Args:
        - locus_ec_kegg (pd.DataFrame): Genes with locus tag, EC number and KEGG Id
        - bigg_kegg (pd.DataFrame): BiGG IDs with corresponding KEGG Ids

    Returns:
        pd.DataFrame: Table of genes with locus tag, EC number, KEGG Id and BiGG Id
    """
    locus_ec_kegg_bigg = locus_ec_kegg.merge(bigg_kegg, on=['KEGG'])
    return locus_ec_kegg_bigg


def get_locus_ec_kegg_bigg_gpr(locus_ec_kegg_bigg: pd.DataFrame, locus_gpr: pd.DataFrame) -> pd.DataFrame:
    """Merges table with genes from model if locus tag / GPR mapping to add GPRs

    Args:
        - locus_ec_kegg_bigg (pd.DataFrame): Genes with locus tag, EC number, KEGG Id and BiGG Id
        - locus_gpr (pd.DataFrame): Mapping from locus tags to GPRs

    Returns:
        pd.DataFrame: Table of genes with locus tag, EC number, KEGG Id, BiGG Id and GPR
    """
    # Drop the trailing character of each locus tag before merging --
    # presumably a delimiter left over from parsing; TODO confirm against
    # the GFF-derived table.
    locus_ec_kegg_bigg['locus_tag'] = locus_ec_kegg_bigg['locus_tag'].str[:-1]
    return locus_ec_kegg_bigg.merge(locus_gpr, how='left', on='locus_tag')


def kegg_gene_comp(model: libModel, organismid: str, gff_file: str) -> pd.DataFrame:
    """Executes all steps to compare genes of the model to KEGG genes

    Args:
        - model (libModel): Model loaded with libSBML
        - organismid (str): KEGG ID of organism which the model is based on
        - gff_file (str): Path to gff file of organism of interest

    Returns:
        pd.DataFrame: Table containing missing reactions with locus tag, EC number, KEGG Id, BiGG Id and GPR
    """
    model_genes = get_model_genes(model, True)
    model_reactions = get_model_reacs_or_metabs(model)
    kegg_genes = get_kegg_genes(organismid)
    bigg_kegg = get_bigg2other_db('KEGG')
    genes_kegg_notmodel = compare_gene_lists(model_genes, kegg_genes)
    locus_gpr = parse_gff_for_gp_info(gff_file)
    locus_ec = get_locus_ec(genes_kegg_notmodel)
    locus_ec_kegg = get_locus_ec_kegg(locus_ec)
    locus_ec_kegg_bigg = get_locus_ec_kegg_bigg(locus_ec_kegg, bigg_kegg)
    locus_ec_kegg_bigg_gpr = get_locus_ec_kegg_bigg_gpr(
        locus_ec_kegg_bigg, locus_gpr)
    missing_reactions = compare_bigg_model(
        locus_ec_kegg_bigg_gpr, model_reactions)
    return missing_reactions
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/analysis_kegg.py
0.76908
0.424352
analysis_kegg.py
pypi
import ast
import math
from libsbml import Model as libModel
import refinegems.analysis_kegg as rga_kegg
import refinegems.analysis_biocyc as rga_biocyc
from refinegems.curate import update_annotations_from_others
from refinegems.cvterms import add_cv_term_metabolites, add_cv_term_reactions
from refinegems.entities import create_gp, create_species, create_reaction
import pandas as pd
import numpy as np
from typing import Union
from colorama import init as colorama_init
from colorama import Fore

__author__ = "Famke Baeuerle and Gwendolyn O. Gusak"


'''Skeleton for functions that could be used for a lab strain/organism which is in no database contained
def get_genes_from_gff():
    pass


def get_related_metabs_reactions_blast():
    pass


def gff_gene_comp():
    pass
'''


def gap_analysis(model_libsbml: libModel, gapfill_params: dict[str, str], filename: str) -> Union[pd.DataFrame, tuple]:
    """| Main function to infer gaps in a model by comparing the locus tags of the GeneProducts
    | to KEGG/BioCyc/both

    The analysis result is additionally written to ``<filename>.xlsx``.

    Args:
        - model_libsbml (libModel): Model loaded with libSBML
        - gapfill_params (dict): Dictionary obtained from YAML file containing the parameter mappings
        - filename (str): Path to output file for gapfill analysis result

    Returns:
        - Case 'KEGG':
            pd.DataFrame: Table containing the columns 'bigg_id' 'locus_tag' 'EC' 'KEGG' 'name' 'GPR'
        - Case 'BioCyc':
            tuple: Four tables (1) - (4)
                (1) pd.DataFrame: Gap fill statistics with the columns
                    'Missing entity' 'Total' 'Have BiGG ID' 'Can be added' 'Notes'
                (2) pd.DataFrame: Genes with the columns 'locus_tag' 'protein_id' 'model_id' 'name'
                (3) pd.DataFrame: Metabolites with the columns
                    'bigg_id' 'name' 'BioCyc' 'compartment' 'Chemical Formula' 'InChI-Key' 'ChEBI' 'charge'
                (4) pd.DataFrame: Reactions with the columns
                    'bigg_id' 'name' 'BioCyc' 'locus_tag' 'Reactants' 'Products' 'EC' 'Fluxes'
                    'Spontaneous?' 'bigg_reaction'
        - Case 'KEGG+BioCyc':
            tuple: Five tables (1)-(4) from output of 'BioCyc' & (5) from output of 'KEGG'
                -> Table reactions contains additionally column 'KEGG'
    """
    colorama_init(autoreset=True)

    db_to_compare = gapfill_params['db_to_compare']
    result = None

    # Validate the requested comparison mode before doing any work.
    if db_to_compare not in ['KEGG', 'BioCyc', 'KEGG+BioCyc']:  # 'GFF',
        print(f'{Fore.RED}To use the module gapfill the parameter of db_to_compare has to be set to one of the following'
              + ' options:\n- \'KEGG\'\n- \'BioCyc\'\n- \'KEGG+BioCyc\'\nAdditionally, the required parameters'
              + ' for each option need to be specified.\n- \'biggreactions\' and \'gapfill\' are required for all options.'
              + '\n- \'organismid\' is required only for the options \'KEGG\' and \'KEGG+BioCyc\'.\n- \'biocyc_tables\''
              + ' is only required for the options \'BioCyc\' and \'KEGG+BioCyc\'.')  # \n- \'GFF\'
        return

    if db_to_compare == 'KEGG':
        if gapfill_params['organismid']:
            missing_kegg = rga_kegg.kegg_gene_comp(model_libsbml,
                                                   gapfill_params['organismid'],
                                                   gapfill_params['gff_file']
                                                   )
            result = missing_kegg
        else:
            print(f'{Fore.RED}To use the KEGG comparison the specification of the organismid is obligatory.\n'
                  + 'If there is no organismid available for your organism in KEGG but an entry for your organism exists in BioCyc, use the option \'BioCyc\'.\n'
                  + 'If no entry for your organism exists in KEGG and/or BioCyc, the gap analysis cannot be done.')

    elif db_to_compare == 'BioCyc':
        missing_biocyc = rga_biocyc.biocyc_gene_comp(model_libsbml,
                                                     gapfill_params['biocyc_files']
                                                     )
        result = missing_biocyc
        # Placeholder for organisms contained in no database (see module-level skeleton):
        # elif db_to_compare == 'GFF':
        #     gff_genes = gff_gene_comp(model_libsbml, gapfill_params['gff_file'])
        #     result = gff_genes

    elif db_to_compare == 'KEGG+BioCyc':
        missing_kegg_reacs = rga_kegg.kegg_gene_comp(model_libsbml,
                                                     gapfill_params['organismid'],
                                                     gapfill_params['gff_file']
                                                     )
        # Only the BiGG-to-KEGG mapping of the KEGG result is merged into the
        # BioCyc reaction table; the remaining columns are duplicates.
        missing_kegg_reacs.drop(['name', 'locus_tag', 'EC'], axis=1, inplace=True)
        missing_biocyc = rga_biocyc.biocyc_gene_comp(model_libsbml,
                                                     gapfill_params['biocyc_files']
                                                     )
        stats, missing_biocyc_genes, missing_biocyc_metabs, missing_biocyc_reacs = missing_biocyc
        missing_combined_reacs = missing_biocyc_reacs.merge(
            missing_kegg_reacs[['bigg_id', 'KEGG']], how='left', on='bigg_id')
        result = (stats, missing_biocyc_genes, missing_biocyc_metabs,
                  missing_combined_reacs, missing_kegg_reacs)

    # Fix: nothing to write if the KEGG branch bailed out without an organismid;
    # previously this fell through and crashed on `None.to_excel`.
    if result is None:
        return result

    # Fix: the original ignored `filename` and wrote to a hard-coded path.
    outfile = filename if filename.endswith('.xlsx') else f'{filename}.xlsx'
    if isinstance(result, tuple):
        with pd.ExcelWriter(outfile) as writer:
            result[0].to_excel(writer, sheet_name='gap fill statistics', index=False)
            result[1].to_excel(writer, sheet_name='genes', index=False)
            result[2].to_excel(writer, sheet_name='metabolites', index=False)
            result[3].to_excel(writer, sheet_name='reactions', index=False)
            if len(result) == 5:  # 'KEGG+BioCyc' adds the raw KEGG reaction table
                result[4].to_excel(writer, sheet_name='KEGG reactions', index=False)
    else:
        with pd.ExcelWriter(outfile) as writer:
            result.to_excel(writer, sheet_name='KEGG reactions', index=False)

    return result


def gapfill_model(model_libsbml: libModel, gap_analysis_result: Union[str, tuple]) -> libModel:
    """Main function to fill gaps in a model from a table

    Args:
        - model_libsbml (libModel): Model loaded with libSBML
        - gap_analysis_result (str|tuple): Path to Excel file from gap_analysis |
            Tuple of pd.DataFrames obtained from gap_analysis

    Returns:
        libModel: Gap filled model
    """
    model = model_libsbml
    missing_genes_df, missing_metabs_df, missing_reacs_df = None, None, None

    if isinstance(gap_analysis_result, tuple):  # Tuple of pandas dataframes from gap_analysis
        missing_genes_df = gap_analysis_result[1]
        missing_metabs_df = gap_analysis_result[2]
        missing_reacs_df = gap_analysis_result[3]
    else:  # Excel file from user-input
        with pd.ExcelFile(gap_analysis_result) as reader:
            gp_analysis_res = pd.read_excel(reader, sheet_name=['genes', 'metabolites', 'reactions'])
        missing_genes_df = gp_analysis_res.get('genes')
        missing_metabs_df = gp_analysis_res.get('metabolites')
        missing_reacs_df = gp_analysis_res.get('reactions')

    # Normalise NaN to None so the per-row guards below work uniformly.
    missing_genes_df = missing_genes_df.replace(np.nan, None)
    missing_metabs_df = missing_metabs_df.replace(np.nan, None)
    missing_reacs_df = missing_reacs_df.replace(np.nan, None)

    # (1) Add all missing genes needed for the missing reactions
    for _, row in missing_genes_df.iterrows():
        gp, model = create_gp(model_libsbml, row['model_id'], row['name'],
                              row['locus_tag'], row['protein_id'])

    # (2) Add all missing metabolites needed for the missing reactions
    for _, row in missing_metabs_df.iterrows():
        sp, model = create_species(model_libsbml, row['bigg_id'], row['name'],
                                   row['compartment'], row['charge'], row['Chemical Formula'])
        if 'BioCyc' in missing_metabs_df.columns:
            # Cell holds a stringified list; 'nan' entries become None before eval.
            biocyc_row = ast.literal_eval(str(row['BioCyc']).replace('nan', 'None'))
            if biocyc_row:
                for biocyc_id in biocyc_row:
                    if biocyc_id:
                        add_cv_term_metabolites(biocyc_id, 'BioCyc', sp)
                        add_cv_term_metabolites(biocyc_id, 'METACYC', sp)
        if 'InChI-Key' in missing_metabs_df.columns:
            inchi_key = str(row['InChI-Key'])
            inchi_key = inchi_key.removeprefix('InChIKey=') if 'InChIKey=' in inchi_key else inchi_key
            # Fix: missing values stringify to 'None' after the NaN replacement
            # above (previously only 'nan' was filtered, so the literal string
            # 'None' could be annotated).
            inchi_key = inchi_key if inchi_key not in ('nan', 'None') else None
            if inchi_key:
                add_cv_term_metabolites(inchi_key, 'InChI-Key', sp)
        if 'ChEBI' in missing_metabs_df.columns:
            # Fix: after the NaN replacement the cell may be None, which
            # float() cannot handle (previously raised TypeError).
            chebi_val = row.get('ChEBI')
            chebi_id = str(int(chebi_val)) if chebi_val is not None and not math.isnan(float(chebi_val)) else None
            if chebi_id:
                add_cv_term_metabolites(chebi_id, 'ChEBI', sp)

    # Propagate annotations between compartment variants of the same metabolite.
    model = update_annotations_from_others(model)

    # (3) Add all missing reactions
    for _, row in missing_reacs_df.iterrows():
        reaction_dict = ast.literal_eval(str(row['bigg_reaction']))
        reactants = reaction_dict.get('reactants')
        products = reaction_dict.get('products')
        # Spontaneous reactions get the generic spontaneous gene product.
        genes = ast.literal_eval(str(row['gene_product'])) if row['Spontaneous?'] != 'T' else 'G_spontaneous'
        compartment = row['compartment']
        compartment = compartment if compartment != 'exchange' else None
        reac, model = create_reaction(
            model=model_libsbml,
            reaction_id=row['bigg_id'],
            name=row['name'],
            reactants=reactants,
            products=products,
            fluxes=ast.literal_eval(str(row['fluxes'])),
            compartment=compartment,
            genes=genes
        )
        if 'bigg_aliases' in missing_reacs_df.columns:
            bigg_aliases_row = ast.literal_eval(str(row['bigg_aliases']))
            if bigg_aliases_row:
                for bigg_id in bigg_aliases_row:
                    if bigg_id != reac.getId():
                        add_cv_term_reactions(bigg_id, 'BIGG', reac)
        if 'KEGG' in missing_reacs_df.columns:
            kegg_row = ast.literal_eval(str(row['KEGG']).replace('nan', 'None'))
            if kegg_row:
                for kegg_id in kegg_row:
                    if kegg_id:
                        add_cv_term_reactions(kegg_id, 'KEGG', reac)
        if 'BioCyc' in missing_reacs_df.columns:
            biocyc_row = ast.literal_eval(str(row['BioCyc']))
            if biocyc_row:
                for biocyc_id in biocyc_row:
                    add_cv_term_reactions(biocyc_id, 'BioCyc', reac)
                    add_cv_term_reactions(biocyc_id, 'METACYC', reac)
        if 'EC' in missing_reacs_df.columns:
            ec_row = ast.literal_eval(str(row['EC']).replace('nan', 'None'))
            if ec_row:
                for ec_num in ec_row:
                    if ec_num:
                        add_cv_term_reactions(ec_num, 'EC', reac)

    return model


def gapfill(
        model_libsbml: libModel, gapfill_params: dict[str, str], filename: str
        ) -> Union[tuple[pd.DataFrame, libModel], tuple[tuple, libModel]]:
    """| Main function to fill gaps in a model by comparing the locus tags of the GeneProducts to
    | KEGG/BioCyc/(Genbank) GFF file

    Args:
        - model_libsbml (libModel): Model loaded with libSBML
        - gapfill_params (dict): Dictionary obtained from YAML file containing the parameter mappings
        - filename (str): Path to output file for gapfill analysis result

    Returns:
        tuple: ``gap_analysis()`` table(s) (1) & libSBML model (2)
            (1) pd.DataFrame|tuple(pd.DataFrame): Result from function ``gap_analysis()``
            (2) libModel: Gap filled model
    """
    gap_analysis_result = gap_analysis(model_libsbml, gapfill_params, filename)
    model = gapfill_model(model_libsbml, gap_analysis_result)
    return gap_analysis_result, model
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/gapfill.py
0.676299
0.288258
gapfill.py
pypi
import pandas as pd
from tqdm.auto import tqdm
from libsbml import Model as libModel
from refinegems.cvterms import add_cv_term_reactions, add_cv_term_metabolites, metabol_db_dict, get_id_from_cv_term
from refinegems.entities import create_gpr_from_locus_tag, create_reaction

__author__ = "Famke Baeuerle"


def add_reactions_from_table(model: libModel, table: pd.DataFrame, email: str) -> libModel:
    """Wrapper function to use with table format given in data/manual_curation.xlsx, sheet gapfill:
    Adds all reactions with their info given in the table to the given model

    Args:
        - model (libModel): Model loaded with libSBML
        - table (pd.DataFrame): Table in format of sheet gapfill from manual_curation.xlsx located in the data folder
        - email (str): User Email to access the NCBI Entrez database

    Returns:
        libModel: Modified model with new reactions
    """
    for reaction_info in tqdm(table.groupby('BIGG')):
        reac_id = reaction_info[0]
        if model.getReaction(str(reac_id)) is None:
            reactants = dict(table.loc[table['BIGG'] == reac_id, ['educts', 'stoich_e']].dropna().values)
            products = dict(table.loc[table['BIGG'] == reac_id, ['products', 'stoich_p']].dropna().values)
            fluxes = table.loc[table['BIGG'] == reac_id, ['lower_bound', 'upper_bound']].dropna().to_dict('records')[0]
            name = table.loc[table['BIGG'] == reac_id, ['name']].dropna().to_dict('records')[0]['name']
            reversible = table.loc[table['BIGG'] == reac_id, ['reversible']].dropna().to_dict('records')[0]['reversible']
            fast = table.loc[table['BIGG'] == reac_id, ['fast']].dropna().to_dict('records')[0]['fast']
            try:
                sbo = table.loc[table['BIGG'] == reac_id, ['sbo']].dropna().to_dict('records')[0]['sbo']
            except IndexError:
                # Fall back to the generic SBO term when the table row has none.
                print('SBO Term for ' + str(reac_id) + ' will be set to standard "SBO:0000167" (biochemical or transport reaction)')
                sbo = "SBO:0000167"
            reaction, model = create_reaction(model, reac_id, name, reactants, products, fluxes, reversible, fast, sbo)
            # All remaining columns are treated as annotations; fillna(0) marks
            # empty cells so they can be skipped below.
            # NOTE(review): 'sbo' is not dropped here, so a populated 'sbo' cell
            # is also passed to add_cv_term_reactions — confirm this is intended.
            for (columnName, columnData) in table.loc[table['BIGG'] == reac_id].drop(
                    ['educts', 'stoich_e', 'products', 'stoich_p', 'lower_bound', 'upper_bound',
                     'name', 'reversible', 'fast'], axis=1).fillna(0).items():
                # Fix: DataFrame.iteritems() was removed in pandas 2.0; .items()
                # is the drop-in replacement (available since pandas 0.25).
                for entry in columnData.values:
                    if entry != 0:
                        if columnName == 'locus':
                            reaction.getPlugin(0).createGeneProductAssociation().createGeneProductRef().setGeneProduct(str(entry))
                            if model.getPlugin(0).getGeneProductByLabel(str(entry)) is None:
                                gpr, model = create_gpr_from_locus_tag(model, str(entry), email)
                        else:
                            add_cv_term_reactions(str(entry), str(columnName), reaction)
    return model


def update_annotations_from_table(model: libModel, table: pd.DataFrame) -> libModel:
    """Wrapper function to use with table format given in data/manual_curation.xlsx, sheet metabs:
    Updates annotation of metabolites given in the table

    Args:
        - model (libModel): Model loaded with libSBML
        - table (pd.DataFrame): Table in format of sheet metabs from manual_curation.xlsx located in the data folder

    Returns:
        libModel: Modified model with new annotations
    """
    table = table.drop(['Name', 'FORMULA', 'Notiz'], axis=1).fillna(0)
    table['PUBCHEM'] = table['PUBCHEM'].astype(int)
    for metab_info in tqdm(table.groupby('BIGG')):
        met = metab_info[0]
        # Try all compartment variants of the metabolite; absent species make
        # getSpecies return None, whose attribute access raises AttributeError.
        for comp in ['_c', '_e', '_p']:
            try:
                metab = model.getSpecies('M_' + met + comp)
                if not metab.isSetMetaId():
                    metab.setMetaId('meta_' + metab.getId())
                # Fix: .iteritems() removed in pandas 2.0 -> .items().
                for (columnName, columnData) in table.loc[table['BIGG'] == met].items():
                    for entry in columnData.values:
                        if entry != 0:
                            add_cv_term_metabolites(str(entry), str(columnName), metab)
            except AttributeError:
                print(met + comp + ' not in model')
    return model


def update_annotations_from_others(model: libModel) -> libModel:
    """Synchronizes metabolite annotations for core, periplasm and extracelullar

    Args:
        - model (libModel): Model loaded with libSBML

    Returns:
        libModel: Modified model with synchronized annotations
    """
    for metab in model.getListOfSpecies():
        # Strip the compartment suffix ('_c'/'_e'/'_p') to find the siblings.
        base = metab.getId()[:-2]
        for comp in ['_c', '_e', '_p']:
            other_metab = model.getSpecies(base + comp)
            if other_metab is not None:
                if not other_metab.isSetMetaId():
                    other_metab.setMetaId('meta_' + other_metab.getId())
                # Copy every known database annotation from this metabolite
                # onto its compartment sibling.
                for db_id, code in metabol_db_dict.items():
                    ids = get_id_from_cv_term(metab, code)
                    for entry in ids:
                        if entry is not None:
                            add_cv_term_metabolites(entry, db_id, other_metab)
    return model
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/curate.py
0.645902
0.330242
curate.py
pypi
import io
import re
import sqlite3
import requests
import pandas as pd
from enum import Enum
from sqlite3 import Error
from os import path

__author__ = 'Gwendolyn O. Gusak'

PATH_TO_DB_DATA = path.join(path.dirname(path.realpath(__file__)), 'database')
PATH_TO_DB = path.join(PATH_TO_DB_DATA, 'data.db')
VERSION_FILE = path.join(PATH_TO_DB_DATA, 'current_bigg_db_version.txt')
VERSION_URL = 'http://bigg.ucsd.edu/api/v2/database_version'


class ValidationCodes(Enum):
    """Validation codes for the database

    Args:
        - Enum (Enum): Provided as input to get a number mapping for the codes
    """
    # Fix: the original definitions had trailing commas, which silently made
    # every member value a 1-tuple, e.g. (0,) instead of 0.
    COMPLETE = 0                     # All tables are in data.db
    EMPTY = 1                        # data.db is either empty or incorrect
    BIGG = 2                         # Only BiGG tables are in data.db
    SBO_MEDIA = 3                    # Only SBO & Media tables are in data.db (Can only occurr together)
    BIGG_SBO_MEDIA = 4               # Only BiGG, SBO and media tables are in data.db
    MODELSEED_COMPOUNDS = 5          # Only ModelSEED compounds table is in data.db
    BIGG_MSEED_COMPOUNDS = 6         # Only BiGG and ModelSEED compounds tables are in data.db
    SBO_MEDIA_MSEED_COMPOUNDS = 7    # Only SBO, media and ModelSEED compounds tables are in data.db
    # Fix: is_valid_database returned the correctly spelled name while only the
    # misspelled member existed -> AttributeError at runtime. The misspelling is
    # kept as an alias for backwards compatibility.
    BIGG_MSEED_COMPPOUNDS = 6


def is_valid_database(db_cursor: sqlite3.Cursor) -> ValidationCodes:
    """Verifies if database has:
        - 2 tables with names 'bigg_metabolites' & 'bigg_reactions'
        - 2 tables with names 'bigg_to_sbo' & 'ec_to_sbo'
        - 2 tables with names 'media' & 'media_composition'
        - 1 table with name 'modelseed_compounds'

    Args:
        - db_cursor (sqlite3.Cursor): Cursor from open connection to the database (data.db)

    Returns:
        ValidationCodes: Member describing which table groups are present
    """
    print('Verifying database...')

    # Fetches the table names as string tuples from the connected database
    db_cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    tables = [string[0] for string in db_cursor.fetchall()]

    bigg_tables_contained = len([s for s in tables if re.match('^bigg_(?!to)(.*?)', s, re.IGNORECASE)]) == 2
    sbo_tables_contained = len([s for s in tables if re.match('(.*?)_sbo$', s, re.IGNORECASE)]) == 2
    media_tables_contained = len([s for s in tables if re.match('media', s, re.IGNORECASE)]) == 2
    sbo_media_tables_contained = sbo_tables_contained and media_tables_contained  # These can only occur together
    modelseed_cmpd_tbl_contained = len([s for s in tables if s == 'modelseed_compounds']) == 1

    bigg_sbo_media_tbls_contained = bigg_tables_contained and sbo_media_tables_contained
    bigg_modelseed_cmpd_tbls_contained = bigg_tables_contained and modelseed_cmpd_tbl_contained
    sbo_media_modelseed_cmpd_tbls_contained = sbo_media_tables_contained and modelseed_cmpd_tbl_contained
    all_tables_contained = bigg_sbo_media_tbls_contained and modelseed_cmpd_tbl_contained

    # Report the most complete matching combination first.
    if all_tables_contained:
        return ValidationCodes.COMPLETE
    elif bigg_modelseed_cmpd_tbls_contained:
        return ValidationCodes.BIGG_MSEED_COMPOUNDS
    elif sbo_media_modelseed_cmpd_tbls_contained:
        return ValidationCodes.SBO_MEDIA_MSEED_COMPOUNDS
    elif bigg_sbo_media_tbls_contained:
        return ValidationCodes.BIGG_SBO_MEDIA
    elif bigg_tables_contained:
        return ValidationCodes.BIGG
    elif sbo_media_tables_contained:
        return ValidationCodes.SBO_MEDIA
    elif modelseed_cmpd_tbl_contained:
        return ValidationCodes.MODELSEED_COMPOUNDS
    else:
        return ValidationCodes.EMPTY


def create_sbo_media_database(db_cursor: sqlite3.Cursor):
    """Creates the SBO annotation database with 2 tables ('bigg_to_sbo' & 'ec_to_sbo') & the media database
    with 2 tables ('media', 'media_compositions') from file './data/database/sbo_media_db.sql'

    Args:
        - db_cursor (sqlite3.Cursor): Cursor from open connection to the database (data.db)
    """
    print('Adding SBO and media tables...')
    with open(path.join(PATH_TO_DB_DATA, 'sbo_media_db.sql')) as schema:
        db_cursor.executescript(schema.read())


def update_bigg_db(latest_version: str, db_connection: sqlite3.Connection):
    """Updates the BiGG tables 'bigg_metabolites' & 'bigg_reactions' within a database (data.db)

    Args:
        - latest_version (str): String containing the latest version of the BiGG database
        - db_connection (sqlite3.Connection): Open connection to the database (data.db)
    """
    print('Adding BiGG tables...')
    # Drop stale tables before re-importing the current dumps.
    db_connection.execute('DROP TABLE IF EXISTS bigg_metabolites')
    db_connection.execute('DROP TABLE IF EXISTS bigg_reactions')

    # Store currently used version
    with open(VERSION_FILE, 'w') as file:
        file.write(latest_version)

    # Create BiGG metabolites table
    BIGG_MODELS_METABS_URL = 'http://bigg.ucsd.edu/static/namespace/bigg_models_metabolites.txt'
    bigg_models_metabs = requests.get(BIGG_MODELS_METABS_URL).text
    bigg_models_metabs_df = pd.read_csv(io.StringIO(bigg_models_metabs), dtype=str, sep='\t')
    bigg_models_metabs_df.to_sql('bigg_metabolites', db_connection, index=False)

    # Create BiGG reactions table
    BIGG_MODELS_REACS_URL = 'http://bigg.ucsd.edu/static/namespace/bigg_models_reactions.txt'
    bigg_models_reacs = requests.get(BIGG_MODELS_REACS_URL).text
    bigg_models_reacs_df = pd.read_csv(io.StringIO(bigg_models_reacs), dtype=str, sep='\t')
    bigg_models_reacs_df.to_sql('bigg_reactions', db_connection, index=False)


def get_latest_bigg_databases(db_connection: sqlite3.Connection, is_missing: bool = True):
    """Gets the latest BiGG tables for metabolites & reactions if:
        - No version file is locally available
        - The version in the local version file is NOT the latest
        - No BiGG tables currently exist in the database

    Args:
        - db_connection (sqlite3.Connection): Open connection to the database (data.db)
        - is_missing (bool, optional): True if no BiGG tables are in the database. Defaults to True.
    """
    # Check if BiGG database had an update
    LATEST_VERSION = requests.get(VERSION_URL).json()['bigg_models_version']

    if not path.exists(VERSION_FILE) or is_missing:
        update_bigg_db(LATEST_VERSION, db_connection)
    else:
        with open(VERSION_FILE, 'r') as file:
            version = file.readline().strip()
        if version != LATEST_VERSION:
            update_bigg_db(LATEST_VERSION, db_connection)


def get_modelseed_compounds_database(db_connection: sqlite3.Connection):
    """Retrieves the compounds table from ModelSEED from the respective GitHub repository

    Args:
        - db_connection (sqlite3.Connection): Open connection to the database (data.db)
    """
    print('Adding the ModelSEED compounds table...')
    MODELSEED_COMPOUNDS_URL = 'https://raw.githubusercontent.com/ModelSEED/ModelSEEDDatabase/master/Biochemistry/compounds.tsv'
    modelseed_compounds = requests.get(MODELSEED_COMPOUNDS_URL).text
    modelseed_df = pd.read_csv(io.StringIO(modelseed_compounds), sep='\t')
    modelseed_df.to_sql('modelseed_compounds', db_connection, index=False, if_exists='replace')


def initialise_database():
    """Initialises/updates the database (data.db)

    After initialisation the database contains:
        - 2 tables with names 'bigg_metabolites' & 'bigg_reactions'
        - 2 tables with names 'bigg_to_sbo' & 'ec_to_sbo'
        - 2 tables with names 'media' & 'media_composition'
        - 1 table with name 'modelseed_compounds'
    """
    # Initialise empty connection
    con = None
    print('Initialising database...')

    # Try to open connection & get cursor; fetch whichever table groups are
    # missing according to the validation code.
    try:
        con = sqlite3.connect(PATH_TO_DB)
        cursor = con.cursor()
        validity_code = is_valid_database(cursor)

        if validity_code == ValidationCodes.BIGG:
            print('Only BiGG tables contained in database.')
            create_sbo_media_database(cursor)
            get_modelseed_compounds_database(con)
        elif validity_code == ValidationCodes.SBO_MEDIA:
            print('Only SBO and media tables contained in database.')
            get_latest_bigg_databases(con)
            get_modelseed_compounds_database(con)
        elif validity_code == ValidationCodes.MODELSEED_COMPOUNDS:
            print('Only ModelSEED compounds table contained in database.')
            create_sbo_media_database(cursor)
            get_latest_bigg_databases(con)
        elif validity_code == ValidationCodes.BIGG_SBO_MEDIA:
            print('Only BiGG, SBO and media tables contained in database.')
            get_modelseed_compounds_database(con)
        elif validity_code == ValidationCodes.BIGG_MSEED_COMPOUNDS:
            print('Only BiGG and ModelSEED compounds tables contained in database.')
            create_sbo_media_database(cursor)
        elif validity_code == ValidationCodes.SBO_MEDIA_MSEED_COMPOUNDS:
            print('Only SBO, media and ModelSEED compounds tables contained in database.')
            get_latest_bigg_databases(con)
        elif validity_code == ValidationCodes.EMPTY:
            print('Incorrect or empty database. Initialise database with required tables...')
            create_sbo_media_database(cursor)
            get_latest_bigg_databases(con)
            get_modelseed_compounds_database(con)
        elif validity_code == ValidationCodes.COMPLETE:
            print('Verifying if BiGG tables are up-to-date...')
            get_latest_bigg_databases(con, False)
    except Error as e:
        print(e)
    else:
        # Fix: the success message was printed from `finally`, i.e. even after
        # an error had been reported.
        print('All tables in database up-to-date. Initialisation complete.')
    finally:
        if con:
            con.close()
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/databases.py
0.542136
0.157525
databases.py
pypi
import re import pandas as pd from Bio import Entrez from libsbml import Model as libModel from libsbml import GeneProduct, Species, Reaction from refinegems.cvterms import add_cv_term_genes, add_cv_term_metabolites, add_cv_term_reactions from refinegems.io import search_ncbi_for_gpr from typing import Union __author__ = "Famke Baeuerle and Gwendolyn O. Gusak" # Function originally from refineGEMs.genecomp/refineGEMs.KEGG_analysis --- Modified def get_model_genes(model: libModel, kegg: bool=False) -> pd.DataFrame: """Extracts KEGG Genes/Locus tags from given model Args: - model (model-libsbml): Model loaded with libSBML - kegg (bool): True if KEGG Genes should be extracted, otherwise False Returns: pd.DataFrame: Table with all KEGG Genes/Locus tags in the model """ genes_in_model = [] for gene in model.getPlugin(0).getListOfGeneProducts(): if kegg: cv_terms = gene.getCVTerms() if cv_terms: for cv_term in cv_terms: for idx in range(cv_term.getNumResources()): uri = cv_term.getResourceURI(idx) if 'kegg.genes' in uri: genes_in_model.append(uri.split('kegg.genes:')[1]) else: genes_in_model.append(gene.getLabel()) return pd.DataFrame(genes_in_model) # Function originally from refineGEMs.genecomp/refineGEMs.KEGG_analysis --- Modified def compare_gene_lists(gps_in_model: pd.DataFrame, db_genes: pd.DataFrame, kegg: bool=True) -> pd.DataFrame: """Compares the provided tables according to column 0/'Locus_tag' Args: - gps_in_model (pd.DataFrame): Table containing the KEGG Gene IDs/Locus tags in the model - db_genes (pd.DataFrame): Table containing the KEGG Gene IDs for the organism from KEGG/ locus tags (Accession-2) from BioCyc - kegg (bool): True if KEGG Genes should be extracted, otherwise False Returns: pd.DataFrame: Table containing all missing genes """ in_db = db_genes.set_index(0) if kegg else db_genes.set_index('locus_tag') in_model = gps_in_model.set_index(0) genes_in_db_not_in_model = in_db[~in_db.index.isin(in_model.index)] return 
genes_in_db_not_in_model.reset_index().iloc[:, 0] if kegg else genes_in_db_not_in_model.reset_index() # Function originally from refineGEMs.genecomp/refineGEMs.KEGG_analysis --- Modified def get_model_reacs_or_metabs(model_libsbml: libModel, metabolites: bool=False, col_name: str='bigg_id') -> pd.DataFrame: """Extracts table of reactions/metabolites with BiGG IDs from model Args: - model_libsbml (libModel): Model loaded with libSBML - metabolites (bool): Set to True if metabolites from model should be extracted - col_name (str): Name to be used for column in Table, default: 'bigg_id' Returns: pd.DataFrame: Table with model identifiers for either metabolites or reactions """ reac_or_metab_list = model_libsbml.getListOfSpecies() if metabolites else model_libsbml.getListOfReactions() list_of_reacs_or_metabs = [] for reac_or_metab in reac_or_metab_list: list_of_reacs_or_metabs.append(reac_or_metab.id[2:]) reac_or_metab_list_df = pd.Series(list_of_reacs_or_metabs) reac_or_metab_list_df = pd.DataFrame(reac_or_metab_list_df, columns=[col_name]) return reac_or_metab_list_df def create_gpr_from_locus_tag(model: libModel, locus_tag: str, email: str) -> tuple[GeneProduct, libModel]: """Creates GeneProduct in the given model Args: - model (libModel): Model loaded with libSBML - locus_tag (str): NCBI compatible locus_tag - email (str): User Email to access the NCBI Entrez database Returns: tuple: libSBML GeneProduct (1) & libSBML model (2) (1) GeneProduct: Created gene product (2) libModel: Model containing the created gene product """ Entrez.email = email name, locus = search_ncbi_for_gpr(locus_tag) gpr = model.getPlugin(0).createGeneProduct() gpr.setName(name) gpr.setId(locus_tag) gpr.setMetaId('meta_' + locus_tag) gpr.setLabel(locus_tag) gpr.setSBOTerm("SBO:0000243") add_cv_term_genes(locus_tag, 'NCBI', gpr) return gpr, model def create_gp(model: libModel, model_id: str, name: str, locus_tag: str, protein_id: str) -> tuple[GeneProduct, libModel]: """Creates GeneProduct in 
the given model Args: - model (libModel): Model loaded with libSBML - model_id (str): ID identical to ID that CarveMe adds from the NCBI FASTA input file - name (str): Name of the GeneProduct - locus_tag (str): Genome-specific locus tag used as label in the model - protein_id (str): NCBI Protein/RefSeq ID Returns: tuple: libSBML GeneProduct (1) & libSBML model (2) (1) GeneProduct: Created gene product (2) libModel: Model containing the created gene product """ id_db = None gp = model.getPlugin(0).createGeneProduct() gp.setId(model_id) gp.setName(name) gp.setLabel(locus_tag) gp.setSBOTerm('SBO:0000243') gp.setMetaId(f'meta_{model_id}') if re.fullmatch('^(((AC|AP|NC|NG|NM|NP|NR|NT|NW|WP|XM|XP|XR|YP|ZP)_\d+)|(NZ_[A-Z]{2,4}\d+))(\.\d+)?$', protein_id, re.IGNORECASE): id_db = 'REFSEQ' elif re.fullmatch('^(\w+\d+(\.\d+)?)|(NP_\d+)$', protein_id, re.IGNORECASE): id_db = 'NCBI' if id_db: add_cv_term_genes(protein_id, id_db, gp) return gp, model def create_species( model: libModel, metabolite_id: str, name: str, compartment_id: str, charge: int, chem_formula: str ) -> tuple[Species, libModel]: """Creates Species/Metabolite in the given model Args: - model (libModel): Model loaded with libSBML - metabolite_id (str): Metabolite ID within model (If model from CarveMe, preferable a BiGG ID) - name (str): Name of the metabolite - compartment_id (str): ID of the compartment where metabolite resides - charge (int): Charge for the metabolite - chem_formula (str): Chemical formula for the metabolite Returns: tuple: libSBML Species (1) & libSBML model (2) (1) Species: Created species/metabolite (2) libModel: Model containing the created metabolite """ metabolite = model.createSpecies() metabolite.setId(f'M_{metabolite_id}') if name: metabolite.setName(name) metabolite.setMetaId(f'meta_M_{metabolite_id}') metabolite.setSBOTerm('SBO:0000247') metabolite.setInitialAmount(float('NaN')) metabolite.setHasOnlySubstanceUnits(True) metabolite.setBoundaryCondition(False) 
metabolite.setConstant(False) metabolite.setCompartment(compartment_id) metabolite.getPlugin(0).setCharge(charge) metabolite.getPlugin(0).setChemicalFormula(chem_formula) add_cv_term_metabolites(metabolite_id[:-2], 'BIGG', metabolite) return metabolite, model def get_reversible(fluxes: dict[str: str]) -> bool: """Infer if reaction is reversible from flux bounds Args: - fluxes (dict): Dictionary containing the keys 'lower_bound' & 'upper_bound' with values in ['cobra_default_lb', 'cobra_0_bound', 'cobra_default_ub'] Returns: bool: True if reversible else False """ return (fluxes['lower_bound'] == 'cobra_default_lb') and (fluxes['upper_bound'] == 'cobra_default_ub') def create_reaction( model: libModel, reaction_id: str, name:str, reactants: dict[str: int], products: dict[str: int], fluxes: dict[str: str], reversible: bool=None, fast: bool=None, compartment: str=None, sbo: str=None, genes: Union[str, list[str]]=None ) -> tuple[Reaction, libModel]: """Creates new reaction in the given model Args: - model (libModel): Model loaded with libSBML - reaction_id (str): BiGG ID of the reaction to create - name (str): Human readable name of the reaction - reactants (dict): Metabolites as keys and their stoichiometry as values - products (dict): Metabolites as keys and their stoichiometry as values - fluxes (dict): Dictionary with lower_bound and upper_bound as keys - reversible (bool): True/False for the reaction - fast (bool): True/False for the reaction - compartment (str): BiGG compartment ID of the reaction (if available) - sbo (str): SBO term of the reaction - genes (str|list): List of genes belonging to reaction Returns: tuple: libSBML reaction (1) & libSBML model (2) (1) Reaction: Created reaction (2) libModel: Model containing the created reaction """ reaction = model.createReaction() reaction.setId('R_' + reaction_id) if name: reaction.setName(name) reaction.setMetaId('meta_R_' + reaction_id) sbo = sbo if sbo else 'SBO:0000167' # SBO term for biochemical or transport 
reaction reaction.setSBOTerm(sbo) fast = fast if fast else False reaction.setFast(fast) if compartment: reaction.setCompartment(compartment) # Set compartment for reaction if available reversible = reversible if reversible else get_reversible(fluxes) reaction.setReversible(reversible) if genes: if genes == 'G_spontaneous': reaction.getPlugin(0).createGeneProductAssociation().createGeneProductRef().setGeneProduct(gene) elif len(genes) == 1: reaction.getPlugin(0).createGeneProductAssociation().createGeneProductRef().setGeneProduct(genes[0]) else: gp_ass_or = reaction.getPlugin(0).createGeneProductAssociation().createOr() for gene in genes: # Set GeneProductReferences if available gp_ass_or.createGeneProductRef().setGeneProduct(gene) for metab, stoich in reactants.items(): #reactants as dict with metab:stoich reaction.addReactant(model.getSpecies('M_' + metab), stoich) for metab, stoich in products.items(): #reactants as dict with metab:stoich reaction.addProduct(model.getSpecies('M_' + metab), stoich) reaction.getPlugin(0).setLowerFluxBound(fluxes['lower_bound']) reaction.getPlugin(0).setUpperFluxBound(fluxes['upper_bound']) add_cv_term_reactions(reaction_id, 'BIGG', reaction) return reaction, model
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/entities.py
0.816955
0.397237
entities.py
pypi
import pandas as pd from libsbml import Model as libModel from refinegems.modelseed import get_modelseed_compounds __author__ = "Famke Baeuerle" def correct_charges_from_db(model: libModel, compounds: pd.DataFrame) -> tuple[libModel, dict]: """Adds charges taken from given database to metabolites which have no defined charge Args: - model (libModel): Model loaded with libsbml - compounds (pd.DataFrame): Containing database data with 'BiGG' (BiGG-Ids) and 'charge' (float or int) as columns Returns: tuple: libSBML model (1) & dictionary 'metabolite_id': list(charges) (2) (1) libModel: Model with added charges (2) dict: Metabolites with respective multiple charges """ spe = model.getListOfSpecies() mulchar = dict() for i in spe: if not i.getPlugin('fbc').isSetCharge( ): # we are only interested in metab without charge bigg = i.getId()[2:-2] if len(compounds[compounds['BiGG'] == bigg]['charge'].array) == 1: # eindeutig charge = compounds[compounds['BiGG'] == bigg]['charge'].array[0] i.getPlugin('fbc').setCharge(int(charge)) elif len(compounds[compounds['BiGG'] == bigg]['charge'].array) > 1: charges = compounds[compounds['BiGG'] == bigg]['charge'].array if all(x == charges[0] for x in charges): charge = charges[0] i.getPlugin('fbc').setCharge(int(charge)) else: mulchar[bigg] = charges return model, mulchar def correct_charges_modelseed(model: libModel) -> tuple[libModel, dict]: """Wrapper function which completes the steps to charge correction with the ModelSEED database Args: - model (libModel): Model loaded with libsbml Returns: tuple: libSBML model (1) & dictionary 'metabolite_id': list(charges) (2) (1) libModel: Model with added charges (2) dict: Metabolites with respective multiple charges """ modelseed_compounds = get_modelseed_compounds() model_corr, multiple_charges = correct_charges_from_db( model, modelseed_compounds) return model_corr, multiple_charges
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/charges.py
0.582847
0.42674
charges.py
pypi
import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sns import numpy as np from tqdm import tqdm from venn import venn from libsbml import Model as libModel from cobra import Model as cobraModel from refinegems.io import load_medium_from_db, search_sbo_label from refinegems.growth import growth_one_medium_from_default, growth_one_medium_from_minimal from refinegems.investigate import initial_analysis, get_reactions_per_sbo __author__ = "Famke Baeuerle" def plot_initial_analysis(models: list[libModel]): """Creates bar plot of number of entities per Model Args: - models (list[libModel]): Models loaded with libSBML Returns: plot: Pandas Barchart """ numbers = pd.DataFrame([initial_analysis(model) for model in models], columns=['model', 'metabolites', 'reactions', 'genes']) ax = numbers.set_index('model').plot.bar(y=['metabolites', 'reactions', 'genes'], figsize=(8, 5), cmap='Paired', rot=0) # commented is possibility to integrate memote scores #numbers.set_index('model').plot(y='Memote score', ax=ax, use_index=False, linestyle=':', secondary_y='Memote score', color='k', marker='D', legend=True) #ax.right_ax.set_ylabel('Memote score [%]') #ax.right_ax.legend(loc='upper right', bbox_to_anchor=[0.98, 0.9]) #ax.right_ax.set_ylim([75, 95]) ax.legend(title=False, loc='upper left', ncol=3, frameon=False) ylim = numbers.drop('model', axis=1).max().max() + 200 ax.set_ylim([0,ylim]) ax.set_xlabel('') ax.tick_params(axis='x',which='both', bottom=False,top=False) return ax def get_sbo_mapping_multiple(models: list[libModel]) -> pd.DataFrame: """Determines number of reactions per SBO Term and adds label of SBO Terms Args: - models (list[libModel]): Models loaded with libSBML Returns: pd.DataFrame: SBO Terms, number of reactions per Model and SBO Label """ mappings = {} for model in models: mappings[model.id] = get_reactions_per_sbo(model) df = pd.DataFrame.from_dict(mappings) df = df.reset_index().rename({'index': 'SBO-Term'}, axis=1) 
df['SBO-Name'] = df['SBO-Term'].apply(search_sbo_label) return df def plot_rea_sbo_multiple(models: list[libModel], rename=None): """Plots reactions per SBO Term in horizontal bar chart with stacked bars for the models Args: - models (list[libModel]): Models loaded with libSBML - rename (dict, optional): Rename model ids to custom names. Defaults to None. Returns: plot: Pandas stacked barchart """ map = get_sbo_mapping_multiple(models) id_list = [mod.id for mod in models] map = map[(map[id_list]>3).all(axis=1)] map = map.drop('SBO-Term', axis=1).sort_values(id_list[0]).set_index('SBO-Name') if rename is not None: map = map.rename(rename, axis=1) fig = map.plot.barh(stacked=True, width=.8, figsize=(8,10)) fig.set_ylabel('') fig.set_xlabel('number of reactions', fontsize=16) fig.legend(loc='lower right') return fig def plot_venn(models: list[cobraModel], entity: str, perc: bool=False, rename=None): """Creates Venn diagram to show the overlap of model entities Args: - models (list[cobraModel]): Models loaded with cobrapy - entity (str): Compare on metabolite|reaction - perc (bool, optional): True if percentages should be used. Defaults to False. - rename (dict, optional): Rename model ids to custom names. Defaults to None. 
Returns: plot: Venn diagram """ intersec = {} for model in models: reas = [] if entity == 'metabolite': for rea in model.metabolites: reas.append(rea.id) if entity == 'reaction': for rea in model.reactions: reas.append(rea.id) if rename is not None: intersec[rename[model.id]] = set(reas) else: intersec[model.id] = set(reas) if perc: fig = venn(intersec, fmt="{percentage:.1f}%") else: fig = venn(intersec) return fig def plot_heatmap_dt(growth: pd.DataFrame): """Creates heatmap of simulated doubling times with additives Args: - growth (pd.DataFrame): Containing growth data from simulate_all Returns: plot: Seaborn Heatmap """ growth=growth.set_index(['medium', 'model']).sort_index().T.stack() growth.columns.name=None growth.index.names = (None,None) growth.index.name=None growth.index = growth.index.get_level_values(1) growth[growth > 500] = 0 growth[growth < 0] = 0 growth.replace([np.inf, -np.inf], 0, inplace=True) over_growth = growth.max().max() + 6 growth.replace(np.nan, over_growth, inplace=True) under_growth = growth.min().min() - 5 vmin= under_growth if under_growth > 1e-5 else 1e-5 #Use same threshhold as in find_missing_essential in growth vmax=over_growth - 1 annot = growth.copy() annot = annot.round().astype(int) annot[annot < 1e-5] = '' annot.replace(over_growth.round().astype(int), 'No data', inplace=True) cmap=matplotlib.cm.get_cmap('YlGn').copy() cmap.set_under('black') cmap.set_over('white') fig, ax = plt.subplots(figsize=(10,8)) sns.heatmap(growth.T, annot=annot.T, annot_kws={"fontsize":15}, vmin=vmin, vmax=vmax, cmap=cmap, linewidth=.5, cbar_kws = {'orientation':'vertical', 'label':'doubling time [min]', 'extend': 'min', 'extendrect':True}, ax=ax, fmt='' ) rotation = 40 if len(growth.index) > 3 else 0 plt.tick_params(rotation=0, bottom=False, top=False, left=False, right=False) ax.set_xticklabels(ax.get_xticklabels(), rotation=rotation, ha="right") return fig def plot_heatmap_native(growth: pd.DataFrame): """Creates a plot were if growth without 
additives is possible is marked from yellow to green otherwise black Args: - growth (pd.DataFrame): Containing growth data from simulate_all Returns: plot: Seaborn Heatmap """ def get_native_growth(row): if row['complete'] == True: return row['doubling_time [min]'] else: return 0 growth['native_growth'] = growth.apply(get_native_growth, axis=1) growth = growth[['medium', 'model', 'native_growth']] growth=growth.set_index(['medium', 'model']).sort_index().T.stack() growth.columns.name=None growth.index.names = (None,None) growth.index.name=None growth.index = growth.index.get_level_values(1) growth[growth > 500] = 0 growth[growth < 0] = 0 growth.replace([np.inf, -np.inf], 0, inplace=True) over_growth = growth.max().max() + 6 growth.replace(np.nan, over_growth, inplace=True) annot = growth.copy() annot = annot.round().astype(int) annot[annot == np.nan] = 'No data' annot[annot < 1e-5] = '' annot.replace(over_growth.round().astype(int), 'No data', inplace=True) under_growth = growth.min().min() - 5 vmin= under_growth if under_growth > 1e-5 else 1e-5 #Use same threshhold as in find_missing_essential in growth vmax= over_growth - 1 cmap=matplotlib.cm.get_cmap('YlGn').copy() cmap.set_under('black') cmap.set_over('white') fig, ax = plt.subplots(figsize=(10,8)) sns.heatmap(growth.T, annot=annot.T, annot_kws={"fontsize":15}, vmin=vmin, vmax=vmax, cmap=cmap, linewidth=.5, ax=ax, cbar_kws={'orientation':'vertical', 'label':'doubling time [min]', 'extend': 'min', 'extendrect':True}, fmt='' ) plt.xticks(rotation=0) plt.yticks(rotation=0) rotation = 40 if len(growth.index) > 3 else 0 plt.tick_params(rotation=0, bottom=False, top=False, left=False, right=False) ax.set_xticklabels(ax.get_xticklabels(), rotation=rotation, ha="right") return fig def simulate_all(models: list[cobraModel], media: list[str], basis: str, anaerobic: bool) -> pd.DataFrame: """Does a run of growth simulation for multiple models on different media Args: - models (list[cobraModel]): Models loaded with cobrapy 
- media (list[str]): Media of interest (f.ex. LB, M9, ...) - basis (str): Either default_uptake (adding metabs from default) or minimal_uptake (adding metabs from minimal medium) - anaerobic (bool): If True 'EX_o2_e' is set to 0.0 to simulate anaerobic conditions Returns: pd.DataFrame: table containing the results of the growth simulation """ growth = pd.DataFrame() for medium_id in tqdm(media): medium = load_medium_from_db(medium_id) for model in models: essentials_given = False if (basis=='default_uptake'): growth_one = growth_one_medium_from_default(model, medium, anaerobic).drop('missing exchanges', axis=1) elif (basis == 'minimal_uptake'): growth_one = growth_one_medium_from_minimal(model, medium, anaerobic).drop('missing exchanges', axis=1) if growth_one['essential'].dropna().size == 0: essentials_given = True else: growth_list = growth_one['essential'].dropna().to_list() growth_string = ', '.join(growth_list) essentials_given = growth_string growth_one = growth_one.drop('essential', axis=1) growth_one['complete'] = essentials_given growth_one = growth_one.dropna() growth_one['model'] = model.id growth_one = growth_one[['model', 'medium', 'doubling_time [min]', 'growth_value', 'complete']] growth_one['doubling_time [min]'].astype(float).round(2) growth_one['growth_value'].astype(float).round(2) growth = growth.append( growth_one, ignore_index=True) return growth
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/comparison.py
0.833426
0.460592
comparison.py
pypi
import logging from libsbml import BIOLOGICAL_QUALIFIER, BQB_IS, BQB_OCCURS_IN, BQB_IS_HOMOLOG_TO, MODEL_QUALIFIER, BQM_IS_DESCRIBED_BY, Unit, CVTerm, Species, Reaction, GeneProduct, Group, SBase __author__ = "Famke Baeuerle and Gwendolyn O. Gusak" metabol_db_dict = { 'BIGG': 'bigg.metabolite:', 'BIOCYC': 'biocyc:META:', 'BioCyc': 'biocyc:META:', 'BRENDA': 'brenda:', 'CHEBI': 'CHEBI:', 'ChEBI': 'CHEBI:', 'HMDB': 'hmdb:HMDB', 'Human Metabolome Database': 'hmdb:HMDB', 'INCHI': 'inchi:', 'InChI': 'inchi:', 'InChI-Key': 'inchikey:', 'KEGG': 'kegg.compound:', #'KEGG Compound': 'kegg.compound:', 'METACYC': 'metacyc.compound:', 'MXNREF': 'metanetx.chemical:', 'MetaNetX': 'metanetx.chemical:', 'PUBCHEM': 'pubchem.compound:', 'REACTOME': 'reactome:', 'Reactome': 'reactome:', 'SEED': 'seed.compound:', #'UPA': 'unipathway.compound:', #closed due to financial issues #'UniPathway Compound': 'unipathway.compound:' 'VMH': 'vmhmetabolite:' } reaction_db_dict = { 'BIGG': 'bigg.reaction:', 'BioCyc': 'biocyc:META:', 'BRENDA': 'brenda:', 'EC': 'ec-code:', 'HMDB': 'hmdb:HMDB', 'KEGG': 'kegg.reaction:', 'METACYC': 'metacyc.reaction:', 'MXNREF': 'metanetx.reaction:', 'MetaNetX': 'metanetx.reaction:', 'REACTOME': 'reactome:', 'Reactome': 'reactome:', 'RHEA': 'rhea:', 'SEED': 'seed.reaction:', #'UPA': 'unipathway.reaction:', #'UniPathway Reaction': 'unipathway.reaction:' 'VMH': 'vmhreaction:' } gene_db_dict = { 'KEGG': 'kegg.genes:', 'NCBI': 'ncbiprotein:', 'REFSEQ': 'refseq:', 'UNIPROT': 'uniprot:' } pathway_db_dict = {'KEGG': 'kegg.pathway:'} MIRIAM = 'https://identifiers.org/' OLD_MIRIAM = 'http://identifiers.org/' def add_cv_term_units(unit_id: str, unit: Unit, relation: int): """Adds CVTerm to a unit Args: - unit_id (str): ID to add as URI to annotation - unit (Unit): Unit to add CVTerm to - relation (int): Provides model qualifier to be added """ cv = CVTerm() cv.setQualifierType(MODEL_QUALIFIER) cv.setModelQualifierType(relation) if relation == BQM_IS_DESCRIBED_BY: 
cv.addResource(f'https://identifiers.org/{unit_id}') else: cv.addResource(f'https://identifiers.org/UO:{unit_id}') unit.addCVTerm(cv) def add_cv_term_metabolites(entry: str, db_id: str, metab: Species): """Adds CVTerm to a metabolite Args: - entry (str): Id to add as annotation - db_id (str): Database to which entry belongs. Must be in metabol_db_dict.keys(). - metab (Species): Metabolite to add CVTerm to """ if db_id == 'HMDB' or db_id == 'Human Metabolome Database': if entry[:4] == 'HMDB': entry = entry[4:] cv = CVTerm() cv.setQualifierType(BIOLOGICAL_QUALIFIER) cv.setBiologicalQualifierType(BQB_IS) cv.addResource('https://identifiers.org/' + metabol_db_dict[db_id] + entry) metab.addCVTerm(cv) metab.addCVTerm(cv) def add_cv_term_reactions(entry: str, db_id: str, reac: Reaction): """Adds CVTerm to a reaction Args: - entry (str): Id to add as annotation - db_id (str): Database to which entry belongs. Must be in reaction_db_dict.keys(). - reac (Reaction): Reaction to add CVTerm to """ if db_id == 'HMDB' or db_id == 'Human Metabolome Database': if entry[:4] == 'HMDB': entry = entry[4:] cv = CVTerm() cv.setQualifierType(BIOLOGICAL_QUALIFIER) cv.setBiologicalQualifierType(BQB_IS) cv.addResource( 'https://identifiers.org/' + reaction_db_dict[db_id] + entry) reac.addCVTerm(cv) def add_cv_term_genes(entry: str, db_id: str, gene: GeneProduct, lab_strain: bool=False): """Adds CVTerm to a gene Args: - entry (str): Id to add as annotation - db_id (str): Database to which entry belongs. Must be in gene_db_dict.keys(). - gene (GeneProduct): Gene to add CVTerm to - lab_strain (bool, optional): For locally sequenced strains the qualifiers are always HOMOLOG_TO. Defaults to False. 
""" cv = CVTerm() cv.setQualifierType(BIOLOGICAL_QUALIFIER) if lab_strain: cv.setBiologicalQualifierType(BQB_IS_HOMOLOG_TO) else: cv.setBiologicalQualifierType(BQB_IS) cv.addResource('https://identifiers.org/' + gene_db_dict[db_id] + entry) gene.addCVTerm(cv) def add_cv_term_pathways(entry: str, db_id: str, path: Group): """Add CVTerm to a groups pathway Args: - entry (str): Id to add as annotation - db_id (str): Database to which entry belongs. Must be in pathway_db_dict.keys(). - path (Group): Pathway to add CVTerm to """ cv = CVTerm() cv.setQualifierType(BIOLOGICAL_QUALIFIER) cv.setBiologicalQualifierType(BQB_IS) cv.addResource('https://identifiers.org/' + pathway_db_dict[db_id] + entry) path.addCVTerm(cv) def add_cv_term_pathways_to_entity(entry: str, db_id: str, reac: Reaction): """Add CVTerm to a reaction as OCCURS IN pathway Args: - entry (str): Id to add as annotation - db_id (str): Database to which entry belongss - reac (Reaction): Reaction to add CVTerm to """ cv = CVTerm() cv.setQualifierType(BIOLOGICAL_QUALIFIER) cv.setBiologicalQualifierType(BQB_OCCURS_IN) cv.addResource('https://identifiers.org/' + pathway_db_dict[db_id] + entry) reac.addCVTerm(cv) def get_id_from_cv_term(entity: SBase, db_id: str) -> list[str]: """Extract Id for a specific database from CVTerm Args: - entity (SBase): Species, Reaction, Gene, Pathway - db_id (str): Database of interest Returns: list[str]: Ids of entity belonging to db_id """ num_cvs = entity.getNumCVTerms() all_ids = [] for i in range(0, num_cvs): ann_string = entity.getCVTerm(i) num_res = ann_string.getNumResources() ids = [ann_string.getResourceURI(r).split( '/')[-1] for r in range(0, num_res) if str(db_id) in ann_string.getResourceURI(r)] ids = [id_string.split(':')[-1] for id_string in ids if ':' in id_string] all_ids.extend(ids) return all_ids def generate_cvterm(qt, b_m_qt) -> CVTerm: """Generates a CVTerm with the provided qualifier & biological or model qualifier types Args: - qt (libSBML qualifier type): 
BIOLOGICAL_QUALIFIER or MODEL_QUALIFIER - b_m_qt (libSBML qualifier): BQM_IS, BQM_IS_HOMOLOG_TO, etc. Returns: CVTerm: With provided qualifier & biological or model qualifier types """ cvterm = CVTerm() cvterm.setQualifierType(qt) if qt == BIOLOGICAL_QUALIFIER: cvterm.setBiologicalQualifierType(b_m_qt) else: cvterm.setModelQualifierType(b_m_qt) return cvterm def print_cvterm(cvterm: CVTerm): """Debug function: Prints the URIs contained in the provided CVTerm along with the provided qualifier & biological/model qualifier types Args: cvterm (CVTerm): A libSBML CVTerm """ if cvterm == None: logging.info('CVTerm currently empty!') else: current_b_m_qt = 0 current_qt = cvterm.getQualifierType() if current_qt == BIOLOGICAL_QUALIFIER: current_b_m_qt = cvterm.getBiologicalQualifierType() elif current_qt == MODEL_QUALIFIER: current_b_m_qt = cvterm.getModelQualifierType() if cvterm.getNumResources() == 0: logging.info('No URIs present.') else: logging.info(f'Current CVTerm contains: {cvterm.getResourceURI(0)}') for i in range(1, cvterm.getNumResources()): logging.info(f' {cvterm.getResourceURI(i)}') logging.info(f'Current CVTerm has QualifierType {current_qt} and Biological/ModelQualifierType {current_b_m_qt}.')
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/cvterms.py
0.635562
0.24659
cvterms.py
pypi
# Get all possible genes by filtering .gff according to 'bio_type=protein_coding' & 'product=hypothetical protein' # Compare the list of genes with the ones already in the model & add all missing genes # Before adding to model check if for all genes that are missing for IMITSC147 identifiers exist # -> Create tables mapping locus tag to old ID, locus tag to new ID & merge # -> Specify user input locus_tag start from NCBI PGAP from libsbml import Model as libModel import math import ast import numpy as np import pandas as pd import libchebipy import requests from refinegems.entities import get_model_genes, get_model_reacs_or_metabs, compare_gene_lists from refinegems.analysis_db import get_bigg2other_db, compare_bigg_model, add_stoichiometric_values_to_reacs, BIGG_METABOLITES_URL from refinegems.io import parse_fasta_headers __author__ = "Gwendolyn O. Gusak and Dr. Reihaneh Mostolizadeh" # Global variable for statistics statistics_dict = { 'Missing entity': ['Protein', 'Metabolite', 'Reaction'], 'Total': [np.NaN, np.NaN, np.NaN], 'Have BiGG ID': [np.NaN, np.NaN, np.NaN], 'Can be added': [np.NaN, np.NaN, np.NaN], 'Notes': [ 'Amount derived from locus tag comparison/Amount remaining for the reactions', 'Only metabolites that are required for the missing reactions', 'Only reactions that belong to the missing genes/proteins' ] } statistics_df = pd.DataFrame(statistics_dict).set_index('Missing entity') # Locus tags in GenBank GFF file == BioCyc Accession-2 == Old locus tags in RefSeq GFF file # Locus tags in RefSeq GFF file == BioCyc Accession-1 # Label in model == Locus tag from GenBank GFF file == BioCyc Accession-2 def get_biocyc_genes2reactions(inpath: str) -> pd.DataFrame: """Parses TSV file from BioCyc to retrieve 'Accession-2' & the corresponding 'Reactions of gene' Args: - inpath (str): Path to file from BioCyc containing the 'Accession-2' to 'Reactions of gene' mapping Returns: pd.DataFrame: Table containing only rows where a 'Reaction of gene' exists """ 
biocyc_genes = pd.read_table(inpath, usecols=['Accession-2', 'Reactions of gene'], dtype=str) biocyc_genes.rename(columns={'Accession-2': 'locus_tag', 'Reactions of gene': 'Reaction'}, inplace=True) biocyc_genes.replace('', np.nan, inplace=True) biocyc_genes.dropna(inplace=True) return biocyc_genes def get_missing_genes2reactions(model_libsbml: libModel, inpath: str) -> pd.DataFrame: """Retrieves the missing genes and reactions from the BioCyc table according to the 'Accession-2' identifiers Args: - model_libsbml (libModel): Model read in with libSBML - inpath (str): Path to file from BioCyc containing the Accession-2 to Reactions of gene mapping Returns: pd.DataFrame: Table containing only 'Accession-2' & 'Reactions' for the missing genes """ gps_in_model = get_model_genes(model_libsbml) biocyc_genes = get_biocyc_genes2reactions(inpath) missing_biocyc_genes = compare_gene_lists(gps_in_model, biocyc_genes, False) missing_biocyc_reactions = pd.DataFrame( missing_biocyc_genes['Reaction'].str.split('//').tolist(), index=missing_biocyc_genes['locus_tag'] ).stack() missing_biocyc_reactions = missing_biocyc_reactions.reset_index([0, 'locus_tag']) missing_biocyc_reactions.columns = ['locus_tag', 'Reaction'] missing_biocyc_reactions['Reaction'] = missing_biocyc_reactions['Reaction'].str.strip() # Get amount of missing genes from BioCyc for statistics biocyc_genes = missing_biocyc_genes['locus_tag'].unique().tolist() statistics_df.loc['Protein', 'Total'] = len(biocyc_genes) return missing_biocyc_reactions def get_biocyc_reactions(inpath: str) -> pd.DataFrame: """Parses TSV file from BioCyc to retrieve 'Reaction', 'Reactants of reaction', 'Products of reaction', 'EC-Number', 'Reaction-Direction' & 'Spontaneous?' Args: - inpath (str): Path to file from BioCyc containing the following columns: 'Reaction' 'Reactants of reaction' 'Products of reaction' 'EC-Number' 'Reaction-Direction' 'Spontaneous?' 
Returns: pd.DataFrame: Table containing all biocyc reactions from provided file """ biocyc_reacs = pd.read_table(inpath, usecols= ['Reaction', 'Reactants of reaction', 'Products of reaction', 'EC-Number', 'Reaction-Direction', 'Spontaneous?'], dtype=str ) biocyc_reacs.rename(columns= {'Reactants of reaction': 'Reactants', 'Products of reaction': 'Products', 'EC-Number': 'EC'}, inplace=True ) biocyc_reacs.replace('', np.nan, inplace=True) biocyc_reacs['Spontaneous?'] = biocyc_reacs['Spontaneous?'].fillna('F') biocyc_reacs.dropna(subset=['Reaction', 'Reactants', 'Products', 'EC'], inplace=True) return biocyc_reacs def extract_metabolites_from_reactions(missing_reactions: pd.DataFrame) -> tuple[pd.DataFrame, pd.DataFrame]: """Extracts a set of all reactants & products from the missing reactions Args: - missing_reactions (pd.DataFrame): Table containing all missing reactions found through the missing genes Returns: tuple: Two tables (1) & (2) (1) pd.DataFrame: Table with the column Compound containing all compounds required for the missing BioCyc reactions (2) pd.DataFrame: Table with the column bigg_id containing all compounds required for the missing BiGG reactions """ # Get all BioCyc metabolites necessary for the BioCyc reactions biocyc_reactants = [r for row in missing_reactions['Reactants'] for r in row] biocyc_products = [p for row in missing_reactions['Products'] for p in row] biocyc_metabolites = list(set([*biocyc_reactants, *biocyc_products])) # Get all BiGG metabolites necessary for the BiGG reactions bigg_reactions = [ast.literal_eval(str(reac_dict)) for reac_dict in missing_reactions['bigg_reaction']] bigg_reactants = [r for reac_dict in bigg_reactions for r in reac_dict.get('reactants')] bigg_products = [p for reac_dict in bigg_reactions for p in reac_dict.get('products')] bigg_metabolites = list(set([*bigg_reactants, *bigg_products])) return (pd.DataFrame(biocyc_metabolites, columns=['Compound']), pd.DataFrame(bigg_metabolites, columns=['bigg_id'])) def 
get_missing_reactions( model_libsbml: libModel, genes2reaction: pd.DataFrame, inpath: str ) -> tuple[tuple[pd.DataFrame, pd.DataFrame], pd.DataFrame]: """Subsets the BioCyc table with the following columns: 'Reaction' 'Reactants of reaction' 'Products of reaction' 'EC-Number' 'Reaction-Direction' 'Spontaneous?' to obtain the missing reactions with all the corresponding data & Adds the according BiGG Reaction identifiers Args: - model_libsbml (libModel): Model read in with libSBML - genes2reaction (pd.DataFrame): Table containing only 'Accession-2' & 'Reactions' for the missing genes - inpath (str): Path to file from BioCyc containing the following columns: 'Reaction' 'Reactants of reaction' 'Products of reaction' 'EC-Number' 'Reaction-Direction' 'Spontaneous?' Returns: tuple: Tuple (1) & table (2) (1) tuple: Two tables (1) & (2) (1) pd.DataFrame: Table containing only the metabolites corresponding to the missing BioCyc reactions (2) pd.DataFrame: Table containing only the metabolites corresponding to the missing BiGG reactions (2) pd.DataFrame: Table containing the missing reactions with the corresponding data """ model_reacs = get_model_reacs_or_metabs(model_libsbml) biocyc_reacs = get_biocyc_reactions(inpath) # Get missing reactions from missing genes missing_reactions = genes2reaction.merge(biocyc_reacs, on='Reaction') # Turn entries with '//' into lists missing_reactions['Reactants'] = missing_reactions['Reactants'].str.split('\s*//\s*') missing_reactions['Products'] = missing_reactions['Products'].str.split('\s*//\s*') missing_reactions['EC'] = missing_reactions['EC'].str.split('\s*//\s*') # Turn locus_tag column into lists of locus tags per reaction locus_tags_as_list = missing_reactions.groupby('Reaction')['locus_tag'].apply(list).reset_index(name='locus_tag') missing_reactions.drop('locus_tag', axis=1, inplace=True) missing_reactions = locus_tags_as_list.merge(missing_reactions, on='Reaction') statistics_df.loc['Reaction', 'Total'] = 
len(missing_reactions['Reaction'].unique().tolist()) # Get BiGG BioCyc bigg2biocyc_reacs = get_bigg2other_db('BioCyc') # Subset missing_reactions with BiGG BioCyc missing_reactions.rename(columns={'Reaction': 'BioCyc'}, inplace=True) missing_reactions = bigg2biocyc_reacs.merge(missing_reactions, on='BioCyc') # Get amount of missing reactions that have a BiGG ID statistics_df.loc['Reaction', 'Have BiGG ID'] = len(missing_reactions['BioCyc'].unique().tolist()) # Subset missing_reactions with model_reacs missing_reactions = compare_bigg_model(missing_reactions, model_reacs) # Get amount of missing reactions that are not in the model statistics_df.loc['Reaction', 'Can be added'] = len(missing_reactions['bigg_id'].unique().tolist()) # Add reactants & products dictionary with stoichiometric values to the reactions table missing_reactions = add_stoichiometric_values_to_reacs(missing_reactions) # Get all metabolites for the missing reactions biocyc_metabs_from_reacs, bigg_metabs_from_reacs = extract_metabolites_from_reactions(missing_reactions) return (biocyc_metabs_from_reacs, bigg_metabs_from_reacs), missing_reactions def get_biocyc_metabolites(inpath: str) -> pd.DataFrame: """Parses TSV file from BioCyc to retrieve 'Compound (Object ID)' 'Chemical Formula' 'InChI-Key' 'ChEBI' Args: - inpath (str): Path to file from BioCyc containing the following columns: 'Compound' 'Object ID' 'Chemical Formula' 'InChI-Key' 'ChEBI' Returns: pd.DataFrame: Table containing all biocyc metabolites from provided file """ biocyc_metabs = pd.read_table(inpath, usecols=['Object ID', 'Chemical Formula', 'InChI-Key', 'ChEBI'], dtype=str) biocyc_metabs.rename(columns={'Object ID': 'Compound'}, inplace=True) biocyc_metabs.replace('', np.nan, inplace=True) biocyc_metabs.dropna(inplace=True) return biocyc_metabs def get_missing_metabolites( model_libsbml: libModel, metabs_from_reacs: tuple[pd.DataFrame, pd.DataFrame], inpath: str ) -> pd.DataFrame: """Subsets the BioCyc table with the following 
columns: 'Compound' 'Chemical Formula' 'InChI-Key' 'ChEBI' to obtain the missing metabolites with all the corresponding data & Adds the according BiGG Compound identifiers Args: - model_libsml (libModel): Model read in with libSBML - metabs_from_reacs (tuple): Two tables containing only the metabolites corresponding to the missing reactions for either BioCyc (1) or BiGG (2) - inpath (str): Path to file from BioCyc containing the following columns: 'Compound' 'Chemical Formula' 'InChI-Key' Returns: tuple: Two tables (1) & (2) (1): Table containing the metabolites corresponding to the missing reactions without BiGG IDs (2): Table containing the metabolites corresponding to the missing reactions with BiGG IDs """ model_metabs = get_model_reacs_or_metabs(model_libsbml, True) biocyc_metabs = get_biocyc_metabolites(inpath) biocyc_metabs.rename(columns={'Compound': 'BioCyc'}, inplace=True) biocyc_metabs_from_reacs, bigg_metabs_from_reacs = metabs_from_reacs # Get amount of missing BioCyc metabolites for all missing reactions statistics_df.loc['Metabolite', 'Total'] = len(biocyc_metabs_from_reacs['Compound'].unique().tolist()) # Get BiGG BioCyc bigg2biocyc_metabs = get_bigg2other_db('BioCyc', True) # Subset biocyc_metabs with BiGG BioCyc -> To get only metabolites with BiGG IDs missing_metabolites = bigg2biocyc_metabs.merge(biocyc_metabs, on='BioCyc') # missing_metabolites # Filter for all required BiGG metabolites for the missing BiGG reactions missing_metabolites = bigg_metabs_from_reacs.merge(missing_metabolites, how='left', on='bigg_id') # Get amount of missing BioCyc metabolites that have a BiGG ID missing_biocyc_metabs = missing_metabolites.dropna(subset=['BioCyc']) statistics_df.loc['Metabolite', 'Have BiGG ID'] = len(missing_biocyc_metabs['BioCyc'].unique().tolist()) # Subset missing_metabolites with model_metabs missing_metabolites = compare_bigg_model(missing_metabolites, model_metabs, True) # Get amount of missing metabolites that can & should be added to the 
model statistics_df.loc['Metabolite', 'Can be added'] = len(missing_metabolites['bigg_id'].unique().tolist()) return missing_metabolites def get_missing_genes(missing_reactions: pd.DataFrame, fasta: str) -> tuple[pd.DataFrame, pd.DataFrame]: """Retrieves all missing genes that belong to the obtained missing reactions Args: - missing_reactions (pd.DataFrame): Table containing all obtained missing reactions - fasta (str): Path to a FASTA file where the headers contain the information protein_id and locus_tag Returns: tuple: Two tables (1) & (2) (1) pd.DataFrame: Table with the columns locus_tag, Protein_id & Model_id (The model_id is similar to how CarveMe generates the GeneProduct ID.) (2) pd.DataFrame: The input pandas dataframe for the reactions where column 'locus_tag' is exchanged by 'gene_product' """ # Get locus tags from the missing reactions locus_tags = list(set([lt for row in missing_reactions['locus_tag'] for lt in row])) locus_tags_df = pd.DataFrame(pd.Series(locus_tags), columns=['locus_tag']) # Get protein and GeneProduct ID for the model from FASTA file ids_df = parse_fasta_headers(fasta, id_for_model=True) # Get the complete dataframe with the protein & model id missing_genes = locus_tags_df.merge(ids_df, on='locus_tag') statistics_df.loc['Protein', 'Can be added'] = len(missing_genes['locus_tag'].unique().tolist()) # Replace the locus tags in the reaction dataframe with the gene model ID def transform_lt_into_gp_model_id(locus_tag_list: list[str]) -> list[str]: return [missing_genes.loc[lt, 'model_id'] for lt in locus_tag_list] missing_genes.set_index('locus_tag', inplace=True) missing_reactions['gene_product'] = missing_reactions['locus_tag'].map(transform_lt_into_gp_model_id) missing_genes.reset_index(inplace=True) missing_reactions.drop('locus_tag', axis=1, inplace=True) return missing_genes, missing_reactions def add_charges_chemical_formulae_to_metabs(missing_metabs: pd.DataFrame) -> pd.DataFrame: """Adds charges & chemical formulae from 
CHEBI/BiGG to the provided dataframe Args: - missing_metabs (pd.DataFrame): Table containing metabolites & the respective CHEBI & BiGG IDs Returns: pd.DataFrame: Input table extended with the charges & chemical formulas obtained from CHEBI/BiGG """ # Finds the charges through the ChEBI/BiGG API, defaults to: 0 def find_charge(row: pd.Series) -> int: chebi_id = str(int(row.get('ChEBI'))) if not math.isnan(float(row.get('ChEBI'))) else None bigg_id = str(row.get('bigg_id')) charge = None if chebi_id: # Get charge from ChEBI (Returns always a charge) chebi_entity = libchebipy.ChebiEntity('CHEBI:' + chebi_id) return chebi_entity.get_charge() elif bigg_id != 'nan': # Get charge from BiGG if no ChEBI ID available try: charge = requests.get(BIGG_METABOLITES_URL + bigg_id[:-2]).json()['charges'][0] # Take first charge except ValueError: pass # If no charge was found, charge=0 return charge if charge else 0 # Finds the chemical formula through the ChEBI/BiGG API, defaults to: 'No formula' def find_formula(row: pd.Series) -> str: chebi_id = str(int(row.get('ChEBI'))) if not math.isnan(float(row.get('ChEBI'))) else None bigg_id, chem_form = str(row.get('bigg_id')), str(row.get('Chemical Formula')) chem_formula = None if chebi_id: # Get formula from ChEBI chebi_entity = libchebipy.ChebiEntity('CHEBI:' + chebi_id) chem_formula = chebi_entity.get_formula() if not chem_formula: # If no formula was found with ChEBI/No ChEBI ID available if bigg_id != 'nan': # Get formula from BiGG try: chem_formula = requests.get(BIGG_METABOLITES_URL + bigg_id[:-2]).json()['formulae'][0] # Take first formula except ValueError: pass if not chem_formula: # If no formula was found with BiGG ID # Get formula already existing in dataframe or set to 'No formula' chem_formula = chem_form if chem_form != 'nan' else 'No formula' return chem_formula missing_metabs['charge'] = missing_metabs.apply(find_charge, axis=1) missing_metabs['New Chemical Formula'] = missing_metabs.apply(find_formula, axis=1) 
missing_metabs['Chemical Formula'] = missing_metabs['New Chemical Formula'] missing_metabs.drop('New Chemical Formula', axis=1, inplace=True) return missing_metabs # Inspired by Dr. Reihaneh Mostolizadeh's function to add BioCyc reactions to a model def replace_reaction_direction_with_fluxes(missing_reacs: pd.DataFrame) -> pd.DataFrame: """Extracts the flux lower & upper bounds for each reaction through the entries in column 'Reaction-Direction' Args: - missing_reacs (pd.DataFrame): Table containing reactions & the respective Reaction-Directions Returns: pd.DataFrame: Input table extended with the fluxes lower & upper bounds obtained from the Reaction-Directions """ def get_fluxes(row: pd.Series) -> dict[str: str]: direction = row['Reaction-Direction'] fluxes = {} if type(direction) == float: # Use default bounds as described in readthedocs from COBRApy fluxes['lower_bound'] = 'cobra_0_bound' fluxes['upper_bound'] = 'cobra_default_ub' elif 'RIGHT-TO-LEFT' in direction: fluxes['lower_bound'] = 'cobra_default_lb' fluxes['upper_bound'] = 'cobra_0_bound' elif 'LEFT-TO-RIGHT' in direction: fluxes['lower_bound'] = 'cobra_0_bound' fluxes['upper_bound'] = 'cobra_default_ub' elif 'REVERSIBLE' in direction: fluxes['lower_bound'] = 'cobra_default_lb' fluxes['upper_bound'] = 'cobra_default_ub' return str(fluxes) missing_reacs['fluxes'] = missing_reacs.apply(get_fluxes, axis=1) missing_reacs.drop('Reaction-Direction', axis=1, inplace=True) return missing_reacs def biocyc_gene_comp( model_libsbml: libModel, biocyc_file_paths: list[str] ) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]: """Main function to retrieve the tables for missing genes, metabolites and reactions from BioCyc Args: - model_libsbml (libModel): libSBML Model object - biocyc_file_paths (list): List of the files required for the BioCyc analysis Returns: tuple: Five tables (1) - (5) (1) pd.DataFrame: Table containing the statistics of the BioCyc gapfill analysis (2) pd.DataFrame: Table 
containing the missing genes that belong to the missing reactions (3) pd.DataFrame: Table containing the missing metabolites with BiGG IDs belonging to the missing reactions (4) pd.DataFrame: Table containing the missing metabolites without BiGG IDs belonging to the missing reactions (5) pd.DataFrame: Table containing the missing reactions """ # Extract missing reactions from all missing genes genes2reactions = get_missing_genes2reactions(model_libsbml, biocyc_file_paths[0]) metabs_from_reacs, missing_reactions_df = get_missing_reactions(model_libsbml, genes2reactions, biocyc_file_paths[1]) missing_reactions_df = replace_reaction_direction_with_fluxes(missing_reactions_df) # Extract missing metabolites that belong to the missing reactions missing_metabolites_df= get_missing_metabolites(model_libsbml, metabs_from_reacs, biocyc_file_paths[2]) missing_metabolites_df = add_charges_chemical_formulae_to_metabs(missing_metabolites_df) # Extract missing genes that belong to the missing reactions missing_genes_df, missing_reactions_df = get_missing_genes(missing_reactions_df, biocyc_file_paths[3]) # Remove index from statistics_df statistics_df.reset_index(inplace=True) return (statistics_df, missing_genes_df, missing_metabolites_df, missing_reactions_df)
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/analysis_biocyc.py
0.732113
0.451871
analysis_biocyc.py
pypi
import re import requests import sqlite3 import pandas as pd import numpy as np from refinegems.io import load_a_table_from_database from refinegems.databases import PATH_TO_DB from typing import Literal from tqdm import tqdm from ratelimit import limits, sleep_and_retry __author__ = "Famke Baeuerle and Gwendolyn O. Gusak" ALL_BIGG_COMPARTMENTS_ONE_LETTER = ('c', 'e', 'p', 'm', 'x', 'r', 'v', 'n', 'g', 'u', 'l', 'h', 'f', 's', 'i', 'w', 'y') ALL_BIGG_COMPARTMENTS_TWO_LETTER = ('im', 'cx', 'um', 'cm', 'mm') BIGG_REACTIONS_URL = 'http://bigg.ucsd.edu/api/v2/universal/reactions/' BIGG_METABOLITES_URL = 'http://bigg.ucsd.edu/api/v2/universal/metabolites/' COMPARTMENTS = ('c', 'e', 'p') def get_search_regex(other_db: Literal['KEGG', 'BioCyc', 'SEED'], metabolites: bool) -> str: """Retrieves the search regex for BioCyc/KEGG/SEED to be used in the BiGG mapping Args: - other_db (Literal): Specifies if the search regex should be for BioCyc/KEGG/SEED - metabolites (bool): Is required if one wants to search for KEGG/SEED Compound IDs in the bigg_models_metabolites.txt Returns: str: Search regex """ if other_db == 'BioCyc': return 'BioCyc: http://identifiers.org/biocyc/META:(.*?);' elif other_db == 'KEGG' or other_db == 'SEED': if metabolites: return f'{other_db} Compound: http://identifiers.org/{other_db.lower()}.compound/(.*?);' else: return f'{other_db} Reaction: http://identifiers.org/{other_db.lower()}.reaction/(.*?);' def compare_ids(id1: str, id2: str) -> bool: """Compares two strings/IDs & Returns True if one string matches most of the other Args: - id1 (str): ID 1 - id2 (str): ID 2 Returns: bool: Indicates if most of one string contained in the other """ id1_split, id2_split, id1_single_comp, id2_single_comp, id1_comp, id2_comp = None, None, None, None, None, None if '_' in id1: id1_split = re.split('_([a-zA-Z]|[0-9])$', id1)[0] if '_' in id2: id2_split = re.split('_([a-zA-Z]|[0-9])$', id2)[0] if id1.endswith(ALL_BIGG_COMPARTMENTS_ONE_LETTER): id1_single_comp = 
id1[:-1] if id2.endswith(ALL_BIGG_COMPARTMENTS_ONE_LETTER): id2_single_comp = id2[:-1] if id1.endswith(ALL_BIGG_COMPARTMENTS_TWO_LETTER): id1_comp = id1[:-2] if id2.endswith(ALL_BIGG_COMPARTMENTS_TWO_LETTER): id2_comp = id2[:-2] similar_ids = False if id1 == id2: similar_ids = True # Both IDs are same elif id1_split and id2_split and (id1_split == id2_split): similar_ids = True # Both IDs are same but from different compartments elif id2_split and (id1 == id2_split): similar_ids = True # - "" - elif id1_split and (id1_split == id2): similar_ids = True # - "" - elif id1_single_comp and id2_single_comp and (id1_single_comp == id2_single_comp): similar_ids = True elif id2_single_comp and (id1 == id2_single_comp): similar_ids = True elif id1_single_comp and (id1_single_comp == id2_single_comp): similar_ids = True elif id1_comp and id2_comp and (id1_comp == id2_comp): similar_ids = True elif id2_comp and (id1 == id2_comp): similar_ids = True elif id1_comp and (id1_comp == id2): similar_ids = True elif id1_split and id2_single_comp and (id1_split == id2_single_comp): similar_ids = True elif id2_split and id1_single_comp and (id1_single_comp == id2_split): similar_ids = True elif id1_split and id2_comp and (id1_split == id2_comp): similar_ids = True elif id2_split and id1_comp and (id1_comp == id2_split): similar_ids = True elif id1_comp and id2_single_comp and (id1_comp == id2_single_comp): similar_ids = True elif id2_comp and id1_single_comp and (id1_single_comp == id2_comp): similar_ids = True else: similar_ids = False return similar_ids def keep_only_reactions_in_certain_compartments(complete_df: pd.DataFrame) -> pd.DataFrame: """Extracts all possible BiGG ID variations from database for a BiGG reaction ID, gets the metabolite compartments & returns table containing only reactions which happen in one of the provided compartments Args: - complete_df (pd.DataFrame): Table containing at least the columns 'bigg_id' & 'KEGG'/'BioCyc' Returns: pd.DataFrame: Table containing 
reactions & their compartments """ tqdm.pandas() db = 'KEGG' if 'KEGG' in complete_df.columns else 'BioCyc' complete_df = complete_df[['bigg_id', db]] # Remove all unnecessary columns # (1) Find all occurrencs of a BiGG reaction ID in bigg_reactions table in database def get_all_similar_bigg_ids(bigg_id_in: str) -> list[str]: if '_' in bigg_id_in: bigg_id = re.split('_([a-zA-Z]|[0-9])$', bigg_id_in)[0] elif bigg_id_in.endswith(ALL_BIGG_COMPARTMENTS_ONE_LETTER): bigg_id = bigg_id_in[:-1] elif bigg_id_in.endswith(ALL_BIGG_COMPARTMENTS_TWO_LETTER): bigg_id = bigg_id_in[:-2] else: bigg_id = bigg_id_in query = f"SELECT bigg_id, INSTR(bigg_id, '{bigg_id}') bi FROM bigg_reactions WHERE bi > 0" result = con.execute(query).fetchall() result = [result_tuple[0] for result_tuple in result] if result else [bigg_id_in] result = [res for res in result if compare_ids(bigg_id, res)] return result # (2) Use list of all BiGG IDs obtained from database table bigg_reactions to get 'metabolites' @sleep_and_retry @limits(calls=10, period=1) def get_reaction_compartment(bigg_id: str) -> str: metabs_from_reac = requests.get(BIGG_REACTIONS_URL + bigg_id, allow_redirects=False).json()['metabolites'] comps = [comp_dict.get('compartment_bigg_id') for comp_dict in metabs_from_reac] # Get all compartments for reaction contained_in_compartments = [(comp in COMPARTMENTS) for comp in comps] # Get True for correct compartment if not all(contained_in_compartments): # At least one compartment not correct return np.nan else: # All compartments correct if len(set(comps)) == 1: # Set of found compartments of reaction = 1: Reaction happens in one compartment return comps[0] else: # Not so important but do not remove reaction as reaction in correct compartments return 'exchange' # Probably exchange reaction # Connect to database & get similar IDs (1) print('Getting all similar IDs...') con = sqlite3.connect(PATH_TO_DB) # Open connection to database complete_df.loc[:,'bigg_id_list'] = complete_df.loc[:, 
'bigg_id'].progress_map(get_all_similar_bigg_ids) #complete_df.progress_apply(get_all_similar_bigg_ids, axis=1) con.close() # Close connection to database # Adjust table to contain one BiGG ID per row from bigg_id_list (1) complete_df.loc[:, 'id_group'] = complete_df['bigg_id'].ne(complete_df['bigg_id'].shift()).cumsum() # Group similar IDs complete_df.drop(labels='bigg_id', axis=1, inplace=True) # Drop 'bigg_id' as no longer required complete_df = complete_df.explode('bigg_id_list', ignore_index=True) # Expand 'bigg_id_list' column complete_df.rename(columns={'bigg_id_list': 'bigg_id'}, inplace=True) # Rename 'bigg_id_list' to 'bigg_id' # (2) Get all compartments for each reaction from BiGG database API print(f'Getting all IDs with correct compartment {COMPARTMENTS}...') complete_df.loc[:, 'compartment'] = complete_df.loc[:, 'bigg_id'].progress_map(get_reaction_compartment) #complete_df.progress_apply(get_reaction_compartment, axis=1) # (2) # (3) Remove reactions with compartment = NaN complete_df.dropna(subset=['compartment'], inplace=True) return complete_df # Function originally from refineGEMs.genecomp/refineGEMs.KEGG_analysis --- Modified def get_bigg2other_db(other_db: Literal['KEGG', 'BioCyc', 'SEED'], metabolites: bool=False) -> pd.DataFrame: """Uses list of BiGG reactions/metabolites to get a mapping from BiGG to KEGG/BioCyc Id Args: - other_db (Literal): Set to 'KEGG'/'BioCyc'/'SEED' to map KEGG/BioCyc/SEED IDs to BiGG IDs - metabolites (bool): Set to True to map other_db IDs to BiGG IDs for metabolites Returns: pd.DataFrame: Table containing BiGG Ids with corresponding KEGG/BioCyc/SEED Ids """ # Get only rows with BioCyc/KEGG entries db_table_name = 'bigg_metabolites' if metabolites else 'bigg_reactions' reaction_or_compound = 'Compound' if metabolites else 'Reaction' other_db_query = other_db if other_db == 'BioCyc' else ' '.join([other_db, reaction_or_compound]) bigg_db_query = f"SELECT *, INSTR(database_links, '{other_db_query}:') o_db FROM 
{db_table_name} WHERE o_db > 0" bigg_db_df = load_a_table_from_database(bigg_db_query) db_search_regex = get_search_regex(other_db, metabolites) def find_other_db(database_links: str): m = re.findall( db_search_regex, str(database_links)) if m: return m else: return None bigg_db_df[other_db] = bigg_db_df.apply( lambda row: find_other_db(row['database_links']), axis=1) bigg_db_df = bigg_db_df.explode(other_db, ignore_index=True) if not metabolites: bigg_db_df = keep_only_reactions_in_certain_compartments(bigg_db_df) bigg_df = bigg_db_df[['bigg_id', other_db]] if metabolites else bigg_db_df[['bigg_id', other_db, 'compartment', 'id_group']] return bigg_df # Function originally from refineGEMs.genecomp/refineGEMs.KEGG_analysis --- Modified def compare_bigg_model(complete_df: pd.DataFrame, model_entities: pd.DataFrame, metabolites: bool=False) -> pd.DataFrame: """Compares missing entities obtained through genes extracted via KEGG/BioCyc to entities in the model Needed to back check previous comparisons. 
Args: - complete_df (pd.DataFrame): Table that contains KEGG/BioCyc Id, BiGG Id & more - model_entities (pd.DataFrame): BiGG Ids of entities in the model - metabolites (bool): True if names of metabolites should be added, otherwise false Returns: pd.DataFrame: Table containing entities present in KEGG/BioCyc but not in the model """ db = 'KEGG' if 'KEGG' in complete_df.columns else 'BioCyc' # Find out which database was used # Get only IDs that are not in model mapp = complete_df.set_index('bigg_id') entities = model_entities.set_index('bigg_id') entities_missing_in_model = mapp[~mapp.index.isin( entities.index)].reset_index() db_ids = entities_missing_in_model.groupby('bigg_id')[db].agg(set) # Get a set of all BioCyc/KEGG IDs belonging to one BiGG ID # Add set of BioCyc/KEGG IDs belonging to one BiGG ID to the dataframe entities_missing_in_model.set_index('bigg_id', inplace=True) entities_missing_in_model.loc[:, db] = db_ids entities_missing_in_model.reset_index(inplace=True) if 'id_group' in entities_missing_in_model.columns: # Remove reaction ID duplicates but keep all related BiGG & BioCyc/KEGG IDs in a list aliases = entities_missing_in_model.groupby(['compartment', 'id_group'])['bigg_id'].agg(set) # Get a set of the 'duplicated' BiGG reaction IDs -> aliases entities_missing_in_model.drop_duplicates(['compartment', 'id_group'], inplace=True, ignore_index=True) # Drop duplicates where compartments & id_group same # Add set of BiGG ID aliases to the dataframe entities_missing_in_model.set_index(['compartment', 'id_group'], inplace=True) entities_missing_in_model.loc[:, 'bigg_aliases'] = aliases entities_missing_in_model.reset_index(inplace=True) entities_missing_in_model.drop(labels='id_group', axis=1, inplace=True) # id_group is not longer necessary entities_missing_in_model.drop_duplicates(subset='bigg_id', inplace=True, ignore_index=True) # Remove BiGG ID duplicates # Add name column to dataframe def get_name_from_bigg(bigg_id: str): bigg_db = 
'bigg_metabolites' if metabolites else 'bigg_reactions' query = f"SELECT name FROM {bigg_db} WHERE bigg_id=\'{bigg_id}\'" name_from_bigg = con.execute(query).fetchone()[0] return name_from_bigg con = sqlite3.connect(PATH_TO_DB) # Open connection to database entities_missing_in_model['name'] = entities_missing_in_model['bigg_id'].map(get_name_from_bigg) con.close() # Add compartment ID to all BiGG metabolites if metabolites: def get_compartment_from_id(bigg_id: str): compartment = bigg_id[-1] return compartment if compartment in COMPARTMENTS else np.nan # To filter the incorrect compartments out entities_missing_in_model['compartment'] = entities_missing_in_model.apply( lambda row: get_compartment_from_id(row['bigg_id']), axis=1) entities_missing_in_model.dropna(subset=['compartment'], inplace=True) # Drop all BiGG metabolite IDs which have no valid compartment return entities_missing_in_model def add_stoichiometric_values_to_reacs(missing_reacs: pd.DataFrame) -> pd.DataFrame: """Adds for each reaction a dictionary containing the reactants & products as dictionaries with the BiGG Metabolite ID as key & the respective absolute stoichiometric value as value Args: - missing_reacs (pd.DataFrame): Table containing missing reactions (Only requires a column containing BiGG IDs) Returns: pd.DataFrame: Table where for each BiGG reaction ID a dictionary containing reactants & products exists """ def get_reactants_and_products_dicts(reaction_id: str) -> list[dict]: reactants = {} products = {} metabs_from_reac = requests.get(BIGG_REACTIONS_URL + reaction_id).json()['metabolites'] for compound_dict in metabs_from_reac: complete_bigg_id = None if compound_dict.get('compartment_bigg_id'): complete_bigg_id = f"{compound_dict.get('bigg_id')}_{compound_dict.get('compartment_bigg_id')}" else: complete_bigg_id = compound_dict.get('bigg_id') if compound_dict.get('stoichiometry') < 0: reactants[complete_bigg_id] = abs(compound_dict.get('stoichiometry')) elif 
compound_dict.get('stoichiometry') > 0: products[complete_bigg_id] = abs(compound_dict.get('stoichiometry')) return str({'reactants': reactants, 'products': products}) missing_reacs['bigg_reaction']= missing_reacs.apply( lambda row: get_reactants_and_products_dicts(str(row['bigg_id'])), axis=1) #, missing_reacs['bigg_products'], result_type='expand' return missing_reacs
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/analysis_db.py
0.68721
0.291674
analysis_db.py
pypi
import cobra import click import yaml import os import re import gffutils import sqlalchemy import logging import pandas as pd from cobra import Model as cobraModel from ols_client import EBIClient from Bio import Entrez, SeqIO from refinegems.databases import PATH_TO_DB, initialise_database from libsbml import Model as libModel from libsbml import SBMLReader, writeSBMLToFile, SBMLValidator, SBMLDocument from datetime import date __author__ = "Famke Baeuerle and Gwendolyn O. Gusak" def load_model_cobra(modelpath: str) -> cobraModel: """Loads model using COBRApy Args: - modelpath (str): Path to GEM Returns: cobraModel: Loaded model by COBRApy """ mod = cobra.io.read_sbml_model(modelpath) return mod def load_model_libsbml(modelpath: str) -> libModel: """Loads model using libSBML Args: - modelpath (str): Path to GEM Returns: libModel: loaded model by libSBML """ reader = SBMLReader() read = reader.readSBMLFromFile(modelpath) # read from file mod = read.getModel() return mod def load_multiple_models(models: list[str], package: str) -> list: """Loads multiple models into a list Args: - models (list): List of paths to models - package (str): COBRApy|libSBML Returns: list: List of model objects loaded with COBRApy|libSBML """ loaded_models = [] for modelpath in models: if package == 'cobra': loaded_models.append(load_model_cobra(modelpath)) elif package == 'libsbml': loaded_models.append(load_model_libsbml(modelpath)) return loaded_models def load_document_libsbml(modelpath: str) -> SBMLDocument: """Loads model document using libSBML Args: - modelpath (str): Path to GEM Returns: SBMLDocument: Loaded document by libSBML """ reader = SBMLReader() read = reader.readSBMLFromFile(modelpath) # read from file return read def load_medium_custom(mediumpath: str) -> pd.DataFrame: """Helper function to read medium csv Args: - mediumpath (str): path to csv file with medium Returns: pd.DataFrame: Table of csv """ medium = pd.read_csv(mediumpath, sep=';') medium['BiGG_R'] = 'R_EX_' + 
medium['BiGG'] + '_e' medium['BiGG_EX'] = 'EX_' + medium['BiGG'] + '_e' return medium def load_medium_from_db(mediumname: str) -> pd.DataFrame: """Wrapper function to extract subtable for the requested medium from the database 'data.db' Args: - mediumname (str): Name of medium to test growth on Returns: pd.DataFrame: Table containing composition for one medium with metabs added as BiGG_EX exchange reactions """ medium_query = f"SELECT * FROM media m JOIN media_compositions mc ON m.id = mc.medium_id WHERE m.medium = '{mediumname}'" medium = load_a_table_from_database(medium_query) medium = medium[['medium', 'medium_description', 'BiGG', 'substance']] medium['BiGG_R'] = 'R_EX_' + medium['BiGG'] + '_e' medium['BiGG_EX'] = 'EX_' + medium['BiGG'] + '_e' return medium def load_all_media_from_db(mediumpath: str) -> pd.DataFrame: """Helper function to extract media definitions from media_db.csv Args: - mediumpath (str): Path to csv file with medium database Returns: pd.DataFrame: Table from csv with metabs added as BiGG_EX exchange reactions """ media = pd.read_csv(mediumpath, sep=';') media['BiGG_R'] = 'R_EX_' + media['BiGG'] + '_e' media['BiGG_EX'] = 'EX_' + media['BiGG'] + '_e' media['group'] = media['medium'].ne(media['medium'].shift()).cumsum() grouped = media.groupby('group') media_dfs = [] for name, data in grouped: media_dfs.append(data.reset_index(drop=True)) return media_dfs def load_manual_annotations(tablepath: str='data/manual_curation.xlsx', sheet_name: str='metab') -> pd.DataFrame: """Loads metabolite sheet from manual curation table Args: - tablepath (str): Path to manual curation table. Defaults to 'data/manual_curation.xlsx'. - sheet_name (str): Sheet name for metabolite annotations. Defaults to 'metab'. 
Returns: pd.DataFrame: Table containing specified sheet from Excel file """ man_ann = pd.read_excel(tablepath, sheet_name) return man_ann def load_a_table_from_database(table_name_or_query: str) -> pd.DataFrame: """| Loads the table for which the name is provided or a table containing all rows for which the query evaluates to | true from the refineGEMs database ('data/database/data.db') Args: - table_name_or_query (str): Name of a table contained in the database 'data.db'/ a SQL query Returns: pd.DataFrame: Containing the table for which the name was provided from the database 'data.db' """ sqlalchemy_engine_input = f'sqlite:///{PATH_TO_DB}' engine = sqlalchemy.create_engine(sqlalchemy_engine_input) open_con = engine.connect() db_table = pd.read_sql(table_name_or_query, open_con) open_con.close() return db_table def load_manual_gapfill(tablepath: str='data/manual_curation.xlsx' , sheet_name: str='gapfill') -> pd.DataFrame: """Loads gapfill sheet from manual curation table Args: - tablepath (str): Path to manual curation table. Defaults to 'data/manual_curation.xlsx'. - sheet_name (str): Sheet name for reaction gapfilling. Defaults to 'gapfill'. 
Returns: pd.DataFrame: Table containing sheet with name 'gapfill'|specified sheet_name from Excel file """ man_gapf = pd.read_excel(tablepath, sheet_name) return man_gapf def parse_dict_to_dataframe(str2list: dict) -> pd.DataFrame: """| Parses dictionary of form {str: list} & | Transforms it into a table with a column containing the strings and a column containing the lists Args: str2list (dict): Dictionary mapping strings to lists Returns: pd.DataFrame: Table with column containing the strings and column containing the lists """ # Get max number of list length max_len_of_list = max(map(len, str2list.values())) # Fill lists with None until all lists have the same size -> Required for pd.DataFrame for key in str2list: current_list = str2list.get(key) while len(current_list) != max_len_of_list: str2list.get(key).append(None) df = pd.DataFrame.from_dict(str2list).stack().T.reset_index() df = df.drop('level_0', axis=1) return df def write_to_file(model: libModel, new_filename: str): """Writes modified model to new file Args: - model (libModel): Model loaded with libSBML - new_filename (str): Filename|Path for modified model """ new_document = model.getSBMLDocument() writeSBMLToFile(new_document, new_filename) logging.info("Modified model written to " + new_filename) def write_report(dataframe: pd.DataFrame, filepath: str): """Writes reports stored in dataframes to xlsx file Args: - dataframe (pd.DataFrame): Table containing output - filepath (str): Path to file with filename """ writer = pd.ExcelWriter(str(os.path.abspath('.')) + '/' + filepath) dataframe.to_excel(writer) writer.save() def validate_libsbml_model(model: libModel) -> int: """Debug method: Validates a libSBML model with the libSBML validator Args: - model (libModel): A libSBML model Returns: int: Integer specifying if vaidate was successful or not """ validator = SBMLValidator() doc = model.getSBMLDocument() return validator.validate(doc) def parse_fasta_headers(filepath: str, id_for_model: bool=False) -> 
pd.DataFrame: """Parses FASTA file headers to obtain: - the protein_id - and the model_id (like it is obtained from CarveMe) corresponding to the locus_tag Args: - filepath (str): Path to FASTA file - id_for_model (bool): True if model_id similar to autogenerated GeneProduct ID should be contained in resulting table Returns: pd.DataFrame: Table containing the columns locus_tag, Protein_id & Model_id """ keyword_list = ['protein', 'locus_tag'] tmp_dict = dict() if id_for_model: locus2ids = { 'locus_tag': [], 'protein_id': [], 'model_id': [], 'name': [] } else: locus2ids = { 'locus_tag': [], 'protein_id': [], 'name': [] } with open(filepath, 'r') as handle: for record in SeqIO.parse(handle, 'fasta'): header = record.description protein_id = record.id.split('|')[1].split('prot_')[1].split('.')[0].strip() descriptors = re.findall('\[+(.*?)\]', header) if id_for_model: model_id = re.sub("\||\.", "_", record.id) model_id = f'G_{model_id}' descriptors.insert(0, protein_id) tmp_dict['protein_id'] = str(protein_id) for entry in descriptors: entry = entry.strip().split('=') if entry[0] in keyword_list: if entry[0] == 'protein_id': tmp_dict[entry[0]] = entry[1].split('.')[0] else: tmp_dict[entry[0]] = entry[1] locus2ids.get('locus_tag').append(tmp_dict.get('locus_tag')) locus2ids.get('protein_id').append(tmp_dict.get('protein_id')) locus2ids.get('name').append(tmp_dict.get('protein')) if id_for_model: locus2ids.get('model_id').append(model_id) return pd.DataFrame(locus2ids) def search_ncbi_for_gpr(locus: str) -> str: """Fetches protein name from NCBI Args: - locus (str): NCBI compatible locus_tag Returns: str: Protein name|description """ handle = Entrez.efetch( db="protein", id=locus, rettype="gbwithparts", retmode='text') records = SeqIO.parse(handle, "gb") for i, record in enumerate(records): if (locus[0] == 'W'): return record.description, locus else: for feature in record.features: if feature.type == "CDS": return record.description, feature.qualifiers["locus_tag"][0] 
def parse_gff_for_gp_info(gff_file: str) -> pd.DataFrame:
    """Parses gff file of organism to find gene protein reactions based on locus tags

    Args:
        - gff_file (str): Path to gff file of organism of interest

    Returns:
        pd.DataFrame: Table containing mapping from locus tag to GPR
    """
    db = gffutils.create_db(
        gff_file,
        ':memory:',
        merge_strategy='create_unique')
    mapping_cds = {}
    for feature in db.all_features():
        attr = dict(feature.attributes)
        # BUGFIX: narrowed from BaseException — a bare BaseException catch also
        # swallows KeyboardInterrupt/SystemExit; only missing keys are expected here.
        try:
            if str(attr['gbkey'][0]) == 'CDS':
                mapping_cds[attr['Name'][0]] = attr['Parent'][0]
        except Exception:
            pass
    mapping_df = pd.DataFrame.from_dict(
        mapping_cds,
        columns=['Parent'],
        orient='index').reset_index().rename(
        columns={
            'index': 'GPR'})

    def extract_locus(feature):
        # Returns the old_locus_tag of the parent feature, or None if absent.
        try:
            return db[feature].attributes['old_locus_tag'][0]
        except Exception:
            pass
        return None

    mapping_df['locus_tag'] = mapping_df.apply(
        lambda row: extract_locus(row['Parent']), axis=1)
    return mapping_df.drop('Parent', axis=1)


def search_sbo_label(sbo_number: str) -> str:
    """Looks up the SBO label corresponding to a given SBO Term number

    Args:
        - sbo_number (str): Last three digits of SBO-Term as str

    Returns:
        str: Denoted label for given SBO Term
    """
    sbo_number = str(sbo_number)
    client = EBIClient()
    sbo = client.get_term('sbo', 'http://biomodels.net/SBO/SBO_0000' + sbo_number)
    return sbo['_embedded']['terms'][0]['label']


def save_user_input(configpath: str) -> dict[str, str]:
    """This aims to collect user input from the command line to create a config file,
    will also save the user input to a config if no config was given

    Args:
        - configpath (str): Path to config file if present

    Returns:
        dict: Either loaded config file or created from user input

    Note:
        The return annotation previously read ``dict[str: str]`` — that is a
        slice subscript (``dict[slice(str, str)]``), not a mapping type;
        corrected to ``dict[str, str]``.
    """
    if os.path.isfile(configpath):
        with open(configpath) as f:
            config = yaml.safe_load(f)
        print(config)
        return config
    else:
        print('No config or no valid config given, you will be asked for input')
        user_input = {}

        update_db = click.confirm('Do you want to update the database?')
        user_input['db_update'] = update_db
        if update_db:
            initialise_database()

        out_path = click.confirm('Do you want to keep the output path "../rg_out/"?', default=True)
        if not out_path:
            user_input['out_path'] = click.prompt('Enter your desired output path')
        else:
            user_input['out_path'] = '../rg_out/'

        user_input['visualize'] = click.confirm('Do you want to generate visualizations of your model(s)?')

        # BUGFIX: re-prompt until a valid choice is given — previously an
        # unexpected answer left 'growth_basis' unset, causing a KeyError later.
        while True:
            growth_basis = click.prompt('Enter the base uptakes for growth simulation (d for default_uptake, m for minimal_uptake)')
            if growth_basis == 'd':
                user_input['growth_basis'] = 'default_uptake'
                break
            if growth_basis == 'm':
                user_input['growth_basis'] = 'minimal_uptake'
                break
            print('Please enter either "d" or "m".')

        user_input['anaerobic_growth'] = click.confirm('Do you want to simulate anaerobic growth?')

        multiple = click.confirm('Do you want to simulate and compare multiple models?')
        user_input['multiple'] = multiple
        if multiple:
            list_of_models = []
            while True:
                file_path = click.prompt('Enter file path to model (or "stop" to stop)')
                if file_path.lower() == 'stop':
                    break
                elif os.path.isfile(file_path):
                    list_of_models.append(file_path)
                    print('Added file:', file_path)
                else:
                    print('File does not exist. Please enter a valid file path.')
            print('The following models will be compared:')
            print(list_of_models)
            user_input['multiple_paths'] = list_of_models

        possible_media = load_a_table_from_database('media')['medium'].to_list()
        possible_media_str = '|'.join(possible_media)
        list_of_media = []
        while True:
            medium = click.prompt(f'Enter medium to simulate growth on ({possible_media_str}) (or "stop" to stop)')
            if medium.lower() == 'stop':
                break
            elif medium in possible_media:
                if medium not in list_of_media:
                    list_of_media.append(medium)
                else:
                    print(medium + ' is already in the list.')
            else:
                print('Please choose a medium from the given list.')
        user_input['media'] = list_of_media

        single = click.confirm('Do you want to investigate or curate a single model?')
        user_input['single'] = single
        if single:
            not_valid = True
            while not_valid:
                model = click.prompt('Path to your model file.')
                if os.path.isfile(model):
                    user_input['model'] = model
                    not_valid = False
                else:
                    print('File does not exist. Please enter a valid file path')

            user_input['memote'] = click.confirm('Do you want to run MEMOTE (takes some time)?')
            user_input['modelseed'] = click.confirm('Do you want to compare your model entities to the ModelSEED database?')

            gap_analysis = click.confirm('Do you want to run a gap analysis?')
            user_input['gap_analysis'] = gap_analysis
            if gap_analysis:
                gap_analysis_params = {}
                db_to_compare = click.prompt('One of the choices KEGG|BioCyc|KEGG+BioCyc')  #|GFF
                gap_analysis_params['db_to_compare'] = db_to_compare
                if db_to_compare == 'KEGG' or db_to_compare == 'KEGG+BioCyc':
                    gap_analysis_params['organismid'] = click.prompt('Enter the KEGG Organism ID')
                    gap_analysis_params['gff_file'] = click.prompt('Enter the path to your organisms RefSeq GFF file')
                if db_to_compare == 'BioCyc' or db_to_compare == 'KEGG+BioCyc':
                    Path0 = click.prompt('Enter the path to your BioCyc TXT file containing a SmartTable with the columns \'Accession-2\' and \'Reaction of gene\'')
                    Path1 = click.prompt('Enter the path to your BioCyc TXT file containing a SmartTable with all reaction relevant information')
                    Path2 = click.prompt('Enter the path to your Biocyc TXT file containing a SmartTable with all metabolite relevant information')
                    Path3 = click.prompt('Enter path to protein FASTA file used as input for CarveMe')
                    gap_analysis_params['biocyc_files'] = [Path0, Path1, Path2, Path3]
                user_input['gap_analysis_params'] = gap_analysis_params

            mod = click.confirm('Do you want to use functions to modify your model?')
            if mod:
                new_path = click.confirm('Do you want to save your modified model to ' + user_input['out_path'] + '<model.id>_modified_<today>.xml?')
                if new_path:
                    user_input['model_out'] = 'stdout'
                else:
                    user_input['model_out'] = click.prompt('Enter path and filename to where to save the modified model')

                gapfill_model = click.confirm('Do you want to fill gaps in your model?')
                user_input['gapfill_model'] = gapfill_model
                if gapfill_model:
                    if not gap_analysis:
                        user_input['gap_analysis_file'] = click.prompt('Enter path to Excel file with which gaps should be filled')

                user_input['keggpathways'] = click.confirm('Do you want to add KEGG Pathways?')
                user_input['sboterms'] = click.confirm('Do you want to update the SBO Terms?')
                user_input['charge_corr'] = click.confirm('Do you want to add charges to uncharged metabolites?')

                man_cur = click.confirm('Do you want to modify your model with the manual curations table?')
                user_input['man_cur'] = man_cur
                if man_cur:
                    entrez_email = click.prompt('Email to access NCBI Entrez')
                    user_input['entrez_email'] = entrez_email
                    man_cur_type = click.prompt('Enter type of curation (gapfill|metabs)')
                    user_input['man_cur_type'] = man_cur_type
                    man_cur_table = click.prompt('Enter the path to the manual curations table')
                    user_input['man_cur_table'] = man_cur_table

                polish = click.confirm('Do you want to polish the model?')
                user_input['polish'] = polish
                if polish:
                    entrez_email = click.prompt('Email to access NCBI Entrez')
                    user_input['entrez_email'] = entrez_email
                    id_db = click.prompt('What database is your model based on? BIGG|VMH')
                    user_input['id_db'] = id_db
                    lab_strain = not click.confirm('Does your modeled organism have a database entry?', default=True)
                    user_input['lab_strain'] = lab_strain
                    protein_fasta = click.prompt('If possible, provide the path to your Protein FASTA file used for CarveMe')
                    user_input['protein_fasta'] = protein_fasta

                biomass = click.confirm('Do you want to check & normalise the biomass function(s)?')
                user_input['biomass'] = biomass
            else:
                # No modification requested: default every modification flag to False
                user_input['keggpathways'] = False
                user_input['polish'] = False
                user_input['biomass'] = False
                user_input['sboterms'] = False
                user_input['charge_corr'] = False
                user_input['gapfill_model'] = False
                user_input['man_cur'] = False

        today = date.today().strftime("%Y%m%d")
        print('This is your input:')
        print(user_input)
        if not os.path.isdir(user_input['out_path']):
            print('Given out_path is not yet a directory, creating ' + user_input['out_path'])
            os.makedirs(user_input['out_path'])
        with open(user_input['out_path'] + 'user_input_' + str(today) + '.yaml', 'w') as f:
            yaml.dump(user_input, f)
        print('Your input was saved as yaml to ' + user_input['out_path'] + 'user_input_' + str(today) + '.yaml')
        return user_input
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/io.py
0.738763
0.310786
io.py
pypi
import logging

import numpy as np
import pandas as pd

from refinegems.io import load_medium_from_db
from cobra.medium import minimal_medium
from cobra import Reaction
from cobra import Model as cobraModel

__author__ = "Famke Baeuerle"


def set_fluxes_to_simulate(reaction: Reaction) -> Reaction:
    """Helper function: Set flux bounds to -1000.0 and 1000.0 to enable model simulation with growth_one_medium_from_minimal/default

    Args:
        - reaction (Reaction): Reaction with unusable flux bounds

    Returns:
        Reaction: Reaction with usable flux bounds
    """
    reaction.bounds = (-1000.0, 1000.0)
    return reaction


def get_default_uptake(model: cobraModel) -> list[str]:
    """Determines which metabolites are used in the standard medium

    Args:
        - model (cobraModel): Model loaded with COBRApy

    Returns:
        list[str]: Metabolites consumed in standard medium
    """
    with model:
        sol = model.optimize()
        fluxes = sol.fluxes
        default_uptake = []
        for index, value in fluxes.items():
            # negative flux on an exchange reaction ("EX" in the id) = uptake
            if "EX" in index:
                if value < 0:
                    default_uptake.append(index)
    return default_uptake


def get_minimal_uptake(model: cobraModel) -> list[str]:
    """Determines which metabolites are used in a minimal medium

    Args:
        - model (cobraModel): Model loaded with COBRApy

    Returns:
        list[str]: Metabolites consumed in minimal medium
    """
    with model:
        minimal = minimal_medium(model)
    return list(minimal.index)


def get_default_secretion(model: cobraModel) -> list[str]:
    """Checks fluxes after FBA, if positive the metabolite is produced

    Args:
        - model (cobraModel): Model loaded with COBRApy

    Returns:
        list[str]: BiGG Ids of produced metabolites

    Note:
        NOTE(review): unlike get_default_uptake this does not filter for
        exchange ("EX") reactions, so internal reactions with positive flux
        are included as well — confirm whether that is intended.
    """
    with model:
        fluxes = model.optimize().fluxes
        default_secretion = []
        for index, value in fluxes.items():
            if value > 0:
                default_secretion.append(index)
    return default_secretion


def get_missing_exchanges(model: cobraModel, medium: pd.DataFrame) -> list[str]:
    """Look for exchange reactions needed by the medium but not in the model

    Args:
        - model (cobraModel): Model loaded with COBRApy
        - medium (pd.DataFrame): Dataframe with medium definition

    Returns:
        list[str]: Ids of all exchanges missing in the model but given in medium
    """
    medium_list = medium['BiGG_EX'].dropna().tolist()
    missing_exchanges = []
    for exchange in medium_list:
        try:
            model.reactions.get_by_id(exchange)
        except KeyError:
            missing_exchanges.append(exchange)
    return missing_exchanges


def modify_medium(medium: pd.DataFrame, missing_exchanges: list[str]) -> dict:
    """Helper function: Remove exchanges from medium that are not in the model to avoid KeyError

    Args:
        - medium (pd.DataFrame): Dataframe with medium definition
        - missing_exchanges (list): Ids of exchanges not in the model

    Returns:
        dict: Growth medium definition that can be used with the model (f.ex {'EX_glc__D_e' : 10.0})
    """
    growth_medium = dict.fromkeys(medium['BiGG_EX'].dropna().tolist(), 10.0)
    for exchange in missing_exchanges:
        growth_medium.pop(exchange)
    return growth_medium


def find_missing_essential(model: cobraModel, growth_medium: dict, default_uptake: list[str], anaerobic: bool) -> list[str]:
    """Report which exchange reactions are needed for growth, combines default uptake and valid new medium

    Args:
        - model (cobraModel): Model loaded with COBRApy
        - growth_medium (dict): Growth medium definition that can be used with the model. Output of modify_medium.
        - default_uptake (list[str]): Metabolites consumed in standard medium
        - anaerobic (bool): If True 'EX_o2_e' is set to 0.0 to simulate anaerobic conditions

    Returns:
        list[str]: Ids of exchanges of all metabolites which lead to zero growth if blocked
    """
    with model:
        default_medium = {i: 10.0 for i in default_uptake}
        if anaerobic and ('EX_o2_e' in default_medium):
            default_medium['EX_o2_e'] = 0.0
        if anaerobic and ('EX_o2_e' in growth_medium):
            growth_medium['EX_o2_e'] = 0.0
        new_medium = {**growth_medium, **default_medium}
        try:
            model.medium = new_medium
        except ValueError:
            # cobra rejects a medium referencing reactions with zero bounds;
            # open all bounds and retry
            logging.info('Change upper bounds to 1000.0 and lower bounds to -1000.0 to make model simulatable.')
            for reaction in model.reactions:
                set_fluxes_to_simulate(reaction)
            model.medium = new_medium
        essential = []
        for metab in new_medium.keys():
            with model:
                model.reactions.get_by_id(metab).lower_bound = 0
                sol = model.optimize()
                if sol.objective_value < 1e-5:  # and sol.objective_value > -1e-9: # == 0 no negative growth!
                    essential.append(metab)
                model.reactions.get_by_id(metab).lower_bound = -10
    return essential


def find_minimum_essential(medium: pd.DataFrame, essential: list[str]) -> list[str]:
    """Report metabolites necessary for growth and not in custom medium

    Args:
        - medium (pd.DataFrame): Dataframe with medium definition
        - essential (list[str]): Ids of all metabolites which lead to zero growth if blocked. Output of find_missing_essential.

    Returns:
        list[str]: Ids of exchanges of metabolites not present in the medium but necessary for growth
    """
    minimum = []
    for metab in essential:
        if metab not in medium['BiGG_EX'].tolist():
            minimum.append(metab)
    return minimum


def simulate_minimum_essential(model: cobraModel, growth_medium: dict, minimum: list[str], anaerobic: bool) -> float:
    """Simulate growth with custom medium plus necessary uptakes

    Args:
        - model (cobraModel): Model loaded with COBRApy
        - growth_medium (dict): Growth medium definition that can be used with the model. Output of modify_medium.
        - minimum (list[str]): Ids of exchanges of metabolites not present in the medium but necessary for growth. Output of find_minimum_essential.
        - anaerobic (bool): If True 'EX_o2_e' is set to 0.0 to simulate anaerobic conditions

    Returns:
        float: Growth value in mmol per (gram dry weight) per hour
    """
    with model:
        min_medium = {i: 10.0 for i in minimum}
        new_medium = {**growth_medium, **min_medium}
        try:
            # bump oxygen uptake for aerobic growth, zero it for anaerobic
            if (new_medium['EX_o2_e'] == 10.0):
                new_medium['EX_o2_e'] = 20.0 if not anaerobic else 0.0
        except KeyError:
            print('No Oxygen Exchange Reaction')
            pass
        try:
            model.medium = new_medium
        except ValueError:
            logging.info('Change upper bounds to 1000.0 and lower bounds to -1000.0 to make model simulatable.')
            for reaction in model.reactions:
                set_fluxes_to_simulate(reaction)
            model.medium = new_medium
        sol = model.optimize()
    return sol.objective_value


def get_all_minimum_essential(model: cobraModel, media: list[str], anaerobic: bool = False) -> pd.DataFrame:
    """Returns metabolites necessary for growth and not in media

    Args:
        - model (cobraModel): Model loaded with COBRApy
        - media (list[str]): Containing the names of all media for which the growth essential metabolites not contained in the media should be returned
        - anaerobic (bool): If True 'EX_o2_e' is set to 0.0 to simulate anaerobic conditions (new optional parameter, default False keeps aerobic behaviour)

    Returns:
        pd.DataFrame: information on different media which metabs are missing
    """
    default_uptake = get_default_uptake(model)
    mins = pd.DataFrame()
    for medium in media:
        medium_df = load_medium_from_db(medium)
        missing_exchanges = get_missing_exchanges(model, medium_df)
        medium_dict = modify_medium(medium_df, missing_exchanges)
        # BUGFIX: find_missing_essential requires the anaerobic flag — the old
        # call omitted it and raised TypeError.
        essential = find_missing_essential(model, medium_dict, default_uptake, anaerobic)
        minimum = find_minimum_essential(medium_df, essential)
        # BUGFIX: 'medium' is the medium *name* (str); indexing it like a
        # DataFrame raised TypeError. Use the loaded table instead.
        mins[medium_df['medium'][0]] = pd.Series(minimum)
    return mins


def growth_one_medium_from_default(model: cobraModel, medium: pd.DataFrame, anaerobic: bool) -> pd.DataFrame:
    """Simulates growth on given medium, adding missing metabolites from the default uptake

    Args:
        - model (cobraModel): Model loaded with COBRApy
        - medium (pd.DataFrame): Dataframe with medium definition
        - anaerobic (bool): If True 'EX_o2_e' is set to 0.0 to simulate anaerobic conditions

    Returns:
        pd.DataFrame: Information on growth behaviour on given medium
    """
    default_uptake = get_default_uptake(model)
    missing_exchanges = get_missing_exchanges(model, medium)
    medium_dict = modify_medium(medium, missing_exchanges)
    essential = find_missing_essential(model, medium_dict, default_uptake, anaerobic)
    minimum = find_minimum_essential(medium, essential)

    medium_dict = modify_medium(medium, missing_exchanges)
    growth_value = simulate_minimum_essential(model, medium_dict, minimum, anaerobic)
    doubling_time = (np.log(2) / growth_value) * 60
    medium_name = medium['medium'][0] if not anaerobic else f'{medium["medium"][0]}[-O2]'
    exchanges = [[medium_name], minimum, missing_exchanges, [growth_value], [doubling_time]]
    df_growth = pd.DataFrame(exchanges,
                             ['medium', 'essential', 'missing exchanges', 'growth_value', 'doubling_time [min]']).T
    return df_growth


def growth_one_medium_from_minimal(model: cobraModel, medium: pd.DataFrame, anaerobic: bool) -> pd.DataFrame:
    """Simulates growth on given medium, adding missing metabolites from a minimal uptake

    Args:
        - model (cobraModel): Model loaded with COBRApy
        - medium (pd.DataFrame): Dataframe with medium definition
        - anaerobic (bool): If True 'EX_o2_e' is set to 0.0 to simulate anaerobic conditions

    Returns:
        pd.DataFrame: Information on growth behaviour on given medium
    """
    minimal_uptake = get_minimal_uptake(model)  # use this instead of default_uptake
    missing_exchanges = get_missing_exchanges(model, medium)
    medium_dict = modify_medium(medium, missing_exchanges)
    essential = find_missing_essential(model, medium_dict, minimal_uptake, anaerobic)
    minimum = find_minimum_essential(medium, essential)

    medium_dict = modify_medium(medium, missing_exchanges)
    growth_value = simulate_minimum_essential(model, medium_dict, minimum, anaerobic)
    doubling_time = (np.log(2) / growth_value) * 60
    medium_name = medium['medium'][0] if not anaerobic else f'{medium["medium"][0]}[-O2]'
    exchanges = [[medium_name], minimum, missing_exchanges, [growth_value], [doubling_time]]
    df_growth = pd.DataFrame(exchanges,
                             ['medium', 'essential', 'missing exchanges', 'growth_value', 'doubling_time [min]']).T
    return df_growth


def get_growth_selected_media(model: cobraModel, media: list[str], basis: str, anaerobic: bool) -> pd.DataFrame:
    """Simulates growth on all given media

    Args:
        - model (cobraModel): Model loaded with COBRApy
        - media (list[str]): Ids of media to simulate on
        - basis (str): Either default_uptake (adding metabs from default) or minimal_uptake (adding metabs from minimal medium)
        - anaerobic (bool): If True 'EX_o2_e' is set to 0.0 to simulate anaerobic conditions

    Returns:
        pd.DataFrame: Information on growth behaviour on given media

    Raises:
        ValueError: If basis is neither 'default_uptake' nor 'minimal_uptake'
        (previously an unknown basis crashed with an unhelpful NameError)
    """
    growth = pd.DataFrame()
    for medium in media:
        medium_df = load_medium_from_db(medium)
        if (basis == 'default_uptake'):
            growth_one = growth_one_medium_from_default(model, medium_df, anaerobic)
        elif (basis == 'minimal_uptake'):
            growth_one = growth_one_medium_from_minimal(model, medium_df, anaerobic)
        else:
            raise ValueError(f'Unknown basis: {basis}. Use default_uptake or minimal_uptake.')
        # BUGFIX: DataFrame.append was removed in pandas 2.0 — use pd.concat.
        growth = pd.concat([growth, growth_one], ignore_index=True)
    return growth


def get_essential_reactions(model: cobraModel) -> list[str]:
    """Knocks out each reaction, if no growth is detected the reaction is seen as essential

    Args:
        - model (cobraModel): Model loaded with COBRApy

    Returns:
        list[str]: BiGG Ids of essential reactions
    """
    ess = []
    for reaction in model.reactions:
        with model as model:
            reaction.knock_out()
            model.optimize()
            # NOTE(review): the threshold 11 looks arbitrary for a growth rate —
            # confirm whether a small epsilon (e.g. 1e-5) was intended.
            if model.objective.value <= 11:
                print('%s blocked (bounds: %s), new growth rate %f $' %
                      (reaction.id, str(reaction.bounds), model.objective.value))
                ess.append(reaction.id)
    return ess


def get_essential_reactions_via_bounds(model: cobraModel) -> list[str]:
    """Knocks out reactions by setting their bounds to 0, if no growth is detected the reaction is seen as essential

    Args:
        - model (cobraModel): Model loaded with COBRApy

    Returns:
        list[str]: BiGG Ids of essential reactions
    """
    medium = model.medium
    ess = []
    for content in medium.keys():
        model.reactions.get_by_id(content).lower_bound = 0.0
        solution = model.optimize().objective_value
        if solution < 1e-9:
            ess.append(content)
        # restore the uptake bound before testing the next exchange
        model.reactions.get_by_id(content).lower_bound = -10.0
    return ess


def find_additives(model: cobraModel, base_medium: dict) -> pd.DataFrame:
    """Iterates through all exchanges to find metabolites that lead to a higher growth rate compared to the growth rate yielded on the base_medium

    Args:
        - model (cobraModel): Model loaded with COBRApy
        - base_medium (dict): Exchanges as keys and their flux bound as value (f.ex {'EX_glc__D_e' : 10.0})

    Returns:
        pd.DataFrame: Exchanges sorted from highest to lowest growth rate improvement
    """
    with model:
        model.medium = base_medium
        sol = model.optimize()
    base_growth = sol.objective_value
    print(base_growth)

    enhancement = {}
    for ex in list(model.exchanges):
        with model:
            # temporarily open this exchange on top of the base medium
            base_medium[ex.id] = 10.0
            model.medium = base_medium
            sol = model.optimize()
            if sol.objective_value > base_growth:
                enhancement[ex.id] = sol.objective_value - base_growth
            base_medium.pop(ex.id)

    adds = pd.DataFrame(enhancement.items(), columns=[
                        'exchange', 'diff']).sort_values(by=['diff'], ascending=False)
    return adds
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/growth.py
0.83498
0.54583
growth.py
pypi
from tqdm.auto import tqdm
from libsbml import SBMLReader, GroupsExtension
from libsbml import Model as libModel
from bioservices import KEGG
from refinegems.cvterms import add_cv_term_pathways, get_id_from_cv_term, add_cv_term_pathways_to_entity

__author__ = "Famke Baeuerle"


def load_model_enable_groups(modelpath: str) -> libModel:
    """Loads model as document using libSBML and enables groups extension

    Args:
        - modelpath (str): Path to GEM

    Returns:
        libModel: Model loaded with libSBML
    """
    reader = SBMLReader()
    read = reader.readSBMLFromFile(modelpath)  # read from file
    groupextension = GroupsExtension()
    groupURI = groupextension.getURI(3, 1, 1)
    read.enablePackage(groupURI, "groups", True)  # enable groups extension
    read.setPkgRequired('groups', False)  # make groups not required
    model = read.getModel()
    return model


def extract_kegg_reactions(model: libModel) -> tuple[dict, list]:
    """Extract KEGG Ids from reactions

    Args:
        - model (libModel): Model loaded with libSBML. Output of load_model_enable_groups.

    Returns:
        tuple: Dictionary 'reaction_id': 'KEGG_id' (1) & List of reactions without KEGG Id (2)
            (1) dict: Reaction Id as key and Kegg Id as value
            (2) list: Ids of reactions without KEGG annotation
    """
    list_reac = model.getListOfReactions()
    kegg_reactions = {}
    non_kegg_reac = []

    for reaction in list_reac:
        kegg_ids = get_id_from_cv_term(reaction, 'kegg.reaction')
        if len(kegg_ids) > 0:
            # if several KEGG ids are annotated, only the first one is kept
            kegg_reactions[reaction.getId()] = kegg_ids[0]
        else:
            non_kegg_reac.append(reaction.getId())

    return kegg_reactions, non_kegg_reac


def extract_kegg_pathways(kegg_reactions: dict) -> dict:
    """Finds pathway for reactions in model with KEGG Ids, accesses KEGG API, uses tqdm to report progres to user

    Args:
        - kegg_reactions (dict): Reaction Id as key and Kegg Id as value. Output[0] from extract_kegg_reactions.

    Returns:
        dict: Reaction Id as key and Kegg Pathway Id as value
    """
    k = KEGG()
    kegg_pathways = {}

    print('Extracting pathway Id for each reaction:')
    for reaction in tqdm(kegg_reactions.keys()):
        kegg_reaction = k.get(kegg_reactions[reaction])
        dbentry = k.parse(kegg_reaction)
        # sometimes parse does not work -> try and except
        try:
            pathways = [x for x in dbentry['PATHWAY']]
        except BaseException:
            pathways = []
        kegg_pathways[reaction] = pathways

    return kegg_pathways


def add_kegg_pathways(model: libModel, kegg_pathways: dict) -> libModel:
    """Add KEGG reactions as BQB_OCCURS_IN

    Args:
        - model (libModel): Model loaded with libSBML. Output of load_model_enable_groups.
        - kegg_pathways (dict): Reaction Id as key and Kegg Pathway Id as value. Output of extract_kegg_pathways.

    Returns:
        libsbml-model: modified model with Kegg pathways
    """
    list_reac = model.getListOfReactions()

    for reaction in list_reac:
        if reaction.getId() in kegg_pathways.keys():
            # annotate each pathway of the reaction as a KEGG CV term
            for path in kegg_pathways[reaction.getId()]:
                add_cv_term_pathways_to_entity(path, 'KEGG', reaction)

    return model


def get_pathway_groups(kegg_pathways: dict) -> dict:
    """Group reaction into pathways

    Args:
        - kegg_pathways (dict): Reaction Id as key and Kegg Pathway Id as value. Output of extract_kegg_pathways.

    Returns:
        dict: Kegg Pathway Id as key and reactions Ids as values
    """
    # invert the mapping: pathway id -> list of reaction ids
    pathway_groups = {}
    for reaction in kegg_pathways.keys():
        for path in kegg_pathways[reaction]:
            if path not in pathway_groups.keys():
                pathway_groups[path] = [reaction]
            else:
                pathway_groups[path].append(reaction)
    return pathway_groups


def create_pathway_groups(model: libModel, pathway_groups: dict) -> libModel:
    """Use group module to add reactions to Kegg pathway

    Args:
        - model (libModel): Model loaded with libSBML. Output of load_model_enable_groups.
        - pathway_groups (dict): Kegg Pathway Id as key and reactions Ids as values. Output of get_pathway_groups.

    Returns:
        libModel: modified model with groups for pathways
    """
    k = KEGG()
    groups = model.getPlugin('groups')
    group_list = groups.getListOfGroups()
    keys = list(pathway_groups.keys())
    num_reactions = [len(sub) for sub in list(pathway_groups.values())]

    print('Adding pathways as groups to the model:')
    for i in tqdm(range(len(pathway_groups))):
        kegg_pathway = k.get(keys[i])
        dbentry = k.parse(kegg_pathway)
        if groups.getGroup('G_' + keys[i]) is not None:
            # group already exists: refresh its metadata and add missing members
            group = groups.getGroup('G_' + keys[i])
            group.setName(dbentry['NAME'][0])
            group.setMetaId("meta_" + 'G_' + keys[i])
            group.setKind('partonomy')
            group.setSBOTerm("SBO:0000633")  # NAME
            add_cv_term_pathways(keys[i], 'KEGG', group)
            for reac in pathway_groups[keys[i]]:
                if group.getMemberByIdRef(reac) is None:
                    member = group.createMember()
                    member.setIdRef(reac)
        else:
            # NOTE(review): libSBML's create* helpers normally live on the
            # GroupsModelPlugin (groups.createGroup()); confirm that
            # ListOfGroups.createGroup exists in the targeted libSBML version.
            group = group_list.createGroup()
            group.setName(dbentry['NAME'][0])
            group.setId('G_' + keys[i])  # important for validity (memote/cobra)
            group.setMetaId("meta_" + 'G_' + keys[i])
            group.setKind('partonomy')
            group.setSBOTerm("SBO:0000633")  # NAME
            add_cv_term_pathways(keys[i], 'KEGG', group)
            for reac in pathway_groups[keys[i]]:
                if group.getMemberByIdRef(reac) is None:
                    member = group.createMember()
                    member.setIdRef(reac)

    return model


def kegg_pathways(modelpath: str) -> tuple[libModel, list[str]]:
    """Executes all steps to add KEGG pathways as groups

    Args:
        - modelpath (str): Path to GEM

    Returns:
        tuple: libSBML model (1) & List of reactions without KEGG Id (2)
            (1) libModel: Modified model with Pathways as groups
            (2) list: Ids of reactions without KEGG annotation
    """
    model = load_model_enable_groups(modelpath)

    reactions, non_kegg_reactions = extract_kegg_reactions(model)
    pathways = extract_kegg_pathways(reactions)
    pathway_groups = get_pathway_groups(pathways)

    model_pathways = add_kegg_pathways(model, pathways)
    model_pathway_groups = create_pathway_groups(
        model_pathways, pathway_groups)

    return model_pathway_groups, non_kegg_reactions
/refineGEMs-1.3.0.tar.gz/refineGEMs-1.3.0/refinegems/pathways.py
0.735167
0.288103
pathways.py
pypi
import math
from typing import Generic, TypeVar, Tuple, Any, Dict
from typing_extensions import TypeGuard, Literal, get_args
from numbers import Real

from .base import RefinementPredicate

_R = TypeVar("_R", bound=Real)
_0 = Literal[0]
_2 = Literal[2]

__all__ = [
    'Greater',
    'Less',
    'Modulo',
    'NonNan',
    'PositivePredicate',
    'NegativePredicate',
    'Divisible'
]


class Greater(Generic[_R], RefinementPredicate):
    """Predicate that checks if a number is strictly greater than a `Literal` threshold"""
    @staticmethod
    def type_guard(value: _R, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_R]:
        # args[0] is a Literal[...] type; unwrap its runtime value with get_args
        threshold = args[0]
        threshold_value = get_args(threshold)[0]
        return value > threshold_value


class Less(Generic[_R], RefinementPredicate):
    """Predicate that checks if a number is strictly less than a `Literal` threshold"""
    @staticmethod
    def type_guard(value: _R, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_R]:
        threshold = args[0]
        threshold_value = get_args(threshold)[0]
        return value < threshold_value


class Modulo(Generic[_R], RefinementPredicate):
    """Predicate that checks if a number is divisible by a `Literal` divisor"""
    @staticmethod
    def type_guard(value: _R, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_R]:
        divisor = args[0]
        divisor_value = get_args(divisor)[0]
        return value % divisor_value == 0


class NonNan(Generic[_R], RefinementPredicate):
    """Predicate that checks if a number is NOT NaN"""
    @staticmethod
    def type_guard(value: _R, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_R]:
        # BUGFIX: previously returned math.isnan(value), i.e. the guard only
        # accepted NaN — the exact opposite of what the class name promises.
        return not math.isnan(value)


class PositivePredicate(Generic[_R], RefinementPredicate):
    """Predicate that checks if a number is strictly positive (> 0)"""
    @staticmethod
    def type_guard(value: _R, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_R]:
        return Greater[_R].type_guard(value, _0)


class NegativePredicate(Generic[_R], RefinementPredicate):
    """Predicate that checks if a number is strictly negative (< 0)"""
    @staticmethod
    def type_guard(value: _R, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_R]:
        return Less[_R].type_guard(value, _0)


class Divisible(Generic[_R], RefinementPredicate):
    """Predicate that checks if a number is divisible (modulo the `Literal` threshold, defaulting to 0 here as in the original)"""
    @staticmethod
    def type_guard(value: _R, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_R]:
        # NOTE(review): forwarding _0 means 'value % 0', which raises
        # ZeroDivisionError for any value — confirm the intended divisor
        # (kept unchanged to preserve the original call shape).
        return Modulo[_R].type_guard(value, _0)
/predicates/numeric.py
0.796292
0.219463
numeric.py
pypi
from typing import Generic, TypeVar, Tuple, Any, Dict from typing_extensions import TypeGuard from csv import reader as CsvReader from ipaddress import ip_address from xml.etree import ElementTree from io import StringIO from .base import RefinementPredicate _S = TypeVar("_S", bound=str) __all__ = [ 'TrimmedPredicate', 'ValidIntPredicate', 'ValidFloatPredicate', 'XmlPredicate', 'CsvPredicate', 'IPv6Predicate', 'IPv4Predicate' ] class TrimmedPredicate(Generic[_S], RefinementPredicate): """Predicate that checks if a `str` has no leading or trailing whitespace""" @staticmethod def type_guard(value: _S, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_S]: return value.strip() == value class ValidIntPredicate(Generic[_S], RefinementPredicate): """Predicate that checks if a `str` is a parsable `int`""" @staticmethod def type_guard(value: _S, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_S]: try: _ = int(value) return True except: return False class ValidFloatPredicate(Generic[_S], RefinementPredicate): """Predicate that checks if a `str` is a parsable `float`""" @staticmethod def type_guard(value: _S, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_S]: try: _ = float(value) return True except: return False class XmlPredicate(Generic[_S], RefinementPredicate): """Predicate that checks if a `str` is well-formed XML""" @staticmethod def type_guard(value: _S, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_S]: try: with StringIO(value) as f: _ = ElementTree.parse(f) return True except: return False class CsvPredicate(Generic[_S], RefinementPredicate): """ Predicate that checks if a `str` is well-formed CSV. 
It uses a custom separator, which by default is ',' """ @staticmethod def type_guard(value: _S, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_S]: try: separator = args[0] except IndexError: separator = "," try: with StringIO(value) as f: for _ in CsvReader(f, delimiter=separator): pass return True except: return False class IPv4Predicate(Generic[_S], RefinementPredicate): """Predicate that checks if a `str` is a valid IPv4""" @staticmethod def type_guard(value: _S, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_S]: try: return ip_address(value).version == 4 except: return False class IPv6Predicate(Generic[_S], RefinementPredicate): """Predicate that checks if a `str` is a valid IPv6""" @staticmethod def type_guard(value: _S, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> TypeGuard[_S]: try: return ip_address(value).version == 6 except: return False
/predicates/string.py
0.895134
0.254084
string.py
pypi
<p align=center><strong>~Please note this is only a <em>beta</em> release at this stage~</strong></p> # RefineNet: high-res semantic image segmentation [![Best of ACRV Repository](https://img.shields.io/badge/collection-best--of--acrv-%23a31b2a)](https://roboticvision.org/best-of-acrv) ![Primary language](https://img.shields.io/github/languages/top/best-of-acrv/refinenet) [![PyPI package](https://img.shields.io/pypi/pyversions/refinenet)](https://pypi.org/project/refinenet/) [![Conda Version](https://img.shields.io/conda/vn/conda-forge/refinenet.svg)](https://anaconda.org/conda-forge/refinenet) [![Conda Recipe](https://img.shields.io/badge/recipe-refinenet-green.svg)](https://anaconda.org/conda-forge/refinenet) [![Conda Platforms](https://img.shields.io/conda/pn/conda-forge/refinenet.svg)](https://anaconda.org/conda-forge/refinenet) [![License](https://img.shields.io/github/license/best-of-acrv/refinenet)](./LICENSE.txt) RefineNet is a generic multi-path refinement network for high-resolution semantic image segmentation and general dense prediction tasks on images. It achieves high-resolution prediction by explicitly exploiting all the information available along the down-sampling process and using long-range residual connections. <p align="center"> <img alt="RefineNet sample image on PASCAL VOC dataset" src="https://github.com/best-of-acrv/refinenet/raw/develop/docs/refinenet_sample.png" /> </p> This repository contains an open-source implementation of RefineNet in Python, with both the official and lightweight network models from our publications. The package provides PyTorch implementations for using training, evaluation, and prediction in your own systems. The package is easily installable with `conda`, and can also be installed via `pip` if you'd prefer to manually handle dependencies. Our code is free to use, and licensed under BSD-3. We simply ask that you [cite our work](#citing-our-work) if you use RefineNet in your own research. 
[![@youtube RefineNet Results on the CityScapes dataset](https://github.com/best-of-acrv/refinenet/raw/develop/docs/refinenet_video.jpg)](https://www.youtube.com/watch?v=L0V6zmGP_oQ) ## Related resources This repository brings the work from a number of sources together. Please see the links below for further details: - our original paper: ["RefineNet: Multi-Path Refinement Networks for High-Resolution Semantic Segmentation"](#citing-our-work) - our paper introducing the lightweight version: ["Light-Weight RefineNet for Real-Time Semantic Segmentation"](#citing-out-work) - the original MATLAB implementation: [https://github.com/guosheng/refinenet](https://github.com/guosheng/refinenet) - Vladimir Nekrasov's PyTorch port of RefineNet: [https://github.com/DrSleep/refinenet-pytorch](https://github.com/DrSleep/refinenet-pytorch) - Vladimir Nekrasov's PyTorch port of lightweight RefineNet: [https://github.com/DrSleep/light-weight-refinenet](https://github.com/DrSleep/light-weight-refinenet) ## Installing RefineNet We offer three methods for installing RefineNet: 1. [Through our Conda package](#conda): single command installs everything including system dependencies (recommended) 2. [Through our pip package](#pip): single command installs RefineNet and Python dependences, you take care of system dependencies 3. [Directly from source](#from-source): allows easy editing and extension of our code, but you take care of building and all dependencies ### Conda The only requirement is that you have [Conda installed](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) on your system, and [NVIDIA drivers installed](https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&=Ubuntu&target_version=20.04&target_type=deb_network). 
We provide Conda packages through [Conda Forge](https://conda-forge.org/), which recommends adding their channel globally with strict priority: ``` conda config --add channels conda-forge conda config --set channel_priority strict ``` Once you have access to the `conda-forge` channel, RefineNet is installed by running the following from inside a [Conda environment](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html): ``` u@pc:~$ conda install refinenet ``` We don't explicitly lock the PyTorch installation to a CUDA-enabled version to maximise compatibility with our users' possible setups. If you wish to ensure a CUDA-enabled PyTorch is installed, please use the following installation line instead: ``` u@pc:~$ conda install pytorch=*=*cuda* refinenet ``` You can see a list of our Conda dependencies in the [RefineNet feedstock's recipe](https://github.com/conda-forge/refinenet-feedstock/blob/master/recipe/meta.yaml). ### Pip Before installing via `pip`, you must have the following system dependencies installed: - NVIDIA drivers - CUDA Then RefineNet, and all its Python dependencies can be installed via: ``` u@pc:~$ pip install refinenet ``` ### From source Installing from source is very similar to the `pip` method above due to RefineNet only containing Python code. Simply clone the repository, enter the directory, and install via `pip`: ``` u@pc:~$ pip install -e . ``` _Note: the editable mode flag (`-e`) is optional, but allows you to immediately use any changes you make to the code in your local Python ecosystem._ We also include scripts in the `./scripts` directory to support running RefineNet without any `pip` installation, but this workflow means you need to handle all system and Python dependencies manually. ## Using RefineNet RefineNet can be used either entirely from the command line, or through its Python API. Both call the same underlying implementation, and as such offer equivalent functionality. 
We provide both options to facilitate use across a wide range of applications. See below for details of each method. ### RefineNet from the command line When installed, either via `pip` or `conda`, a `refinenet` executable is made available on your system `PATH` (the scripts in the `./scripts` directory can be used as an alternative if not installing via a package manager). The `refinenet` executable provides access to all functionality, including training, evaluation, and prediction. See the `--help` flags for details on what the command line utility can do, and how it can be configured: ``` u@pc:~$ refinenet --help ``` ``` u@pc:~$ refinenet train --help ``` ``` u@pc:~$ refinenet evaluate --help ``` ``` u@pc:~$ refinenet predict --help ``` ### RefineNet Python API RefineNet can also be used like any other Python package through its API. The API consists of a `RefineNet` class with three main functions for training, evaluation, and prediction. Below are some examples to help get you started with RefineNet: ```python from refinenet import RefineNet # Initialise a full RefineNet network with no pre-trained model r = RefineNet() # Initialise a standard RefineNet network with a model pre-trained on NYU r = RefineNet(model_type='full', load_pretrained='nyu') # Initialise a lightweight RefineNet network with 40 classes r = RefineNet(model='lightweight', num_classes=40) # Load a previous snapshot from a 152 layer network r = RefineNet(load_snapshot='/path/to/snapshot', num_resnet_layers=152) # Train a new model on the NYU dataset with a custom learning rate r.train('nyu', learning_rate=0.0005) # Train a model with the adam optimiser & 8 workers, saving output to ~/output r.train('voc', optimiser_type='adam', num_workers=8, output_directory='~/output') # Get a predicted segmentation as a NumPy image, given an input NumPy image segmentation_image = r.predict(image=my_image) # Save a segmentation image to file, given an image from another image file 
r.predict(image_file='/my/prediction.jpg', output_file='/my/segmentation/image.jpg') # Evaluate your model's performance on the voc dataset, & save the results with # images r.evaluate('voc', output_directory='/my/results.json', output_images=True) ``` ## Citing our work If using RefineNet in your work, please cite [our original CVPR paper](https://openaccess.thecvf.com/content_cvpr_2017/papers/Lin_RefineNet_Multi-Path_Refinement_CVPR_2017_paper.pdf): ```bibtex @InProceedings{Lin_2017_CVPR, author = {Lin, Guosheng and Milan, Anton and Shen, Chunhua and Reid, Ian}, title = {RefineNet: Multi-Path Refinement Networks for High-Resolution Semantic Segmentation}, booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {July}, year = {2017} } ``` Please also cite [our BMVC paper](http://bmvc2018.org/contents/papers/0494.pdf) on Light-Weight RefineNet if using the lightweight models: ```bibtex @article{nekrasov2018light, title={Light-weight refinenet for real-time semantic segmentation}, author={Nekrasov, Vladimir and Shen, Chunhua and Reid, Ian}, journal={arXiv preprint arXiv:1810.03272}, year={2018} } ```
/refinenet-0.9.4.tar.gz/refinenet-0.9.4/README.md
0.84572
0.923385
README.md
pypi
from torch import Tensor, arange, device as Device, dtype as DType from refiners.fluxion.layers import ( ApproximateGeLU, GeLU, Linear, LayerNorm, Embedding, Chain, Sum, SelfAttention, Lambda, Residual, ) from refiners.foundationals.clip.tokenizer import CLIPTokenizer class PositionalTokenEncoder(Sum): structural_attrs = ["vocabulary_size", "positional_embedding_dim"] def __init__( self, vocabulary_size: int, embedding_dim: int, positional_embedding_dim: int, device: Device | str | None = None, dtype: DType | None = None, ): self.vocabulary_size = vocabulary_size self.positional_embedding_dim = positional_embedding_dim super().__init__( Embedding( num_embeddings=vocabulary_size, embedding_dim=embedding_dim, device=device, dtype=dtype, ), Chain( Lambda(self.get_position_ids), Embedding( num_embeddings=positional_embedding_dim, embedding_dim=embedding_dim, device=device, dtype=dtype, ), ), ) @property def position_ids(self) -> Tensor: return arange(self.positional_embedding_dim, device=self.device).reshape(1, -1) def get_position_ids(self, x: Tensor) -> Tensor: return self.position_ids[:, : x.shape[1]] class FeedForward(Chain): structural_attrs = ["embedding_dim", "feedforward_dim"] def __init__( self, embedding_dim: int, feedforward_dim: int, device: Device | str | None = None, dtype: DType | None = None, ) -> None: self.embedding_dim = embedding_dim self.feedforward_dim = feedforward_dim super().__init__( Linear(in_features=embedding_dim, out_features=feedforward_dim, device=device, dtype=dtype), GeLU(), Linear(in_features=feedforward_dim, out_features=embedding_dim, device=device, dtype=dtype), ) class TransformerLayer(Chain): structural_attrs = ["embedding_dim", "num_attention_heads", "feedforward_dim", "layer_norm_eps"] def __init__( self, embedding_dim: int, feedforward_dim: int, num_attention_heads: int = 1, layer_norm_eps: float = 1e-5, device: Device | str | None = None, dtype: DType | None = None, ) -> None: self.embedding_dim = embedding_dim 
self.num_attention_heads = num_attention_heads self.feedforward_dim = feedforward_dim self.layer_norm_eps = layer_norm_eps super().__init__( Residual( LayerNorm( normalized_shape=embedding_dim, eps=layer_norm_eps, device=device, dtype=dtype, ), SelfAttention( embedding_dim=embedding_dim, num_heads=num_attention_heads, is_causal=True, device=device, dtype=dtype, ), ), Residual( LayerNorm( normalized_shape=embedding_dim, eps=layer_norm_eps, device=device, dtype=dtype, ), FeedForward( embedding_dim=embedding_dim, feedforward_dim=feedforward_dim, device=device, dtype=dtype, ), ), ) class CLIPTextEncoder(Chain): structural_attrs = [ "embedding_dim", "positional_embedding_dim", "vocabulary_size", "num_layers", "num_attention_heads", "feedforward_dim", "layer_norm_eps", "tokenizer", ] def __init__( self, embedding_dim: int = 768, positional_embedding_dim: int = 77, vocabulary_size: int = 49408, num_layers: int = 12, num_attention_heads: int = 12, feedforward_dim: int = 3072, layer_norm_eps: float = 1e-5, device: Device | str | None = None, dtype: DType | None = None, ): self.embedding_dim = embedding_dim self.positional_embedding_dim = positional_embedding_dim self.vocabulary_size = vocabulary_size self.num_layers = num_layers self.num_attention_heads = num_attention_heads self.feedforward_dim = feedforward_dim self.layer_norm_eps = layer_norm_eps self.tokenizer = CLIPTokenizer() super().__init__( PositionalTokenEncoder( vocabulary_size=vocabulary_size, embedding_dim=embedding_dim, positional_embedding_dim=positional_embedding_dim, device=device, dtype=dtype, ), *( TransformerLayer( embedding_dim=embedding_dim, num_attention_heads=num_attention_heads, feedforward_dim=feedforward_dim, layer_norm_eps=layer_norm_eps, device=device, dtype=dtype, ) for _ in range(num_layers) ), LayerNorm(normalized_shape=embedding_dim, eps=layer_norm_eps, device=device, dtype=dtype), ) def encode(self, text: str) -> Tensor: tokens = self.tokenizer(text, 
sequence_length=self.positional_embedding_dim).to(self.device) return self(tokens) @property def unconditional_text_embedding(self) -> Tensor: return self.encode("") class CLIPTextEncoderL(CLIPTextEncoder): """ CLIPTextEncoderL is the CLIP text encoder with the following parameters: embedding_dim=768 num_layers=12 num_attention_heads=12 feedforward_dim=3072 We replace the GeLU activation function with an approximate GeLU to comply with the original CLIP implementation of OpenAI (https://github.com/openai/CLIP/blob/main/clip/model.py#L166) """ def __init__(self, device: Device | str | None = None, dtype: DType | None = None) -> None: super().__init__( embedding_dim=768, num_layers=12, num_attention_heads=12, feedforward_dim=3072, device=device, dtype=dtype, ) for gelu, parent in self.walk(lambda m, _: isinstance(m, GeLU)): parent.replace(old_module=gelu, new_module=ApproximateGeLU()) class CLIPTextEncoderH(CLIPTextEncoder): """ CLIPTextEncoderH is the CLIP text encoder with the following parameters: embedding_dim=1024 num_layers=23 num_attention_heads=16 feedforward_dim=4096 """ def __init__(self, device: Device | str | None = None, dtype: DType | None = None) -> None: super().__init__( embedding_dim=1024, num_layers=23, num_attention_heads=16, feedforward_dim=4096, device=device, dtype=dtype, ) class CLIPTextEncoderG(CLIPTextEncoder): """ CLIPTextEncoderG is the CLIP text encoder with the following parameters: embedding_dim=1280 num_layers=32 num_attention_heads=16 feedforward_dim=5120 """ def __init__(self, device: Device | str | None = None, dtype: DType | None = None) -> None: super().__init__( embedding_dim=1280, num_layers=32, num_attention_heads=20, feedforward_dim=5120, device=device, dtype=dtype, )
/foundationals/clip/text_encoder.py
0.96225
0.425725
text_encoder.py
pypi
import gzip from pathlib import Path from functools import lru_cache from itertools import islice import re from torch import Tensor, tensor from refiners.fluxion import pad class CLIPTokenizer: def __init__( self, vocabulary_path: str | Path = Path(__file__).resolve().parent / "bpe_simple_vocab_16e6.txt.gz", ): self.vocabulary_path = vocabulary_path self.byte_to_unicode_mapping = self.get_bytes_to_unicode_mapping() self.byte_decoder = {v: k for k, v in self.byte_to_unicode_mapping.items()} merge_tuples = [ tuple(merge.split()) for merge in gzip.open(vocabulary_path).read().decode("utf-8").split("\n")[1 : 49152 - 256 - 2 + 1] ] vocabulary = ( list(self.byte_to_unicode_mapping.values()) + [v + "</w>" for v in self.byte_to_unicode_mapping.values()] + ["".join(merge) for merge in merge_tuples] + ["", ""] ) self.token_to_id_mapping = {token: i for i, token in enumerate(vocabulary)} self.byte_pair_encoding_ranks = {merge: i for i, merge in enumerate(merge_tuples)} self.byte_pair_encoding_cache = {"": ""} # Note: this regular expression does not support Unicode. It was changed so # to get rid of the dependence on the `regex` module. Unicode support could # potentially be added back by leveraging the `\w` character class. 
self.token_pattern = re.compile( r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[a-zA-Z]+|[0-9]|[^\s\w]+""", re.IGNORECASE, ) self.start_of_text_token_id: int = 49406 self.end_of_text_token_id: int = 49407 def __call__(self, text: str, sequence_length: int) -> Tensor: tokens = self.encode(text=text, max_length=sequence_length).unsqueeze(0) assert ( tokens.shape[1] <= sequence_length ), f"Text is too long: tokens.shape[1] > sequence_length: {tokens.shape[1]} > {sequence_length}" return pad(tokens, (0, sequence_length - tokens.shape[1]), value=self.end_of_text_token_id) @lru_cache() def get_bytes_to_unicode_mapping(self) -> dict[int, str]: initial_byte_values = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) extra_unicode_values = (byte for byte in range(2**8) if byte not in initial_byte_values) byte_values = initial_byte_values + list(extra_unicode_values) unicode_values = [chr(value) for value in byte_values] return dict(zip(byte_values, unicode_values)) def byte_pair_encoding(self, token: str) -> str: if token in self.byte_pair_encoding_cache: return self.byte_pair_encoding_cache[token] def recursive_bpe(word: tuple[str, ...]) -> tuple[str, ...]: if len(word) < 2: return word pairs = {(i, (word[i], word[i + 1])) for i in range(len(word) - 1)} min_pair = min( pairs, key=lambda pair: self.byte_pair_encoding_ranks.get(pair[1], float("inf")), ) if min_pair[1] not in self.byte_pair_encoding_ranks: return word new_word: list[str] = [] i = 0 while i < len(word): if i == min_pair[0]: new_word.append(min_pair[1][0] + min_pair[1][1]) i += 2 else: new_word.append(word[i]) i += 1 return recursive_bpe(tuple(new_word)) word = tuple(token[:-1]) + (token[-1] + "</w>",) result = " ".join(recursive_bpe(word)) self.byte_pair_encoding_cache[token] = result return result def encode(self, text: str, max_length: int | None = None) -> Tensor: text = re.sub(r"\s+", " ", text.lower()) tokens = 
re.findall(self.token_pattern, text) upper_bound = None if max_length: assert max_length >= 2 upper_bound = max_length - 2 encoded_tokens = islice( ( self.token_to_id_mapping[subtoken] for token in tokens for subtoken in self.byte_pair_encoding( "".join(self.byte_to_unicode_mapping[character] for character in token.encode("utf-8")) ).split(" ") ), 0, upper_bound, ) return tensor([self.start_of_text_token_id, *encoded_tokens, self.end_of_text_token_id])
/foundationals/clip/tokenizer.py
0.667581
0.436682
tokenizer.py
pypi
from typing import cast, Iterable from torch import Tensor, device as Device, dtype as DType from refiners.fluxion.context import Contexts import refiners.fluxion.layers as fl from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d from refiners.adapters.range_adapter import RangeEncoder, RangeAdapter2d class TimestepEncoder(fl.Passthrough): def __init__( self, context_key: str = "timestep_embedding", device: Device | str | None = None, dtype: DType | None = None, ) -> None: super().__init__( fl.UseContext("diffusion", "timestep"), RangeEncoder(320, 1280, device=device, dtype=dtype), fl.SetContext("range_adapter", context_key), ) class ResidualBlock(fl.Sum): structural_attrs = ["in_channels", "out_channels", "num_groups", "eps"] def __init__( self, in_channels: int, out_channels: int, num_groups: int = 32, eps: float = 1e-5, device: Device | str | None = None, dtype: DType | None = None, ) -> None: if in_channels % num_groups != 0 or out_channels % num_groups != 0: raise ValueError("Number of input and output channels must be divisible by num_groups.") self.in_channels = in_channels self.out_channels = out_channels self.num_groups = num_groups self.eps = eps shortcut = ( fl.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, device=device, dtype=dtype) if in_channels != out_channels else fl.Identity() ) super().__init__( fl.Chain( fl.GroupNorm(channels=in_channels, num_groups=num_groups, eps=eps, device=device, dtype=dtype), fl.SiLU(), fl.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1, device=device, dtype=dtype, ), fl.GroupNorm(channels=out_channels, num_groups=num_groups, eps=eps, device=device, dtype=dtype), fl.SiLU(), fl.Conv2d( in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1, device=device, dtype=dtype, ), ), shortcut, ) class CLIPLCrossAttention(CrossAttentionBlock2d): def __init__( self, channels: int, device: Device | str | None = 
None, dtype: DType | None = None, ) -> None: super().__init__( channels=channels, context_embedding_dim=768, context_key="clip_text_embedding", num_attention_heads=8, use_bias=False, device=device, dtype=dtype, ) class DownBlocks(fl.Chain): structural_attrs = ["in_channels"] def __init__( self, in_channels: int, device: Device | str | None = None, dtype: DType | None = None, ): self.in_channels = in_channels super().__init__( fl.Chain( fl.Conv2d( in_channels=in_channels, out_channels=320, kernel_size=3, padding=1, device=device, dtype=dtype ) ), fl.Chain( ResidualBlock(in_channels=320, out_channels=320, device=device, dtype=dtype), CLIPLCrossAttention(channels=320, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=320, out_channels=320, device=device, dtype=dtype), CLIPLCrossAttention(channels=320, device=device, dtype=dtype), ), fl.Chain(fl.Downsample(channels=320, scale_factor=2, padding=1, device=device, dtype=dtype)), fl.Chain( ResidualBlock(in_channels=320, out_channels=640, device=device, dtype=dtype), CLIPLCrossAttention(channels=640, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=640, out_channels=640, device=device, dtype=dtype), CLIPLCrossAttention(channels=640, device=device, dtype=dtype), ), fl.Chain(fl.Downsample(channels=640, scale_factor=2, padding=1, device=device, dtype=dtype)), fl.Chain( ResidualBlock(in_channels=640, out_channels=1280, device=device, dtype=dtype), CLIPLCrossAttention(channels=1280, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=1280, out_channels=1280, device=device, dtype=dtype), CLIPLCrossAttention(channels=1280, device=device, dtype=dtype), ), fl.Chain(fl.Downsample(channels=1280, scale_factor=2, padding=1, device=device, dtype=dtype)), fl.Chain( ResidualBlock(in_channels=1280, out_channels=1280, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=1280, out_channels=1280, device=device, dtype=dtype), ), ) class UpBlocks(fl.Chain): def __init__( self, 
device: Device | str | None = None, dtype: DType | None = None, ) -> None: super().__init__( fl.Chain( ResidualBlock(in_channels=2560, out_channels=1280, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=2560, out_channels=1280, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=2560, out_channels=1280, device=device, dtype=dtype), fl.Upsample(channels=1280, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=2560, out_channels=1280, device=device, dtype=dtype), CLIPLCrossAttention(channels=1280, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=2560, out_channels=1280, device=device, dtype=dtype), CLIPLCrossAttention(channels=1280, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=1920, out_channels=1280, device=device, dtype=dtype), CLIPLCrossAttention(channels=1280, device=device, dtype=dtype), fl.Upsample(channels=1280, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=1920, out_channels=640, device=device, dtype=dtype), CLIPLCrossAttention(channels=640, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=1280, out_channels=640, device=device, dtype=dtype), CLIPLCrossAttention(channels=640, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=960, out_channels=640, device=device, dtype=dtype), CLIPLCrossAttention(channels=640, device=device, dtype=dtype), fl.Upsample(channels=640, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=960, out_channels=320, device=device, dtype=dtype), CLIPLCrossAttention(channels=320, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=640, out_channels=320, device=device, dtype=dtype), CLIPLCrossAttention(channels=320, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=640, out_channels=320, device=device, dtype=dtype), CLIPLCrossAttention(channels=320, device=device, dtype=dtype), ), ) class MiddleBlock(fl.Chain): def __init__(self, device: 
Device | str | None = None, dtype: DType | None = None) -> None: super().__init__( ResidualBlock(in_channels=1280, out_channels=1280, device=device, dtype=dtype), CLIPLCrossAttention(channels=1280, device=device, dtype=dtype), ResidualBlock(in_channels=1280, out_channels=1280, device=device, dtype=dtype), ) class ResidualAccumulator(fl.Passthrough): structural_attrs = ["n"] def __init__(self, n: int) -> None: self.n = n super().__init__( fl.Residual( fl.UseContext(context="unet", key="residuals").compose(func=lambda residuals: residuals[self.n]) ), fl.SetContext(context="unet", key="residuals", callback=self.update), ) def update(self, residuals: list[Tensor | float], x: Tensor) -> None: residuals[self.n] = x class ResidualConcatenator(fl.Chain): structural_attrs = ["n"] def __init__(self, n: int) -> None: self.n = n super().__init__( fl.Concatenate( fl.Identity(), fl.UseContext(context="unet", key="residuals").compose(lambda residuals: residuals[self.n]), dim=1, ), ) class UNet(fl.Chain): structural_attrs = ["in_channels", "clip_embedding_dim"] def __init__( self, in_channels: int, clip_embedding_dim: int, device: Device | str | None = None, dtype: DType | None = None, ): self.in_channels = in_channels self.clip_embedding_dim = clip_embedding_dim super().__init__( TimestepEncoder(device=device, dtype=dtype), DownBlocks(in_channels=in_channels, device=device, dtype=dtype), fl.Sum( fl.UseContext(context="unet", key="residuals").compose(lambda x: x[-1]), MiddleBlock(device=device, dtype=dtype), ), UpBlocks(), fl.Chain( fl.GroupNorm(channels=320, num_groups=32, device=device, dtype=dtype), fl.SiLU(), fl.Conv2d( in_channels=320, out_channels=4, kernel_size=3, stride=1, padding=1, device=device, dtype=dtype, ), ), ) for residual_block in self.layers(ResidualBlock): chain = residual_block.Chain range_adapter = RangeAdapter2d( target=chain.Conv2d_1, channels=residual_block.out_channels, embedding_dim=1280, context_key="timestep_embedding", device=device, dtype=dtype, ) 
range_adapter.inject(chain) for n, block in enumerate(cast(Iterable[fl.Chain], self.DownBlocks)): block.append(ResidualAccumulator(n)) for n, block in enumerate(cast(Iterable[fl.Chain], self.UpBlocks)): block.insert(0, ResidualConcatenator(-n - 2)) def init_context(self) -> Contexts: return { "unet": {"residuals": [0.0] * 13}, "diffusion": {"timestep": None}, "range_adapter": {"timestep_embedding": None}, "sampling": {"shapes": []}, } def set_clip_text_embedding(self, clip_text_embedding: Tensor) -> None: self.set_context("cross_attention_block", {"clip_text_embedding": clip_text_embedding}) def set_timestep(self, timestep: Tensor) -> None: self.set_context("diffusion", {"timestep": timestep})
/foundationals/latent_diffusion/unet.py
0.919113
0.30549
unet.py
pypi
from enum import Enum from pathlib import Path from torch import Tensor, device as Device from torch.nn import Parameter as TorchParameter from refiners.adapters.lora import LoraAdapter, load_lora_weights from refiners.foundationals.clip.text_encoder import FeedForward, TransformerLayer from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d from refiners.foundationals.latent_diffusion import StableDiffusion_1 from refiners.foundationals.latent_diffusion.controlnet import Controlnet import refiners.fluxion.layers as fl from refiners.fluxion.utils import load_from_safetensors, load_metadata_from_safetensors class LoraTarget(str, Enum): Self = "self" Attention = "Attention" SelfAttention = "SelfAttention" CrossAttention = "CrossAttentionBlock2d" FeedForward = "FeedForward" TransformerLayer = "TransformerLayer" def get_class(self) -> type[fl.Chain]: match self: case LoraTarget.Self: return fl.Chain case LoraTarget.Attention: return fl.Attention case LoraTarget.SelfAttention: return fl.SelfAttention case LoraTarget.CrossAttention: return CrossAttentionBlock2d case LoraTarget.FeedForward: return FeedForward case LoraTarget.TransformerLayer: return TransformerLayer def get_lora_rank(weights: list[Tensor]) -> int: ranks: set[int] = {w.shape[1] for w in weights[0::2]} assert len(ranks) == 1 return ranks.pop() def apply_loras_to_target(module: fl.Chain, target: LoraTarget, rank: int, scale: float) -> None: for layer in module.layers(layer_type=target.get_class()): for linear, parent in layer.walk(fl.Linear): adapter = LoraAdapter( target=linear, rank=rank, scale=scale, device=module.device, dtype=module.dtype, ) adapter.inject(parent) class LoraWeights: """A single LoRA weights training checkpoint used to patch a Stable Diffusion 1.5 model.""" metadata: dict[str, str] | None tensors: dict[str, Tensor] def __init__(self, checkpoint_path: Path | str, device: Device | str): self.metadata = load_metadata_from_safetensors(checkpoint_path) 
self.tensors = load_from_safetensors(checkpoint_path, device=device) def patch(self, sd: StableDiffusion_1, scale: float = 1.0) -> None: assert self.metadata is not None, "Invalid safetensors checkpoint: missing metadata" for meta_key, meta_value in self.metadata.items(): match meta_key: case "unet_targets": # TODO: support this transparently if any([isinstance(module, Controlnet) for module in sd.unet]): raise NotImplementedError("Cannot patch a UNet which already contains a Controlnet adapter") model = sd.unet key_prefix = "unet." case "text_encoder_targets": model = sd.clip_text_encoder key_prefix = "text_encoder." case "lda_targets": model = sd.lda key_prefix = "lda." case _: raise ValueError(f"Unexpected key in checkpoint metadata: {meta_key}") # TODO(FG-487): support loading multiple LoRA-s if any(model.layers(LoraAdapter)): raise NotImplementedError(f"{model.__class__.__name__} already contains LoRA layers") lora_weights = [w for w in [self.tensors[k] for k in sorted(self.tensors) if k.startswith(key_prefix)]] assert len(lora_weights) % 2 == 0 rank = get_lora_rank(lora_weights) for target in meta_value.split(","): apply_loras_to_target(model, target=LoraTarget(target), rank=rank, scale=scale) assert len(list(model.layers(LoraAdapter))) == (len(lora_weights) // 2) load_lora_weights(model, [TorchParameter(w) for w in lora_weights])
/foundationals/latent_diffusion/lora.py
0.823186
0.45532
lora.py
pypi
from typing import cast from torch import Tensor, device as Device, dtype as DType from refiners.fluxion.context import Contexts import refiners.fluxion.layers as fl from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d from refiners.foundationals.latent_diffusion.unet import ResidualAccumulator, ResidualBlock, ResidualConcatenator from refiners.adapters.range_adapter import RangeAdapter2d, RangeEncoder, compute_sinusoidal_embedding class TextTimeEmbedding(fl.Chain): structural_attrs = ["timestep_embedding_dim", "time_ids_embedding_dim", "text_time_embedding_dim"] def __init__(self, device: Device | str | None = None, dtype: DType | None = None) -> None: self.timestep_embedding_dim = 1280 self.time_ids_embedding_dim = 256 self.text_time_embedding_dim = 2816 super().__init__( fl.Concatenate( fl.UseContext(context="diffusion", key="pooled_text_embedding"), fl.Chain( fl.UseContext(context="diffusion", key="time_ids"), fl.Unsqueeze(dim=-1), fl.Lambda(func=self.compute_sinuosoidal_embedding), fl.Reshape(-1), ), dim=1, ), fl.Linear( in_features=self.text_time_embedding_dim, out_features=self.timestep_embedding_dim, device=device, dtype=dtype, ), fl.SiLU(), fl.Linear( in_features=self.timestep_embedding_dim, out_features=self.timestep_embedding_dim, device=device, dtype=dtype, ), ) def compute_sinuosoidal_embedding(self, x: Tensor) -> Tensor: return compute_sinusoidal_embedding(x=x, embedding_dim=self.time_ids_embedding_dim).to(dtype=self.dtype) class TimestepEncoder(fl.Passthrough): structural_attrs = ["timestep_embedding_dim"] def __init__(self, device: Device | str | None = None, dtype: DType | None = None) -> None: self.timestep_embedding_dim = 1280 super().__init__( fl.Sum( fl.Chain( fl.UseContext(context="diffusion", key="timestep"), RangeEncoder( sinuosidal_embedding_dim=320, embedding_dim=self.timestep_embedding_dim, device=device, dtype=dtype, ), ), TextTimeEmbedding(device=device, dtype=dtype), ), 
fl.SetContext(context="range_adapter", key="timestep_embedding"), ) class SDXLCrossAttention(CrossAttentionBlock2d): structural_attrs = ["channels", "num_attention_layers", "num_attention_heads"] def __init__( self, channels: int, num_attention_layers: int = 1, num_attention_heads: int = 10, device: Device | str | None = None, dtype: DType | None = None, ) -> None: super().__init__( channels=channels, context_embedding_dim=2048, context_key="clip_text_embedding", num_attention_layers=num_attention_layers, num_attention_heads=num_attention_heads, use_bias=False, use_linear_projection=True, device=device, dtype=dtype, ) class DownBlocks(fl.Chain): structural_attrs = ["in_channels"] def __init__(self, in_channels: int, device: Device | str | None = None, dtype: DType | None = None) -> None: self.in_channels = in_channels in_block = fl.Chain( fl.Conv2d(in_channels=in_channels, out_channels=320, kernel_size=3, padding=1, device=device, dtype=dtype) ) first_blocks = [ fl.Chain( ResidualBlock(in_channels=320, out_channels=320, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=320, out_channels=320, device=device, dtype=dtype), ), fl.Chain( fl.Downsample(channels=320, scale_factor=2, padding=1, device=device, dtype=dtype), ), ] second_blocks = [ fl.Chain( ResidualBlock(in_channels=320, out_channels=640, device=device, dtype=dtype), SDXLCrossAttention( channels=640, num_attention_layers=2, num_attention_heads=10, device=device, dtype=dtype ), ), fl.Chain( ResidualBlock(in_channels=640, out_channels=640, device=device, dtype=dtype), SDXLCrossAttention( channels=640, num_attention_layers=2, num_attention_heads=10, device=device, dtype=dtype ), ), fl.Chain( fl.Downsample(channels=640, scale_factor=2, padding=1, device=device, dtype=dtype), ), ] third_blocks = [ fl.Chain( ResidualBlock(in_channels=640, out_channels=1280, device=device, dtype=dtype), SDXLCrossAttention( channels=1280, num_attention_layers=10, num_attention_heads=20, device=device, dtype=dtype 
), ), fl.Chain( ResidualBlock(in_channels=1280, out_channels=1280, device=device, dtype=dtype), SDXLCrossAttention( channels=1280, num_attention_layers=10, num_attention_heads=20, device=device, dtype=dtype ), ), ] super().__init__( in_block, *first_blocks, *second_blocks, *third_blocks, ) class UpBlocks(fl.Chain): structural_attrs = [] def __init__(self, device: Device | str | None = None, dtype: DType | None = None) -> None: first_blocks = [ fl.Chain( ResidualBlock(in_channels=2560, out_channels=1280, device=device, dtype=dtype), SDXLCrossAttention( channels=1280, num_attention_layers=10, num_attention_heads=20, device=device, dtype=dtype ), ), fl.Chain( ResidualBlock(in_channels=2560, out_channels=1280, device=device, dtype=dtype), SDXLCrossAttention( channels=1280, num_attention_layers=10, num_attention_heads=20, device=device, dtype=dtype ), ), fl.Chain( ResidualBlock(in_channels=1920, out_channels=1280, device=device, dtype=dtype), SDXLCrossAttention( channels=1280, num_attention_layers=10, num_attention_heads=20, device=device, dtype=dtype ), fl.Upsample(channels=1280, device=device, dtype=dtype), ), ] second_blocks = [ fl.Chain( ResidualBlock(in_channels=1920, out_channels=640, device=device, dtype=dtype), SDXLCrossAttention( channels=640, num_attention_layers=2, num_attention_heads=10, device=device, dtype=dtype ), ), fl.Chain( ResidualBlock(in_channels=1280, out_channels=640, device=device, dtype=dtype), SDXLCrossAttention( channels=640, num_attention_layers=2, num_attention_heads=10, device=device, dtype=dtype ), ), fl.Chain( ResidualBlock(in_channels=960, out_channels=640, device=device, dtype=dtype), SDXLCrossAttention( channels=640, num_attention_layers=2, num_attention_heads=10, device=device, dtype=dtype ), fl.Upsample(channels=640, device=device, dtype=dtype), ), ] third_blocks = [ fl.Chain( ResidualBlock(in_channels=960, out_channels=320, device=device, dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=640, out_channels=320, device=device, 
dtype=dtype), ), fl.Chain( ResidualBlock(in_channels=640, out_channels=320, device=device, dtype=dtype), ), ] super().__init__( *first_blocks, *second_blocks, *third_blocks, ) class MiddleBlock(fl.Chain): structural_attrs = [] def __init__(self, device: Device | str | None = None, dtype: DType | None = None) -> None: super().__init__( ResidualBlock(in_channels=1280, out_channels=1280, device=device, dtype=dtype), SDXLCrossAttention( channels=1280, num_attention_layers=10, num_attention_heads=20, device=device, dtype=dtype ), ResidualBlock(in_channels=1280, out_channels=1280, device=device, dtype=dtype), ) class OutputBlock(fl.Chain): structural_attrs = [] def __init__(self, device: Device | str | None = None, dtype: DType | None = None) -> None: super().__init__( fl.GroupNorm(channels=320, num_groups=32), fl.SiLU(), fl.Conv2d(in_channels=320, out_channels=4, kernel_size=3, stride=1, padding=1, device=device, dtype=dtype), ) class SDXLUNet(fl.Chain): structural_attrs = ["in_channels"] def __init__(self, in_channels: int, device: Device | str | None = None, dtype: DType | None = None) -> None: self.in_channels = in_channels super().__init__( TimestepEncoder(device=device, dtype=dtype), DownBlocks(in_channels=in_channels, device=device, dtype=dtype), MiddleBlock(device=device, dtype=dtype), fl.Residual(fl.UseContext(context="unet", key="residuals").compose(lambda x: x[-1])), UpBlocks(device=device, dtype=dtype), OutputBlock(device=device, dtype=dtype), ) for residual_block in self.layers(ResidualBlock): chain = residual_block.Chain range_adapter = RangeAdapter2d( target=chain.Conv2d_1, channels=residual_block.out_channels, embedding_dim=1280, context_key="timestep_embedding", device=device, dtype=dtype, ) range_adapter.inject(chain) for n, block in enumerate(iterable=cast(list[fl.Chain], self.DownBlocks)): block.append(module=ResidualAccumulator(n=n)) for n, block in enumerate(iterable=cast(list[fl.Chain], self.UpBlocks)): block.insert(index=0, 
module=ResidualConcatenator(n=-n - 2)) def init_context(self) -> Contexts: return { "unet": {"residuals": [0.0] * 10}, "diffusion": {"timestep": None, "time_ids": None, "pooled_text_embedding": None}, "range_adapter": {"timestep_embedding": None}, "sampling": {"shapes": []}, } def set_clip_text_embedding(self, clip_text_embedding: Tensor) -> None: self.set_context(context="cross_attention_block", value={"clip_text_embedding": clip_text_embedding}) def set_timestep(self, timestep: Tensor) -> None: self.set_context(context="diffusion", value={"timestep": timestep}) def set_time_ids(self, time_ids: Tensor) -> None: self.set_context(context="diffusion", value={"time_ids": time_ids}) def set_pooled_text_embedding(self, pooled_text_embedding: Tensor) -> None: self.set_context(context="diffusion", value={"pooled_text_embedding": pooled_text_embedding})
/foundationals/latent_diffusion/sdxl_unet.py
0.94625
0.46794
sdxl_unet.py
pypi
from refiners.fluxion.context import Contexts from refiners.fluxion.layers import ( Chain, Conv2d, GroupNorm, Identity, SiLU, Downsample, Upsample, Sum, SelfAttention2d, Slicing, ) from refiners.fluxion.utils import image_to_tensor, tensor_to_image from torch import Tensor, device as Device, dtype as DType from PIL import Image class Resnet(Sum): structural_attrs = ["in_channels", "out_channels"] def __init__( self, in_channels: int, out_channels: int, num_groups: int = 32, device: Device | str | None = None, dtype: DType | None = None, ): self.in_channels = in_channels self.out_channels = out_channels shortcut = ( Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, device=device, dtype=dtype) if in_channels != out_channels else Identity() ) super().__init__( shortcut, Chain( GroupNorm(channels=in_channels, num_groups=num_groups, device=device, dtype=dtype), SiLU(), Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1, device=device, dtype=dtype, ), GroupNorm(channels=out_channels, num_groups=num_groups, device=device, dtype=dtype), SiLU(), Conv2d( in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1, device=device, dtype=dtype, ), ), ) class Encoder(Chain): def __init__(self, device: Device | str | None = None, dtype: DType | None = None) -> None: resnet_sizes: list[int] = [128, 256, 512, 512, 512] input_channels: int = 3 latent_dim: int = 8 resnet_layers: list[Chain] = [ Chain( [ Resnet( in_channels=resnet_sizes[i - 1] if i > 0 else resnet_sizes[0], out_channels=resnet_sizes[i], device=device, dtype=dtype, ), Resnet(in_channels=resnet_sizes[i], out_channels=resnet_sizes[i], device=device, dtype=dtype), ] ) for i in range(len(resnet_sizes)) ] for _, layer in zip(range(3), resnet_layers): channels: int = layer[-1].out_channels # type: ignore layer.append(Downsample(channels=channels, scale_factor=2, device=device, dtype=dtype)) attention_layer = Sum( Identity(), Chain( 
GroupNorm(channels=resnet_sizes[-1], num_groups=32, eps=1e-6, device=device, dtype=dtype), SelfAttention2d(channels=resnet_sizes[-1], device=device, dtype=dtype), ), ) resnet_layers[-1].insert_after_type(Resnet, attention_layer) super().__init__( Conv2d( in_channels=input_channels, out_channels=resnet_sizes[0], kernel_size=3, padding=1, device=device, dtype=dtype, ), Chain(*resnet_layers), Chain( GroupNorm(channels=resnet_sizes[-1], num_groups=32, eps=1e-6, device=device, dtype=dtype), SiLU(), Conv2d( in_channels=resnet_sizes[-1], out_channels=latent_dim, kernel_size=3, padding=1, device=device, dtype=dtype, ), ), Chain( Conv2d(in_channels=8, out_channels=8, kernel_size=1, device=device, dtype=dtype), Slicing(dim=1, start=0, length=4), ), ) def init_context(self) -> Contexts: return {"sampling": {"shapes": []}} class Decoder(Chain): structural_attrs = ["resnet_sizes", "latent_dim", "output_channels"] def __init__(self, device: Device | str | None = None, dtype: DType | None = None) -> None: self.resnet_sizes: list[int] = [128, 256, 512, 512, 512] self.latent_dim: int = 4 self.output_channels: int = 3 resnet_sizes = self.resnet_sizes[::-1] resnet_layers: list[Chain] = [ ( Chain( [ Resnet( in_channels=resnet_sizes[i - 1] if i > 0 else resnet_sizes[0], out_channels=resnet_sizes[i], device=device, dtype=dtype, ), Resnet(in_channels=resnet_sizes[i], out_channels=resnet_sizes[i], device=device, dtype=dtype), Resnet(in_channels=resnet_sizes[i], out_channels=resnet_sizes[i], device=device, dtype=dtype), ] ) if i > 0 else Chain( [ Resnet(in_channels=resnet_sizes[0], out_channels=resnet_sizes[i], device=device, dtype=dtype), Resnet(in_channels=resnet_sizes[i], out_channels=resnet_sizes[i], device=device, dtype=dtype), ] ) ) for i in range(len(resnet_sizes)) ] attention_layer = Sum( Identity(), Chain( GroupNorm(channels=resnet_sizes[0], num_groups=32, eps=1e-6, device=device, dtype=dtype), SelfAttention2d(channels=resnet_sizes[0], device=device, dtype=dtype), ), ) 
resnet_layers[0].insert(1, attention_layer) for _, layer in zip(range(3), resnet_layers[1:]): channels: int = layer[-1].out_channels layer.insert(-1, Upsample(channels=channels, upsample_factor=2, device=device, dtype=dtype)) super().__init__( Conv2d( in_channels=self.latent_dim, out_channels=self.latent_dim, kernel_size=1, device=device, dtype=dtype ), Conv2d( in_channels=self.latent_dim, out_channels=resnet_sizes[0], kernel_size=3, padding=1, device=device, dtype=dtype, ), Chain(*resnet_layers), Chain( GroupNorm(channels=resnet_sizes[-1], num_groups=32, eps=1e-6, device=device, dtype=dtype), SiLU(), Conv2d( in_channels=resnet_sizes[-1], out_channels=self.output_channels, kernel_size=3, padding=1, device=device, dtype=dtype, ), ), ) class LatentDiffusionAutoencoder(Chain): structural_attrs = ["encoder_scale"] def __init__( self, device: Device | str | None = None, dtype: DType | None = None, ) -> None: self.encoder_scale: float = 0.18215 super().__init__( Encoder(device=device, dtype=dtype), Decoder(device=device, dtype=dtype), ) def encode(self, x: Tensor) -> Tensor: encoder = self[0] x = self.encoder_scale * encoder(x) return x def decode(self, x: Tensor) -> Tensor: decoder = self[1] x = decoder(x / self.encoder_scale) return x def encode_image(self, image: Image.Image) -> Tensor: x = image_to_tensor(image, device=self.device, dtype=self.dtype) x = 2 * x - 1 return self.encode(x) def decode_latents(self, x: Tensor) -> Image.Image: x = self.decode(x) x = (x + 1) / 2 return tensor_to_image(x)
/foundationals/latent_diffusion/auto_encoder.py
0.886586
0.200108
auto_encoder.py
pypi
from torch import Tensor, Size, device as Device, dtype as DType from refiners.fluxion.context import Contexts from refiners.fluxion.layers import ( Identity, Flatten, Unflatten, Transpose, Chain, Parallel, LayerNorm, Attention, Sum, UseContext, Linear, GLU, GeLU, GroupNorm, Conv2d, SelfAttention, SetContext, ) class CrossAttentionBlock(Chain): structural_attrs = ["embedding_dim", "context_embedding_dim", "context", "context_key", "num_heads", "use_bias"] def __init__( self, embedding_dim: int, context_embedding_dim: int, context_key: str, num_heads: int = 1, use_bias: bool = True, device: Device | str | None = None, dtype: DType | None = None, ) -> None: self.embedding_dim = embedding_dim self.context_embedding_dim = context_embedding_dim self.context = "cross_attention_block" self.context_key = context_key self.num_heads = num_heads self.use_bias = use_bias super().__init__( Sum( Identity(), Chain( LayerNorm(normalized_shape=embedding_dim, device=device, dtype=dtype), SelfAttention( embedding_dim=embedding_dim, num_heads=num_heads, use_bias=use_bias, device=device, dtype=dtype ), ), ), Sum( Identity(), Chain( LayerNorm(normalized_shape=embedding_dim, device=device, dtype=dtype), Parallel( Identity(), UseContext(context=self.context, key=context_key), UseContext(context=self.context, key=context_key), ), Attention( embedding_dim=embedding_dim, num_heads=num_heads, key_embedding_dim=context_embedding_dim, value_embedding_dim=context_embedding_dim, use_bias=use_bias, device=device, dtype=dtype, ), ), ), Sum( Identity(), Chain( LayerNorm(normalized_shape=embedding_dim, device=device, dtype=dtype), Linear(in_features=embedding_dim, out_features=2 * 4 * embedding_dim, device=device, dtype=dtype), GLU(GeLU()), Linear(in_features=4 * embedding_dim, out_features=embedding_dim, device=device, dtype=dtype), ), ), ) class StatefulFlatten(Chain): structural_attrs = ["start_dim", "end_dim"] def __init__(self, context: str, key: str, start_dim: int = 0, end_dim: int = -1) -> 
None: self.start_dim = start_dim self.end_dim = end_dim super().__init__( SetContext(context=context, key=key, callback=self.push), Flatten(start_dim=start_dim, end_dim=end_dim), ) def push(self, sizes: list[Size], x: Tensor) -> None: sizes.append( x.shape[slice(self.start_dim, self.end_dim + 1 if self.end_dim >= 0 else x.ndim + self.end_dim + 1)] ) class CrossAttentionBlock2d(Sum): structural_attrs = [ "channels", "in_channels", "out_channels", "context_embedding_dim", "num_attention_heads", "num_attention_layers", "num_groups", "context_key", "use_linear_projection", "projection_type", ] def __init__( self, channels: int, context_embedding_dim: int, context_key: str, num_attention_heads: int = 1, num_attention_layers: int = 1, num_groups: int = 32, use_bias: bool = True, use_linear_projection: bool = False, device: Device | str | None = None, dtype: DType | None = None, ) -> None: assert channels % num_attention_heads == 0, "in_channels must be divisible by num_attention_heads" self.channels = channels self.in_channels = channels self.out_channels = channels self.context_embedding_dim = context_embedding_dim self.num_attention_heads = num_attention_heads self.num_attention_layers = num_attention_layers self.num_groups = num_groups self.context_key = context_key self.use_linear_projection = use_linear_projection self.projection_type = "Linear" if use_linear_projection else "Conv2d" in_block = ( Chain( GroupNorm(channels=channels, num_groups=num_groups, eps=1e-6, affine=True, device=device, dtype=dtype), StatefulFlatten(context="flatten", key="sizes", start_dim=2), Transpose(1, 2), Linear(in_features=channels, out_features=channels, device=device, dtype=dtype), ) if use_linear_projection else Chain( GroupNorm(channels=channels, num_groups=num_groups, eps=1e-6, affine=True, device=device, dtype=dtype), Conv2d(in_channels=channels, out_channels=channels, kernel_size=1, device=device, dtype=dtype), StatefulFlatten(context="flatten", key="sizes", start_dim=2), 
Transpose(1, 2), ) ) out_block = ( Chain( Linear(in_features=channels, out_features=channels, device=device, dtype=dtype), Transpose(1, 2), Parallel( Identity(), UseContext(context="flatten", key="sizes").compose(lambda x: x.pop()), ), Unflatten(dim=2), ) if use_linear_projection else Chain( Transpose(1, 2), Parallel( Identity(), UseContext(context="flatten", key="sizes").compose(lambda x: x.pop()), ), Unflatten(dim=2), Conv2d(in_channels=channels, out_channels=channels, kernel_size=1, device=device, dtype=dtype), ) ) super().__init__( Identity(), Chain( in_block, Chain( CrossAttentionBlock( embedding_dim=channels, context_embedding_dim=context_embedding_dim, context_key=context_key, num_heads=num_attention_heads, use_bias=use_bias, device=device, dtype=dtype, ) for _ in range(num_attention_layers) ), out_block, ), ) def init_context(self) -> Contexts: return {"flatten": {"sizes": []}}
/foundationals/latent_diffusion/cross_attention.py
0.925289
0.427875
cross_attention.py
pypi
from refiners.fluxion.layers import ( Passthrough, Lambda, Chain, Concatenate, UseContext, SelfAttention, SetContext, Identity, Parallel, ) from refiners.adapters.adapter import Adapter from refiners.foundationals.latent_diffusion.unet import UNet from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock from torch import Tensor class SaveLayerNormAdapter(Chain, Adapter[SelfAttention]): def __init__(self, target: SelfAttention, context: str) -> None: self.context = context with self.setup_adapter(target): super().__init__(SetContext(self.context, "norm"), target) class ReferenceOnlyControlAdapter(Chain, Adapter[SelfAttention]): def __init__( self, target: SelfAttention, context: str, sai: "SelfAttentionInjection", ) -> None: self.context = context self._sai = [sai] # only to support setting `style_cfg` dynamically sa_guided = target.structural_copy() assert isinstance(sa_guided[0], Parallel) sa_guided.replace( sa_guided[0], Parallel( Identity(), Concatenate(Identity(), UseContext(self.context, "norm"), dim=1), Concatenate(Identity(), UseContext(self.context, "norm"), dim=1), ), ) with self.setup_adapter(target): super().__init__( Parallel(sa_guided, Chain(Lambda(lambda x: x[:1]), target)), Lambda(self.compute_averaged_unconditioned_x), ) def compute_averaged_unconditioned_x(self, x: Tensor, unguided_unconditioned_x: Tensor) -> Tensor: style_cfg = self._sai[0].style_cfg x[0] = style_cfg * x[0] + (1.0 - style_cfg) * unguided_unconditioned_x return x class SelfAttentionInjection(Passthrough): # TODO: Does not support batching yet. Assumes concatenated inputs for classifier-free guidance def __init__(self, unet: UNet, style_cfg: float = 0.5) -> None: # the style_cfg is the weight of the guide in unconditionned diffusion. # This value is recommended to be 0.5 on the sdwebui repo. 
self.style_cfg = style_cfg self._adapters: list[ReferenceOnlyControlAdapter] = [] self._unet = [unet] guide_unet = unet.structural_copy() for i, attention_block in enumerate(guide_unet.layers(CrossAttentionBlock)): sa = attention_block.find(SelfAttention) assert sa is not None and sa.parent is not None SaveLayerNormAdapter(sa, context=f"self_attention_context_{i}").inject() for i, attention_block in enumerate(unet.layers(CrossAttentionBlock)): unet.set_context(f"self_attention_context_{i}", {"norm": None}) sa = attention_block.find(SelfAttention) assert sa is not None and sa.parent is not None self._adapters.append(ReferenceOnlyControlAdapter(sa, context=f"self_attention_context_{i}", sai=self)) super().__init__( Lambda(self.copy_diffusion_context), UseContext("self_attention_injection", "guide"), guide_unet, Lambda(self.restore_diffusion_context), ) @property def unet(self): return self._unet[0] def inject(self) -> None: assert self not in self._unet[0], f"{self} is already injected" for adapter in self._adapters: adapter.inject() self.unet.insert(0, self) def eject(self) -> None: assert self.unet[0] == self, f"{self} is not the first element of target UNet" for adapter in self._adapters: adapter.eject() self.unet.pop(0) def set_controlnet_condition(self, condition: Tensor) -> None: self.set_context("self_attention_injection", {"guide": condition}) def copy_diffusion_context(self, x: Tensor) -> Tensor: # This function allows to not disrupt the accumulation of residuals in the unet (if controlnet are used) self.set_context( "self_attention_residuals_buffer", {"buffer": self.use_context("unet")["residuals"]}, ) self.set_context( "unet", {"residuals": [0.0] * 13}, ) return x def restore_diffusion_context(self, x: Tensor) -> Tensor: self.set_context( "unet", { "residuals": self.use_context("self_attention_residuals_buffer")["buffer"], }, ) return x def structural_copy(self: "SelfAttentionInjection") -> "SelfAttentionInjection": raise 
RuntimeError("SelfAttentionInjection cannot be copied, eject it first.")
/foundationals/latent_diffusion/self_attention_injection.py
0.829077
0.502808
self_attention_injection.py
pypi
from abc import abstractmethod from torch import Tensor, device as Device, dtype as DType, linspace, float32, sqrt, log from typing import TypeVar T = TypeVar("T", bound="Scheduler") class Scheduler: """ A base class for creating a diffusion model scheduler. The Scheduler creates a sequence of noise and scaling factors used in the diffusion process, which gradually transforms the original data distribution into a Gaussian one. This process is described using several parameters such as initial and final diffusion rates, and is encapsulated into a `__call__` method that applies a step of the diffusion process. """ timesteps: Tensor def __init__( self, num_inference_steps: int, num_train_timesteps: int = 1_000, initial_diffusion_rate: float = 8.5e-4, final_diffusion_rate: float = 1.2e-2, device: Device | str = "cpu", dtype: DType = float32, ): self.device: Device = Device(device) self.dtype: DType = dtype self.num_inference_steps = num_inference_steps self.num_train_timesteps = num_train_timesteps self.initial_diffusion_rate = initial_diffusion_rate self.final_diffusion_rate = final_diffusion_rate self.scale_factors = ( 1.0 - linspace( start=initial_diffusion_rate**0.5, end=final_diffusion_rate**0.5, steps=num_train_timesteps, dtype=dtype, ) ** 2 ) self.cumulative_scale_factors = sqrt(self.scale_factors.cumprod(dim=0)) self.noise_std = sqrt(1.0 - self.scale_factors.cumprod(dim=0)) self.signal_to_noise_ratios = log(self.cumulative_scale_factors) - log(self.noise_std) self.timesteps = self._generate_timesteps() @abstractmethod def __call__(self, x: Tensor, noise: Tensor, step: int) -> Tensor: """ Applies a step of the diffusion process to the input tensor `x` using the provided `noise` and `timestep`. This method should be overridden by subclasses to implement the specific diffusion process. """ ... @abstractmethod def _generate_timesteps(self) -> Tensor: """ Generates a tensor of timesteps. 
This method should be overridden by subclasses to provide the specific timesteps for the diffusion process. """ ... @property def steps(self) -> list[int]: return list(range(self.num_inference_steps)) def add_noise( self, x: Tensor, noise: Tensor, step: int, ) -> Tensor: timestep = self.timesteps[step] cumulative_scale_factors = self.cumulative_scale_factors[timestep].unsqueeze(-1).unsqueeze(-1) noise_stds = self.noise_std[timestep].unsqueeze(-1).unsqueeze(-1) noised_x = cumulative_scale_factors * x + noise_stds * noise return noised_x def to(self: T, device: Device | str | None = None, dtype: DType | None = None) -> T: # type: ignore if device is not None: self.device = Device(device) self.timesteps = self.timesteps.to(device) if dtype is not None: self.dtype = dtype self.scale_factors = self.scale_factors.to(device, dtype=dtype) self.cumulative_scale_factors = self.cumulative_scale_factors.to(device, dtype=dtype) self.noise_std = self.noise_std.to(device, dtype=dtype) self.signal_to_noise_ratios = self.signal_to_noise_ratios.to(device, dtype=dtype) return self
/foundationals/latent_diffusion/schedulers/scheduler.py
0.964855
0.841598
scheduler.py
pypi
from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
import numpy as np
from torch import Tensor, device as Device, tensor, exp
from collections import deque


class DPMSolver(Scheduler):
    """Implements DPM-Solver++ from https://arxiv.org/abs/2211.01095

    We only support noise prediction for now.
    """

    def __init__(
        self,
        num_inference_steps: int,
        num_train_timesteps: int = 1_000,
        initial_diffusion_rate: float = 8.5e-4,
        final_diffusion_rate: float = 1.2e-2,
        device: Device | str = "cpu",
    ):
        super().__init__(
            num_inference_steps=num_inference_steps,
            num_train_timesteps=num_train_timesteps,
            initial_diffusion_rate=initial_diffusion_rate,
            final_diffusion_rate=final_diffusion_rate,
            device=device,
        )
        # Rolling buffer of the two most recent denoised-data estimates; the
        # second-order multistep update consumes the current and previous one.
        self.estimated_data = deque([tensor([])] * 2, maxlen=2)
        # Number of warm-up steps already taken; first-order updates are used
        # until enough estimates exist for the second-order formula.
        self.initial_steps = 0

    def _generate_timesteps(self) -> Tensor:
        """Return the inference timesteps, largest first."""
        # We need to use numpy here because:
        # numpy.linspace(0,999,31)[15] is 499.49999999999994
        # torch.linspace(0,999,31)[15] is 499.5
        # ...and we want the same result as the original codebase.
        return tensor(
            np.linspace(0, self.num_train_timesteps - 1, self.num_inference_steps + 1).round().astype(int)[1:],
            device=self.device,
        ).flip(0)

    def dpm_solver_first_order_update(self, x: Tensor, noise: Tensor, step: int) -> Tensor:
        """First-order DPM-Solver++ update of `x` using the data estimate passed as `noise`."""
        # NOTE(review): at the last step the "previous" timestep wraps to
        # self.timesteps[0] (the largest timestep), unlike the second-order
        # update which falls back to timestep 0 — confirm this is intended.
        timestep, previous_timestep = (
            self.timesteps[step],
            self.timesteps[step + 1 if step < len(self.timesteps) - 1 else 0],
        )
        # lambda_s (previous) and lambda_t (current): log-SNRs of the two timesteps.
        previous_ratio, current_ratio = (
            self.signal_to_noise_ratios[previous_timestep],
            self.signal_to_noise_ratios[timestep],
        )
        previous_scale_factor = self.cumulative_scale_factors[previous_timestep]
        previous_noise_std, current_noise_std = (
            self.noise_std[previous_timestep],
            self.noise_std[timestep],
        )
        # exp(-h) where h is the log-SNR step size.
        exp_factor = exp(-(previous_ratio - current_ratio))
        denoised_x = (previous_noise_std / current_noise_std) * x - (previous_scale_factor * (exp_factor - 1.0)) * noise
        return denoised_x

    def multistep_dpm_solver_second_order_update(self, x: Tensor, step: int) -> Tensor:
        """Second-order multistep DPM-Solver++ update, using the two buffered data estimates."""
        # previous = the timestep we are stepping to (0 at the end of sampling),
        # current = this step's timestep, next = the one already consumed before it.
        previous_timestep, current_timestep, next_timestep = (
            self.timesteps[step + 1] if step < len(self.timesteps) - 1 else tensor([0]),
            self.timesteps[step],
            self.timesteps[step - 1],
        )
        current_data_estimation, next_data_estimation = self.estimated_data[-1], self.estimated_data[-2]
        previous_ratio, current_ratio, next_ratio = (
            self.signal_to_noise_ratios[previous_timestep],
            self.signal_to_noise_ratios[current_timestep],
            self.signal_to_noise_ratios[next_timestep],
        )
        previous_scale_factor = self.cumulative_scale_factors[previous_timestep]
        previous_std, current_std = (
            self.noise_std[previous_timestep],
            self.noise_std[current_timestep],
        )
        # Finite-difference correction term D1 scaled by the ratio of log-SNR steps.
        estimation_delta = (current_data_estimation - next_data_estimation) / (
            (current_ratio - next_ratio) / (previous_ratio - current_ratio)
        )
        exp_neg_factor = exp(-(previous_ratio - current_ratio))
        x_t = (
            (previous_std / current_std) * x
            - (previous_scale_factor * (exp_neg_factor - 1.0)) * current_data_estimation
            - 0.5 * (previous_scale_factor * (exp_neg_factor - 1.0)) * estimation_delta
        )
        return x_t

    def __call__(
        self,
        x: Tensor,
        noise: Tensor,
        step: int,
    ) -> Tensor:
        """
        Represents one step of the backward diffusion process that iteratively denoises the input data `x`.

        This method works by estimating the denoised version of `x` and applying either a first-order or second-order
        backward Euler update, which is a numerical method commonly used to solve ordinary differential equations
        (ODEs).
        """
        current_timestep = self.timesteps[step]
        scale_factor, noise_ratio = self.cumulative_scale_factors[current_timestep], self.noise_std[current_timestep]
        # x_0 estimate from the noise prediction: (x_t - sigma_t * eps) / sqrt(alpha-bar_t).
        estimated_denoised_data = (x - noise_ratio * noise) / scale_factor
        self.estimated_data.append(estimated_denoised_data)
        # First call uses the first-order update (only one real estimate buffered);
        # subsequent calls use the second-order multistep update.
        denoised_x = (
            self.dpm_solver_first_order_update(x=x, noise=estimated_denoised_data, step=step)
            if (self.initial_steps == 0)
            else self.multistep_dpm_solver_second_order_update(x=x, step=step)
        )
        if self.initial_steps < 2:
            self.initial_steps += 1
        return denoised_x
/foundationals/latent_diffusion/schedulers/dpm_solver.py
0.876278
0.673076
dpm_solver.py
pypi
from torch import Tensor, device as Device, randn, arange, Generator, tensor

from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler


class DDPM(Scheduler):
    """
    The Denoising Diffusion Probabilistic Models (DDPM) is a specific type of diffusion model,
    which uses a specific strategy to generate the timesteps and applies the diffusion process in a specific way.
    """

    def __init__(
        self,
        num_inference_steps: int,
        num_train_timesteps: int = 1_000,
        initial_diffusion_rate: float = 8.5e-4,
        final_diffusion_rate: float = 1.2e-2,
        device: Device | str = "cpu",
    ) -> None:
        super().__init__(num_inference_steps, num_train_timesteps, initial_diffusion_rate, final_diffusion_rate, device)

    def _generate_timesteps(self) -> Tensor:
        """Evenly strided training timesteps, largest first."""
        step_ratio = self.num_train_timesteps // self.num_inference_steps
        timesteps = arange(start=0, end=self.num_inference_steps, step=1) * step_ratio
        return timesteps.flip(0)

    def __call__(self, x: Tensor, noise: Tensor, step: int, generator: Generator | None = None) -> Tensor:
        """
        Generate the next step in the diffusion process.

        This method adjusts the input data using added noise and an estimate of the denoised data, based on
        the current step in the diffusion process. This adjusted data forms the next step in the diffusion process.

        1. It uses current and previous timesteps to calculate the current factor
        dictating the contribution of original data and noise to the new step.
        2. An estimate of the denoised data (`estimated_denoised_data`) is generated.
        3. It calculates coefficients for the estimated denoised data and current data
        (`original_data_coeff` and `current_data_coeff`)
        that balance their contribution to the denoised data for the next step.
        4. It calculates the denoised data for the next step (`denoised_x`),
        which is a combination of the estimated denoised data and current data, adjusted by their respective
        coefficients.
        5. Noise is then added to `denoised_x`. The magnitude of noise is controlled by a calculated variance based
        on the cumulative scaling factor and the current factor.

        The output is the new data step for the next stage in the diffusion process.
        """
        # At the last step the "previous" timestep goes negative by one stride;
        # the matching branch below then substitutes cumulative factor 1.
        timestep, previous_timestep = (
            self.timesteps[step],
            (
                self.timesteps[step + 1]
                if step < len(self.timesteps) - 1
                else tensor(-(self.num_train_timesteps // self.num_inference_steps), device=self.device)
            ),
        )
        # NOTE: despite the "scale_factor" naming, these two hold cumulative
        # products of scale factors (alpha-bar values), not their square roots.
        # The cumprod is recomputed on every call rather than reusing
        # self.cumulative_scale_factors**2 — left as-is to preserve behavior.
        current_cumulative_factor, previous_cumulative_scale_factor = (self.scale_factors.cumprod(0))[timestep], (
            (self.scale_factors.cumprod(0))[previous_timestep]
            if step < len(self.timesteps) - 1
            else tensor(1, device=self.device)
        )
        # alpha_t over the stride: ratio of consecutive cumulative factors.
        current_factor = current_cumulative_factor / previous_cumulative_scale_factor
        # x_0 estimate, clamped to the model's data range [-1, 1].
        estimated_denoised_data = (x - (1 - current_cumulative_factor) ** 0.5 * noise) / current_cumulative_factor**0.5
        estimated_denoised_data = estimated_denoised_data.clamp(-1, 1)
        # Posterior mean coefficients (DDPM eq. 7).
        original_data_coeff = (previous_cumulative_scale_factor**0.5 * (1 - current_factor)) / (
            1 - current_cumulative_factor
        )
        current_data_coeff = (
            current_factor**0.5 * (1 - previous_cumulative_scale_factor) / (1 - current_cumulative_factor)
        )
        denoised_x = original_data_coeff * estimated_denoised_data + current_data_coeff * x
        # No noise is added on the final step.
        if step < len(self.timesteps) - 1:
            # Posterior variance (DDPM eq. 7), clamped away from zero before sqrt.
            variance = (1 - previous_cumulative_scale_factor) / (1 - current_cumulative_factor) * (1 - current_factor)
            denoised_x = denoised_x + (variance.clamp(min=1e-20) ** 0.5) * randn(
                x.shape, device=x.device, dtype=x.dtype, generator=generator
            )
        return denoised_x
/foundationals/latent_diffusion/schedulers/ddpm.py
0.906638
0.839997
ddpm.py
pypi
import logging
import os
import sys
from datetime import datetime
from functools import lru_cache
from logging import LogRecord
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler
from typing import Tuple, Any, Optional

from ._tools import DEBUG
from ._tools._specification import BaseSpecification

# ---------------------------------------------------------------------------
#   Conversion from TS/JS to Python
# ---------------------------------------------------------------------------

# Custom level below DEBUG (10), mirroring the TypeScript "trace" level.
TRACE = 5
py_grade = logging._nameToLevel.copy()
py_grade["TRACE"] = TRACE  # add an additional level
logging.addLevelName(TRACE, "TRACE")
# Re-key as {"py_<NAME>": {"name": ..., "level": ...}} to line up with ts_grade below.
py_grade = {f"py_{lname}": {"name": lname, "level": llevel} for lname, llevel in py_grade.items()}

# TypeScript log levels as used by the JS/TS sibling library.
ts_grade = {
    "ts_trace": {"name": "trace", "level": 0},
    "ts_debug": {"name": "debug", "level": 1},
    "ts_info": {"name": "info", "level": 2},
    "ts_warn": {"name": "warn", "level": 3},
    "ts_error": {"name": "error", "level": 4},
    "ts_silent": {"name": "silent", "level": 5},
}

# Pairing of TS level keys with their Python equivalents.
conversion_schema = [
    ("ts_trace", "py_TRACE"),
    ("ts_debug", "py_DEBUG"),
    ("ts_info", "py_INFO"),
    ("ts_warn", "py_WARNING"),
    ("ts_error", "py_ERROR"),
    ("ts_silent", "py_CRITICAL"),
]
"""
+------------+----------+
| TypeScript |  Python  |
+------------+----------+
|   trace    |  TRACE   |
+------------+----------+
|   debug    |  DEBUG   |
+------------+----------+
|    info    |   INFO   |
+------------+----------+
|    warn    | WARNING  |
+------------+----------+
|   error    |  ERROR   |
+------------+----------+
|   silent   | CRITICAL |
+------------+----------+
"""
py_by_ts_nameToName = {ts_grade[ts_]["name"]: py_grade[py_]["name"] for ts_, py_ in conversion_schema}
"""
+------------+--------+
| TypeScript | Python |
+------------+--------+
|     0      |   5    |
+------------+--------+
|     1      |   10   |
+------------+--------+
|     2      |   20   |
+------------+--------+
|     3      |   30   |
+------------+--------+
|     4      |   40   |
+------------+--------+
|     5      |   50   |
+------------+--------+
"""
py_by_ts_levelToLevel = {ts_grade[ts_]["level"]: py_grade[py_]["level"] for ts_, py_ in conversion_schema}

# ---------------------------------------------------------------------------
#   File handler
# ---------------------------------------------------------------------------

# Multipliers for human-readable size suffixes used in config values like "10M".
bytes_by_suffix = {
    "B": 1,  # B
    "K": 2**10,  # KiB
    "M": 2**20,  # MiB
    "G": 2**30,  # GiB
}


def convert_filesize(s) -> int:
    """Convert "<count><suffix>" (e.g. "10M") or a plain int to a byte count.

    NOTE(review): silently returns None for any other type — confirm callers
    only ever pass int or str.
    """
    if isinstance(s, int):
        return s

    if isinstance(s, str):
        suffix_ = s[-1]
        count_ = int(s[:-1])
        bytes_ = bytes_by_suffix[suffix_]
        count_bytes_ = count_ * bytes_
        return count_bytes_


def convert_interval(s) -> Tuple[int, str]:
    """Split "<count><when>" (e.g. "7D") into (interval, when) for TimedRotatingFileHandler.

    Months ("M") are approximated as 30 days, since the stdlib handler has no
    month unit.
    """
    when_ = s[-1]
    interval_ = int(s[:-1])

    # Months
    if when_ == "M":
        when_ = "D"
        interval_ = interval_ * 30

    return interval_, when_


class TimedSizedRotatingHandler(TimedRotatingFileHandler, RotatingFileHandler):
    """File handler that rolls over on EITHER elapsed time OR file size."""

    def __init__(
        self,
        filename,
        file_mode="a",
        max_bytes=0,
        backup_count=0,
        encoding="ascii",
        delay=False,
        when="h",
        interval=1,
        utc=False,
        at_time=None,
        *args,
        **kwargs,
    ):
        # "w" mode: start from an empty file by deleting any previous one
        # (best-effort; the file may not exist or be locked).
        if file_mode.startswith("w"):
            try:
                os.remove(filename)
            except Exception:
                pass
        self.filename = filename
        # Initialize both bases explicitly: each sets its own rollover state.
        RotatingFileHandler.__init__(
            self,
            filename=filename,
            mode=file_mode,
            maxBytes=max_bytes,
            backupCount=backup_count,
            encoding=encoding,
            delay=delay,
        )
        TimedRotatingFileHandler.__init__(
            self,
            filename=filename,
            when=when,
            interval=interval,
            backupCount=backup_count,
            encoding=encoding,
            delay=delay,
            utc=utc,
            atTime=at_time,
        )
        # Keep the caller's (possibly relative) path instead of the
        # absolutized baseFilename computed by the base class.
        if os.path.sep in filename:
            self.baseFilename = filename

    def shouldRollover(self, record):
        # Roll over when either the time-based or the size-based condition fires.
        timed_rollover = TimedRotatingFileHandler.shouldRollover(self, record)
        sized_rollover = RotatingFileHandler.shouldRollover(self, record)
        return timed_rollover or sized_rollover

    def doRollover(self):
        # super(TimedRotatingFileHandler, self) skips TimedRotatingFileHandler
        # in the MRO, dispatching to RotatingFileHandler.doRollover (numbered
        # backups) instead of the timestamp-suffix scheme.
        super(TimedRotatingFileHandler, self).doRollover()

    def getFilesToDelete(self):
        # NOTE(review): same MRO skip as doRollover — RotatingFileHandler does
        # not define getFilesToDelete on all Python versions; verify this does
        # not raise AttributeError on the targeted interpreter.
        return super(TimedRotatingFileHandler, self).getFilesToDelete()


def _filenamer(base_filename):
    """Rotation namer: move the rollover counter from the extension into the
    name, producing "<date>-<time>-<count>-<pid>-<name>"."""
    basename_ = os.path.basename(base_filename)
    date_, time_, pid_, *name_, name_with_count_ = basename_.split("-")
    *name_chunk_, count_ = name_with_count_.split(".")
    name_.append(".".join(name_chunk_))
    name_ = "-".join(name_)
    new_basename_ = "-".join([date_, time_, count_, pid_, name_])
    return base_filename.replace(basename_, new_basename_)


# Verbose pipe-separated format while debugging, bracketed format otherwise.
if DEBUG:
    fmt = (
        "[%(asctime)s|"
        "%(levelname)s|"
        "%(thread)d-%(threadName)s|"
        "%(name)s] "
        "%(module)s."
        "%(funcName)s "
        "%(message)s"
    )
else:
    fmt = (
        "[%(asctime)s] - "
        "[%(name)s] - "
        "[%(levelname)s] - "
        "[%(thread)d - %(threadName)s] - "
        "[%(module)s] - "
        "[%(funcName)s] - "
        "%(message)s"
    )


class RDFormatter(logging.Formatter):
    """Formatter that renders timestamps as local-timezone ISO-8601."""

    def formatTime(self, record: LogRecord, datefmt: Optional[str] = None) -> str:
        return datetime.fromtimestamp(record.created).astimezone().isoformat()


_file_handler_formatter = RDFormatter(fmt)


def _get_filename(filename_: str, datetime_: datetime, pid_: int) -> str:
    """Build the log file name "<date>-<time>-<pid>-..." under the configured path.

    NOTE(review): the "(unknown)" literal below looks like a redaction artifact
    (the unpacked `filename` variable is never used) — the original most likely
    interpolated the configured file name here; confirm against upstream.
    """
    date_ = datetime_.strftime("%Y%m%d")
    time_ = datetime_.strftime("%H%M")
    filename_ = filename_.replace("\\", os.path.sep)
    filename_ = os.path.normpath(filename_)
    *path, filename = filename_.split(os.path.sep)
    if path:
        new_filename = f"{date_}-{time_}-{pid_}-(unknown)"
        path.append(new_filename)
        filename_ = f"{os.path.sep}".join(path)
    else:
        filename_ = f"{date_}-{time_}-{pid_}-(unknown)"
    return filename_


@lru_cache(None)
def _create_log_file_handler(name_, file_size_, max_files_, interval_):
    """Create (and cache per argument tuple) the rotating file handler."""
    # file name
    filename_ = _get_filename(name_, datetime.now(), os.getpid())
    # file size
    file_size_ = convert_filesize(file_size_)
    # interval
    interval_, when_ = convert_interval(interval_)
    handler_ = TimedSizedRotatingHandler(
        filename_,
        max_bytes=file_size_,
        when=when_,
        interval=interval_,
        backup_count=max_files_,
        encoding="utf-8",
        delay=True,
    )
    handler_.namer = _filenamer
    handler_.setFormatter(_file_handler_formatter)
    return handler_


# ---------------------------------------------------------------------------
#   Stdout handler
# ---------------------------------------------------------------------------

if DEBUG:
    fmt = (
        "[%(asctime)s|"
        "%(levelname)s|"
        "%(thread)d-%(threadName)s|"
        "%(name)s] \n"
        "%(module)s."
        "%(funcName)s "
        "%(message)s"
    )
else:
    fmt = "[%(asctime)s] - [%(levelname)s] - [%(name)s] - [%(thread)d] | %(threadName)s\n%(message)s"

_stdout_formatter = RDFormatter(fmt)


def _create_log_stdout_handler():
    """Create a stream handler writing formatted records to stdout."""
    handler_ = logging.StreamHandler(sys.stdout)
    handler_.setFormatter(_stdout_formatter)
    return handler_


# ---------------------------------------------------------------------------
#   Filtering
# ---------------------------------------------------------------------------
# Specification objects composed into a record filter by make_filter().


class NotLog(BaseSpecification):
    # Rejects every record.
    def is_satisfied_by(self, record: Any) -> bool:
        return False


class LogEverything(BaseSpecification):
    # Accepts every record.
    def is_satisfied_by(self, record: Any) -> bool:
        return True


class NotLogWithName(BaseSpecification):
    # Rejects records whose logger name equals `name`.
    def __init__(self, name) -> None:
        super().__init__()
        self.name = name

    def is_satisfied_by(self, record: Any) -> bool:
        return self.name != record.name


class LogWithName(BaseSpecification):
    # Accepts only records whose logger name equals `name`.
    def __init__(self, name) -> None:
        super().__init__()
        self.name = name

    def is_satisfied_by(self, record: Any) -> bool:
        return self.name == record.name


class LogStartsWithName(BaseSpecification):
    # Accepts records whose logger name starts with `name` (prefix pattern "x*").
    def __init__(self, name) -> None:
        super().__init__()
        self.name = name

    def is_satisfied_by(self, record: Any) -> bool:
        return record.name.startswith(self.name)


class NotLogStartsWithName(BaseSpecification):
    # Rejects records whose logger name starts with `name` (pattern "-x*").
    def __init__(self, name) -> None:
        super().__init__()
        self.name = name

    def is_satisfied_by(self, record: Any) -> bool:
        return not record.name.startswith(self.name)


default_log_filter = "*"


def join_by_and_(prev_spec, spec):
    # Combine with AND, tolerating a None accumulator on the first item.
    return prev_spec and prev_spec.and_(spec) or spec


def join_by_or_(prev_spec, spec):
    # Combine with OR, tolerating a None accumulator on the first item.
    return prev_spec and prev_spec.or_(spec) or spec


def make_filter(text):
    """Build a logging filter callable from a comma-separated pattern list.

    Patterns: "*" (everything), "name", "prefix*", and negations "-name",
    "-prefix*". Positive patterns are OR-ed, negative ones AND-ed in order.
    """
    ss = [s.strip() for s in text.split(",") if s]
    if not ss:
        can_log = NotLog()
    else:
        can_log = None
    for s in ss:
        if s == "*":
            can_log = join_by_or_(can_log, LogEverything())
        elif s.startswith("-") and s.endswith("*"):
            can_log = join_by_and_(can_log, NotLogStartsWithName(s[1:-1]))
        elif s.startswith("-"):
            can_log = join_by_and_(can_log, NotLogWithName(s[1:]))
        elif s.endswith("*"):
            can_log = join_by_or_(can_log, LogStartsWithName(s[:-1]))
        else:
            can_log = join_by_or_(can_log, LogWithName(s))

    def inner(record):
        return can_log.is_satisfied_by(record)

    return inner


# ---------------------------------------------------------------------------
#   Log level
# ---------------------------------------------------------------------------


def convert_log_level(level) -> int:
    """Convert a TS level name, Python level name, or int to a Python level int.

    Falls back to logging.INFO for anything unrecognized (including level 0,
    which is falsy).
    """
    py_level_ = None

    if isinstance(level, str):
        level_ = level.strip()
        # First try the TS name ("warn" -> "WARNING"), then as a Python name.
        py_level_ = py_by_ts_nameToName.get(level_)
        if py_level_ is None:
            py_level_ = level
        py_level_ = logging._nameToLevel.get(py_level_)

    elif isinstance(level, int):
        py_level_ = level

    return py_level_ or logging.INFO


def read_log_level_config():
    """Read the configured log level and convert it to a Python level int."""
    from . import _configure as configure

    level_ = configure.get_str(configure.keys.log_level)
    return convert_log_level(level_)


# ---------------------------------------------------------------------------
#   Create and dispose logger
# ---------------------------------------------------------------------------

# One shared stdout handler for all loggers created by this module.
_log_stream_handler = _create_log_stdout_handler()
# Names of loggers created via create_logger (for introspection).
_existing_loggers = []


@lru_cache(None)
def create_logger(name):
    """Create and configure a logger (cached: one configuration per name)."""
    from . import _configure as configure

    # construct the logger object for session
    logger_ = logging.getLogger(name)

    log_file_enabled_ = configure.get(configure.keys.log_file_enabled, True)
    if log_file_enabled_:
        name_ = configure.get_str(configure.keys.log_filename)
        file_size_ = configure.get_str(configure.keys.log_file_size)
        max_files_ = configure.get_int(configure.keys.log_max_files)
        interval_ = configure.get_str(configure.keys.log_interval)
        _log_file_handler = _create_log_file_handler(name_, file_size_, max_files_, interval_)
        logger_.addHandler(_log_file_handler)

    log_console_enabled_ = configure.get(configure.keys.log_console_enabled, True)
    if log_console_enabled_:
        logger_.addHandler(_log_stream_handler)
    else:
        # No console output: also stop propagation to ancestor handlers.
        logger_.propagate = False

    log_level_ = read_log_level_config()
    if log_level_ != logger_.level:
        logger_.setLevel(log_level_)

    log_filter_ = configure.get(configure.keys.log_filter, default_log_filter)
    logger_.addFilter(make_filter(log_filter_))

    _existing_loggers.append(name)
    return logger_


def get_logger(name):
    """Return the (cached) logger for `name`."""
    return create_logger(name)


def set_log_level(logger, level):
    """Set `level` (TS name, Python name, or int) on `logger` (name or instance)."""
    if isinstance(logger, str):
        logger = get_logger(logger)
    level = convert_log_level(level)
    logger.setLevel(level)
    return logger


def existing_loggers():
    """Names of all loggers created through this module."""
    return _existing_loggers


def dispose_logger(logger):
    """Close and detach all handlers of `logger` (name or instance)."""
    if isinstance(logger, str):
        logger = get_logger(logger)

    handlers_ = logger.handlers[:]
    for hdlr_ in handlers_:
        hdlr_.close()
        logger.removeHandler(hdlr_)

    return logger


# ---------------------------------------------------------------------------
#   Root logger
# ---------------------------------------------------------------------------

_root_logger = None


def root_logger():
    """Lazily create and return the library's root logger ("rd")."""
    global _root_logger
    if _root_logger is None:
        _root_logger = create_logger("rd")
    return _root_logger
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_log.py
0.531453
0.191857
_log.py
pypi
from datetime import date, datetime, timedelta
from typing import Optional, Union, Iterable

from pandas import DataFrame

from refinitiv.data.errors import RDError
from ._containers import UniverseContainer, FieldsContainer, ADCDataContainer, HPDataContainer, CustInstDataContainer
from ._data_provider import get_hp_data, get_custominsts_data, get_adc_data
from ._history_df_builder_factory import get_history_df_builder
from ._intervals_consts import INTERVALS
from .context_collection import get_hp_context, get_adc_context, get_cust_inst_context
from .._core.session import get_default, raise_if_closed
from .._tools import fr_datetime_adapter
from .._types import OptDateTime
from ..content.fundamental_and_reference._data_grid_type import get_data_grid_type_by_session
from ..usage_collection._filter_types import FilterType
from ..usage_collection._logger import get_usage_logger
from ..usage_collection._utils import ModuleName


def get_history(
    universe: Union[str, Iterable[str]],
    fields: Union[str, Iterable[str], None] = None,
    interval: Optional[str] = None,
    start: "OptDateTime" = None,
    end: "OptDateTime" = None,
    adjustments: Optional[str] = None,
    count: Optional[int] = None,
    use_field_names_in_headers: bool = False,
    parameters: Union[str, dict, None] = None,
) -> DataFrame:
    """
    Retrieves the pricing history, as well as Fundamental and Reference data history.

    Parameters
    ----------
    universe: str | list
        Instruments to request
    fields: str | list, optional
        Fields to request
    interval: str, optional
        Date interval. Supported intervals are:
        tick, tas, taq, minute, 1min, 5min, 10min, 30min, 60min, hourly, 1h, daily,
        1d, 1D, 7D, 7d, weekly, 1W, monthly, 1M, quarterly, 3M, 6M, yearly, 1Y
    start: str or date or datetime or timedelta, optional
        The start date and timestamp of the requested history
    end: str or date or datetime or timedelta, optional
        The end date and timestamp of the requested history
    adjustments : str, optional
        Tells the system whether to apply or not apply CORAX (Corporate Actions)
        events or exchange/manual corrections or price and volume adjustment
        according to trade/quote qualifier summarization actions to historical time
        series data. Possible values are:
        exchangeCorrection, manualCorrection, CCH, CRE, RTS, RPO, unadjusted,
        qualifiers
    count : int, optional
        The maximum number of data points returned. Values range: 1 - 10000.
        Applies only to pricing fields.
    use_field_names_in_headers : bool, default False
        If True - returns field name as column headers for data instead of title
    parameters: str | dict, optional
        Single global parameter key=value or dictionary of global parameters to request.
        Applies only to TR fields.

    Returns
    -------
    pandas.DataFrame

    Examples
    --------
    >>> get_history(universe="GOOG.O")
    >>> get_history(universe="GOOG.O", fields="tr.Revenue", interval="1Y")
    >>> get_history(
    ...     universe="GOOG.O",
    ...     fields=["BID", "ASK", "tr.Revenue"],
    ...     interval="1Y",
    ...     start="2015-01-01",
    ...     end="2020-10-01",
    ... )
    """
    session = get_default()
    raise_if_closed(session)
    logger = session.logger()

    # Validate the interval before doing any network work.
    if interval is not None and interval not in INTERVALS:
        raise ValueError(f"Not supported interval value.\nSupported intervals are: {list(INTERVALS.keys())}")

    # Library usage logging
    get_usage_logger().log_func(
        name=f"{ModuleName.ACCESS}.get_history",
        func_path=f"{__name__}.get_history",
        kwargs=dict(
            universe=universe,
            fields=fields,
            interval=interval,
            start=start,
            end=end,
            count=count,
            adjustments=adjustments,
            parameters=parameters,
            use_field_names_in_headers=use_field_names_in_headers,
        ),
        desc={FilterType.SYNC, FilterType.LAYER_ACCESS},
    )

    # Wrap inputs; the containers split instruments/fields between the ADC,
    # historical-pricing (hp) and custom-instrument backends.
    universe = UniverseContainer(universe)
    fields = FieldsContainer(fields)
    data_grid_type = get_data_grid_type_by_session(session)
    hp = get_hp_context(data_grid_type, universe, fields, use_field_names_in_headers)
    adc = get_adc_context(data_grid_type, universe, fields, use_field_names_in_headers)
    cust_inst = get_cust_inst_context(data_grid_type, universe, fields, use_field_names_in_headers)

    # One error-message slot per backend actually queried; an overall error is
    # raised only if EVERY attempted request failed (see `all(exceptions)` below).
    exceptions = list()

    adc_raw = None
    adc_df = None
    if adc.can_get_data:
        adc_params = get_adc_params(start, end, interval)
        # NOTE(review): the signature allows `parameters` to be a str, but
        # dict.update() on a plain string raises — confirm str inputs are
        # pre-parsed upstream.
        adc_params.update(parameters or {})
        adc_raw, adc_df, adc_exception_msg = get_adc_data(
            universe=universe.adc,
            fields=fields.adc,
            parameters=adc_params,
            use_field_names_in_headers=use_field_names_in_headers,
            logger=logger,
        )
        exceptions.append(adc_exception_msg)

    adc_data = ADCDataContainer(adc_raw, adc_df, fields)
    # The ADC response can resolve/expand instruments; recompute the pricing universe.
    universe.calc_hp(adc_data.raw)

    hp_raw = None
    hp_df = None
    if hp.can_get_data:
        hp_raw, hp_df, hp_exception_msg = get_hp_data(
            universe=universe.hp,
            interval=interval,
            start=start,
            end=end,
            adjustments=adjustments,
            count=count,
            fields=fields.hp,
            logger=logger,
        )
        exceptions.append(hp_exception_msg)

    hp_data = HPDataContainer(hp_raw, hp_df)

    cust_inst_raw = None
    cust_inst_df = None
    if cust_inst.can_get_data:
        cust_inst_raw, cust_inst_df, cust_inst_exception_msg = get_custominsts_data(
            universe=universe.cust_inst,
            interval=interval,
            start=start,
            end=end,
            count=count,
            logger=logger,
        )
        exceptions.append(cust_inst_exception_msg)

    cust_inst_data = CustInstDataContainer(cust_inst_raw, cust_inst_df)

    # Raise only when every attempted request produced an error message.
    if exceptions and all(exceptions):
        except_msg = "\n\n".join(exceptions)
        raise RDError(-1, except_msg)

    # No backend returned anything: hand back an empty frame.
    if not any({adc_data, hp_data, cust_inst_data}):
        return DataFrame()

    adc.set_data(adc_data, hp_data, cust_inst_data)
    hp.set_data(adc_data, hp_data, cust_inst_data)
    cust_inst.set_data(adc_data, hp_data, cust_inst_data)

    history_provider = get_history_df_builder(data_grid_type)
    return history_provider.build_df_date_as_index(adc, hp, cust_inst, universe.hp, interval)


def get_adc_params(
    start: Union[str, date, datetime, timedelta],
    end: Union[str, date, datetime, timedelta],
    interval: Optional[str],
) -> dict:
    """
    Gets parameters for ADC request.

    Parameters
    ----------
    start : str or date or datetime or timedelta
        Parameters start date.
    end : str or date or datetime or timedelta
        Parameters end date.
    interval : str, optional
        Interval using to calculate parameters.

    Returns
    -------
    parameters : dict
        Parameters for ADC requests.
    """
    parameters = {}

    if start is not None:
        parameters["SDate"] = fr_datetime_adapter.get_str(start)

    if end is not None:
        parameters["EDate"] = fr_datetime_adapter.get_str(end)

    if interval is not None:
        # Map the user-facing interval alias to the ADC frequency code.
        parameters["FRQ"] = INTERVALS[interval]["adc"]

    return parameters
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/get_history_func.py
0.891646
0.185799
get_history_func.py
pypi
from datetime import datetime
from typing import List

import numpy as np
from pandas import DataFrame, NA, MultiIndex, concat

from .._tools import ohlc


def merge_dataframes(dfs: List[DataFrame]) -> DataFrame:
    """Outer-join the per-stream frames into one frame indexed by "Timestamp".

    Note: pops the last frame from `dfs` (mutates the argument) and binds the
    project's `ohlc` resampling helper onto the merged frame.
    """
    df = dfs.pop()
    df = df.join(dfs, how="outer")  # noqa
    df = df.convert_dtypes()
    df.index.name = "Timestamp"
    # Attach the custom resampler as a bound method of this frame.
    df.ohlc = ohlc.__get__(df, None)
    return df


def create_df(data: list, timestamps: list, fields: list, stream_name: str) -> DataFrame:
    """Build a (timestamp x field) frame for one stream, with columns nested
    under `stream_name` in a two-level MultiIndex."""
    numpy_array = np.array(data)
    timestamp_array = np.array(timestamps)
    if np.size(numpy_array):
        dataframe = DataFrame(numpy_array, columns=fields, index=timestamp_array)
    else:
        dataframe = DataFrame()
    dataframe.sort_index(inplace=True)
    dataframe.columns = MultiIndex.from_product([[stream_name], dataframe.columns])
    return dataframe


def retrieve_data_for_df(stream_data: List[dict], repeat: bool = False) -> tuple:
    """Flatten stream update records into (timestamps, rows, fields).

    Fix: field names are collected in deterministic first-seen order via
    dict.fromkeys instead of an unordered set, so the resulting column order is
    stable across runs. When `repeat` is True the first record is re-stamped
    with "now" (it is a carried-over value from the previous window).
    Missing fields in a record become None.
    """
    fields = list(dict.fromkeys(field for record in stream_data for field in record["Fields"]))
    timestamps = []
    data = []
    for idx, record in enumerate(stream_data):
        if repeat and idx == 0:
            timestamps.append(datetime.now())
        else:
            timestamps.append(record["Timestamp"])
        data.append([record["Fields"].get(field) for field in fields])
    return timestamps, data, fields


def replace_values_by_nan(_data):
    """In place, replace every value that cannot be parsed as a float with None
    (None itself is left untouched)."""
    for rics_data in _data:
        for idx, value in enumerate(rics_data):
            try:
                if value is not None:
                    float(value)
            except ValueError:
                rics_data[idx] = None


def create_df_based_on_ticks(df):
    """Resample tick data into OHLC bars whose rule spans the whole recorded window.

    Fix: the original tested `hasattr(time_delta, "hours")` / `"minutes"` —
    attributes a timedelta never has (and the branches then read the misspelled
    `time_delta.hour` / `time_delta.minutes`, which would raise if reached) —
    so every sub-day window silently fell through to a seconds-based rule. The
    hour and minute rules now actually apply, derived from `timedelta.seconds`.
    """
    first_update = df.index[0]
    last_update = df.index[-1]
    time_delta = last_update - first_update
    if hasattr(time_delta, "days") and time_delta.days != 0:
        rule = str(time_delta.days + 1) + "D"
    else:
        seconds = time_delta.seconds
        if seconds >= 3600:
            rule = str(seconds // 3600 + 1) + "H"
        elif seconds >= 60:
            rule = str(seconds // 60 + 1) + "min"
        else:
            rule = str(seconds + 1) + "s"
    return df.ohlc(rule, origin="end", call_from_recorder=True)


class OHLCBuilder:
    """Accumulates streaming updates and converts them into OHLC bars at a
    fixed resampling frequency."""

    def __init__(self, frequency: str, universe: list, fields: list) -> None:
        self._frequency = frequency
        # Raw merged frames, one per build() call (kept for inspection/replay).
        self.dataframes = []
        # Concatenation of all OHLC bars produced so far.
        self.ohlc_df = None
        self._universe = universe
        self._fields = fields
        # The OHLC frame produced by the most recent build() call.
        self._last_recorded_ohlc_updates = None

    def create_ohlc_df(self, df: DataFrame) -> DataFrame:
        """Resample the merged tick frame at the configured frequency."""
        return df.ohlc(self._frequency, origin="end", call_from_recorder=True)

    def save_ohlc_data(self, df: DataFrame):
        """Append `df` to the accumulated OHLC history (NaNs normalized to pandas.NA)."""
        df.fillna(NA, inplace=True)
        if self.ohlc_df is None:
            self.ohlc_df = df
        else:
            self.ohlc_df = concat([self.ohlc_df, df])

    def build(self, updates_by_stream_name: dict):
        """Consume (and clear) the buffered updates for every stream, resample
        them into one OHLC row set, and append it to the history.

        If no updates arrived, a single all-NaN row stamped "now" is recorded
        so the output keeps one row per recording period.
        """
        dfs = []
        df = DataFrame()
        self._count_of_updates = 0
        for universe, stream_data in updates_by_stream_name.items():
            _timestamps, _data, _fields = retrieve_data_for_df(stream_data, True)
            self._count_of_updates += len(_data)
            replace_values_by_nan(_data)
            updates_by_stream_name[universe] = []  # clear the consumed buffer
            dataframe = create_df(_data, _timestamps, _fields, universe)
            dfs.append(dataframe)

        if dfs:
            df = merge_dataframes(dfs)
            self.dataframes.append(df)

        if not df.empty:
            df = self.create_ohlc_df(df)
            df["ticks count"] = self._count_of_updates

        if df.empty:
            # No data this period: emit one empty row so the timeline has no gap.
            empty_row = {}
            if isinstance(self.ohlc_df, DataFrame) and not self.ohlc_df.empty:
                columns = [col for col in self.ohlc_df]
            else:
                columns = self._create_ohlc_columns()
            for column in columns:
                empty_row[column] = np.nan
            _timestamps = np.array([datetime.now()])
            df = DataFrame(empty_row, index=_timestamps)

        self._last_recorded_ohlc_updates = df
        self.save_ohlc_data(df)

    def _create_ohlc_columns(self):
        """Expected (universe, field, ohlc-part) column tuples plus the tick counter."""
        columns = []
        ohlc_list = ["open", "high", "low", "close"]
        for universe in self._universe:
            for field in self._fields:
                for item in ohlc_list:
                    columns.append((universe, field, item))
        columns.append(("ticks count", "", ""))
        return columns

    def dispose(self):
        """Drop all accumulated frames."""
        self.dataframes = []
        self.ohlc_df = None


class Ticks_OHLCBuilder(OHLCBuilder):
    """OHLC builder that sizes a single bar to the whole recorded tick window
    instead of using a fixed frequency."""

    def create_ohlc_df(self, df: DataFrame) -> DataFrame:
        return create_df_based_on_ticks(df)
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/_ohlc_builder.py
0.688049
0.474205
_ohlc_builder.py
pypi
import warnings from typing import Any, Dict, TYPE_CHECKING, Union from urllib.parse import quote_plus from .._tools import ( ADC_FUNC_PATTERN_IN_FIELDS, ADC_TR_PATTERN, cached_property, fields_arg_parser, iterator_str_arg_parser, ) if TYPE_CHECKING: from pandas import DataFrame from .._types import OptStrStrs, StrStrings class Container: def __init__(self, raw: Any = None) -> None: self._raw = raw @property def raw(self) -> Any: return self._raw def __bool__(self) -> bool: return bool(self._raw) def __iter__(self): return iter(self._raw) class UniverseContainer(Container): def __init__(self, raw: "OptStrStrs" = None) -> None: super().__init__(raw) self._hp = [] @cached_property def _universe(self) -> "StrStrings": raw = iterator_str_arg_parser.get_list(self.raw or []) unique_rics = list(dict.fromkeys(raw).keys()) if len(unique_rics) < len(raw): warnings.warn("You have duplicated instruments in your input. Output will contain unique instruments only.") return unique_rics @cached_property def hp_and_cust_inst(self): return self.hp + self.cust_inst @cached_property def adc(self) -> "StrStrings": return [inst for inst in self._universe if not inst.startswith("S)")] @cached_property def cust_inst(self) -> "StrStrings": return [inst for inst in self._universe if inst.startswith("S)")] @property def hp(self) -> "StrStrings": return self._hp @cached_property def is_universe_expander(self): from ..discovery._universe_expanders._universe_expander import UniverseExpander return isinstance(self.raw, UniverseExpander) def calc_hp(self, adc_raw: Union[Dict, None]) -> None: if not adc_raw: self._hp = self.adc else: rics_from_server = list(map(lambda i: quote_plus(i[0]) if "/" in i[0] else i[0], adc_raw.get("data", {}))) self._hp = list(dict.fromkeys(rics_from_server).keys()) def __iter__(self): return iter(self._universe) def __len__(self): return len(self._universe) def __repr__(self): return f"UniverseContainer({self._universe})" class FieldsContainer(Container): def 
__init__(self, raw: "OptStrStrs" = None) -> None: super().__init__(raw) self._adc_fields: "OptStrStrs" = None self._hp_fields: "OptStrStrs" = None def _parse(self) -> None: self._adc_fields = [] self._hp_fields = [] for field in self._fields: if ADC_TR_PATTERN.match(field) or ADC_FUNC_PATTERN_IN_FIELDS.match(field): self._adc_fields.append(field) else: self._hp_fields.append(field) @cached_property def _fields(self) -> "StrStrings": raw = fields_arg_parser.get_list(self.raw or []) unique_fields = list(dict.fromkeys(map(str.upper, raw)).keys()) if len(unique_fields) < len(raw): warnings.warn("You have duplicated fields in your input. Output will contain unique fields only.") return unique_fields @property def adc(self) -> "StrStrings": if self._adc_fields is None: self._parse() return self._adc_fields @property def hp(self) -> "StrStrings": if self._hp_fields is None: self._parse() return self._hp_fields @property def is_no_hp(self) -> bool: return not self.hp @cached_property def is_disjoint_adc(self) -> bool: return set(self.adc).isdisjoint(set(self._fields)) @cached_property def is_one_adc_no_hp(self) -> bool: return len(self.adc) == 1 and not self.hp def insert(self, index: int, value: str) -> "StrStrings": copy = list(self._fields) copy.insert(index, value) return copy def __getattr__(self, attr: str) -> Any: try: return getattr(self._fields, attr) except KeyError: raise AttributeError(attr) def __iter__(self): return iter(self._fields) def __repr__(self): return f"FieldsContainer({self._fields})" class DataContainer(Container): def __init__(self, raw: Union[dict, list, None], df: Union["DataFrame", None]) -> None: super().__init__(raw) self._df = df @property def df(self) -> "DataFrame": return self._df class ADCDataContainer(DataContainer): def __init__( self, raw: Union[dict, list, None], df: Union["DataFrame", None], fields: "FieldsContainer", ): super().__init__(raw, df) self._fields = fields def __bool__(self): is_none = self.raw in [{}, None] or 
(self.raw and self._fields.is_disjoint_adc) return not is_none class HPDataContainer(DataContainer): pass class CustInstDataContainer(DataContainer): pass class HPAndCustInstDataContainer(HPDataContainer): def __init__( self, columns: Union[list, None], raw: Union[dict, list, None], df: Union["DataFrame", None], ): super().__init__(raw, df) self._columns = columns @property def columns(self): return self._columns
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/_containers.py
0.769773
0.220972
_containers.py
pypi
import abc from collections import defaultdict from typing import TYPE_CHECKING, Union, List, Dict from pandas import DataFrame from ._intervals_consts import NON_INTRA_DAY_INTERVALS from .._tools import ohlc from .._tools._dataframe import convert_dtypes, convert_str_to_timestamp if TYPE_CHECKING: from .context_collection import ADCContext, CustInstContext, HPContext from ..content._df_builder import DFBuilder class HistoryDFBuilder(abc.ABC): @property @abc.abstractmethod def dfbuilder(self) -> "DFBuilder": # for override pass @property @abc.abstractmethod def date_name(self) -> str: # for override pass def build_df_date_as_index( self, adc: "ADCContext", hp: "HPContext", cust_inst: "CustInstContext", universe: List[str], interval: Union[str, None], ) -> DataFrame: if adc.can_build_df: if not adc.raw["data"]: df = DataFrame() else: dicts_by_ric, headers = adc.get_data_headers() data = [] fields = [f.casefold() for f in adc.fields.insert(0, self.date_name)] for inst in universe: for d in dicts_by_ric.get(inst, []): datum = [inst, *(d.get(f) for f in fields)] data.append(datum) df = adc.dfbuilder.build_date_as_index( {"data": data, "headers": headers}, adc.use_field_names_in_headers, use_multiindex=False, sort_ascending=True, ) elif hp.can_build_df: df = hp.df elif cust_inst.can_join_hp_multiindex_df: df = cust_inst.join_hp_multiindex_df(hp.df) elif cust_inst.can_build_df: df = cust_inst.build_df() else: hp_data, fields = hp.get_data_fields() adc_data, headers = adc.get_data_headers() has_cust_inst_raw = bool(cust_inst.raw) df = self.build_common_df( adc_data, hp_data, headers, universe, fields, adc.use_field_names_in_headers, use_multiindex=has_cust_inst_raw, ) if has_cust_inst_raw: df = cust_inst.join_common_df(df, headers) df = convert_dtypes(df) if interval is not None and interval not in NON_INTRA_DAY_INTERVALS: df.index.names = ["Timestamp"] df.ohlc = ohlc.__get__(df, None) return df def build_common_df( self, adc_data: Dict[str, List[dict]], hp_data: 
Dict[str, List[dict]], headers: List[Dict[str, str]], universe: List[str], fields: List[str], use_field_names_in_headers: bool, use_multiindex: bool, ): date_name = self.date_name fields.insert(0, date_name) fields = [f.casefold() for f in fields] dicts_by_timestamp_by_inst = defaultdict(dict) date_name = date_name.casefold() for inst in universe: common_dicts = adc_data.get(inst, []) + hp_data.get(inst, []) for common_dict in common_dicts: date_str = common_dict[date_name] timestamp = None if date_str is not None: timestamp = convert_str_to_timestamp(date_str) dicts_by_timestamp = dicts_by_timestamp_by_inst[inst] dicts = dicts_by_timestamp.setdefault(timestamp, []) for d in dicts: common_dict_keys = set(common_dict.keys()) d_keys = set(d.keys()) keys_to_update = common_dict_keys - d_keys for key in keys_to_update: d[key] = common_dict.pop(key) if keys_to_update: break if list(common_dict.keys()) != [date_name]: dicts.append(common_dict) data = [] for inst in universe: for dicts in dicts_by_timestamp_by_inst[inst].values(): for d in dicts: datum = [inst, *(d.get(f) for f in fields)] data.append(datum) df = self.dfbuilder.build_date_as_index( {"data": data, "headers": headers}, use_field_names_in_headers, use_multiindex, sort_ascending=True ) return df
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/_history_df_builder.py
0.556641
0.199016
_history_df_builder.py
pypi
import math import threading from typing import Callable, Optional, Union from typing import TYPE_CHECKING import pandas as pd from ._ohlc_builder import ( merge_dataframes, create_df, retrieve_data_for_df, Ticks_OHLCBuilder, OHLCBuilder, ) from ._recording_control import ( RecordingControl, NoBlocking_RecordingControl, Blocking_RecordingControl, RepeatNonBlocking_RecordingControl, RepeatBlocking_RecordingControl, ) from ._stream_update_handler import ( BuildDF_StreamUpdateHandler, CollectUpdates_StreamUpdateHandler, StreamUpdateHandler, ) from ..session import get_default if TYPE_CHECKING: from ..content._universe_streams import _UniverseStreams class PricingRecorder: """ Pricing recorder class allows to record updates from server. Create dataframes based on received updates. Parameters ---------- stream : _UniverseStreams UniverseStreams object """ def __init__(self, stream: "_UniverseStreams"): self._stream = stream self._logger = get_default().logger() self._event = threading.Event() self._frequency = None self._duration = None self._ticks_per_bar = None self._on_data = None self._record_called = 0 self.is_running = False self._ohlc_builder: Union[OHLCBuilder, None] = None self._update_handler: Optional[StreamUpdateHandler] = None self._recording_control: Optional[RecordingControl] = None @staticmethod def _parse_input_frequency_and_duration(value: str) -> int: last_char = value[-1] if last_char not in ["s", "h", "d", "m"] and not value.endswith("min"): raise ValueError( "Please provide 'duration' or 'frequency' value in valid format. 
For example: '10s', '2min', '1h'" ) result = None try: if "s" == last_char: result = int(value[:-1]) elif value.endswith("min"): seconds = int(value[:-3]) result = seconds * 60 elif "h" == last_char: seconds = int(value[:-1]) result = seconds * 3600 elif "d" == last_char: seconds = int(value[:-1]) result = seconds * 3600 * 24 elif "m" == last_char: seconds = int(value[:-1]) result = seconds * 3600 * 24 * 30 except ValueError: raise ValueError("Please provide 'duration' value in valid format. For example: '10s', '2min', '1h'") return result @staticmethod def _validate_count_argument(ticks_per_bar): try: ticks_per_bar = int(ticks_per_bar) except ValueError: raise ValueError( "Invalid argument. Please provide 'ticks_per_bar' in the following format: '10', '100', '500'" ) if ticks_per_bar <= 0: raise ValueError("Invalid argument. 'ticks_per_bar' should be more then 0") def _validate_arguments(self, frequency: str, duration: str, ticks_per_bar: str): if ticks_per_bar != "1" and frequency != "tick": raise ValueError("Please provide 'tick' value as frequency when you are using 'ticks_per_bar' argument.") self._validate_count_argument(ticks_per_bar) if duration and frequency != "tick": frequency = self._parse_input_frequency_and_duration(frequency) duration = self._parse_input_frequency_and_duration(duration) self._expected_count_of_callbacks = duration / frequency float_part, self._expected_count_of_callbacks = math.modf(self._expected_count_of_callbacks) self._callback_called_count = 0 if duration % frequency: self._expected_count_of_callbacks += 1 if frequency > duration: raise ValueError("Please check your arguments, 'duration' should be higher that 'frequency'.") @staticmethod def _check_df(df: pd.DataFrame) -> pd.DataFrame: if isinstance(df, pd.DataFrame): df.fillna(pd.NA, inplace=True) else: df = pd.DataFrame() return df def record( self, frequency: str = "tick", duration: "str" = None, ticks_per_bar: str = "1", on_data: Callable = None, ): """ Starts recording 
updates from server and save it in memory Parameters ---------- frequency : str, optional Using to calculate ohlc based on received updates during period that was provided duration : str, optional Duration of recording data. Could be provided in seconds, minutes, hours ticks_per_bar : str, optional Count of ticks to record on_data : function, optional Callback which is calling with 'frequency' and receive dataframe with calculated ohlc from last updates and recorder object. Frequency has to be provided Returns ------- Examples ------- Start recording all updates during 15 seconds and calculate ohlc >>> import refinitiv.data as rd >>> stream = rd.open_pricing_stream(universe=['EUR='], fields=['BID', 'ASK', 'OPEN_PRC']) >>> stream.recorder.record(duration="15s") >>> stream.recorder.stop() >>> stream.close() >>> history = stream.recorder.get_history() >>> history.ohlc("5s") Start recording updates and calculate ohlc by using 'frequency' and call 'callback' function with updated ohlc dataframe every 5 seconds >>> import refinitiv.data as rd >>> stream = rd.open_pricing_stream(universe=['EUR='], fields=['BID', 'ASK', 'OPEN_PRC']) >>> >>> >>> def callback(dataframe, recorder): ... print(dataframe) >>> >>> stream.recorder.record(frequency="5s", duration="15s", on_data=callback) >>> stream.recorder.stop() >>> stream.close() >>> history = stream.recorder.get_history() """ if self._stream.is_closed: raise ConnectionError("Stream is closed. 
Cannot record.") self._validate_arguments(frequency, duration, ticks_per_bar) self._frequency = frequency self._duration = duration self._ticks_per_bar = int(ticks_per_bar) self._on_data = on_data self.is_running = True frequency_tick = self._frequency == "tick" no_duration = not self._duration ticks_1 = self._ticks_per_bar == 1 ticks_not_1 = self._ticks_per_bar != 1 # stream.recorder.record(frequency='tick') if all([frequency_tick, no_duration, ticks_1]): if not isinstance(self._update_handler, CollectUpdates_StreamUpdateHandler): self._update_handler = CollectUpdates_StreamUpdateHandler(self._stream, recorder=self) self._recording_control = NoBlocking_RecordingControl(self._update_handler) self._recording_control.start_recording() # stream.recorder.record(frequency='tick', ticks_per_bar=10) elif all([frequency_tick, no_duration, ticks_not_1]): self._create_ohlc_builder(Ticks_OHLCBuilder) self._update_handler = BuildDF_StreamUpdateHandler( self._stream, self._ohlc_builder, self._ticks_per_bar, recorder=self, on_record_callback=on_data, ) self._recording_control = NoBlocking_RecordingControl(self._update_handler) self._recording_control.start_recording() # stream.recorder.record(frequency='tick', duration="60s") elif all([frequency_tick, duration, ticks_1]): if not isinstance(self._update_handler, CollectUpdates_StreamUpdateHandler): self._update_handler = CollectUpdates_StreamUpdateHandler(self._stream, recorder=self) self._recording_control = Blocking_RecordingControl(self._update_handler) interval = self._parse_input_frequency_and_duration(self._duration) self._recording_control.start_recording(interval) self.stop() # stream.recorder.record(frequency='tick', duration="60s", ticks_per_bar=10) elif all([frequency_tick, duration, ticks_not_1]): self._create_ohlc_builder(Ticks_OHLCBuilder) self._update_handler = BuildDF_StreamUpdateHandler( self._stream, self._ohlc_builder, self._ticks_per_bar, self, self._on_data, ) self._recording_control = 
Blocking_RecordingControl(self._update_handler) interval = self._parse_input_frequency_and_duration(self._duration) self._recording_control.start_recording(interval) self.stop() # stream.recorder.record(frequency='5s') elif all([frequency, no_duration, ticks_1]): self._create_ohlc_builder(OHLCBuilder) self._update_handler = CollectUpdates_StreamUpdateHandler(self._stream, recorder=self) self._recording_control = RepeatNonBlocking_RecordingControl( self._on_data, self._update_handler, self._ohlc_builder, self._logger, self, ) interval = self._parse_input_frequency_and_duration(self._frequency) self._recording_control.start_recording(interval) # stream.recorder.record(frequency='5s', duration="17s") elif all([frequency, duration, ticks_1]): self._create_ohlc_builder(OHLCBuilder) self._update_handler = CollectUpdates_StreamUpdateHandler(self._stream, recorder=self) duration = self._parse_input_frequency_and_duration(self._duration) self._recording_control = RepeatBlocking_RecordingControl( duration, self._on_data, self._update_handler, self._ohlc_builder, self._logger, self, ) interval = self._parse_input_frequency_and_duration(self._frequency) self._recording_control.start_recording(interval) self.stop() else: raise ValueError( f"Cannot cover case when " f"frequency={self._frequency}, " f"duration={self._duration}, " f"ticks_per_bar={self._ticks_per_bar}" ) def _create_ohlc_builder(self, klass): if not isinstance(self._ohlc_builder, klass): self._ohlc_builder = klass(self._frequency, self._stream.universe, self._stream.fields) def get_history(self) -> "pd.DataFrame": dfs = [] if self._frequency == "tick" and self._ticks_per_bar == 1: updates_by_stream_name = self._update_handler.updates_by_stream_name for universe, stream_data in updates_by_stream_name.items(): timestamps, data, fields = retrieve_data_for_df(stream_data) dataframe = create_df(data, timestamps, fields, universe) dfs.append(dataframe) if not dfs: msg = "We didn't receive any updates. 
Dataframe couldn't be created." self._logger.warning(msg) df = pd.DataFrame() else: df = merge_dataframes(dfs) else: df = self._check_df(self._ohlc_builder.ohlc_df) return df def stop(self): """ Stop recording updates and cancel repeat timer for creating ohlc dataframes. """ if not self.is_running: return self.is_running = False self._recording_control.stop_recording() def delete(self): """Delete whole recorded updates""" self._recording_control.delete_recording()
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/_pricing_recorder.py
0.8498
0.15393
_pricing_recorder.py
pypi
from typing import TYPE_CHECKING import pandas as pd from .._tools._dataframe import convert_dtypes if TYPE_CHECKING: from .context_collection import ADCContext, HPAndCustInstContext def convert_types(column: list, column_names: list) -> list: date_columns = [ i for i, column_name in enumerate(column_names) if any(i for i in ["Date", "date", "_DT", "DATE"] if i in column_name) and all(i if i not in column_name else False for i in ["DateType", "Dates"]) ] result = [i if i is not None and i != "" else pd.NA for i in column] if date_columns: for i in date_columns: result[i] = pd.to_datetime(result[i]) return result class DataDFBuilder: @staticmethod def build_df( adc: "ADCContext", hp_and_cust_inst: "HPAndCustInstContext", ) -> pd.DataFrame(): if not adc.raw and not hp_and_cust_inst.raw: return pd.DataFrame() elif hp_and_cust_inst.can_build_df: return hp_and_cust_inst.df elif adc.can_build_df: return adc.df adc_headers_names = adc.headers_names columns = adc_headers_names + hp_and_cust_inst.columns if not any(columns): return pd.DataFrame() else: if not adc_headers_names and hp_and_cust_inst.columns: columns.insert(0, "Instrument") elif "instrument" in columns: columns[columns.index("instrument")] = "Instrument" adc_data = adc.get_data_wid_universe_as_index() data = [] for universe in hp_and_cust_inst.raw: if universe in adc_data: for column in adc_data[universe]: column.extend(hp_and_cust_inst.raw[universe]) data.append(column) else: tmpl = [universe] + [pd.NA] * (len(adc_headers_names) - 1) + hp_and_cust_inst.raw[universe] adc_data[universe] = tmpl data.append(tmpl) return convert_dtypes(pd.DataFrame(data, columns=columns))
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/_data_df_builder.py
0.514156
0.248101
_data_df_builder.py
pypi
from contextlib import AbstractContextManager from typing import Callable, Iterable, List, Optional, TYPE_CHECKING, Union import pandas as pd from ._mixed_streams import Stream from ._pricing_recorder import PricingRecorder from .._core.session import get_default, raise_if_closed from .._tools import cached_property, iterator_str_arg_parser from ..content._universe_stream import _UniverseStream if TYPE_CHECKING: from .. import OpenState def make_callback(on_data, logger): def callback(update, ric, stream): try: stream = PricingStream(stream) df = pd.DataFrame(update, index=[ric]) on_data(df, ric, stream) except Exception as error: logger.error(error) return callback def open_pricing_stream( universe: Union[str, Iterable[str]], fields: Union[str, List[str]] = None, service: Optional[str] = None, on_data: Optional[Callable] = None, ) -> "PricingStream": """ Creates and opens a pricing stream. Parameters ---------- universe : str | List[str] Instruments to request. fields : str | list, optional Fields to request. service : str, optional Name of the streaming service publishing the instruments. on_data : function, optional Callback function. Returns ---------- PricingStream Examples ------- >>> import refinitiv.data as rd >>> def callback(updated_data, ric, stream): ... 
print(updated_data) >>> pricing_stream = rd.open_pricing_stream(universe=['EUR='], fields=['BID', 'ASK', 'OPEN_PRC'], on_data=callback) # noqa """ session = get_default() raise_if_closed(session) logger = session.logger() universe = iterator_str_arg_parser.get_list(universe) _stream = Stream(universe=universe, fields=fields, service=service) if on_data: _stream.on_update(make_callback(on_data, logger)) _stream.on_refresh(make_callback(on_data, logger)) stream = PricingStream(_stream) stream.open() return stream class PricingStream(AbstractContextManager): def __init__(self, stream): self._stream = stream def __enter__(self): self.open() return self def __exit__(self, exc_type, exc_value, exc_traceback): self.close() def open(self, with_updates: bool = True) -> "OpenState": return self._stream.open(with_updates=with_updates) def close(self) -> "OpenState": return self._stream.close() def get_snapshot( self, universe: Union[str, List[str], None] = None, fields: Optional[List[str]] = None, convert: bool = True, ) -> "pd.DataFrame": return self._stream.get_snapshot(universe=universe, fields=fields, convert=convert) def _get_fields(self, universe: str, fields: Optional[list] = None) -> dict: return self._stream._get_fields(universe=universe, fields=fields) def add_instruments(self, *args): self._stream.add_instruments(*args) def remove_instruments(self, *args): self._stream.remove_instruments(*args) def __getitem__(self, item) -> "_UniverseStream": return self._stream.__getitem__(item) def __iter__(self): return self._stream.__iter__() @cached_property def recorder(self) -> PricingRecorder: return PricingRecorder(self._stream._stream)
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/get_stream.py
0.891126
0.181136
get_stream.py
pypi
from typing import Iterable, Union from pandas import DataFrame from ._containers import ADCDataContainer, FieldsContainer, HPAndCustInstDataContainer, UniverseContainer from ._data_df_builder import DataDFBuilder from ._data_provider import get_data_from_stream, get_adc_data_safe from .context_collection import get_adc_context, get_hp_and_custinst_context from .._core.session import get_default, raise_if_closed from .._errors import RDError from ..content.fundamental_and_reference._data_grid_type import get_data_grid_type_by_session from ..usage_collection import FilterType, get_usage_logger from ..usage_collection._utils import ModuleName def get_data( universe: Union[str, Iterable[str]], fields: Union[str, Iterable[str], None] = None, parameters: Union[str, dict, None] = None, use_field_names_in_headers: bool = False, ) -> DataFrame: """ Retrieves pricing snapshots, as well as Fundamental and Reference data. Parameters ---------- universe: str | list Instruments to request fields: str | list, optional Fields to request parameters: str | dict, optional Single key=value global parameter or dictionary of global parameters to request use_field_names_in_headers: bool, default False If True - returns field name as column headers for data instead of title Returns ------- pandas.DataFrame Examples -------- >>> get_data(universe=['IBM.N', 'VOD.L'], fields=['BID', 'ASK']) >>> get_data( ... universe=['GOOG.O', 'AAPL.O'], ... fields=['TR.EV','TR.EVToSales'], ... parameters = {'SDate': '0CY', 'Curn': 'CAD'} ...) 
""" session = get_default() raise_if_closed(session) logger = session.logger() # Library usage logging get_usage_logger().log_func( name=f"{ModuleName.ACCESS}.get_data", func_path=f"{__name__}.get_data", kwargs=dict( universe=universe, fields=fields, parameters=parameters, use_field_names_in_headers=use_field_names_in_headers, ), desc={FilterType.SYNC, FilterType.LAYER_ACCESS}, ) exceptions = list() universe = UniverseContainer(universe) fields = FieldsContainer(fields) data_grid_type = get_data_grid_type_by_session(session) adc = get_adc_context(data_grid_type, universe, fields, use_field_names_in_headers) hp_and_cust_inst = get_hp_and_custinst_context(data_grid_type, universe, fields, use_field_names_in_headers) adc_raw, adc_df, adc_exception_msg = None, None, None if adc.can_get_data: adc_raw, adc_df, adc_exception_msg = get_adc_data_safe( params={ "universe": universe.adc, "fields": fields.adc, "parameters": parameters, "use_field_names_in_headers": use_field_names_in_headers, }, logger=logger, ) exceptions.append(adc_exception_msg) universe.calc_hp(adc_raw) stream_columns, stream_data, stream_df, hp_exception_msg = None, None, None, None if hp_and_cust_inst.can_get_data: stream_columns, stream_data, stream_df, hp_exception_msg = get_data_from_stream( universe.hp_and_cust_inst, fields.hp, logger ) exceptions.append(hp_exception_msg) if exceptions and all(exceptions): except_msg = "\n\n".join(exceptions) raise RDError(-1, except_msg) hp_and_cust_inst_data = HPAndCustInstDataContainer(stream_columns, stream_data, stream_df) adc_data = ADCDataContainer(adc_raw, adc_df, fields) adc.set_data(adc_data=adc_data, hp_data=hp_and_cust_inst_data) hp_and_cust_inst.set_data(adc_data=adc_data, hp_data=hp_and_cust_inst_data) df = DataDFBuilder.build_df(adc, hp_and_cust_inst) df.rename(columns={"instrument": "Instrument"}, inplace=True) return df
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/get_data_func.py
0.823612
0.193604
get_data_func.py
pypi
from logging import Logger from typing import Dict, List, Optional, Union, Tuple from typing import TYPE_CHECKING from pandas import DataFrame from refinitiv.data.errors import ScopeError, RDError from ._data_df_builder import convert_types from ._intervals_consts import INTERVALS, EVENTS_INTERVALS from .._tools import DEBUG from .._tools._dataframe import convert_dtypes from ..content import custom_instruments, fundamental_and_reference, historical_pricing if TYPE_CHECKING: from logging import Logger def get_hp_data( universe: List[str], fields: List[str], interval: Optional[str], start: Optional[str], end: Optional[str], adjustments: Optional[str], count: Optional[int], logger: Logger, ) -> Tuple[Union[Dict, None], Union[DataFrame, None], Union[str, None]]: """ Gets historical pricing raw data. Parameters ---------- universe : str / list Instruments to request. fields : str / list Fields for request. interval: str, optional Consolidation interval. start : str, optional Start date. end : str, optional End date. adjustments : str, optional Adjustments for request. count : int, optional Number of data rows. logger : Logger Session logger. Returns ------- raw : dict or None: Historical pricing raw data. df : DataFrame or None Historical pricing dataframe. exception_msg : str or None API exception message. 
""" raw = None df = None exception_msg = None if interval in EVENTS_INTERVALS: definition = historical_pricing.events.Definition( universe=universe, eventTypes=INTERVALS[interval]["event_types"], start=start, end=end, adjustments=adjustments, count=count, fields=fields, ) else: interval = INTERVALS[interval]["pricing"] if interval is not None else interval definition = historical_pricing.summaries.Definition( universe=universe, interval=interval, start=start, end=end, adjustments=adjustments, count=count, fields=fields, ) try: response = definition.get_data() DEBUG and logger.debug(f"HISTORICAL_PRICING --->\n{response.data.df.to_string()}\n") raw = response.data.raw if isinstance(raw, dict): raw = [raw] df = response.data.df except RDError as hp_error: DEBUG and logger.exception(f"Failure sending request with {definition}, error:{hp_error}") exception_msg = hp_error.message except Exception as exc: DEBUG and logger.exception(f"Failure sending request with {definition}. {exc}") exception_msg = str(exc) return raw, df, exception_msg def get_adc_data( universe: List[str], fields: List[str], parameters: dict, use_field_names_in_headers: bool, logger: Logger, ) -> Tuple[Union[Dict, None], Union[DataFrame, None], Union[str, None]]: """ Gets ADC data. Parameters ---------- universe : list of str Instruments for request. fields : list of str Fields for request. parameters : dict Precalculated parameters for request. use_field_names_in_headers : bool Returns fields names in headers instead of title. logger : Logger Session logger. Returns ------- raw : dict or None: ADC raw data. df : DataFrame or None ADC dataframe. exception_msg : str or None API exception message. 
""" raw = None df = None exception_msg = None definition = fundamental_and_reference.Definition( universe=universe, fields=fields, parameters=parameters, row_headers="date", use_field_names_in_headers=use_field_names_in_headers, ) try: response = definition.get_data() raw = response.data.raw df = response.data.df DEBUG and logger.debug(f"ADC --->\n{response.data.df.to_string()}\n") except ScopeError as scope_error: DEBUG and logger.exception(f"Failure sending request with {definition}. {scope_error}") exception_msg = ( f"Insufficient scope for key={scope_error.key}, " f"method={scope_error.method} failed.\n " f"Required scope: {' OR '.join(map(str, scope_error.required_scope))}\n " f"Missing scopes: {' OR '.join(map(str, scope_error.missing_scopes))}" ) except RDError as adc_error: DEBUG and logger.exception(f"Failure sending request with {definition}. {adc_error}") exception_msg = adc_error.message except Exception as exc: DEBUG and logger.exception(f"Failure sending request with {definition}. {exc}") exception_msg = str(exc) return raw, df, exception_msg def get_adc_data_safe(params: dict, logger: "Logger") -> Tuple[dict, DataFrame, Union[str, None]]: """ Gets data from ADC and handles exceptions, if necessary. Parameters ---------- params : dict Input parameters with instruments and fields. logger : Logger Session logger. Returns ------- raw : dict ADC raw data. df : DataFrame ADC dataframe. exception_msg : str or None API exception message, if returned. 
""" raw = {} df = DataFrame() exception_msg = None try: fields = params.get("fields", "") universe = params["universe"] logger.info(f"Requesting {fields} for {universe}") response = fundamental_and_reference.Definition(**params).get_data() DEBUG and logger.debug(f"ADC --->\n{response.data.df.to_string()}\n") request_messages = response.request_message statuses = response.http_status if not isinstance(response.request_message, list): request_messages = [response.request_message] statuses = [response.http_status] for request, status in zip(request_messages, statuses): path = request.url.path current_universe = path.rsplit("/", 1)[-1] if current_universe not in universe: current_universe = universe logger.info(f"Request to {path} with {fields} for {current_universe}\nstatus: {status}\n") data = response.data raw = data.raw df = data.df except ScopeError as scope_error: DEBUG and logger.exception( f"Failure sending request with " f"{params.get('fields', '')} for {params['universe']}. " f"{scope_error}" ) exception_msg = ( f"Insufficient scope for key={scope_error.key}, " f"method={scope_error.method} failed.\n " f"Required scope: {' OR '.join(map(str, scope_error.required_scope))}\n " f"Missing scopes: {' OR '.join(map(str, scope_error.missing_scopes))}" ) except RDError as adc_error: DEBUG and logger.exception( f"Failure sending request with {params.get('fields', '')} for {params['universe']}. {adc_error}" ) exception_msg = adc_error.message except Exception as exc: DEBUG and logger.exception( f"Failure sending request with {params.get('fields', '')} for {params['universe']}. {str(exc)}" ) exception_msg = str(exc) return raw, df, exception_msg def get_custominsts_data( universe: List[str], interval: Optional[str], start: Optional[str], end: Optional[str], count: Optional[int], logger: Logger, ) -> Tuple[Union[Dict, None], Union[DataFrame, None], Union[str, None]]: """ Get custom instruments data. Parameters ---------- universe : list of str Instruments for request. 
interval : str, optional Interval for request. start : str, optional Start date. end : str, optional End date. count : int, optional Maximum number of retrieved data. logger : Logger Session logger. Returns ------- raw : dict or None: Custom instruments raw data. df : DataFrame or None Custom instruments dataframe. exception_msg : str or None API exception message. """ raw = None df = None exception_msg = None if interval in EVENTS_INTERVALS: definition = custom_instruments.events.Definition( universe=universe, start=start, end=end, count=count, ) else: interval = INTERVALS[interval]["pricing"] if interval is not None else interval definition = custom_instruments.summaries.Definition( universe=universe, interval=interval, start=start, end=end, count=count, ) try: response = definition.get_data() raw = response.data.raw df = response.data.df DEBUG and logger.debug(f"CUSTOMINSTS --->\n{response.data.df.to_string()}\n") except RDError as cust_error: DEBUG and logger.exception(f"Failure sending request with {definition}. {cust_error}") exception_msg = cust_error.message except Exception as exc: DEBUG and logger.exception(f"Failure sending request with {definition}. {exc}") exception_msg = str(exc) return raw, df, exception_msg def get_columns_from_stream(stream: "Stream") -> List[str]: """ Gets columns names from stream. Parameters ---------- stream : Stream Pricing stream. Returns ------- list of str Columns names. """ columns = set() for _stream in stream: fields = _stream.fields or [] columns.update(fields) return list(columns) def get_columns_and_data_from_stream(stream: "Stream", fields: List[str]) -> Tuple[List[str], dict]: """ Gets columns names and raw data items from stream. Parameters ---------- stream : Stream Pricing stream. fields : list of str Input columns names. Returns ------- columns : list of strings Columns names. data Pricing raw data. 
""" stream_columns = get_columns_from_stream(stream) if fields: columns = [i for i in fields if i in stream_columns] else: columns = stream_columns data = {_stream.name: convert_types([_stream[column] for column in columns], columns) for _stream in stream} return columns, data def get_data_from_stream( universe: Union[str, List[str]], fields: Union[str, List[str]], logger: "Logger" ) -> Tuple[Union[List[str], None], Union[dict, None], DataFrame, Union[str, None]]: """ Gets pricing and custom instruments data from stream. Parameters ---------- universe : str or list of str Instruments using to get data. fields : str or list of str Instruments fields for request. logger Session logger Returns ------- columns : list of str or None Names of data columns, if returned. data : dict or None Pricing raw data, if returned. df : DataFrame Pricing dataframe, if returned, else empty DataFrame. exception_msg : str or None API exception message, if returned. """ from . import Stream logger.info(f"Requesting pricing info for fields={fields} via websocket") stream = Stream(universe=universe, fields=fields) columns, data, exception_msg, df = None, None, None, DataFrame() try: stream.open(with_updates=False) columns, data = get_columns_and_data_from_stream(stream, fields) df = stream.get_snapshot(fields=fields) if len(df.columns) == 1 or not any(_stream.fields for _stream in stream): df = DataFrame() else: df = convert_dtypes(df) stream.close() except Exception as exc: logger.debug(f"Failure retrieving data for {stream._stream.universe}") exception_msg = str(exc) return columns, data, df, exception_msg
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/_data_provider.py
0.865736
0.305179
_data_provider.py
pypi
import numpy as np from typing import List, Optional, Union from ..._types import OptDateTime from ...content.ipa._enums import DateMovingConvention, EndOfMonthConvention from ...content.ipa.dates_and_calendars.add_periods import Definition def add_periods( start_date: "OptDateTime" = None, period: str = None, calendars: Optional[List[str]] = None, currencies: Optional[List[str]] = None, date_moving_convention: Optional[Union[DateMovingConvention, str]] = None, end_of_month_convention: Optional[Union[EndOfMonthConvention, str]] = None, ) -> np.datetime64: """ Retrieves the updated date, based on the provided start date and particular period of time or calendar values. Parameters ---------- start_date: str or datetime or timedelta, optional Start date of calculation. period: str, optional Calculation time period. calendars: list of str, optional Calendars to determine the working days and national holidays for particular countries. Optional if currencies is provided. currencies: list of str, optional Currencies to use for calculation of the date for the working day or weekend. Optional if calendars is provided. date_moving_convention : DateMovingConvention or str, optional Convention for adjusting the dates. end_of_month_convention : EndOfMonthConvention or str, optional Possible values for the end of month. Returns ------- np.datetime64 Added period date Examples -------- >>> import datetime >>> import refinitiv.data as rd >>> from refinitiv.data import dates_and_calendars >>> >>> rd.open_session("platform.default") >>> >>> added_period = rd.dates_and_calendars.add_periods( ... start_date=datetime.date(2014, 1, 1), ... period="1Y", ... calendars=["BAR", "KOR"], ... date_moving_convention=dates_and_calendars.DateMovingConvention.NEXT_BUSINESS_DAY, ... end_of_month_convention=dates_and_calendars.EndOfMonthConvention.LAST28 ... 
) """ response = Definition( start_date=start_date, period=period, calendars=calendars, currencies=currencies, date_moving_convention=date_moving_convention, end_of_month_convention=end_of_month_convention, ).get_data() return np.datetime64(response.data.added_period.date)
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/dates_and_calendars/_add_periods.py
0.924513
0.463141
_add_periods.py
pypi
from dataclasses import dataclass from typing import List, Optional, Union from ..._types import OptDateTime from ...content.ipa._enums import PeriodType, DayCountBasis from ...content.ipa.dates_and_calendars.count_periods import Definition @dataclass class CountedPeriods: count: float tenor: str def count_periods( start_date: "OptDateTime" = None, end_date: "OptDateTime" = None, period_type: Optional[Union[PeriodType, str]] = None, calendars: Optional[List[str]] = None, currencies: Optional[List[str]] = None, day_count_basis: Optional[Union[DayCountBasis, str]] = None, ) -> CountedPeriods: """ Gets the quantity of time periods based on the provided start date, end date and period type (such as working day, non-working day etc). Parameters ---------- start_date: str or datetime or timedelta, optional Calculation start date. end_date: str or datetime or timedelta, optional Calculation end date. period_type : PeriodType or str, optional Date periods counting method. calendars: list of str, optional Calendars to determine the working days and national holidays for particular countries. Optional if currencies is provided. currencies: list of str, optional Currencies to use for calculation of the date for the working day or weekend. Optional if calendars is provided. day_count_basis: DayCountBasis or str, optional Predefined values for day count basis. Returns ------- CountedPeriods Counted periods object with count and tenor values. Examples -------- >>> import datetime >>> import refinitiv.data as rd >>> from refinitiv.data import dates_and_calendars >>> >>> rd.open_session("platform.default") >>> >>> counted_period = rd.dates_and_calendars.count_periods( ... start_date=datetime.timedelta(-11), ... end_date=datetime.timedelta(-3), ... period_type=dates_and_calendars.PeriodType.WORKING_DAY, ... 
currencies=["EUR"], >>>) """ response = Definition( start_date=start_date, end_date=end_date, period_type=period_type, calendars=calendars, currencies=currencies, day_count_basis=day_count_basis, ).get_data() response = CountedPeriods(response.data.counted_period.count, response.data.counted_period.tenor) return response
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/dates_and_calendars/_count_periods.py
0.9551
0.58883
_count_periods.py
pypi
import numpy as np from typing import List, Optional, Union from ..._types import OptDateTime from ...content.ipa._enums import DateScheduleFrequency, DayOfWeek from ...content.ipa.dates_and_calendars.date_schedule import Definition def date_schedule( frequency: Union[DateScheduleFrequency, str] = None, start_date: "OptDateTime" = None, end_date: "OptDateTime" = None, calendar_day_of_month: Optional[int] = None, calendars: Optional[List[str]] = None, currencies: Optional[List[str]] = None, day_of_week: Optional[Union[DayOfWeek, str]] = None, count: Optional[int] = None, ) -> List[np.datetime64]: """ Gets a list of dates based on the provided values, which can then be used as input for other functions. Parameters ---------- frequency: DateScheduleFrequency or str, optional The frequency of dates in the predefined period. start_date: str or datetime or timedelta, optional The start date of the predetermined list of dates. The start date must be earlier or equal to the end date. Mandatory if endDate is in the past. end_date: str or datetime or timedelta, optional The end date of the predetermined list of dates. If start_date is not set end_date is used to define a list of dates from today to the end date; end_date and count should not be set at a time; end_date must be later or equal to start_date. Mandatory if count is not specified. calendar_day_of_month : int, optional The number of the days of the month to which the dates are adjusted. The first date in the list is defined as the corresponding day of the month to which the start date belongs. Mandatory if frequency is set to 'Monthly'. calendars: list of str, optional Calendars to determine the working days and national holidays for particular countries. Optional if currencies is provided. currencies: list of str, optional Currencies to use for calculation of the date for the working day or weekend. Optional if calendars is provided. 
day_of_week : DayOfWeek or str, optional The day of week to which dates are adjusted. The first date in the list is defined as corresponding day of week following the start date. The last date in the list is defined as corresponding day of week preceding the end date. count : int, optional The number of dates from the start date to retrieve. Mandatory if end_date is not specified. Returns ------- List[np.datetime64] List of np.datetime64 dates. Examples -------- >>> import datetime >>> import refinitiv.data as rd >>> from refinitiv.data import dates_and_calendars >>> >>> rd.open_session("platform.default") >>> >>> dates = rd.dates_and_calendars.date_schedule( ... start_date=datetime.date(2019, 4, 30), ... count=10, ... frequency=dates_and_calendars.DateScheduleFrequency.WEEKLY, ... calendars=["EMU", "GER"], ... day_of_week="Tuesday", >>>) """ response = Definition( frequency=frequency, start_date=start_date, end_date=end_date, calendar_day_of_month=calendar_day_of_month, calendars=calendars, currencies=currencies, day_of_week=day_of_week, count=count, ).get_data() return response.data.dates
/refinitiv-data-1.3.1.tar.gz/refinitiv-data-1.3.1/refinitiv/data/_fin_coder_layer/dates_and_calendars/_date_schedule.py
0.931064
0.645748
_date_schedule.py
pypi