code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def get_frequency_aware_conv2d(
    data_format='default', freq_aware_name='frequency_aware_conv2d', *args, **kwargs
):
    """Returns a frequency-aware conv2d layer.

    Args:
        data_format (str): specifies the data format of batch input/output.
        freq_aware_name (str): name of the returned layer
        *args: position args for `keras.layers.Conv2D`.
        **kwargs: keyword args for `keras.layers.Conv2D`.

    Returns:
        A sequential model of ConcatenateFrequencyMap and Conv2D.

    Raises:
        ValueError: if a group convolution (`groups > 1`) is requested.

    References:
        Koutini, K., Eghbal-zadeh, H., & Widmer, G. (2019).
        `Receptive-Field-Regularized CNN Variants for Acoustic Scene Classification
        <https://arxiv.org/abs/1909.02859>`_.
        In Proceedings of the Detection and Classification of Acoustic Scenes and
        Events 2019 Workshop (DCASE2019).
    """
    # `groups` is the 7th positional argument of keras.layers.Conv2D, i.e. index 6.
    # The original check read args[7], which raised IndexError (not ValueError)
    # exactly when len(args) == 7 — fixed to args[6]. Also added the missing space
    # between the two halves of the error message.
    if kwargs.get('groups', 1) > 1 or (len(args) >= 7 and args[6] > 1):
        raise ValueError(
            'Group convolution is not supported with frequency_aware layer because only the last group '
            'would be frequency-aware, which might not be expected.'
        )
    freq_map_concat_layer = ConcatenateFrequencyMap(data_format=data_format)
    if data_format != _CH_DEFAULT_STR:
        kwargs['data_format'] = data_format
    conv2d = keras.layers.Conv2D(*args, **kwargs)
    return Sequential([freq_map_concat_layer, conv2d], name=freq_aware_name)
Returns a frequency-aware conv2d layer. Args: data_format (str): specifies the data format of batch input/output. freq_aware_name (str): name of the returned layer *args: position args for `keras.layers.Conv2D`. **kwargs: keyword args for `keras.layers.Conv2D`. Returns: A sequential model of ConcatenateFrequencyMap and Conv2D. References: Koutini, K., Eghbal-zadeh, H., & Widmer, G. (2019). `Receptive-Field-Regularized CNN Variants for Acoustic Scene Classification <https://arxiv.org/abs/1909.02859>`_. In Proceedings of the Detection and Classification of Acoustic Scenes and Events 2019 Workshop (DCASE2019).
get_frequency_aware_conv2d
python
keunwoochoi/kapre
kapre/composed.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/composed.py
MIT
def call(self, x):
    """Split the input audio into overlapping frames.

    Args:
        x (`Tensor`): batch audio signal in the 1D format specified at initialization.

    Returns:
        (`Tensor`): A framed tensor. The shape is
            (batch, time (frames), frame_length, channel) if `channels_last`, or
            (batch, channel, time (frames), frame_length) if `channels_first`.
    """
    frame_kwargs = {
        'frame_length': self.frame_length,
        'frame_step': self.hop_length,
        'pad_end': self.pad_end,
        'pad_value': self.pad_value,
        'axis': self.time_axis,
    }
    return tf.signal.frame(x, **frame_kwargs)
Args: x (`Tensor`): batch audio signal in the specified 1D format in initiation. Returns: (`Tensor`): A framed tensor. The shape is (batch, time (frames), frame_length, channel) if `channels_last`, or (batch, channel, time (frames), frame_length) if `channels_first`.
call
python
keunwoochoi/kapre
kapre/signal.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/signal.py
MIT
def call(self, x):
    """Compute framed energy of the input audio.

    Args:
        x (`Tensor`): batch audio signal in the 1D format specified at initialization.

    Returns:
        (`Tensor`): energy per frame. The shape is (batch, time (frames), channel)
            if `channels_last`, or (batch, channel, time (frames)) if `channels_first`.
    """
    framed = tf.signal.frame(
        x,
        frame_length=self.frame_length,
        frame_step=self.hop_length,
        pad_end=self.pad_end,
        pad_value=self.pad_value,
        axis=self.time_axis,
    )  # batch, ndim=4
    squared = tf.math.square(framed)
    # sum over the in-frame sample axis; its position depends on the data format
    if self.data_format == _CH_LAST_STR:
        sample_axis = 2
    else:
        sample_axis = 3
    energies = tf.math.reduce_sum(squared, axis=sample_axis)  # ndim=3. (b, t, ch) or (b, ch, t)
    # rescale so the value corresponds to `ref_duration` seconds of audio
    scale = self.ref_duration / (self.frame_length / self.sample_rate)
    return scale * energies
Args: x (`Tensor`): batch audio signal in the specified 1D format in initiation. Returns: (`Tensor`): A framed tensor. The shape is (batch, time (frames), channel) if `channels_last`, or (batch, channel, time (frames)) if `channels_first`.
call
python
keunwoochoi/kapre
kapre/signal.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/signal.py
MIT
def call(self, log_melgrams):
    """Compute MFCCs from log-mel spectrograms.

    Args:
        log_melgrams (float `Tensor`): a batch of log_melgrams. `(b, time, mel, ch)` if
            `channels_last` and `(b, ch, time, mel)` if `channels_first`.

    Returns:
        (float `Tensor`): MFCCs. `(batch, time, n_mfccs, ch)` if `channels_last`,
            `(batch, ch, time, n_mfccs)` if `channels_first`.
    """
    needs_permute = self.permutation is not None
    if needs_permute:
        # reshape so that the last axis is mel, as tf.signal requires
        log_melgrams = K.permute_dimensions(log_melgrams, pattern=self.permutation)
    coeffs = tf.signal.mfccs_from_log_mel_spectrograms(log_melgrams)[..., : self.n_mfccs]
    if needs_permute:
        # restore the original axis order
        coeffs = K.permute_dimensions(coeffs, pattern=self.permutation)
    return coeffs
Args: log_melgrams (float `Tensor`): a batch of log_melgrams. `(b, time, mel, ch)` if `channels_last` and `(b, ch, time, mel)` if `channels_first`. Returns: (float `Tensor`): MFCCs. `(batch, time, n_mfccs, ch)` if `channels_last`, `(batch, ch, time, n_mfccs)` if `channels_first`.
call
python
keunwoochoi/kapre
kapre/signal.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/signal.py
MIT
def _rdft(signal, dft_length): """DFT for real signals. Calculates the onesided dft, assuming real signal implies complex conjugate symetry, hence only onesided DFT is returned. Args: signal (tensor) signal to transform, assumes that the last dimension is the time dimension signal can be framed, e.g. (1, 40, 1024) for a single batch of 40 frames of length 1024 dft_length (int) - DFT length Returns: spec_real (float32 tensor) - real part of spectrogram, e.g. (1, 40, 513) for a 1024 length dft spec_imag (float32 tensor) - imag part of spectrogram, e.g. (1, 40, 513) for a 1024 length dft """ # calculate the positive frequency atoms, and tell tensorflow this is a constant. rdft_mat = _rdft_matrix(dft_length) # tflite doest support complex types so split into real and imaginary: rdft_mat_real = tf.constant(np.real(rdft_mat)) rdft_mat_imag = tf.constant(np.imag(rdft_mat)) frame_length = tf.shape(signal)[-1] # Right-padding, in case the frame length and DFT lenght are different, # pad the signal on the right hand side of the frame pad_values = tf.concat( [tf.zeros([tf.rank(signal) - 1, 2], tf.int32), [[0, dft_length - frame_length]]], axis=0 ) signal_padded = tf.pad(signal, pad_values) # matrix multiplying real and imag separately is faster than using complex types. spec_real = tf.matmul(signal_padded, rdft_mat_real) spec_imag = tf.matmul(signal_padded, rdft_mat_imag) spectrogram = tf.stack([spec_real, spec_imag], axis=-1) return spectrogram
DFT for real signals. Calculates the onesided dft, assuming real signal implies complex conjugate symetry, hence only onesided DFT is returned. Args: signal (tensor) signal to transform, assumes that the last dimension is the time dimension signal can be framed, e.g. (1, 40, 1024) for a single batch of 40 frames of length 1024 dft_length (int) - DFT length Returns: spec_real (float32 tensor) - real part of spectrogram, e.g. (1, 40, 513) for a 1024 length dft spec_imag (float32 tensor) - imag part of spectrogram, e.g. (1, 40, 513) for a 1024 length dft
_rdft
python
keunwoochoi/kapre
kapre/tflite_compatible_stft.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/tflite_compatible_stft.py
MIT
def fixed_frame(signal, frame_length, frame_step):
    """tflite-compatible tf.signal.frame for fixed-size input.

    Args:
        signal: Tensor containing signal(s).
        frame_length: Number of samples to put in each frame.
        frame_step: Sample advance between successive frames.

    Returns:
        A new tensor where the last axis of the input signal has been replaced
        by a (num_frames, frame_length) array of individual frames, where each
        frame is drawn frame_step samples after the previous one.

    Raises:
        ValueError: if signal has an undefined axis length. This routine only
            supports framing of signals whose shape is fixed at graph-build time.
    """
    signal_shape = list(signal.shape)
    length_samples = signal_shape[-1]
    if length_samples <= 0:
        raise ValueError("fixed framing requires predefined constant signal length")
    # the number of whole frames
    num_frames = max(0, 1 + (length_samples - frame_length) // frame_step)
    # define the output_shape; if we receive a None dimension, replace it with 1
    outer_dimensions = [dim if dim else 1 for dim in signal_shape[:-1]]
    # outer_dimensions = signal_shape[:-1]
    output_shape = outer_dimensions + [num_frames, frame_length]
    # Currently tflite's gather only supports axis==0, but that may still
    # work if we want the last of 1 axes.
    gather_axis = len(outer_dimensions)
    # subframe length is the largest int that is a common divisor of the frame
    # length and hop length. We will slice the signal up into these subframes
    # in order to then construct the frames.
    subframe_length = math.gcd(frame_length, frame_step)
    subframes_per_frame = frame_length // subframe_length
    subframes_per_hop = frame_step // subframe_length
    num_subframes = length_samples // subframe_length
    # define the subframe shape and the trimmed audio length, removing any unused
    # excess audio, so subframes fit exactly.
    subframe_shape = outer_dimensions + [num_subframes, subframe_length]
    trimmed_input_size = outer_dimensions + [num_subframes * subframe_length]
    # slice up the audio into subframes
    subframes = tf.reshape(
        tf.slice(signal, begin=np.zeros(len(signal_shape), np.int32), size=trimmed_input_size),
        subframe_shape,
    )
    # frame_selector is a [num_frames, 1] tensor that indexes the first subframe
    # of each frame. For example: [[0], [2], [4]]
    frame_selector = np.reshape(np.arange(num_frames) * subframes_per_hop, [num_frames, 1])
    # subframe_selector is a [1, subframes_per_frame] tensor that indexes into the
    # appropriate subframe within a frame. For example: [[0, 1, 2, 3]]
    subframe_selector = np.reshape(np.arange(subframes_per_frame), [1, subframes_per_frame])
    # Adding the 2 selector tensors together produces a [num_frames,
    # subframes_per_frame] tensor of indices to use with tf.gather to select
    # subframes from subframes. We then reshape the inner-most subframes_per_frame
    # dimension to stitch the subframes together into frames. For example:
    # [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]].
    selector = frame_selector + subframe_selector
    frames = tf.reshape(
        tf.gather(subframes, selector.astype(np.int32), axis=gather_axis), output_shape
    )
    return frames
tflite-compatible tf.signal.frame for fixed-size input. Args: signal: Tensor containing signal(s). frame_length: Number of samples to put in each frame. frame_step: Sample advance between successive frames. Returns: A new tensor where the last axis (or first, if first_axis) of input signal has been replaced by a (num_frames, frame_length) array of individual frames where each frame is drawn frame_step samples after the previous one. Raises: ValueError: if signal has an undefined axis length. This routine only supports framing of signals whose shape is fixed at graph-build time.
fixed_frame
python
keunwoochoi/kapre
kapre/tflite_compatible_stft.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/tflite_compatible_stft.py
MIT
def stft_tflite(signal, frame_length, frame_step, fft_length, window_fn, pad_end):
    """tflite-compatible implementation of tf.signal.stft.

    Compute the short-time Fourier transform of the input while avoiding tf ops
    that are not currently supported in tflite (Rfft, Range, SplitV). fft_length
    must be fixed. A window of frame_length samples is always applied. Since fixed
    (precomputed) framing must be used, signal.shape[-1] must be a concrete value
    (so "?"/None is not supported).

    NOTE(review): the padding below is rank-3 ([[0, 0], [0, 0], [0, pad_amount]]),
    which implies a (batch, ch, time) input rather than the "1D tensor" the
    original docstring claimed — confirm against callers.

    Args:
        signal: tensor containing the time-domain waveform(s) to be transformed;
            the last axis is time.
        frame_length: int, the number of points in each Fourier frame.
        frame_step: int, the number of samples to advance between successive frames.
        fft_length: int, the size of the Fourier transform to apply.
        window_fn: tf.signal window function, e.g. the return value of
            backend.get_window_fn(window_name).
        pad_end: bool, if True pads the end with zeros so that signal contains an
            integer number of frames.

    Returns:
        spectrogram: tensor containing the real and imaginary parts of the
            short-time Fourier transform, stacked on the last axis (see `_rdft`).
    """
    signal = tf.cast(signal, tf.float32)
    if pad_end:
        # the number of whole frames
        # (NOTE: kenders2000), padding is pre-calculated and thus fixed in graph
        length_samples = signal.shape[-1]
        num_steps_round_up = int(np.ceil(length_samples / frame_step))
        pad_amount = (num_steps_round_up * frame_step + frame_length - frame_step) - length_samples
        signal = tf.pad(signal, tf.constant([[0, 0], [0, 0], [0, pad_amount]]))
    # Make the window be shape (1, frame_length) instead of just frame_length
    # in an effort to help the tflite broadcast logic.
    window = tf.reshape(window_fn(frame_length), [1, frame_length])
    framed_signal = fixed_frame(signal, frame_length, frame_step)
    framed_signal *= window
    spectrogram = _rdft(framed_signal, fft_length)
    return spectrogram
tflite-compatible implementation of tf.signal.stft. Compute the short-time Fourier transform of a 1D input while avoiding tf ops that are not currently supported in tflite (Rfft, Range, SplitV). fft_length must be fixed. A Hann window is of frame_length is always applied. Since fixed (precomputed) framing must be used, signal.shape[-1] must be a specific value (so "?"/None is not supported). Args: signal: 1D tensor containing the time-domain waveform to be transformed. frame_length: int, the number of points in each Fourier frame. frame_step: int, the number of samples to advance between successive frames. fft_length: int, the size of the Fourier transform to apply. window_fn: tf.signal.window, the return of backend.get_window_fn(window_name) pad_end: bool, if true pads the end with zeros so that signal contains an integer number of frames Returns: spectrogram: Two (num_frames, fft_length) tensors containing the real and imaginary parts of the short-time Fourier transform of the input signal.
stft_tflite
python
keunwoochoi/kapre
kapre/tflite_compatible_stft.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/tflite_compatible_stft.py
MIT
def continued_fraction_arctan(x, n=100, dtype=tf.float32):
    """Continued-fraction approximation of the arctan function.

    atan is not a natively supported tflite op (or a flex op), so arctan(x) is
    approximated by evaluating its continued-fraction expansion bottom-up.
    Accuracy is poor when the argument is large.
    https://functions.wolfram.com/ElementaryFunctions/ArcTan/10/

    Args:
        x (tensor): argument tensor to calculate arctan of.
        n (int): number of iterations; larger means arctan is more accurate.
        dtype (tf.dtype): tf.float32, or tf.float64.

    Returns:
        arctan(x) (tensor): approximate value of arctan(x).
    """
    x = tf.cast(x, dtype)
    x_sq = x * x
    # innermost denominator of the continued fraction
    denom = tf.zeros(tf.shape(x), dtype) + tf.cast(n * 2 + 1, dtype)
    # unroll the fraction from the innermost term outwards
    for k in tf.range(n, 0.0, -1.0, dtype):
        denom = (k * 2.0 - 1.0) + k * k * x_sq / denom
    return x / denom
Continued fraction Approximation to the arctan function Approximate solution to arctan(x), atan is not a natively supported tflite op (or a flex op). n is the number of iterations, the high the more accurate. Accuracy is poor when the argument is large. https://functions.wolfram.com/ElementaryFunctions/ArcTan/10/ Args: x (tensor) - argument tensor to calculate arctan of n (int) - The number of iterations, large means arctan is more accurate dtype (tf.dtype) - tf.float32, or tf.float64 Returns arctan(x) (tensor) - approx value of arctan(x)
continued_fraction_arctan
python
keunwoochoi/kapre
kapre/tflite_compatible_stft.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/tflite_compatible_stft.py
MIT
def atan2_tflite(y, x, n=100, dtype=tf.float32):
    """Approximation of the atan2 function.

    atan is not a tflite supported op or flex op, thus this uses an approximation.
    Poor accuracy when either x is very small or y is very large.
    https://en.wikipedia.org/wiki/Atan2

    Args:
        y (tensor): vertical component of tangent (or imaginary part of number for phase)
        x (tensor): horizontal component of tangent (or real part of number for phase)
        n (int): The number of iterations to use for atan approximations, larger means
            arctan is more accurate
        dtype (tf.dtype): tf.float32, or tf.float64

    Returns:
        atan2(y, x) (tensor): approx value of atan2(y, x)
    """
    pi = tf.zeros(tf.shape(x), dtype) + tf.cast(np.pi, dtype)
    zeros = tf.zeros(tf.shape(x), dtype)
    # base case: atan(y/x) is already correct in the right half-plane (x > 0)
    atan2 = continued_fraction_arctan(y / x, n, dtype)
    atan2 = tf.where(x > 0, atan2, atan2)  # implicit
    # quadrant corrections for the left half-plane
    atan2 = tf.where(tf.logical_and(x < 0.0, y >= 0.0), atan2 + pi, atan2)
    atan2 = tf.where(tf.logical_and(x < 0.0, y < 0.0), atan2 - pi, atan2)
    # On the y-axis (x == 0, y != 0) atan2 is +/- pi/2 by definition; the original
    # code returned +/- pi here, disagreeing with tf.math.angle — fixed.
    atan2 = tf.where(tf.logical_and(tf.equal(x, 0.0), y > 0.0), pi / 2.0, atan2)
    atan2 = tf.where(tf.logical_and(tf.equal(x, 0.0), y < 0.0), -pi / 2.0, atan2)
    # undefined at the origin (return 0)
    atan2 = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.equal(y, 0.0)), zeros, atan2)
    return atan2
Approximation to the atan2 function atan is not a tflite supported op or flex op, thus this uses an Approximation Poor accuracy when either x is very small or y is very large. https://en.wikipedia.org/wiki/Atan2 Args: y (tensor) - vertical component of tangent (or imaginary part of number for phase) x (tensor) - horizontal component of tangent (or real part of number for phase) n (int) - The number of iterations to use for atan approximations, larger means arctan is more accurate dtype (tf.dtype) - tf.float32, or tf.float64 Returns atan2(x) (tensor) - approx value of atan2(x)
atan2_tflite
python
keunwoochoi/kapre
kapre/tflite_compatible_stft.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/tflite_compatible_stft.py
MIT
def _shape_spectrum_output(spectrums, data_format):
    """Shape batch spectrograms into the right format.

    Args:
        spectrums (`Tensor`): result of tf.signal.stft or similar, i.e., (..., time, freq).
        data_format (`str`): 'channels_first' or 'channels_last'

    Returns:
        spectrums (`Tensor`): a transposed version of input `spectrums`
    """
    if data_format != _CH_FIRST_STR:
        # channels_last: move the channel axis to the end -> (batch, time, freq, channel)
        spectrums = tf.transpose(spectrums, perm=(0, 2, 3, 1))
    # channels_first input is assumed to already be (batch, channel, time, freq)
    return spectrums
Shape batch spectrograms into the right format. Args: spectrums (`Tensor`): result of tf.signal.stft or similar, i.e., (..., time, freq). data_format (`str`): 'channels_first' or 'channels_last' Returns: spectrums (`Tensor`): a transposed version of input `spectrums`
_shape_spectrum_output
python
keunwoochoi/kapre
kapre/time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py
MIT
def call(self, x):
    """Compute STFT of the input signal.

    If the `time` axis is not the last axis of `x`, it should be transposed first.

    Args:
        x (float `Tensor`): batch of audio signals, (batch, ch, time) or
            (batch, time, ch) based on `input_data_format`.

    Return:
        (complex `Tensor`): A STFT representation of x in a 2D batch shape.
        `complex64` if `x` is `float32`, `complex128` if `x` is `float64`.
        Its shape is (batch, time, freq, ch) or (batch, ch, time, freq) depending on
        `output_data_format`, where `time` is the number of frames and `freq` is the
        number of unique fft bins, `n_fft // 2 + 1`.
    """
    waveforms = x  # (batch, ch, time) if input_data_format == 'channels_first'.
    # (batch, time, ch) if input_data_format == 'channels_last'.

    # this is needed because tf.signal.stft lives in channels_first land.
    if self.input_data_format == _CH_LAST_STR:
        waveforms = tf.transpose(
            waveforms, perm=(0, 2, 1)
        )  # always (batch, ch, time) from here

    if self.pad_begin:
        # prepend (n_fft - hop_length) zeros on the time axis
        waveforms = tf.pad(
            waveforms, tf.constant([[0, 0], [0, 0], [int(self.n_fft - self.hop_length), 0]])
        )

    stfts = tf.signal.stft(
        signals=waveforms,
        frame_length=self.win_length,
        frame_step=self.hop_length,
        fft_length=self.n_fft,
        window_fn=self.window_fn,
        pad_end=self.pad_end,
        name='%s_tf.signal.stft' % self.name,
    )  # (batch, ch, time, freq)

    if self.output_data_format == _CH_LAST_STR:
        stfts = tf.transpose(stfts, perm=(0, 2, 3, 1))  # (batch, t, f, ch)

    return stfts
Compute STFT of the input signal. If the `time` axis is not the last axis of `x`, it should be transposed first. Args: x (float `Tensor`): batch of audio signals, (batch, ch, time) or (batch, time, ch) based on input_data_format Return: (complex `Tensor`): A STFT representation of x in a 2D batch shape. `complex64` if `x` is `float32`, `complex128` if `x` is `float64`. Its shape is (batch, time, freq, ch) or (batch. ch, time, freq) depending on `output_data_format` and `time` is the number of frames, which is `((len_src + (win_length - hop_length) / hop_length) // win_length )` if `pad_end` is `True`. `freq` is the number of fft unique bins, which is `n_fft // 2 + 1` (the unique components of the FFT).
call
python
keunwoochoi/kapre
kapre/time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py
MIT
def call(self, x):
    """Compute inverse STFT of the input STFT.

    Args:
        x (complex `Tensor`): batch of STFTs, (batch, ch, time, freq) or
            (batch, time, freq, ch) depending on `input_data_format`

    Return:
        (`float`): audio signals of x. Shape: 1D batch shape, i.e.
            (batch, time, ch) or (batch, ch, time) depending on `output_data_format`
    """
    stfts = x
    # tf.signal.inverse_stft expects (..., time, freq), i.e. a channels-first
    # layout, so move the channel axis forward first if needed.
    if self.input_data_format == _CH_LAST_STR:
        stfts = tf.transpose(stfts, perm=(0, 3, 1, 2))  # now always (b, ch, t, f)

    istft_name = '%s_tf.signal.istft' % self.name
    waveforms = tf.signal.inverse_stft(
        stfts=stfts,
        frame_length=self.win_length,
        frame_step=self.hop_length,
        fft_length=self.n_fft,
        window_fn=self.window_fn,
        name=istft_name,
    )  # (batch, ch, time)

    # match the requested output layout
    if self.output_data_format == _CH_LAST_STR:
        waveforms = tf.transpose(waveforms, perm=(0, 2, 1))  # (batch, time, ch)

    return waveforms
Compute inverse STFT of the input STFT. Args: x (complex `Tensor`): batch of STFTs, (batch, ch, time, freq) or (batch, time, freq, ch) depending on `input_data_format` Return: (`float`): audio signals of x. Shape: 1D batch shape. I.e., (batch, time, ch) or (batch, ch, time) depending on `output_data_format`
call
python
keunwoochoi/kapre
kapre/time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py
MIT
def call(self, x):
    """Compute the phase of the complex input.

    Args:
        x (complex `Tensor`): input complex tensor

    Returns:
        (float `Tensor`): phase of `x` (Radian)
    """
    if not self.approx_atan_accuracy:
        return tf.math.angle(x)
    # tflite-compatible path: approximate atan2 from the imag/real parts
    imag_part = tf.math.imag(x)
    real_part = tf.math.real(x)
    return atan2_tflite(imag_part, real_part, n=self.approx_atan_accuracy)
Args: x (complex `Tensor`): input complex tensor Returns: (float `Tensor`): phase of `x` (Radian)
call
python
keunwoochoi/kapre
kapre/time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py
MIT
def call(self, x):
    """Convert magnitude to decibel scale.

    Args:
        x (`Tensor`): float tensor. Can be batch or not. Something like magnitude of STFT.

    Returns:
        (`Tensor`): decibel-scaled float tensor of `x`.
    """
    db_kwargs = {
        'ref_value': self.ref_value,
        'amin': self.amin,
        'dynamic_range': self.dynamic_range,
    }
    return backend.magnitude_to_decibel(x, **db_kwargs)
Args: x (`Tensor`): float tensor. Can be batch or not. Something like magnitude of STFT. Returns: (`Tensor`): decibel-scaled float tensor of `x`.
call
python
keunwoochoi/kapre
kapre/time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py
MIT
def call(self, x):
    """Apply filterbank to `x`.

    Args:
        x (`Tensor`): float tensor in 2D batch shape,
            (b, t, fr, ch) or (b, ch, t, fr) depending on the data format.
    """
    # contract the frequency axis of x with axis 0 of the filterbank;
    # tensordot appends the new frequency axis last:
    # ch_last -> (b, t, ch, new_fr), ch_first -> (b, ch, t, new_fr)
    filtered = tf.tensordot(x, self.filterbank, axes=(self.freq_axis, 0))
    if self.data_format != _CH_LAST_STR:
        return filtered
    # channels_last: move the channel axis back to the end -> (b, t, new_fr, ch)
    return tf.transpose(filtered, (0, 1, 3, 2))
Apply filterbank to `x`. Args: x (`Tensor`): float tensor in 2D batch shape.
call
python
keunwoochoi/kapre
kapre/time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py
MIT
def call(self, x):
    """Compute the temporal delta of the input spectrogram via a 1-D convolution
    over the time axis.

    Args:
        x (`Tensor`): a 2d batch (b, t, f, ch) or (b, ch, t, f)

    Returns:
        (`Tensor`): A tensor with the same shape as input data.
    """
    # work internally in channels_last
    if self.data_format == 'channels_first':
        x = K.permute_dimensions(x, (0, 2, 3, 1))

    # pad `n` frames on both ends of the time axis so the output keeps its length
    x = tf.pad(
        x, tf.constant([[0, 0], [self.n, self.n], [0, 0], [0, 0]]), mode=self.mode
    )  # pad over time
    # linear-slope kernel [-n, ..., 0, ..., n] applied along time only
    kernel = K.arange(-self.n, self.n + 1, 1, dtype=K.floatx())
    kernel = K.reshape(kernel, (-1, 1, 1, 1))  # time, freq, in_ch, out_ch

    # NOTE(review): `self.denom` presumably normalizes the regression slope
    # (e.g. 2 * sum(i^2)) — confirm against the layer's __init__.
    x = K.conv2d(x, kernel, data_format=_CH_LAST_STR) / self.denom

    # restore channels_first layout if that is the configured format
    if self.data_format == _CH_FIRST_STR:
        x = K.permute_dimensions(x, (0, 3, 1, 2))

    return x
Args: x (`Tensor`): a 2d batch (b, t, f, ch) or (b, ch, t, f) Returns: (`Tensor`): A tensor with the same shape as input data.
call
python
keunwoochoi/kapre
kapre/time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency.py
MIT
def call(self, x):
    """Compute STFT of the input signal using the tflite-compatible path.

    If the `time` axis is not the last axis of `x`, it should be transposed first.

    Args:
        x (float `Tensor`): batch of audio signals, (batch, ch, time) or
            (batch, time, ch) based on `input_data_format`.

    Return:
        (real `Tensor`): A STFT representation of x in a 2D batch shape. The last
        dimension has size two and contains the real and imaginary parts of the
        stft. Its shape is (batch, time, freq, ch, 2) or (batch, ch, time, freq, 2)
        depending on `output_data_format`, where `time` is the number of frames and
        `freq` is the number of unique fft bins, `n_fft // 2 + 1`.
    """
    waveforms = x  # (batch, ch, time) if input_data_format == 'channels_first'.
    # (batch, time, ch) if input_data_format == 'channels_last'.

    # this is needed because tf.signal.stft lives in channels_first land.
    if self.input_data_format == _CH_LAST_STR:
        waveforms = tf.transpose(
            waveforms, perm=(0, 2, 1)
        )  # always (batch, ch, time) from here

    if self.pad_begin:
        # prepend (n_fft - hop_length) zeros on the time axis
        waveforms = tf.pad(
            waveforms, tf.constant([[0, 0], [0, 0], [int(self.n_fft - self.hop_length), 0]])
        )

    stfts = stft_tflite(
        waveforms,
        frame_length=self.win_length,
        frame_step=self.hop_length,
        fft_length=self.n_fft,
        window_fn=self.window_fn,
        pad_end=self.pad_end,
    )  # (batch, ch, time, freq, re/imag)

    if self.output_data_format == _CH_LAST_STR:
        # tflite compatible stft keeps re/imag as the trailing axis
        stfts = tf.transpose(stfts, perm=(0, 2, 3, 1, 4))  # (batch, t, f, ch, re/im)

    return stfts
Compute STFT of the input signal. If the `time` axis is not the last axis of `x`, it should be transposed first. Args: x (float `Tensor`): batch of audio signals, (batch, ch, time) or (batch, time, ch) based on input_data_format Return: (real `Tensor`): A STFT representation of x in a 2D batch shape. The last dimension is size two and contains the real and imaginary parts of the stft. Its shape is (batch, time, freq, ch, 2) or (batch. ch, time, freq, 2) depending on `output_data_format` and `time` is the number of frames, which is `((len_src + (win_length - hop_length) / hop_length) // win_length )` if `pad_end` is `True`. `freq` is the number of fft unique bins, which is `n_fft // 2 + 1` (the unique components of the FFT).
call
python
keunwoochoi/kapre
kapre/time_frequency_tflite.py
https://github.com/keunwoochoi/kapre/blob/master/kapre/time_frequency_tflite.py
MIT
def test_spec_augment_apply_masks_to_axis(inputs):
    """Tests _apply_masks_to_axis: the shape is kept and exceptions are raised as expected."""
    data_format, axis, mask_param, n_masks = inputs
    batch_src, input_shape = get_spectrogram(data_format)

    spec_augment = SpecAugment(
        input_shape=input_shape,
        freq_mask_param=5,
        time_mask_param=10,
        n_freq_masks=4,
        n_time_masks=3,
        mask_value=0.0,
        data_format=data_format,
    )

    if axis not in (0, 1, 2):
        # An out-of-range axis must trigger NotImplementedError.
        # We use batch_src instead of batch_src[0] to simulate a 4D spectrogram.
        with pytest.raises(NotImplementedError):
            spec_augment._apply_masks_to_axis(batch_src, axis, mask_param, n_masks)
    elif mask_param != 5:
        # A forced bad mask_param must trigger ValueError; otherwise the inputs
        # are ok and we only have to test that shapes are kept.
        with pytest.raises(ValueError):
            spec_augment._apply_masks_to_axis(batch_src[0], axis, mask_param, n_masks)
    else:
        # Valid inputs: the transformation must keep the masked axis length.
        mask = spec_augment._apply_masks_to_axis(batch_src[0], axis, mask_param, n_masks)
        np.testing.assert_equal(mask.shape[axis], input_shape[axis])
Tests the method _apply_masks_to_axis to see if shape is kept and exceptions are caught
test_spec_augment_apply_masks_to_axis
python
keunwoochoi/kapre
tests/test_augmentation.py
https://github.com/keunwoochoi/kapre/blob/master/tests/test_augmentation.py
MIT
def test_spec_augment_depth_exception():
    """Checks that SpecAugment fails if the spectrogram depth is greater than 1."""
    data_format = "default"
    with pytest.raises(RuntimeError):
        # 4 channels is deeper than SpecAugment supports
        batch_src, input_shape = get_spectrogram(data_format=data_format, n_ch=4)
        layer = SpecAugment(
            input_shape=input_shape, freq_mask_param=5, time_mask_param=10, data_format=data_format
        )
        model = tf.keras.Sequential()
        model.add(layer)
        _ = model(batch_src, training=True)[0]
Checks that SpecAugments fails if Spectrogram has depth greater than 1.
test_spec_augment_depth_exception
python
keunwoochoi/kapre
tests/test_augmentation.py
https://github.com/keunwoochoi/kapre/blob/master/tests/test_augmentation.py
MIT
def test_spec_augment_layer(data_format, atol=1e-4):
    """Tests the complete layer, checking that the parameter `training` has the
    expected behaviour.

    Args:
        data_format (str): data format fixture for the spectrogram.
        atol (float): absolute tolerance for the inference-mode (no-op) comparison.
    """
    batch_src, input_shape = get_spectrogram(data_format)

    model = tf.keras.Sequential()
    spec_augment = SpecAugment(
        input_shape=input_shape,
        freq_mask_param=5,
        time_mask_param=10,
        n_freq_masks=4,
        n_time_masks=3,
        mask_value=0.0,
        data_format=data_format,
    )
    model.add(spec_augment)

    # First, enforce training to True and check the shapes
    spec_augmented = model(batch_src, training=True)
    np.testing.assert_equal(model.layers[0].output_shape[1:], spec_augmented[0].shape)

    # Second, check that it doesn't change anything in default (inference) mode.
    # BUGFIX: the third positional argument of assert_allclose is `rtol`, not
    # `atol` — the original call silently compared with rtol=1e-4 and atol=0.
    spec_augmented = model(batch_src)
    np.testing.assert_allclose(spec_augmented, batch_src, atol=atol)
Tests the complete layer, checking if the parameter `training` has the expected behaviour.
test_spec_augment_layer
python
keunwoochoi/kapre
tests/test_augmentation.py
https://github.com/keunwoochoi/kapre/blob/master/tests/test_augmentation.py
MIT
def test_filterbank_log(sample_rate, n_freq, n_bins, bins_per_octave, f_min, spread):
    """It only tests if the function is a valid wrapper"""
    log_fb = KPB.filterbank_log(
        sample_rate=sample_rate,
        n_freq=n_freq,
        n_bins=n_bins,
        bins_per_octave=bins_per_octave,
        f_min=f_min,
        spread=spread,
    )
    # the filterbank must use the Keras float type and map n_freq -> n_bins
    expected_shape = (n_freq, n_bins)
    assert log_fb.dtype == K.floatx()
    assert log_fb.shape == expected_shape
It only tests if the function is a valid wrapper
test_filterbank_log
python
keunwoochoi/kapre
tests/test_backend.py
https://github.com/keunwoochoi/kapre/blob/master/tests/test_backend.py
MIT
def allclose_phase(a, b, atol=1e-3):
    """Assert two phase arrays agree.

    Comparing sin/cos of the angles rather than the raw values avoids
    spurious failures from wrap-around (e.g. -pi vs pi), and sidesteps the
    fact that a tiny complex-value error yields a huge phase error when the
    norm is near zero.
    """
    for trig in (np.sin, np.cos):
        np.testing.assert_allclose(trig(a), trig(b), atol=atol)
Testing phase. Remember that a small error in complex value may lead to a large phase difference if the norm is very small. Therefore, it makes more sense to test it on the complex value itself rather than breaking it down to phase.
allclose_phase
python
keunwoochoi/kapre
tests/test_time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py
MIT
def assert_approx_phase(a, b, atol=1e-2, acceptable_fail_ratio=0.01):
    """Assert two phase arrays are approximately equal.

    TFLite phase is approximate: a few bins will always have a large error,
    so instead of requiring every bin to match, count the bins outside
    tolerance and require their fraction to stay below a threshold.

    Args:
        a, b (np.ndarray): phase arrays of the same shape.
        atol (float): per-bin absolute tolerance.
        acceptable_fail_ratio (float): maximum allowed fraction of bins
            whose absolute difference exceeds `atol`.
    """
    count_failed = np.sum(np.abs(a - b) > atol)
    # FIX: corrected "inaccuracte" typo in the failure message.
    assert (
        count_failed / a.size < acceptable_fail_ratio
    ), "too many inaccurate phase bins: {} bins out of {} incorrect".format(count_failed, a.size)
Testing approximate phase. Tflite phase is approximate, some values will always have a large error So makes more sense to count the number that are within tolerance
assert_approx_phase
python
keunwoochoi/kapre
tests/test_time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py
MIT
def test_melspectrogram_correctness(
    n_fft, sr, hop_length, n_ch, data_format, amin, dynamic_range, n_mels, mel_f_min, mel_f_max
):
    """Test the correctness of melspectrogram against a librosa reference.

    Note that the mel filterbank itself is tested separately.
    """

    def _get_melgram_model(return_decibel, amin, dynamic_range, input_shape=None):
        # compute with kapre
        # NOTE: reads `win_length` from the enclosing scope; it is assigned
        # below, before this closure is first called.
        melgram_model = get_melspectrogram_layer(
            n_fft=n_fft,
            sample_rate=sr,
            n_mels=n_mels,
            mel_f_min=mel_f_min,
            mel_f_max=mel_f_max,
            win_length=win_length,
            hop_length=hop_length,
            input_data_format=data_format,
            output_data_format=data_format,
            return_decibel=return_decibel,
            input_shape=input_shape,
            db_amin=amin,
            db_dynamic_range=dynamic_range,
        )
        return melgram_model

    src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)

    win_length = n_fft  # test with x2
    # compute the reference with librosa (mono, then tiled per channel)
    S_ref = librosa.feature.melspectrogram(
        src_mono,
        sr=sr,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        center=False,
        power=1.0,
        n_mels=n_mels,
        fmin=mel_f_min,
        fmax=mel_f_max,
    ).T

    S_ref = np.expand_dims(S_ref, axis=2)  # time, freq, ch=1
    S_ref = np.tile(S_ref, [1, 1, n_ch])  # time, freq, ch=n_ch

    if data_format == 'channels_first':
        S_ref = np.transpose(S_ref, (2, 0, 1))  # ch, time, freq

    # melgram (linear magnitude)
    melgram_model = _get_melgram_model(
        return_decibel=False, input_shape=input_shape, amin=None, dynamic_range=120.0
    )
    S = melgram_model.predict(batch_src)[0]  # 3d representation
    np.testing.assert_allclose(S_ref, S, atol=1e-4)

    # log melgram (decibel-scaled)
    melgram_model = _get_melgram_model(
        return_decibel=True, input_shape=input_shape, amin=amin, dynamic_range=dynamic_range
    )
    S = melgram_model.predict(batch_src)[0]  # 3d representation
    S_ref_db = librosa.power_to_db(S_ref, ref=1.0, amin=amin, top_db=dynamic_range)

    np.testing.assert_allclose(
        S_ref_db, S, rtol=3e-3
    )  # decibel is evaluated with relative tolerance
Test the correctness of melspectrogram. Note that the mel filterbank is tested separately
test_melspectrogram_correctness
python
keunwoochoi/kapre
tests/test_time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py
MIT
def test_log_spectrogram_runnable(data_format):
    """Smoke test: the log-frequency spectrogram layer builds with and
    without decibel scaling."""
    _, _, input_shape = get_audio(data_format=data_format, n_ch=1)
    for return_decibel in (True, False):
        _ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=return_decibel)
test if log spectrogram layer works well
test_log_spectrogram_runnable
python
keunwoochoi/kapre
tests/test_time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py
MIT
def test_save_load(save_format):
    """Test saving/loading of models that have STFT, melspectrogram, and
    log-frequency layers, in the given `save_format` ('tf' or 'h5')."""
    src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
    # test STFT save/load (complex output -> complex-aware comparison)
    save_load_compare(
        STFT(input_shape=input_shape, pad_begin=True),
        batch_src,
        allclose_complex_numbers,
        save_format,
        STFT,
    )

    # test ConcatenateFrequencyMap save/load on a random spectrogram batch
    specs_batch = np.random.randn(2, 3, 5, 4).astype(np.float32)
    save_load_compare(
        ConcatenateFrequencyMap(input_shape=specs_batch.shape[1:]),
        specs_batch,
        np.testing.assert_allclose,
        save_format,
        ConcatenateFrequencyMap,
    )
    # NOTE(review): the composite (Sequential-based) layers below are only
    # exercised with the 'tf' format — presumably they don't round-trip
    # through h5 without extra custom_objects; confirm before extending.
    if save_format == 'tf':
        # test melspectrogram save/load
        save_load_compare(
            get_melspectrogram_layer(input_shape=input_shape, return_decibel=True),
            batch_src,
            np.testing.assert_allclose,
            save_format,
        )
        # test log frequency spectrogram save/load
        save_load_compare(
            get_log_frequency_spectrogram_layer(input_shape=input_shape, return_decibel=True),
            batch_src,
            np.testing.assert_allclose,
            save_format,
        )
        # test stft_mag_phase
        save_load_compare(
            get_stft_mag_phase(input_shape=input_shape, return_decibel=True),
            batch_src,
            np.testing.assert_allclose,
            save_format,
        )
        # test stft mag
        save_load_compare(
            get_stft_magnitude_layer(input_shape=input_shape),
            batch_src,
            np.testing.assert_allclose,
            save_format,
        )
test saving/loading of models that have STFT, melspectrogram, and log-frequency layers.
test_save_load
python
keunwoochoi/kapre
tests/test_time_frequency.py
https://github.com/keunwoochoi/kapre/blob/master/tests/test_time_frequency.py
MIT
def save_load_compare(
    layer, input_batch, allclose_func, save_format, layer_class=None, training=None, atol=1e-4
):
    """Test a save/load round-trip of a one-layer model containing `layer`.

    Builds a Sequential model around `layer`, saves it in `save_format`,
    reloads it, and asserts the reloaded model's prediction matches the
    original using `allclose_func` (which may depend on the output dtype,
    e.g. float vs complex).

    Args:
        layer: the Keras layer under test.
        input_batch: batch fed to both the original and reloaded model.
        allclose_func: callable ``(ref, new, atol)`` asserting closeness.
        save_format (str): 'tf' or 'h5'.
        layer_class: class passed as a custom object when loading 'h5'.
        training: forwarded to the model call for the reference result.
        atol (float): tolerance forwarded to `allclose_func`.

    Returns:
        The original (pre-save) model.
    """
    model = tf.keras.models.Sequential()
    model.add(layer)
    result_ref = model(input_batch, training=training)

    os_temp_dir = tempfile.gettempdir()
    model_temp_dir = tempfile.TemporaryDirectory(dir=os_temp_dir)

    if save_format == 'tf':
        model_path = model_temp_dir.name
    elif save_format == 'h5':
        model_path = os.path.join(model_temp_dir.name, 'model.h5')
    else:
        raise ValueError("save_format should be 'tf' or 'h5' but was: %s" % save_format)

    # Guarantee temp-dir cleanup even if saving/loading/comparison fails
    # (the original leaked the directory on failure, and carried leftover
    # commented-out ipdb debugging code, now removed).
    try:
        model.save(filepath=model_path, save_format=save_format)

        if save_format == 'h5':
            # h5 cannot deserialize custom layers on its own; supply the class.
            new_model = tf.keras.models.load_model(
                model_path, custom_objects={layer.__class__.__name__: layer_class}
            )
        else:
            new_model = tf.keras.models.load_model(model_path)

        result_new = new_model(input_batch)
        allclose_func(result_ref, result_new, atol)
    finally:
        model_temp_dir.cleanup()

    return model
test a model with `layer` with the given `input_batch`. The model prediction result is compared using `allclose_func` which may depend on the data type of the model output (e.g., float or complex).
save_load_compare
python
keunwoochoi/kapre
tests/utils.py
https://github.com/keunwoochoi/kapre/blob/master/tests/utils.py
MIT
def predict_using_tflite(model, batch_src):
    """Convert a Keras model to TFLite and run inference on `batch_src`.

    Converts `model` to a TFLite flatbuffer, writes it to disk, loads it
    with the TFLite interpreter, then infers on each item of `batch_src`
    individually (TFLite here works with batch size 1).

    Args:
        model: Keras model to convert.
        batch_src (np.ndarray): batch of inputs to test the model on.

    Returns:
        np.ndarray: predictions, concatenated along the batch axis.
    """
    ############################################################################
    # TF lite conversion
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.SELECT_TF_OPS,
        tf.lite.OpsSet.TFLITE_BUILTINS,
    ]
    tflite_model = converter.convert()

    model_name = 'test_tflite'
    path = Path("/tmp/tflite_tests/")

    # make a temporary location
    if path.exists():
        shutil.rmtree(path)
    os.makedirs(path)

    tflite_file = path / Path(model_name + ".tflite")
    # FIX: write via pathlib so the file handle is closed; the original
    # bare open(...).write(...) leaked the handle.
    tflite_file.write_bytes(tflite_model)

    ############################################################################
    # Make sure we can load and infer on the TFLITE model
    interpreter = tf.lite.Interpreter(tflite_file.as_posix())

    # infer on each input separately and collect the predictions
    pred_tflite = []
    for x in batch_src:
        # set batch size for tflite
        interpreter.allocate_tensors()

        # Get input and output tensors.
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()

        # apply input tensors, expand first dimension to create batch dimension
        interpreter.set_tensor(input_details[0]["index"], np.expand_dims(x, 0))
        # infer
        interpreter.invoke()

        tflite_results = interpreter.get_tensor(output_details[0]["index"])
        pred_tflite.append(tflite_results)

    return np.concatenate(pred_tflite, axis=0)
Convert a keras model to tflite and infer on batch_src Attempts to convert a keras model to a tflite model, load the tflite model, then infer on the data in batch_src Args: model (keras model) batch_src (numpy array) - audio to test model Returns: pred_tflite (numpy array) - array of predictions.
predict_using_tflite
python
keunwoochoi/kapre
tests/utils.py
https://github.com/keunwoochoi/kapre/blob/master/tests/utils.py
MIT
def add(ctx, task, priority, tags, extra, category, labels):
    """Add a new task to the to-do list.

    Note: Control the output of this using the verbosity option.
    """
    verbosity = ctx.obj["verbose"]
    if verbosity >= 2:
        click.echo(f"Adding task: {task}")
        click.echo(f"Priority: {priority}")
        click.echo(f'Tags: {", ".join(tags)}')
        click.echo(f"Extra data: {extra}")
    elif verbosity >= 1:
        click.echo(f"Adding task: {task}")
    # Implement the task adding functionality here
Add a new task to the to-do list. Note: Control the output of this using the verbosity option.
add
python
Textualize/trogon
examples/demo.py
https://github.com/Textualize/trogon/blob/master/examples/demo.py
MIT
def remove(ctx, task_id):
    """Remove a task from the to-do list by its ID."""
    verbosity = ctx.obj["verbose"]
    if verbosity >= 1:
        click.echo(f"Removing task with ID: {task_id}")
    # Implement the task removal functionality here
Remove a task from the to-do list by its ID.
remove
python
Textualize/trogon
examples/demo.py
https://github.com/Textualize/trogon/blob/master/examples/demo.py
MIT
def list_tasks(ctx, all, completed):
    """List tasks from the to-do list."""
    if ctx.obj["verbose"] >= 1:
        # FIX: plain string — the original was an f-string with no
        # placeholders (lint F541); the emitted text is unchanged.
        click.echo("Listing tasks:")
    # Implement the task listing functionality here
List tasks from the to-do list.
list_tasks
python
Textualize/trogon
examples/demo.py
https://github.com/Textualize/trogon/blob/master/examples/demo.py
MIT
def add(verbose, task, priority, tags, extra, category, labels):
    """Add a new task to the to-do list."""
    if verbose >= 2:
        # Fully verbose: echo every field of the new task.
        click.echo(f"Adding task: {task}")
        click.echo(f"Priority: {priority}")
        click.echo(f'Tags: {", ".join(tags)}')
        click.echo(f"Extra data: {extra}")
        click.echo(f"Category: {category}")
        click.echo(f'Labels: {", ".join(labels)}')
    elif verbose >= 1:
        click.echo(f"Adding task: {task}")
    # Implement the task adding functionality here
Add a new task to the to-do list.
add
python
Textualize/trogon
examples/nogroup_demo.py
https://github.com/Textualize/trogon/blob/master/examples/nogroup_demo.py
MIT
def detect_run_string(_main: ModuleType = sys.modules["__main__"]) -> str:
    """This is a slightly modified version of a function from Click.

    Reconstructs the command a user would type to re-run the current
    program: either ``[python ]script.py`` when a file was executed, or
    ``python -m package.module`` when run as a module.
    """
    path = sys.argv[0]

    # The value of __package__ indicates how Python was called. It may
    # not exist if a setuptools script is installed as an egg. It may be
    # set incorrectly for entry points created with pip on Windows.
    if getattr(_main, "__package__", None) is None or (
        os.name == "nt"
        and _main.__package__ == ""
        and not os.path.exists(path)
        and os.path.exists(f"{path}.exe")
    ):
        # Executed a file, like "python app.py".
        file_path = shlex.quote(os.path.basename(path))
        argv = get_orig_argv()
        # Prepend "python " only when the interpreter was invoked explicitly.
        if argv[0] == "python":
            prefix = f"{argv[0]} "
        else:
            prefix = ""
        return f"{prefix}{file_path}"

    # Executed a module, like "python -m example".
    # Rewritten by Python from "-m script" to "/path/to/script.py".
    # Need to look at main module to determine how it was executed.
    py_module = _main.__package__
    name = os.path.splitext(os.path.basename(path))[0]

    # A submodule like "example.cli".
    if name != "__main__":
        py_module = f"{py_module}.{name}"

    return f"python -m {py_module.lstrip('.')}"
This is a slightly modified version of a function from Click.
detect_run_string
python
Textualize/trogon
trogon/detect_run_string.py
https://github.com/Textualize/trogon/blob/master/trogon/detect_run_string.py
MIT
def introspect_click_app(app: BaseCommand) -> dict[CommandName, CommandSchema]:
    """
    Introspect a Click application and build a data structure containing information
    about all commands, options, arguments, and subcommands,
    including the docstrings and command function references.

    This function recursively processes each command and its subcommands
    (if any), creating a nested dictionary that includes details about
    options, arguments, and subcommands, as well as the docstrings and
    command function references.

    Args:
        app (click.BaseCommand): The Click application's top-level group or command instance.

    Returns:
        Dict[str, CommandData]: A nested dictionary containing the Click application's
        structure. The structure is defined by the CommandData TypedDict and its related
        TypedDicts (OptionData and ArgumentData).
    """

    def process_command(
        cmd_name: CommandName, cmd_obj: click.Command, parent=None
    ) -> CommandSchema:
        # Build the schema node for this command, then fill in its
        # options/arguments and recurse into subcommands.
        cmd_data = CommandSchema(
            name=cmd_name,
            docstring=cmd_obj.help,
            function=cmd_obj.callback,
            options=[],
            arguments=[],
            subcommands={},
            parent=parent,
            is_group=isinstance(cmd_obj, click.Group),
        )

        for param in cmd_obj.params:
            # Normalize the raw Click default into the multi-value wrapper.
            default = MultiValueParamData.process_cli_option(param.default)
            if isinstance(param, (click.Option, click.core.Group)):
                option_data = OptionSchema(
                    name=param.opts,
                    type=param.type,
                    is_flag=param.is_flag,
                    is_boolean_flag=param.is_bool_flag,
                    flag_value=param.flag_value,
                    counting=param.count,
                    opts=param.opts,
                    secondary_opts=param.secondary_opts,
                    required=param.required,
                    default=default,
                    help=param.help,
                    multiple=param.multiple,
                    nargs=param.nargs,
                )
                if isinstance(param.type, click.Choice):
                    option_data.choices = param.type.choices
                cmd_data.options.append(option_data)
            elif isinstance(param, click.Argument):
                argument_data = ArgumentSchema(
                    name=param.name,
                    type=param.type,
                    required=param.required,
                    multiple=param.multiple,
                    default=default,
                    nargs=param.nargs,
                )
                if isinstance(param.type, click.Choice):
                    argument_data.choices = param.type.choices
                cmd_data.arguments.append(argument_data)

        if isinstance(cmd_obj, click.core.Group):
            for subcmd_name, subcmd_obj in cmd_obj.commands.items():
                cmd_data.subcommands[CommandName(subcmd_name)] = process_command(
                    CommandName(subcmd_name), subcmd_obj, parent=cmd_data
                )

        return cmd_data

    data: dict[CommandName, CommandSchema] = {}

    # Special case for the root group
    if isinstance(app, click.Group):
        root_cmd_name = CommandName("root")
        data[root_cmd_name] = process_command(root_cmd_name, app)
        # NOTE(review): `app` is rebound to a CommandSchema here, so the
        # isinstance checks below no longer match click types and the
        # following branches are skipped for groups — confirm this is the
        # intended flow (process_command has already recursed into the
        # group's subcommands at this point).
        app = data[root_cmd_name]

    if isinstance(app, click.Group):
        for cmd_name, cmd_obj in app.commands.items():
            data[CommandName(cmd_name)] = process_command(
                CommandName(cmd_name), cmd_obj
            )
    elif isinstance(app, click.Command):
        cmd_name = CommandName(app.name)
        data[cmd_name] = process_command(cmd_name, app)

    return data
Introspect a Click application and build a data structure containing information about all commands, options, arguments, and subcommands, including the docstrings and command function references. This function recursively processes each command and its subcommands (if any), creating a nested dictionary that includes details about options, arguments, and subcommands, as well as the docstrings and command function references. Args: app (click.BaseCommand): The Click application's top-level group or command instance. Returns: Dict[str, CommandData]: A nested dictionary containing the Click application's structure. The structure is defined by the CommandData TypedDict and its related TypedDicts (OptionData and ArgumentData).
introspect_click_app
python
Textualize/trogon
trogon/introspect.py
https://github.com/Textualize/trogon/blob/master/trogon/introspect.py
MIT
def to_cli_args(self, include_root_command: bool = False) -> list[str]:
    """
    Build the CLI argument list corresponding to the user's input.

    Args:
        include_root_command: when False (the default), the leading
            root-command token is dropped from the result.

    Returns:
        A list of strings that can be passed to subprocess.run to execute
        the command.
    """
    cli_args = self._to_cli_args()
    return cli_args if include_root_command else cli_args[1:]
Generates a list of strings representing the CLI invocation based on the user input data. Returns: A list of strings that can be passed to subprocess.run to execute the command.
to_cli_args
python
Textualize/trogon
trogon/run_command.py
https://github.com/Textualize/trogon/blob/master/trogon/run_command.py
MIT
def to_cli_string(self, include_root_command: bool = False) -> Text:
    """
    Render the CLI invocation as rich Text, as if typed at a shell prompt.

    Returns:
        A Text of shell-quoted arguments; any value the user has not
        supplied is shown as a highlighted ``???`` marker.
    """
    args = self.to_cli_args(include_root_command=include_root_command)

    text_renderables: list[Text] = [
        Text("???", style="bold black on red")
        if arg == ValueNotSupplied()
        else Text(shlex.quote(str(arg)))
        for arg in args
    ]
    return Text(" ").join(text_renderables)
Generates a string representing the CLI invocation as if typed directly into the command line. Returns: A string representing the command invocation.
to_cli_string
python
Textualize/trogon
trogon/run_command.py
https://github.com/Textualize/trogon/blob/master/trogon/run_command.py
MIT
async def selected_command_changed(
    self, event: Tree.NodeHighlighted[CommandSchema]
) -> None:
    """Refresh the home page's main body with the form for the command
    that was just highlighted in the CommandTree."""
    await self._refresh_command_form(event.node)
When we highlight a node in the CommandTree, the main body of the home page updates to display a form specific to the highlighted command.
selected_command_changed
python
Textualize/trogon
trogon/trogon.py
https://github.com/Textualize/trogon/blob/master/trogon/trogon.py
MIT
def _update_command_description(self, command: CommandSchema) -> None:
    """Refresh the description box at the bottom of the sidebar to show
    the name and docstring of the currently selected command."""
    docstring = (command.docstring or "").lstrip()
    rendered = f"[b]{command.name}[/]\n{docstring}"
    self.query_one("#home-command-description", Static).update(rendered)
Update the description of the command at the bottom of the sidebar based on the currently selected node in the command tree.
_update_command_description
python
Textualize/trogon
trogon/trogon.py
https://github.com/Textualize/trogon/blob/master/trogon/trogon.py
MIT
def _update_execution_string_preview(self) -> None:
    """Update the preview box showing the command string to be executed."""
    name_style = self.get_component_rich_style("command-name-syntax")
    app_prefix = Text(f"{self.click_app_name} ", name_style)
    cli_text = self.command_data.to_cli_string(include_root_command=False)
    command_line = Text.assemble(app_prefix, self.highlighter(cli_text))

    prompt_style = self.get_component_rich_style("prompt")
    preview = Text.assemble(("$ ", prompt_style), command_line)
    self.query_one("#home-exec-preview-static", Static).update(preview)
Update the preview box showing the command string to be executed
_update_execution_string_preview
python
Textualize/trogon
trogon/trogon.py
https://github.com/Textualize/trogon/blob/master/trogon/trogon.py
MIT
def __init__(self, title: TextType, message: TextType) -> None:
    """Initialise the dialog.

    Args:
        title: The title for the dialog.
        message: The message to show.
    """
    super().__init__()
    # Stored for later rendering in compose().
    self._title = title
    self._message = message
Initialise the dialog. Args: title: The title for the dialog. message: The message to show.
__init__
python
Textualize/trogon
trogon/widgets/about.py
https://github.com/Textualize/trogon/blob/master/trogon/widgets/about.py
MIT
def compose(self) -> ComposeResult:
    """Compose the content of the modal dialog.

    Lays out a centred title, the message body, and a single OK button.
    """
    with Vertical():
        with Center():
            yield Static(self._title, classes="spaced")
        yield Static(self._message, id="message", classes="spaced")
        with Center(classes="spaced"):
            # button_style is expected to be provided by the class/subclass.
            yield Button("OK", variant=self.button_style)
Compose the content of the modal dialog.
compose
python
Textualize/trogon
trogon/widgets/about.py
https://github.com/Textualize/trogon/blob/master/trogon/widgets/about.py
MIT
def _form_changed(self) -> None: """Take the current state of the form and build a UserCommandData from it, then post a FormChanged message""" command_schema = self.command_schema path_from_root = command_schema.path_from_root # Sentinel root value to make constructing the tree a little easier. parent_command_data = UserCommandData( name=CommandName("_"), options=[], arguments=[] ) root_command_data = parent_command_data for command in path_from_root: option_datas = [] # For each of the options in the schema for this command, # lets grab the values the user has supplied for them in the form. for option in command.options: parameter_control = self.query_one(f"#{option.key}", ParameterControls) value = parameter_control.get_values() for v in value.values: assert isinstance(v, tuple) option_data = UserOptionData(option.name, v, option) option_datas.append(option_data) # Now do the same for the arguments argument_datas = [] for argument in command.arguments: form_control_widget = self.query_one( f"#{argument.key}", ParameterControls ) value = form_control_widget.get_values() # This should only ever loop once since arguments can be multi-value but not multiple=True. for v in value.values: assert isinstance(v, tuple) argument_data = UserArgumentData(argument.name, v, argument) argument_datas.append(argument_data) assert all(isinstance(option.value, tuple) for option in option_datas) assert all(isinstance(argument.value, tuple) for argument in argument_datas) command_data = UserCommandData( name=command.name, options=option_datas, arguments=argument_datas, parent=parent_command_data, command_schema=command, ) parent_command_data.subcommand = command_data parent_command_data = command_data # Trim the sentinel root_command_data = root_command_data.subcommand root_command_data.parent = None self.post_message(self.Changed(root_command_data))
Take the current state of the form and build a UserCommandData from it, then post a FormChanged message
_form_changed
python
Textualize/trogon
trogon/widgets/form.py
https://github.com/Textualize/trogon/blob/master/trogon/widgets/form.py
MIT
def apply_filter(self, filter_query: str) -> bool:
    """Show or hide this ParameterControls depending on whether it matches
    the filter query or not.

    Args:
        filter_query: The string to filter on.

    Returns:
        True if the filter matched (and the widget is visible).
    """
    help_text = getattr(self.schema, "help", "") or ""
    if not filter_query:
        # Empty query: everything is visible.
        should_be_visible = True
        self.display = should_be_visible
    else:
        name = self.schema.name

        if isinstance(name, str):
            # Argument names are strings, there's only one name
            # NOTE(review): this branch only matches on the name — help
            # text is not consulted for string-named params; confirm
            # that asymmetry with the list-named branch is intended.
            name_contains_query = filter_query in name.casefold()
            should_be_visible = name_contains_query
        else:
            # Option names are lists since they can have multiple names (e.g. -v and --verbose)
            name_contains_query = any(
                filter_query in name.casefold() for name in self.schema.name
            )
            help_contains_query = filter_query in help_text.casefold()
            should_be_visible = name_contains_query or help_contains_query

        self.display = should_be_visible

    # Update the highlighting of the help text
    if help_text:
        try:
            help_label = self.query_one(".command-form-control-help-text", Static)
            new_help_text = Text(help_text)
            new_help_text.highlight_words(
                filter_query.split(), "black on yellow", case_sensitive=False
            )
            help_label.update(new_help_text)
        except NoMatches:
            # No help label rendered for this control; nothing to highlight.
            pass

    return should_be_visible
Show or hide this ParameterControls depending on whether it matches the filter query or not. Args: filter_query: The string to filter on. Returns: True if the filter matched (and the widget is visible).
apply_filter
python
Textualize/trogon
trogon/widgets/parameter_controls.py
https://github.com/Textualize/trogon/blob/master/trogon/widgets/parameter_controls.py
MIT
def compose(self) -> ComposeResult:
    """Takes the schemas for each parameter of the current command, and
    converts it into a form consisting of Textual widgets.

    Renders: a label (except for booleans), one control group per default
    value, a blank control group, an optional "+ value" button for
    multi-value params, and the dimmed help text.
    """
    schema = self.schema
    name = schema.name
    argument_type = schema.type
    default = schema.default
    help_text = getattr(schema, "help", "") or ""
    multiple = schema.multiple
    is_option = isinstance(schema, OptionSchema)
    nargs = schema.nargs

    label = self._make_command_form_control_label(
        name, argument_type, is_option, schema.required, multiple=multiple
    )
    first_focus_control: Widget | None = (
        None  # The widget that will be focused when the form is focused.
    )

    # If there are N defaults, we render the "group" N times.
    # Each group will contain `nargs` widgets.
    with ControlGroupsContainer():
        if not argument_type == click.BOOL:
            yield Label(label, classes="command-form-label")

        if isinstance(argument_type, click.Choice) and multiple:
            # Display a MultipleChoice widget
            # There's a special case where we have a Choice with multiple=True,
            # in this case, we can just render a single MultipleChoice widget
            # instead of multiple radio-sets.
            control_method = self.get_control_method(argument_type)
            multiple_choice_widget = control_method(
                default=default,
                label=label,
                multiple=multiple,
                schema=schema,
                control_id=schema.key,
            )
            yield from multiple_choice_widget
        else:
            # For other widgets, we'll render as normal...
            # If required, we'll generate widgets containing the defaults
            for default_value_tuple in default.values:
                widget_group = list(self.make_widget_group())
                with ControlGroup() as control_group:
                    if len(widget_group) == 1:
                        control_group.add_class("single-item")

                    # Parameter types can be of length 1, but there could still
                    # be multiple defaults. We need to render a widget for each
                    # of those defaults. Extend the widget group such that
                    # there's a slot available for each default...
                    for default_value, control_widget in zip(
                        default_value_tuple, widget_group
                    ):
                        self._apply_default_value(control_widget, default_value)
                        yield control_widget
                        # Keep track of the first control we render, for easy focus
                        if first_focus_control is None:
                            first_focus_control = control_widget

            # We always need to display the original group of controls,
            # regardless of whether there are defaults
            if multiple or not default.values:
                widget_group = list(self.make_widget_group())
                with ControlGroup() as control_group:
                    if len(widget_group) == 1:
                        control_group.add_class("single-item")

                    # No need to apply defaults to this group
                    for control_widget in widget_group:
                        yield control_widget
                        if first_focus_control is None:
                            first_focus_control = control_widget

    # Take note of the first form control, so we can easily focus it
    if self.first_control is None:
        self.first_control = first_focus_control

    # If it's a multiple, and it's a Choice parameter, then we display
    # our special case MultiChoice widget, and so there's no need for this
    # button.
    if (multiple or nargs == -1) and not isinstance(argument_type, click.Choice):
        with Horizontal(classes="add-another-button-container"):
            yield Button("+ value", variant="success", classes="add-another-button")

    # Render the dim help text below the form controls
    if help_text:
        yield Static(help_text, classes="command-form-control-help-text")
Takes the schemas for each parameter of the current command, and converts it into a form consisting of Textual widgets.
compose
python
Textualize/trogon
trogon/widgets/parameter_controls.py
https://github.com/Textualize/trogon/blob/master/trogon/widgets/parameter_controls.py
MIT
def make_widget_group(self) -> Iterable[ControlWidgetType]:
    """Yield one complete set of input widgets for this parameter.

    A click.Tuple parameter yields one widget per member type; any other
    parameter yields a single widget. Defaults are not filled in here.
    """
    schema = self.schema
    is_option = isinstance(schema, OptionSchema)

    label = self._make_command_form_control_label(
        schema.name, schema.type, is_option, schema.required, schema.multiple
    )

    # A Tuple parameter is a composite of several member types; everything
    # else maps onto exactly one widget.
    if isinstance(schema.type, click.Tuple):
        parameter_types = schema.type.types
    else:
        parameter_types = [schema.type]

    # Render the corresponding widget for each member type.
    for member_type in parameter_types:
        control_method = self.get_control_method(member_type)
        yield from control_method(
            schema.default, label, schema.multiple, schema, schema.key
        )
For this option, yield a single set of widgets required to receive user input for it.
make_widget_group
python
Textualize/trogon
trogon/widgets/parameter_controls.py
https://github.com/Textualize/trogon/blob/master/trogon/widgets/parameter_controls.py
MIT
def _apply_default_value(
    control_widget: ControlWidgetType, default_value: Any
) -> None:
    """Seed a parameter-handling widget with its default value and show a
    '(default)' hint in its placeholder/prompt."""
    stringified = str(default_value)
    if isinstance(control_widget, Input):
        control_widget.value = stringified
        control_widget.placeholder = f"{default_value} (default)"
    elif isinstance(control_widget, Select):
        control_widget.value = stringified
        control_widget.prompt = f"{default_value} (default)"
Set the default value of a parameter-handling widget.
_apply_default_value
python
Textualize/trogon
trogon/widgets/parameter_controls.py
https://github.com/Textualize/trogon/blob/master/trogon/widgets/parameter_controls.py
MIT
def actions(self, state):
    'actions are the board indexes that are empty ("_"), i.e. legal moves'
    return [index for index, char in enumerate(state) if char == '_']
actions are index where we can make a move
actions
python
simpleai-team/simpleai
samples/machine_learning/tic_tac_toe.py
https://github.com/simpleai-team/simpleai/blob/master/samples/machine_learning/tic_tac_toe.py
MIT
def find_location(rows, element_to_find):
    '''Find the location of a piece in the puzzle.
       Returns a tuple: row, column (or None if absent).'''
    for row_index, row in enumerate(rows):
        for col_index, cell in enumerate(row):
            if cell == element_to_find:
                return row_index, col_index
Find the location of a piece in the puzzle. Returns a tuple: row, column
find_location
python
simpleai-team/simpleai
samples/search/eight_puzzle.py
https://github.com/simpleai-team/simpleai/blob/master/samples/search/eight_puzzle.py
MIT
def actions(self, state):
    '''Returns a list of the pieces we can move to the empty space.'''
    rows = string_to_list(state)
    row_e, col_e = find_location(rows, 'e')

    # Neighbours of the empty cell, in up/down/left/right order.
    moves = []
    for d_row, d_col in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        row_n, col_n = row_e + d_row, col_e + d_col
        if 0 <= row_n <= 2 and 0 <= col_n <= 2:
            moves.append(rows[row_n][col_n])
    return moves
Returns a list of the pieces we can move to the empty space.
actions
python
simpleai-team/simpleai
samples/search/eight_puzzle.py
https://github.com/simpleai-team/simpleai/blob/master/samples/search/eight_puzzle.py
MIT
def result(self, state, action):
    '''Return the resulting state after moving a piece to the empty space.
       (the "action" parameter contains the piece to move)
    '''
    rows = string_to_list(state)
    empty_row, empty_col = find_location(rows, 'e')
    piece_row, piece_col = find_location(rows, action)

    # Swap the piece with the empty cell.
    rows[empty_row][empty_col], rows[piece_row][piece_col] = (
        rows[piece_row][piece_col],
        rows[empty_row][empty_col],
    )
    return list_to_string(rows)
Return the resulting state after moving a piece to the empty space. (the "action" parameter contains the piece to move)
result
python
simpleai-team/simpleai
samples/search/eight_puzzle.py
https://github.com/simpleai-team/simpleai/blob/master/samples/search/eight_puzzle.py
MIT
def heuristic(self, state):
    """Returns an *estimation* of the distance from a state to the goal.

    We are using the manhattan distance.
    """
    rows = string_to_list(state)
    total = 0
    for piece in '12345678e':
        row, col = find_location(rows, piece)
        goal_row, goal_col = goal_positions[piece]
        total += abs(row - goal_row) + abs(col - goal_col)
    return total
Returns an *estimation* of the distance from a state to the goal. We are using the manhattan distance.
heuristic
python
simpleai-team/simpleai
samples/search/eight_puzzle.py
https://github.com/simpleai-team/simpleai/blob/master/samples/search/eight_puzzle.py
MIT
def result(self, s, a):
    """Result of applying an action to a state."""
    # a[1] holds the (missioners, cannibals) pair that crosses the river;
    # s[2] is the boat flag: 0 means the boat leaves this shore, 1 the other.
    missioners, cannibals, boat = s
    delta_m, delta_c = a[1]
    if boat == 0:
        return (missioners - delta_m, cannibals - delta_c, 1)
    return (missioners + delta_m, cannibals + delta_c, 0)
Result of applying an action to a state.
result
python
simpleai-team/simpleai
samples/search/missioners.py
https://github.com/simpleai-team/simpleai/blob/master/samples/search/missioners.py
MIT
def mkconstraints():
    """ Make constraint list for binary constraint problem. """
    constraints = []

    def pairwise_different(cell_names):
        # One binary "different" constraint per unordered pair of cells.
        return [(pair, const_different) for pair in combinations(cell_names, 2)]

    # Cells sharing a row number.
    for j in range(1, 10):
        constraints.extend(pairwise_different(["%s%d" % (i, j) for i in uppercase[:9]]))
    # Cells sharing a column letter.
    for i in uppercase[:9]:
        constraints.extend(pairwise_different(["%s%d" % (i, j) for j in range(1, 10)]))
    # Cells sharing a 3x3 box.
    for letters in ['ABC', 'DEF', 'GHI']:
        for digits in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]:
            constraints.extend(pairwise_different(
                ["%s%d" % (i, j) for i in letters for j in digits]))
    return constraints
Make constraint list for binary constraint problem.
mkconstraints
python
simpleai-team/simpleai
samples/search/sudoku.py
https://github.com/simpleai-team/simpleai/blob/master/samples/search/sudoku.py
MIT
def step(self, viewer=None):
    "This method evolves one step in time"
    # Guard clause: nothing to do once the environment reached completion.
    if self.is_completed(self.state):
        return
    for agent in self.agents:
        percept = self.percept(agent, self.state)
        action = agent.program(percept)
        next_state = self.do_action(self.state, action, agent)
        if viewer:
            viewer.event(self.state, action, next_state, agent)
        self.state = next_state
        if self.is_completed(self.state):
            return
This method evolves one step in time
step
python
simpleai-team/simpleai
simpleai/environments.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/environments.py
MIT
def learn(self, examples, attributes, parent_examples):
    """
    A decision tree learner that *strictly* follows the pseudocode given in
    AIMA. In 3rd edition, see Figure 18.5, page 702.
    """
    # Base cases: no data, a pure node, or no attributes left to split on.
    if not examples:
        return self.plurality_value(parent_examples)
    if len(set(map(self.target, examples))) == 1 or not attributes:
        return self.plurality_value(examples)

    best = max(attributes, key=lambda attribute: self.importance(attribute, examples))
    tree = DecisionTreeNode(attribute=best)
    for value in set(map(best, examples)):
        subset = [example for example in examples if best(example) == value]
        # Recurse with the chosen attribute removed from the candidate set.
        tree.add_branch(value, self.learn(subset, attributes - {best}, examples))
    return tree
A decision tree learner that *strictly* follows the pseudocode given in AIMA. In 3rd edition, see Figure 18.5, page 702.
learn
python
simpleai-team/simpleai
simpleai/machine_learning/classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py
MIT
def importance(self, attribute, examples):
    """
    AIMA implies that importance should be information gain.
    Since AIMA only defines it for binary features this implementation
    was based on the wikipedia article:
    http://en.wikipedia.org/wiki/Information_gain_in_decision_trees
    """
    counter = OnlineInformationGain(attribute, self.target)
    for sample in examples:
        counter.add(sample)
    return counter.get_gain()
AIMA implies that importance should be information gain. Since AIMA only defines it for binary features this implementation was based on the wikipedia article: http://en.wikipedia.org/wiki/Information_gain_in_decision_trees
importance
python
simpleai-team/simpleai
simpleai/machine_learning/classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py
MIT
def save(self, filepath):
    """
    Saves the classifier to `filepath`.

    Because this classifier needs to save the dataset, it must
    be something that can be pickled and not something like an
    iterator.
    """
    # Reject non-strings and empty paths up front.
    if not isinstance(filepath, str) or not filepath:
        raise ValueError("Invalid filepath")
    with open(filepath, "wb") as stream:
        pickle.dump(self, stream)
Saves the classifier to `filepath`. Because this classifier needs to save the dataset, it must be something that can be pickled and not something like an iterator.
save
python
simpleai-team/simpleai
simpleai/machine_learning/classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py
MIT
def tree_to_str(root):
    """
    Returns a string representation of a decision tree with
    root node `root`.
    """
    out = []
    for value, node, depth in iter_tree(root):
        # Assemble the format template piecewise, then fill it in one go.
        pieces = ["{indent}"]
        if node is not root:
            pieces.append("case={value}\t")
        if node.attribute is None:
            pieces.append("result={result} -- P={prob:.2}")
        else:
            pieces.append("split by {split}:\t(partial result={result} -- P={prob:.2})")
        out.append("".join(pieces).format(indent=" " * depth,
                                          value=value,
                                          result=node.result[0],
                                          prob=node.result[1],
                                          split=str(node.attribute)))
    return "\n".join(out)
Returns a string representation of a decision tree with root node `root`.
tree_to_str
python
simpleai-team/simpleai
simpleai/machine_learning/classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py
MIT
def take_branch(self, example):
    """
    Returns a `DecisionTreeNode` instance that can better classify
    `example` based on the selectors value.

    If there are no more branches (ie, this node is a leaf) or the
    attribute gives a value for an unexistent branch then this method
    returns None.
    """
    if self.attribute is None:
        # Leaf node: nowhere further to descend.
        return None
    return self.branches.get(self.attribute(example))
Returns a `DecisionTreeNode` instance that can better classify `example` based on the selectors value. If there are no more branches (ie, this node is a leaf) or the attribute gives a value for an unexistent branch then this method returns None.
take_branch
python
simpleai-team/simpleai
simpleai/machine_learning/classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py
MIT
def _max_gain_split(self, examples):
    """
    Returns an OnlineInformationGain of the attribute with
    max gain based on `examples`.
    """
    counters = self._new_set_of_gain_counters()
    # Feed every counter with every example in a single pass over the data.
    for example in examples:
        for counter in counters:
            counter.add(example)
    best = max(counters, key=lambda counter: counter.get_gain())
    if not best.get_target_class_counts():
        raise ValueError("Dataset is empty")
    return best
Returns an OnlineInformationGain of the attribute with max gain based on `examples`.
_max_gain_split
python
simpleai-team/simpleai
simpleai/machine_learning/classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py
MIT
def _new_set_of_gain_counters(self):
    """
    Creates a new set of OnlineInformationGain objects
    for each attribute.
    """
    return [OnlineInformationGain(attr, self.target)
            for attr in self.attributes]
Creates a new set of OnlineInformationGain objects for each attribute.
_new_set_of_gain_counters
python
simpleai-team/simpleai
simpleai/machine_learning/classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/classifiers.py
MIT
def precision(classifier, testset):
    """
    Runs the classifier for each example in `testset`
    and verifies that the classification is correct
    using the `target`.

    Returns a number between 0.0 and 1.0 with the
    precision of classification for this test set.
    """
    hits = 0
    seen = 0
    for example in testset:
        seen += 1
        if classifier.classify(example)[0] == classifier.target(example):
            hits += 1
    if seen == 0:
        raise ValueError("Empty testset!")
    return hits / float(seen)
Runs the classifier for each example in `testset` and verifies that the classification is correct using the `target`. Returns a number between 0.0 and 1.0 with the precision of classification for this test set.
precision
python
simpleai-team/simpleai
simpleai/machine_learning/evaluation.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/evaluation.py
MIT
def kfold(dataset, problem, method, k=10):
    """
    Does a k-fold on `dataset` with `method`.
    This is, it randomly creates k-partitions of the dataset, and k-times
    trains the method with k-1 parts and runs it with the partition left.
    After all this, returns the overall success ratio.
    """
    if k <= 1:
        raise ValueError("k argument must be at least 2")

    shuffled = list(dataset)
    random.shuffle(shuffled)

    trials = 0
    hits = 0
    for fold in range(k):
        # Element j belongs to the test set of fold `j % k`, training otherwise.
        train = [item for j, item in enumerate(shuffled) if j % k != fold]
        test = [item for j, item in enumerate(shuffled) if j % k == fold]
        classifier = method(train, problem)
        for item in test:
            trials += 1
            outcome = classifier.classify(item)
            if outcome is not None and outcome[0] == problem.target(item):
                hits += 1
    return float(hits) / float(trials)
Does a k-fold on `dataset` with `method`. This is, it randomly creates k-partitions of the dataset, and k-times trains the method with k-1 parts and runs it with the partition left. After all this, returns the overall success ratio.
kfold
python
simpleai-team/simpleai
simpleai/machine_learning/evaluation.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/evaluation.py
MIT
def save(self, filepath):
    """
    Pickles the tree and saves it into `filepath`
    """
    if not isinstance(filepath, str) or not filepath:
        raise ValueError("Invalid filepath")

    # Removes dataset so is not saved in the pickle
    self.dataset = None
    with open(filepath, "wb") as stream:
        pickle.dump(self, stream)
Pickles the tree and saves it into `filepath`
save
python
simpleai-team/simpleai
simpleai/machine_learning/models.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/models.py
MIT
def load(cls, filepath):
    """
    Loads a pickled version of the classifier saved in `filepath`
    """
    with open(filepath, "rb") as stream:
        loaded = pickle.load(stream)
        # Guard against loading a pickle of some unrelated type.
        if not isinstance(loaded, Classifier):
            raise ValueError("Pickled object is not a Classifier")
    return loaded
Loads a pickled version of the classifier saved in `filepath`
load
python
simpleai-team/simpleai
simpleai/machine_learning/models.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/models.py
MIT
def __init__(self, dataset, target_index):
    """
    `dataset` should be an iterable, *not* an iterator.

    `target_index` is the index in the vector where the classification
    of an example is defined.
    """
    super(VectorDataClassificationProblem, self).__init__()
    try:
        sample = next(iter(dataset))
    except StopIteration:
        raise ValueError("Dataset is empty")
    self.target_index = target_index
    size = len(sample)
    if self.target_index < 0:
        # Negative number allowed, counts in reverse
        self.target_index += size
    if not (0 <= self.target_index < size):
        raise ValueError("Target index is out of range")
    # One attribute per vector position, skipping the target itself.
    for index in range(size):
        if index == self.target_index:
            continue
        self.attributes.append(
            VectorIndexAttribute(index, "data at index {}".format(index)))
`dataset` should be an iterable, *not* an iterator. `target_index` is the index in the vector where the classification of an example is defined.
__init__
python
simpleai-team/simpleai
simpleai/machine_learning/models.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/models.py
MIT
def __init__(self, function=None, name=None, description=None):
    """
    Creates an attribute with `function`.
    Adds a name and a description if it's specified.
    """
    # Plain value assignment; no validation is performed here.
    self.function = function
    self.name = name
    self.description = description
Creates an attribute with `function`. Adds a name and a description if it's specified.
__init__
python
simpleai-team/simpleai
simpleai/machine_learning/models.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/models.py
MIT
def is_attribute(method, name=None):
    """
    Decorator for methods that are attributes.
    """
    # Tag the function object in place and hand it back.
    method.is_attribute = True
    method.name = method.__name__ if name is None else name
    return method
Decorator for methods that are attributes.
is_attribute
python
simpleai-team/simpleai
simpleai/machine_learning/models.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/models.py
MIT
def boltzmann_exploration(actions, utilities, temperature, action_counter):
    '''returns an action with a probability depending on utilities and temperature'''
    values = [utilities[action] for action in actions]
    temperature = max(temperature, 0.01)

    highest = max(values)
    lowest = min(values)
    if highest == lowest:
        # All utilities equal: every action is equally attractive.
        return random.choice(actions)

    # Boltzmann weights over min-max normalized utilities.
    weights = [math.exp(((value - lowest) / (highest - lowest)) / temperature)
               for value in values]
    probs = [weight / sum(weights) for weight in weights]

    # Roulette-wheel selection over the probabilities.
    i = 0
    cumulative = probs[i]
    threshold = random.random()
    while i < len(actions) and threshold >= cumulative:
        i += 1
        cumulative += probs[i]
    return actions[i]
returns an action with a probability depending on utilities and temperature
boltzmann_exploration
python
simpleai-team/simpleai
simpleai/machine_learning/reinforcement_learning.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/reinforcement_learning.py
MIT
def make_exponential_temperature(initial_temperature, alpha):
    '''returns a function like initial / exp(n * alpha)'''
    def schedule(n):
        try:
            return initial_temperature / math.exp(n * alpha)
        except OverflowError:
            # exp() blew up: the temperature is effectively at its minimum.
            return 0.01
    return schedule
returns a function like initial / exp(n * alpha)
make_exponential_temperature
python
simpleai-team/simpleai
simpleai/machine_learning/reinforcement_learning.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/machine_learning/reinforcement_learning.py
MIT
def revise(domains, arc, constraints):
    """
    Given the arc X, Y (variables), removes the values from X's domain that
    do not meet the constraint between X and Y.

    That is, given x1 in X's domain, x1 will be removed from the domain, if
    there is no value y in Y's domain that makes constraint(X,Y) True, for
    those constraints affecting X and Y.

    Returns True when at least one value was removed from X's domain.
    """
    x, y = arc
    related_constraints = [(neighbors, constraint)
                           for neighbors, constraint in constraints
                           if set(arc) == set(neighbors)]

    modified = False

    for neighbors, constraint in related_constraints:
        # BUGFIX: iterate over a snapshot of the domain. Removing from
        # domains[x] while iterating it directly makes the loop silently
        # skip the value right after each removal, leaving unsupported
        # values in the domain.
        for x_value in domains[x][:]:
            # Is there any y supporting x_value under this constraint?
            constraint_results = (
                _call_constraint({x: x_value, y: y_value}, neighbors, constraint)
                for y_value in domains[y])

            if not any(constraint_results):
                domains[x].remove(x_value)
                modified = True

    return modified
Given the arc X, Y (variables), removes the values from X's domain that do not meet the constraint between X and Y. That is, given x1 in X's domain, x1 will be removed from the domain, if there is no value y in Y's domain that makes constraint(X,Y) True, for those constraints affecting X and Y.
revise
python
simpleai-team/simpleai
simpleai/search/arc.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/arc.py
MIT
def all_arcs(constraints):
    """
    For each constraint ((X, Y), const) adds:
        ((X, Y), const)
        ((Y, X), const)
    """
    arcs = set()
    for neighbors, constraint in constraints:
        # Only binary constraints produce arcs; others are skipped.
        if len(neighbors) == 2:
            x, y = neighbors
            arcs.update(((x, y), (y, x)))
    return arcs
For each constraint ((X, Y), const) adds: ((X, Y), const) ((Y, X), const)
all_arcs
python
simpleai-team/simpleai
simpleai/search/arc.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/arc.py
MIT
def arc_consistency_3(domains, constraints):
    """
    Makes a CSP problem arc consistent.

    Ignores any constraint that is not binary.
    """
    arcs = list(all_arcs(constraints))
    pending_arcs = set(arcs)

    while pending_arcs:
        x, y = pending_arcs.pop()
        if revise(domains, (x, y), constraints):
            if not domains[x]:
                # A domain was wiped out: the problem has no solution.
                return False
            # x's domain shrank, so every arc ending in x must be re-checked.
            pending_arcs |= {(x2, y2) for x2, y2 in arcs if y2 == x}
    return True
Makes a CSP problem arc consistent. Ignores any constraint that is not binary.
arc_consistency_3
python
simpleai-team/simpleai
simpleai/search/arc.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/arc.py
MIT
def backtrack(problem, variable_heuristic='', value_heuristic='', inference=True):
    '''
    Backtracking search.

    variable_heuristic is the heuristic for variable choosing, can be
    MOST_CONSTRAINED_VARIABLE, HIGHEST_DEGREE_VARIABLE, or blank for simple
    ordered choosing.
    value_heuristic is the heuristic for value choosing, can be
    LEAST_CONSTRAINING_VALUE or blank for simple ordered choosing.
    '''
    # Dispatch table instead of an if/elif chain; unknown values fall back
    # to the basic ordered chooser, just like the blank default.
    variable_chooser = {
        MOST_CONSTRAINED_VARIABLE: _most_constrained_variable_chooser,
        HIGHEST_DEGREE_VARIABLE: _highest_degree_variable_chooser,
    }.get(variable_heuristic, _basic_variable_chooser)

    if value_heuristic == LEAST_CONSTRAINING_VALUE:
        values_sorter = _least_constraining_values_sorter
    else:
        values_sorter = _basic_values_sorter

    return _backtracking(problem,
                         {},
                         deepcopy(problem.domains),
                         variable_chooser,
                         values_sorter,
                         inference=inference)
Backtracking search. variable_heuristic is the heuristic for variable choosing, can be MOST_CONSTRAINED_VARIABLE, HIGHEST_DEGREE_VARIABLE, or blank for simple ordered choosing. value_heuristic is the heuristic for value choosing, can be LEAST_CONSTRAINING_VALUE or blank for simple ordered choosing.
backtrack
python
simpleai-team/simpleai
simpleai/search/csp.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/csp.py
MIT
def _most_constrained_variable_chooser(problem, variables, domains):
    '''
    Choose the variable that has less available values.
    '''
    # min() with a key scans once (O(n)) instead of sorting (O(n log n));
    # like the stable sort it replaces, it returns the first variable
    # among ties in the original order.
    return min(variables, key=lambda v: len(domains[v]))
Choose the variable that has less available values.
_most_constrained_variable_chooser
python
simpleai-team/simpleai
simpleai/search/csp.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/csp.py
MIT
def _highest_degree_variable_chooser(problem, variables, domains):
    '''
    Choose the variable that is involved on more constraints.
    '''
    # max() with a key replaces sort+reverse+[0]: a single O(n) pass with
    # identical tie-breaking (first variable in the original order).
    return max(variables, key=lambda v: problem.var_degrees[v])
Choose the variable that is involved on more constraints.
_highest_degree_variable_chooser
python
simpleai-team/simpleai
simpleai/search/csp.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/csp.py
MIT
def _find_conflicts(problem, assignment, variable=None, value=None):
    '''
    Find violated constraints on a given assignment, with the possibility
    of specifying a new variable and value to add to the assignment before
    checking.
    '''
    if variable is not None and value is not None:
        # Work on a copy so the caller's assignment stays untouched.
        assignment = deepcopy(assignment)
        assignment[variable] = value

    conflicts = []
    for neighbors, constraint in problem.constraints:
        # Only fully-assigned constraints can be evaluated for conflicts.
        fully_assigned = all(neighbor in assignment for neighbor in neighbors)
        if fully_assigned and not _call_constraint(assignment, neighbors, constraint):
            conflicts.append((neighbors, constraint))
    return conflicts
Find violated constraints on a given assignment, with the possibility of specifying a new variable and value to add to the assignment before checking.
_find_conflicts
python
simpleai-team/simpleai
simpleai/search/csp.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/csp.py
MIT
def _least_constraining_values_sorter(problem, assignment, variable, domains):
    '''
    Sort values based on how many conflicts they generate if assigned.
    '''
    # Dead code removed: the original defined an unused `update_assignment`
    # helper; _count_conflicts already takes the candidate value directly.
    return sorted(domains[variable][:],
                  key=lambda value: _count_conflicts(problem, assignment,
                                                     variable, value))
Sort values based on how many conflicts they generate if assigned.
_least_constraining_values_sorter
python
simpleai-team/simpleai
simpleai/search/csp.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/csp.py
MIT
def convert_to_binary(variables, domains, constraints):
    """
    Returns new constraint list, all binary, using hidden variables.

    You can use it as previous step when creating a problem.
    """
    def wdiff(vars_):
        # Builds the binary constraint that ties a hidden variable (whose
        # value is a full tuple of values for the original n-ary constraint)
        # to one of the original variables: the variable's value must equal
        # the matching component of the hidden tuple.
        def diff(variables, values):
            hidden, other = variables
            if hidden.startswith('hidden'):
                # values = (hidden_tuple, other_value)
                idx = vars_.index(other)
                return values[1] == values[0][idx]
            else:
                # Arguments arrived swapped: values = (other_value, hidden_tuple)
                idx = vars_.index(hidden)
                return values[0] == values[1][idx]
        diff.no_wrap = True  # so it's not wrapped to swap values
        return diff

    new_constraints = []
    new_domains = copy(domains)
    new_variables = list(variables)
    last = 0  # counter used to generate unique hidden-variable names
    for vars_, const in constraints:
        if len(vars_) == 2:
            # Already binary: keep the constraint as-is.
            new_constraints.append((vars_, const))
            continue
        hidden = 'hidden%d' % last
        new_variables.append(hidden)
        last += 1
        # The hidden variable's domain is every combination of the original
        # variables' values that satisfies the n-ary constraint.
        new_domains[hidden] = [t for t in product(*map(domains.get, vars_)) if const(vars_, t)]
        # Link the hidden variable to each original variable with a binary
        # "component equality" constraint.
        for var in vars_:
            new_constraints.append(((hidden, var), wdiff(vars_)))

    return new_variables, new_domains, new_constraints
Returns new constraint list, all binary, using hidden variables. You can use it as previous step when creating a problem.
convert_to_binary
python
simpleai-team/simpleai
simpleai/search/csp.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/csp.py
MIT
def _all_expander(fringe, iteration, viewer):
    '''
    Expander that expands all nodes on the fringe.
    '''
    expanded_neighbors = [node.expand(local_search=True)
                          for node in fringe]

    if viewer:
        viewer.event('expanded', list(fringe), expanded_neighbors)

    # Plain loop instead of list(map(...)): map was being abused for its
    # side effect and allocated a throwaway list of None results.
    for neighbors in expanded_neighbors:
        fringe.extend(neighbors)
Expander that expands all nodes on the fringe.
_all_expander
python
simpleai-team/simpleai
simpleai/search/local.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/local.py
MIT
def _first_expander(fringe, iteration, viewer):
    '''
    Expander that expands only the first node on the fringe.
    '''
    head = fringe[0]
    neighbors = head.expand(local_search=True)

    if viewer:
        viewer.event('expanded', [head], [neighbors])

    fringe.extend(neighbors)
Expander that expands only the first node on the fringe.
_first_expander
python
simpleai-team/simpleai
simpleai/search/local.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/local.py
MIT
def _random_best_expander(fringe, iteration, viewer):
    '''
    Expander that expands one randomly chosen nodes on the fringe that
    is better than the current (first) node.
    '''
    current = fringe[0]
    neighbors = current.expand(local_search=True)

    if viewer:
        viewer.event('expanded', [current], [neighbors])

    # Only neighbors that strictly improve on the current node qualify.
    improving = [neighbor for neighbor in neighbors
                 if neighbor.value > current.value]
    if not improving:
        return
    chosen = random.choice(improving)
    if viewer:
        viewer.event('chosen_node', chosen)
    fringe.append(chosen)
Expander that expands one randomly chosen nodes on the fringe that is better than the current (first) node.
_random_best_expander
python
simpleai-team/simpleai
simpleai/search/local.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/local.py
MIT
def _create_simulated_annealing_expander(schedule):
    '''
    Creates an expander that has a random chance to choose a node that
    is worse than the current (first) node, but that chance decreases
    with time.
    '''
    def _expander(fringe, iteration, viewer):
        T = schedule(iteration)
        current = fringe[0]
        neighbors = current.expand(local_search=True)

        if viewer:
            viewer.event('expanded', [current], [neighbors])

        if not neighbors:
            return

        succ = random.choice(neighbors)
        delta_e = succ.value - current.value
        # Always accept improvements; accept worsening moves with
        # probability exp(delta_e / T), which shrinks as T cools down.
        # Note the short-circuit: random.random() is only drawn when the
        # move does not improve, preserving the original call sequence.
        if delta_e > 0 or random.random() < math.exp(delta_e / T):
            fringe.pop()
            fringe.append(succ)
            if viewer:
                viewer.event('chosen_node', succ)
    return _expander
Creates an expander that has a random chance to choose a node that is worse than the current (first) node, but that chance decreases with time.
_create_simulated_annealing_expander
python
simpleai-team/simpleai
simpleai/search/local.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/local.py
MIT
def _create_genetic_expander(problem, mutation_chance):
    '''
    Creates an expander that expands the bests nodes of the population,
    crossing over them.
    '''
    def _expander(fringe, iteration, viewer):
        fitness = [node.value for node in fringe]
        sampler = InverseTransformSampler(fitness, fringe)

        new_generation = []
        expanded_nodes = []
        expanded_neighbors = []

        # Produce one child per current population member.
        for _ in fringe:
            parent1 = sampler.sample()
            parent2 = sampler.sample()
            child = problem.crossover(parent1.state, parent2.state)
            action = 'crossover'
            if random.random() < mutation_chance:
                # Noooouuu! she is... he is... *IT* is a mutant!
                child = problem.mutate(child)
                action += '+mutation'

            child_node = SearchNodeValueOrdered(state=child, problem=problem,
                                                action=action)
            new_generation.append(child_node)

            # Record both parents as having "expanded" into this child.
            for parent in (parent1, parent2):
                expanded_nodes.append(parent)
                expanded_neighbors.append([child_node])

        if viewer:
            viewer.event('expanded', expanded_nodes, expanded_neighbors)

        # Replace the whole population with the new generation.
        fringe.clear()
        for node in new_generation:
            fringe.append(node)

    return _expander
Creates an expander that expands the bests nodes of the population, crossing over them.
_create_genetic_expander
python
simpleai-team/simpleai
simpleai/search/local.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/local.py
MIT
def _local_search(problem, fringe_expander, iterations_limit=0, fringe_size=1,
                  random_initial_states=False, stop_when_no_better=True,
                  viewer=None):
    '''
    Basic algorithm for all local search algorithms.
    '''
    if viewer:
        viewer.event('started')

    # The fringe is the current population, kept sorted and bounded so only
    # the best fringe_size nodes survive each iteration.
    fringe = BoundedPriorityQueue(fringe_size)
    if random_initial_states:
        for _ in range(fringe_size):
            s = problem.generate_random_state()
            fringe.append(SearchNodeValueOrdered(state=s, problem=problem))
    else:
        fringe.append(SearchNodeValueOrdered(state=problem.initial_state,
                                             problem=problem))

    finish_reason = ''
    iteration = 0
    run = True
    best = None

    while run:
        if viewer:
            viewer.event('new_iteration', list(fringe))

        # Remember the best node before expanding, to detect stagnation.
        old_best = fringe[0]
        fringe_expander(fringe, iteration, viewer)
        best = fringe[0]

        iteration += 1

        # iterations_limit == 0 means "no limit".
        if iterations_limit and iteration >= iterations_limit:
            run = False
            finish_reason = 'reaching iteration limit'
        elif old_best.value >= best.value and stop_when_no_better:
            # No improvement this iteration: stop if configured to do so.
            run = False
            finish_reason = 'not being able to improve solution'

    if viewer:
        viewer.event('finished', fringe, best,
                     'returned after %s' % finish_reason)

    return best
Basic algorithm for all local search algorithms.
_local_search
python
simpleai-team/simpleai
simpleai/search/local.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/local.py
MIT
def path(self):
    '''Path (list of nodes and actions) from root to this node.'''
    steps = []
    node = self
    # Walk parent links up to the root, then flip into root-first order.
    while node:
        steps.append((node.action, node.state))
        node = node.parent
    steps.reverse()
    return steps
Path (list of nodes and actions) from root to this node.
path
python
simpleai-team/simpleai
simpleai/search/models.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/models.py
MIT
def breadth_first(problem, graph_search=False, viewer=None):
    '''
    Breadth first search.

    If graph_search=True, will avoid exploring repeated states.
    Requires: SearchProblem.actions, SearchProblem.result, and
    SearchProblem.is_goal.
    '''
    # FIFO fringe: shallowest nodes are expanded first.
    fringe = FifoList()
    return _search(problem, fringe, graph_search=graph_search, viewer=viewer)
Breadth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal.
breadth_first
python
simpleai-team/simpleai
simpleai/search/traditional.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/traditional.py
MIT
def depth_first(problem, graph_search=False, viewer=None):
    '''
    Depth first search.

    If graph_search=True, will avoid exploring repeated states.
    Requires: SearchProblem.actions, SearchProblem.result, and
    SearchProblem.is_goal.
    '''
    # LIFO fringe: deepest nodes are expanded first.
    fringe = LifoList()
    return _search(problem, fringe, graph_search=graph_search, viewer=viewer)
Depth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal.
depth_first
python
simpleai-team/simpleai
simpleai/search/traditional.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/traditional.py
MIT
def uniform_cost(problem, graph_search=False, viewer=None):
    '''
    Uniform cost search.

    If graph_search=True, will avoid exploring repeated states.
    Requires: SearchProblem.actions, SearchProblem.result,
    SearchProblem.is_goal, and SearchProblem.cost.
    '''
    # Priority fringe ordered by accumulated path cost.
    fringe = BoundedPriorityQueue()
    return _search(problem,
                   fringe,
                   graph_search=graph_search,
                   node_factory=SearchNodeCostOrdered,
                   graph_replace_when_better=True,
                   viewer=viewer)
Uniform cost search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, and SearchProblem.cost.
uniform_cost
python
simpleai-team/simpleai
simpleai/search/traditional.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/traditional.py
MIT
def greedy(problem, graph_search=False, viewer=None):
    '''
    Greedy search.

    If graph_search=True, will avoid exploring repeated states.
    Requires: SearchProblem.actions, SearchProblem.result,
    SearchProblem.is_goal, SearchProblem.cost, and SearchProblem.heuristic.
    '''
    # Priority fringe ordered by heuristic value alone.
    fringe = BoundedPriorityQueue()
    return _search(problem,
                   fringe,
                   graph_search=graph_search,
                   node_factory=SearchNodeHeuristicOrdered,
                   graph_replace_when_better=True,
                   viewer=viewer)
Greedy search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, SearchProblem.cost, and SearchProblem.heuristic.
greedy
python
simpleai-team/simpleai
simpleai/search/traditional.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/traditional.py
MIT
def astar(problem, graph_search=False, viewer=None):
    '''
    A* search.

    If graph_search=True, will avoid exploring repeated states.
    Requires: SearchProblem.actions, SearchProblem.result,
    SearchProblem.is_goal, SearchProblem.cost, and SearchProblem.heuristic.
    '''
    # Priority fringe ordered by cost + heuristic (f = g + h).
    fringe = BoundedPriorityQueue()
    return _search(problem,
                   fringe,
                   graph_search=graph_search,
                   node_factory=SearchNodeStarOrdered,
                   graph_replace_when_better=True,
                   viewer=viewer)
A* search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, SearchProblem.cost, and SearchProblem.heuristic.
astar
python
simpleai-team/simpleai
simpleai/search/traditional.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/traditional.py
MIT
def _search(problem, fringe, graph_search=False, depth_limit=None,
            node_factory=SearchNode, graph_replace_when_better=False,
            viewer=None):
    '''
    Basic search algorithm, base of all the other search algorithms.
    '''
    if viewer:
        viewer.event('started')

    # States already expanded; only consulted when graph_search is on.
    memory = set()
    fringe.append(node_factory(state=problem.initial_state,
                               problem=problem))

    while fringe:
        if viewer:
            viewer.event('new_iteration', fringe.sorted())

        current = fringe.pop()
        if problem.is_goal(current.state):
            if viewer:
                viewer.event('chosen_node', current, True)
                viewer.event('finished', fringe.sorted(), current,
                             'goal found')
            return current
        if viewer:
            viewer.event('chosen_node', current, False)

        memory.add(current.state)

        # Skip expansion when the depth limit has been reached.
        if depth_limit is not None and current.depth >= depth_limit:
            continue

        successors = current.expand()
        if viewer:
            viewer.event('expanded', [current], [successors])

        for successor in successors:
            if not graph_search:
                fringe.append(successor)
                continue
            # Graph search: at most one queued node per state.
            duplicates = [queued for queued in fringe
                          if queued.state == successor.state]
            assert len(duplicates) in (0, 1)
            if not duplicates and successor.state not in memory:
                fringe.append(successor)
            elif (graph_replace_when_better and duplicates
                  and successor < duplicates[0]):
                # A better route to an already-queued state: swap it in.
                fringe.remove(duplicates[0])
                fringe.append(successor)

    if viewer:
        viewer.event('finished', fringe.sorted(), None, 'goal not found')
Basic search algorithm, base of all the other search algorithms.
_search
python
simpleai-team/simpleai
simpleai/search/traditional.py
https://github.com/simpleai-team/simpleai/blob/master/simpleai/search/traditional.py
MIT
def test_target_in_attributes(self):
    """
    If target in attributes precision is 1.0.
    """
    # Make the target itself one of the attributes: the classifier can
    # then predict perfectly, so precision must be exactly 1.0.
    self.problem.attributes = [self.target]
    self.this = self.classifier(self.corpus, self.problem)
    precision = evaluation.precision(self.this, self.test_set)
    self.assertEqual(precision, 1.0)
If target in attributes precision is 1.0.
test_target_in_attributes
python
simpleai-team/simpleai
tests/machine_learning/test_classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/tests/machine_learning/test_classifiers.py
MIT
def test_equal_classification(self):
    """
    This checks that the three tree learning methods are equal.
    """
    pseudo = DecisionTreeLearner(self.corpus, self.problem)
    # Both learners must agree on every example of the test set.
    for example in self.test_set:
        self.assertEqual(pseudo.classify(example),
                         self.this.classify(example))
This checks that the three tree learning methods are equal.
test_equal_classification
python
simpleai-team/simpleai
tests/machine_learning/test_classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/tests/machine_learning/test_classifiers.py
MIT
def setup_dataset(self):
    """
    Creates a corpus with the iris dataset, storing it in self.corpus
    and the matching classification problem in self.problem.
    """
    dataset = []
    with open(self.IRIS_PATH) as filehandler:
        for line in filehandler.read().split("\n"):
            # Round every feature to the nearest integer; skip blanks.
            row = [np.rint(float(value)) for value in line.split()]
            if row:
                dataset.append(row)
    problem = VectorDataClassificationProblem(dataset, target_index=4)
    problem.distance = euclidean_vector_distance
    self.corpus = dataset
    self.problem = problem
Creates a corpus with the iris dataset. Returns the dataset, the attributes getter and the target getter.
setup_dataset
python
simpleai-team/simpleai
tests/machine_learning/test_classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/tests/machine_learning/test_classifiers.py
MIT
def setup_dataset(self):
    """
    Creates a corpus with n k-bit examples of the parity problem:
    k random bits followed by a 1 if an odd number of bits are 1,
    else 0
    """
    k = 2
    n = 100
    dataset = []
    for i in range(n):
        # Deterministic pseudo-random generation of the k input bits.
        bits = [(((i + j) * 1223) % (n + 1)) % 2 for j in range(k)]
        # Append the parity label as the last column.
        bits.append(sum(bits) % 2)
        dataset.append(bits)
    self.corpus = dataset
    self.problem = VectorDataClassificationProblem(dataset,
                                                   target_index=k)
Creates a corpus with n k-bit examples of the parity problem: k random bits followed by a 1 if an odd number of bits are 1, else 0
setup_dataset
python
simpleai-team/simpleai
tests/machine_learning/test_classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/tests/machine_learning/test_classifiers.py
MIT
def setup_dataset(self):
    """
    Creates a corpus of primes, storing the dataset in self.corpus and
    the matching classification problem in self.problem.
    """
    # Magic number, chosen to avoid an "error" that cannot be patched
    # in Dtree Pseudo (without modifying the pseudocode).
    size = 105
    dataset = [[number % 2 == 0,
                number % 3 == 0,
                number % 5 == 0,
                number % 7 == 0,
                self.isprime(number)]
               for number in range(size)]
    problem = VectorDataClassificationProblem(dataset, target_index=-1)
    problem.distance = euclidean_vector_distance
    self.corpus = dataset
    self.problem = problem
Creates a corpus of primes. Returns the dataset, the attributes getter and the target getter.
setup_dataset
python
simpleai-team/simpleai
tests/machine_learning/test_classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/tests/machine_learning/test_classifiers.py
MIT
def isprime(self, number):
    """
    Returns True if `number` is prime, by trial division with 2 and
    every odd candidate i while i * i <= number.

    The integer bound `i * i <= number` replaces the original
    `int(number ** 0.5)` limit, which relies on float sqrt and can be
    off by one for very large integers due to float precision.
    """
    if number < 2:
        return False
    if number == 2:
        return True
    if number % 2 == 0:
        # Even numbers greater than 2 are never prime.
        return False
    candidate = 3
    while candidate * candidate <= number:
        if number % candidate == 0:
            return False
        candidate += 2
    return True
Returns if a number is prime testing if is divisible by any number from 0 to sqrt(number)
isprime
python
simpleai-team/simpleai
tests/machine_learning/test_classifiers.py
https://github.com/simpleai-team/simpleai/blob/master/tests/machine_learning/test_classifiers.py
MIT
def get_ray_directions(
    H: int,
    W: int,
    focal: Union[float, Tuple[float, float]],
    principal: Optional[Tuple[float, float]] = None,
    use_pixel_centers: bool = True,
    normalize: bool = True,
) -> torch.FloatTensor:
    """
    Get ray directions for all pixels in camera coordinate.

    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems

    Inputs:
        H, W: image height and width in pixels
        focal: focal length; either a single number (shared fx/fy, with
            the principal point assumed at the image center) or an
            (fx, fy) pair, in which case `principal` must be provided
        principal: (cx, cy) principal point, required when focal is a pair
        use_pixel_centers: whether to sample at pixel centers (+0.5)
        normalize: whether to return unit-length directions

    Outputs:
        directions: (H, W, 3), the direction of the rays in camera
        coordinate (x right, y up, looking down -z)
    """
    pixel_center = 0.5 if use_pixel_centers else 0

    # Accept plain ints as well as floats for a scalar focal length;
    # the original `isinstance(focal, float)` check fell through to the
    # tuple branch on ints and crashed unpacking them.
    if isinstance(focal, (int, float)):
        fx, fy = focal, focal
        cx, cy = W / 2, H / 2
    else:
        fx, fy = focal
        assert principal is not None
        cx, cy = principal

    # "xy" indexing: i varies along the width axis, j along the height.
    i, j = torch.meshgrid(
        torch.arange(W, dtype=torch.float32) + pixel_center,
        torch.arange(H, dtype=torch.float32) + pixel_center,
        indexing="xy",
    )

    # Camera looks down -z; y is flipped so that up is positive.
    directions = torch.stack(
        [(i - cx) / fx, -(j - cy) / fy, -torch.ones_like(i)], -1
    )

    if normalize:
        directions = F.normalize(directions, dim=-1)
    return directions
Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal, principal, use_pixel_centers: image height, width, focal length, principal point and whether use pixel centers Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate
get_ray_directions
python
VAST-AI-Research/TripoSR
tsr/utils.py
https://github.com/VAST-AI-Research/TripoSR/blob/master/tsr/utils.py
MIT
def forward(
    self,
    hidden_states: torch.FloatTensor,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    attention_mask: Optional[torch.FloatTensor] = None,
    **cross_attention_kwargs,
) -> torch.Tensor:
    r"""
    The forward method of the `Attention` class.

    Args:
        hidden_states (`torch.Tensor`):
            The hidden states of the query.
        encoder_hidden_states (`torch.Tensor`, *optional*):
            The hidden states of the encoder.
        attention_mask (`torch.Tensor`, *optional*):
            The attention mask to use. If `None`, no mask is applied.
        **cross_attention_kwargs:
            Additional keyword arguments to pass along to the cross
            attention.

    Returns:
        `torch.Tensor`: The output of the attention layer.
    """
    # Delegate to the configured attention processor; every tensor is
    # forwarded untouched. For the standard processors defined here,
    # `cross_attention_kwargs` is empty.
    processor = self.processor
    return processor(
        self,
        hidden_states,
        encoder_hidden_states=encoder_hidden_states,
        attention_mask=attention_mask,
        **cross_attention_kwargs,
    )
The forward method of the `Attention` class. Args: hidden_states (`torch.Tensor`): The hidden states of the query. encoder_hidden_states (`torch.Tensor`, *optional*): The hidden states of the encoder. attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied. **cross_attention_kwargs: Additional keyword arguments to pass along to the cross attention. Returns: `torch.Tensor`: The output of the attention layer.
forward
python
VAST-AI-Research/TripoSR
tsr/models/transformer/attention.py
https://github.com/VAST-AI-Research/TripoSR/blob/master/tsr/models/transformer/attention.py
MIT