text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def erb(freq, Hz=None):
  """
  ERB (Equivalent Rectangular Bandwidth) for a given frequency.

  Implements the formula from:

  ``B. C. J. Moore and B. R. Glasberg, "Suggested formulae for calculating
  auditory filter bandwidths and excitation patterns". J. Acoust. Soc.
  Am., 74, 1983, pp. 750-753.``

  Parameters
  ----------
  freq :
    Frequency, in rad/sample when the ``Hz`` constant is given, else in
    hertz.
  Hz :
    Frequency conversion constant (like the one returned by ``sHz``).
    When ``None`` (default), ``freq`` is assumed to be in hertz already.

  Returns
  -------
  The ERB bandwidth, in the same unit used by ``freq``.
  """
  if Hz is None:
    # Values below 7 suggest a rad/sample input (up to 2 * pi) given
    # without the conversion constant
    if freq < 7:
      raise ValueError("Frequency out of range.")
    Hz = 1
  f_hertz = freq / Hz
  bandwidth = 6.23e-6 * f_hertz ** 2 + 93.39e-3 * f_hertz + 28.52
  return bandwidth * Hz
def gammatone(freq, bandwidth):
  """
  Gammatone filter based on Klapuri's approach.

  ``A. Klapuri, "Multipich Analysis of Polyphonic Music and Speech Signals
  Using an Auditory Model". IEEE Transactions on Audio, Speech and Language
  Processing, vol. 16, no. 2, 2008, pp. 255-266.``

  Parameters
  ----------
  freq :
    Center frequency, in rad/sample.
  bandwidth :
    Filter bandwidth, in rad/sample.

  Returns
  -------
  A CascadeFilter made of four resonator sections (the two ``resonator``
  strategies below, applied twice each).
  """
  # thub makes the inputs re-usable by the 4 filter sections
  doubled_bw = thub(thub(bandwidth, 1) * 2, 4)
  freq = thub(freq, 4)
  strategies = 2 * [resonator.z_exp, resonator.poles_exp]
  return CascadeFilter(make(freq, doubled_bw) for make in strategies)
def _generate_window_strategies():
  """
  Create all ``window`` and ``wsymm`` strategies.

  For each entry in ``window._content_generation_table``, compiles the code
  template of both StrategyDict instances, decorates and registers the
  resulting functions, and cross-links the periodic (``window``) and
  symmetric (``wsymm``) versions via their ``periodic``/``symm`` attributes.
  """
  for wnd_dict in window._content_generation_table:
    names = wnd_dict["names"]
    sname = wnd_dict["sname"] = names[0]  # Main strategy name
    wnd_dict.setdefault("params_def", "")
    for sdict in [window, wsymm]:
      # Docstring data differs only in the "symm" flag
      docs_dict = window._doc_kwargs(symm = sdict is wsymm, **wnd_dict)
      decorators = [format_docstring(**docs_dict), sdict.strategy(*names)]
      # Namespace holding the helpers the generated code needs
      ns = dict(pi=pi, sin=sin, cos=cos, xrange=xrange, __name__=__name__)
      exec(sdict._code_template.format(**wnd_dict), ns, ns)
      # Apply the decorators in order; NOTE(review): the reduce result is
      # intentionally discarded -- sdict.strategy presumably registers the
      # decorated function into the StrategyDict as a side effect (confirm)
      reduce(lambda func, dec: dec(func), decorators, ns[sname])
      if not wnd_dict.get("distinct", True):
        # Periodic and symmetric versions are the same: alias and skip the
        # wsymm code generation
        wsymm[sname] = window[sname]
        break
    # Cross-link both versions so each can reach its counterpart
    wsymm[sname].periodic = window[sname].periodic = window[sname]
    wsymm[sname].symm = window[sname].symm = wsymm[sname]
def acorr(blk, max_lag=None):
  """
  Calculate the autocorrelation of a given 1-D block sequence.

  Parameters
  ----------
  blk :
    An iterable with well-defined length. Don't use this function with
    Stream objects!
  max_lag :
    The size of the result, the lags you'd need. Defaults to
    ``len(blk) - 1``, since any lag beyond would result in zero.

  Returns
  -------
  A list with lags from 0 up to max_lag, where its ``i``-th element has the
  autocorrelation for a lag equals to ``i``. Be careful with negative lags!
  You should use abs(lag) indexes when working with them.
  """
  if max_lag is None:
    max_lag = len(blk) - 1
  # For each lag, sum the products of the block with its shifted self
  return [sum(shifted * sample for shifted, sample in zip(blk[lag:], blk))
          for lag in range(max_lag + 1)]
def lag_matrix(blk, max_lag=None):
  """
  Finds the lag matrix for a given 1-D block sequence.

  Parameters
  ----------
  blk :
    An iterable with well-defined length. Don't use this function with
    Stream objects!
  max_lag :
    The order of the result. Defaults to ``len(blk) - 1``, the maximum lag
    that doesn't create fully zeroed matrices.

  Returns
  -------
  The covariance matrix as a list of lists. Each cell (i, j) contains the
  sum of ``blk[n - i] * blk[n - j]`` elements for all n that allows such
  without padding the given block.
  """
  if max_lag is None:
    max_lag = len(blk) - 1
  elif max_lag >= len(blk):
    raise ValueError("Block length should be higher than order")
  size = len(blk)
  lags = range(max_lag + 1)
  return [[sum(blk[n - i] * blk[n - j] for n in range(max_lag, size))
             for i in lags]
           for j in lags]
def dft(blk, freqs, normalize=True):
  """
  Complex non-optimized Discrete Fourier Transform.

  Finds the DFT for values in a given frequency list, in order, over the
  data block seen as periodic.

  Parameters
  ----------
  blk :
    An iterable with well-defined length. Don't use this function with
    Stream objects!
  freqs :
    List of frequencies to find the DFT, in rad/sample.
  normalize :
    If True (default), the coefficient sums are divided by ``len(blk)``,
    so the DC (zero frequency) coefficient is the block mean. If False,
    that coefficient would be the sum of the data in the block.

  Returns
  -------
  A list of DFT values for each frequency, in the same order that they
  appear in the freqs input.

  Note
  ----
  This isn't a FFT implementation: it takes O(M * N) operations, but can
  find the DFT at any specific frequency, with no need for zero padding
  nor a linearly spaced frequency grid.
  """
  def single_bin(f):
    # Inner product of the block with a complex exponential at frequency f
    return sum(xn * cexp(-1j * n * f) for n, xn in enumerate(blk))

  if normalize:
    scale = len(blk)
    return [single_bin(f) / scale for f in freqs]
  return [single_bin(f) for f in freqs]
def zcross(seq, hysteresis=0, first_sign=0):
  """
  Zero-crossing stream.

  Parameters
  ----------
  seq :
    Any iterable to be used as input for the zero crossing analysis.
  hysteresis :
    Size of the "dead zone" around zero: a crossing is only counted when
    the signal leaves the ``[-hysteresis; hysteresis]`` range on the side
    opposed to the last detected sign. Defaults to zero (one threshold).
  first_sign :
    Optional sign memory from the past; the sign is taken from any signed
    number. Defaults to zero, meaning "any": the first sign will be the
    first one found in the data.

  Returns
  -------
  A generator that outputs 1 for each crossing detected, 0 otherwise.
  """
  lower_bound = -hysteresis
  data_iter = iter(seq)
  if first_sign == 0:
    # No sign memory: emit zeros until a sample leaves the hysteresis
    # region, which defines the starting sign
    last_sign = 0
    for el in data_iter:
      yield 0
      if (el > hysteresis) or (el < lower_bound):
        last_sign = -1 if el < 0 else 1
        break
  else:
    last_sign = -1 if first_sign < 0 else 1
  # Keep consuming the same iterator, so seen samples aren't revisited
  for el in data_iter:
    if el * last_sign < lower_bound: # Crossed to the opposite side
      last_sign = -1 if el < 0 else 1
      yield 1
    else:
      yield 0
def clip(sig, low=-1., high=1.):
  """
  Clips the signal up to both a lower and a higher limit.

  Parameters
  ----------
  sig :
    The signal to be clipped, be it a Stream instance, a list or any
    iterable.
  low, high :
    Lower and higher clipping limit, "saturating" the input to them.
    Defaults to -1.0 and 1.0, respectively. These can be None when needed
    one-sided clipping. When both limits are set to None, the output is a
    Stream that yields exactly the ``sig`` input data.

  Returns
  -------
  Clipped signal as a Stream instance.
  """
  if low is None and high is None:
    return Stream(sig)
  if low is None: # One-sided: upper limit only
    return Stream(el if el < high else high for el in sig)
  if high is None: # One-sided: lower limit only
    return Stream(el if el > low else low for el in sig)
  if high < low:
    raise ValueError("Higher clipping limit is smaller than lower one")

  def saturate(el):
    if el > high:
      return high
    return low if el < low else el

  return Stream(saturate(el) for el in sig)
def unwrap(sig, max_delta=pi, step=2*pi):
  """
  Parametrized signal unwrapping.

  Parameters
  ----------
  sig :
    An iterable seen as an input signal.
  max_delta :
    Maximum absolute value of the adjacent difference
    ``sig[i] - sig[i - 1]`` to keep the output without another minimizing
    step change. Defaults to pi.
  step :
    The change used to minimize the delta is an integer multiple of this
    value. Defaults to 2 * pi.

  Returns
  -------
  A generator with the unwrapped signal: whenever an adjacency step in the
  input exceeds ``max_delta``, the remaining data is shifted by a multiple
  of ``step`` that minimizes the jump.
  """
  iterator = iter(sig)
  prev = next(iterator)
  yield prev
  offset = prev - prev # A "zero" with the same type as the data
  for item in iterator:
    diff = item - prev
    if abs(diff) > max_delta:
      # Replace the jump by its smallest value congruent modulo ``step``
      offset += min(diff % step, diff % -step, key=abs) - diff
    yield item + offset
    prev = item
def amdf(lag, size):
  """
  Average Magnitude Difference Function non-linear filter for a given size
  and a fixed lag.

  Parameters
  ----------
  lag :
    Time lag, in samples. See ``freq2lag`` if needs conversion from
    frequency values.
  size :
    Moving average size.

  Returns
  -------
  A callable that accepts two parameters: a signal ``sig`` and the starting
  memory element ``zero`` that behaves like the ``LinearFilter.__call__``
  arguments. The output from that callable is a Stream instance, and has
  no decimation applied.

  See Also
  --------
  freq2lag :
    Frequency (in rad/sample) to lag (in samples) converter.
  """
  diff_filter = (1 - z ** -lag).linearize()

  @tostream
  def amdf_filter(sig, zero=0.):
    # A fresh moving average filter is built per call, so that calls don't
    # share internal state
    return maverage(size)(abs(diff_filter(sig, zero=zero)), zero=zero)

  return amdf_filter
def overlap_add(blk_sig, size=None, hop=None, wnd=None, normalize=True):
  """
  Overlap-add algorithm using Numpy arrays.

  Parameters
  ----------
  blk_sig :
    An iterable of blocks (sequences), such as the ``Stream.blocks`` result.
  size :
    Block size for each ``blk_sig`` element, in samples.
  hop :
    Number of samples for two adjacent blocks (defaults to the size).
  wnd :
    Windowing function to be applied to each block or any iterable with
    exactly ``size`` elements. If ``None`` (default), applies a rectangular
    window.
  normalize :
    Flag whether the window should be normalized so that the process could
    happen in the [-1; 1] range, dividing the window by its hop gain.
    Default is ``True``.

  Returns
  -------
  A generator with the blocks overlapped and added.

  Note
  ----
  Each block has the window function applied to it and the result is the
  sum of the blocks without any edge-case special treatment for the first
  and last few blocks.
  """
  import numpy as np

  # Finds the size from data, if needed
  if size is None:
    blk_sig = Stream(blk_sig) # Stream.peek reads without consuming
    size = len(blk_sig.peek())
  if hop is None:
    hop = size

  # Find the right windowing function to be applied
  if wnd is None:
    wnd = np.ones(size) # Rectangular window
  elif callable(wnd) and not isinstance(wnd, Stream):
    wnd = wnd(size) # Window factories get the block size as input
  if isinstance(wnd, Sequence):
    wnd = np.array(wnd)
  elif isinstance(wnd, Iterable):
    wnd = np.hstack(wnd)
  else:
    raise TypeError("Window should be an iterable or a callable")

  # Normalization to the [-1; 1] range: divide the window by the largest
  # sum of absolute values among its hop-spaced "columns"
  if normalize:
    steps = Stream(wnd).blocks(hop).map(np.array)
    gain = np.sum(np.abs(np.vstack(steps)), 0).max()
    if gain: # If gain is zero, normalization couldn't have any effect
      wnd = wnd / gain # Can't use "/=" nor "*=" as Numpy would keep datatype

  # Overlap-add algorithm: add the tail of the previous windowed block to
  # the head of the current one, yielding ``hop`` new samples per block
  old = np.zeros(size)
  for blk in (wnd * blk for blk in blk_sig):
    blk[:-hop] += old[hop:]
    for el in blk[:hop]:
      yield el
    old = blk
  for el in old[hop:]: # No more blocks, finish yielding the last one
    yield el
def overlap_add(blk_sig, size=None, hop=None, wnd=None, normalize=True):
  """
  Overlap-add algorithm using lists instead of Numpy arrays.

  The behavior is the same to the ``overlap_add.numpy`` strategy, besides
  the data types.
  """
  # Finds the size from data, if needed
  if size is None:
    blk_sig = Stream(blk_sig) # Stream.peek reads without consuming
    size = len(blk_sig.peek())
  if hop is None:
    hop = size

  # Find the window to be applied, resulting on a list or None
  if wnd is not None:
    if callable(wnd) and not isinstance(wnd, Stream):
      wnd = wnd(size) # Window factories get the block size as input
    if isinstance(wnd, Iterable):
      wnd = list(wnd)
    else:
      raise TypeError("Window should be an iterable or a callable")

  # Normalization to the [-1; 1] range
  if normalize:
    if wnd:
      # Gain is the largest sum of absolute values among the hop-spaced
      # window "columns"
      steps = Stream(wnd).map(abs).blocks(hop).map(tuple)
      gain = max(xmap(sum, xzip(*steps)))
      if gain: # If gain is zero, normalization couldn't have any effect
        wnd[:] = (w / gain for w in wnd)
    else:
      # Rectangular window still needs the hop gain compensation
      wnd = [1 / ceil(size / hop)] * size

  # Window application
  if wnd:
    mul = operator.mul
    if len(wnd) != size:
      raise ValueError("Incompatible window size")
    wnd = wnd + [0.] # Allows detecting when block size is wrong
    blk_sig = (xmap(mul, wnd, blk) for blk in blk_sig)

  # Overlap-add algorithm
  add = operator.add
  mem = [0.] * size
  s_h = size - hop
  for blk in xmap(iter, blk_sig):
    mem[:s_h] = xmap(add, mem[hop:], blk) # Overlapped part
    mem[s_h:] = blk # Remaining elements
    # A wrong-sized block would change the memory length here
    if len(mem) != size:
      raise ValueError("Wrong block size or declared")
    for el in mem[:hop]:
      yield el
  for el in mem[hop:]: # No more blocks, finish yielding the last one
    yield el
def stft(func=None, **kwparams):
  """
  Short Time Fourier Transform for complex data.

  Same to the default STFT strategy, but with new defaults. This is the
  same to:

  .. code-block:: python

    stft.base(transform=numpy.fft.fft, inverse_transform=numpy.fft.ifft)

  See ``stft.base`` docs for more.
  """
  import numpy.fft as np_fft
  strategy = stft.base(transform=np_fft.fft, inverse_transform=np_fft.ifft)
  return strategy(func, **kwparams)
def stft(func=None, **kwparams):
  """
  Short Time Fourier Transform for real data keeping the full FFT block.

  Same to the default STFT strategy, but with new defaults. This is the
  same to:

  .. code-block:: python

    stft.base(transform=numpy.fft.fft,
              inverse_transform=lambda *args: numpy.fft.ifft(*args).real)

  See ``stft.base`` docs for more.
  """
  from numpy.fft import fft, ifft

  def real_ifft(*args):
    # Discard the residual imaginary part after inverting
    return ifft(*args).real

  return stft.base(transform=fft,
                   inverse_transform=real_ifft)(func, **kwparams)
def close(self):
  """
  Destructor for this audio interface. Waits the threads to finish their
  streams, if desired (i.e., unless ``self.wait`` is falsy, each playing
  thread is asked to stop before being joined).
  """
  with self.halting: # Avoid simultaneous "close" threads
    if not self.finished: # Ignore all "close" calls, but the first,
      self.finished = True # and any call to play would raise ThreadError

      # Closes all playing AudioThread instances
      while True:
        with self.lock: # Ensure there's no other thread messing around
          try:
            thread = self._threads[0] # Needless to say: pop = deadlock
          except IndexError: # Empty list
            break # No more threads
        if not self.wait: # Don't wait for playback: ask it to halt early
          thread.stop()
        thread.join()

      # Closes all recording RecStream instances
      while self._recordings:
        recst = self._recordings[-1]
        recst.stop()
        recst.take(inf) # Ensure it'll be closed

      # Finishes
      assert not self._pa._streams # No stream should survive
      self._pa.terminate()
def record(self, chunk_size = None,
                 dfmt = "f",
                 channels = 1,
                 rate = DEFAULT_SAMPLE_RATE,
                 **kwargs
          ):
  """
  Records audio from device into a Stream.

  Parameters
  ----------
  chunk_size :
    Number of samples per chunk (block sent to device). Defaults to the
    ``chunks.size`` global value.
  dfmt :
    Format, as in chunks(). Default is "f" (Float32).
  channels :
    Channels in audio stream (serialized).
  rate :
    Sample rate (same input used in sHz).

  Returns
  -------
  Endless Stream instance that gather data from the audio input device.
  """
  if chunk_size is None:
    chunk_size = chunks.size
  # When a specific host API was chosen, default to its input device
  if hasattr(self, "api"):
    kwargs.setdefault("input_device_index", self.api["defaultInputDevice"])
  channels = kwargs.pop("nchannels", channels) # Backwards compatibility
  # Open the PyAudio input stream, wrapped by a RecStream that turns the
  # raw chunks into a sample Stream
  input_stream = RecStream(self,
                           self._pa.open(format=_STRUCT2PYAUDIO[dfmt],
                                         channels=channels,
                                         rate=rate,
                                         frames_per_buffer=chunk_size,
                                         input=True,
                                         **kwargs),
                           chunk_size,
                           dfmt
                          )
  # Kept so that close() can stop every pending recording
  self._recordings.append(input_stream)
  return input_stream
def run(self):
  """
  Plays the audio.

  This method plays the audio, and shouldn't be called explicitly, let the
  constructor do so.
  """
  # From now on, it's multi-thread. Let the force be with them.
  st = self.stream._stream
  for chunk in chunks(self.audio,
                      size=self.chunk_size*self.nchannels,
                      dfmt=self.dfmt):
    #Below is a faster way to call:
    #  self.stream.write(chunk, self.chunk_size)
    self.write_stream(st, chunk, self.chunk_size, False)
    if not self.go.is_set(): # Playback was paused (or is halting)
      self.stream.stop_stream()
      if self.halting: # stop() was called: abandon the remaining chunks
        break
      self.go.wait() # Sleep until playback is resumed
      self.stream.start_stream()

  # Finished playing! Destructor-like step: let's close the thread
  with self.lock:
    if self in self.device_manager._threads: # If not already closed
      self.stream.close()
      self.device_manager.thread_finished(self)
def stop(self):
  """
  Stops the playing thread and close.

  Flags the thread as halting and wakes/pauses it so its ``run`` loop can
  break out and close itself.
  """
  with self.lock:
    # Order matters: flag the halt first, then clear the "go" event so the
    # run loop sees both on its next check
    self.halting = True
    self.go.clear()
def freq2midi(freq):
  """ Given a frequency in Hz, returns its MIDI pitch number. """
  # 12 semitones per octave, anchored at the A4 reference pitch
  semitones_from_a4 = 12 * (log2(freq) - log2(FREQ_A4))
  result = semitones_from_a4 + MIDI_A4
  # Non-positive frequencies make log2 complex-valued: map those to nan
  return nan if isinstance(result, complex) else result
def octaves(freq, fmin=20., fmax=2e4):
  """
  Finds all octaves of a frequency that lie within a frequency range.

  Parameters
  ----------
  freq :
    Frequency, in any (linear) unit.
  fmin, fmax :
    Frequency range, in the same unit of ``freq``. Defaults to 20.0 and
    20,000.0, respectively.

  Returns
  -------
  A list of frequencies, in the same unit of ``freq`` and in ascending
  order, each being an integer number of octaves away from ``freq`` and
  strictly inside the open interval ``(fmin, fmax)``.
  """
  # Input validation
  if any(f <= 0 for f in (freq, fmin, fmax)):
    raise ValueError("Frequencies have to be positive")

  # Bring freq near the range by whole octaves, when possible
  while freq < fmin:
    freq *= 2
  while freq > fmax:
    freq /= 2
  if freq < fmin: # Gone back and forth: the range holds no octave of freq
    return []

  # Collect the octaves below (freq included, while above fmin)...
  result = []
  down = freq
  while down > fmin:
    result.append(down)
    down /= 2
  result.reverse()
  # ...and the ones above, up to fmax
  up = freq * 2
  while up < fmax:
    result.append(up)
    up *= 2
  return result
def image_path_processor_factory(path):
  """
  Processor for concatenating the ``path`` to relative path images.

  Returns a callable that takes one RST line and, when the line is an
  ``.. image::`` directive pointing to a relative file name, prefixes that
  file name with ``path``. Any other line is returned unchanged.
  """
  prefix = ".. image::"

  def processor(line):
    if not line.startswith(prefix):
      return line
    fname = line[len(prefix):].strip()
    if fname.startswith("/") or "://" in fname:
      return line # Absolute path or URL: leave untouched
    return "{} {}{}".format(prefix, path, fname)

  return processor
def delay(sig):
  """ Simple feedforward delay effect """
  mixer = Streamix()
  sig = thub(sig, 3) # Auto-copy 3 times (remove this line if using feedback)
  mixer.add(0, sig) # Dry signal
  # To get a feedback delay, use "mixer.copy()" below instead of both "sig"
  mixer.add(280 * ms, .1 * sig) # You can also try other constants
  mixer.add(220 * ms, .1 * sig)
  return mixer
def note2snd(pitch, quarters):
  """
  Creates an audio Stream object for a single note.

  Parameters
  ----------
  pitch :
    Pitch note like ``"A4"``, as a string, or ``None`` for a rest.
  quarters :
    Duration in quarters (see ``quarter_dur``).
  """
  dur = quarters * quarter_dur
  if pitch is None: # A rest is just silence for the whole duration
    return zeros(dur)
  return synth(str2freq(pitch) * Hz, dur)
def find_full_name(prefix, suffix="rst"):
  """
  Script path to actual path relative file name converter.

  Parameters
  ----------
  prefix :
    File name prefix (without extension), relative to the script location.
  suffix :
    File name extension (defaults to "rst").

  Returns
  -------
  A file name path relative to the actual location to a file inside the
  script location.

  Warning
  -------
  Calling this OVERWRITES the RST files in the directory it's in, and
  don't ask for confirmation!
  """
  script_dir = os.path.split(__file__)[0]
  fname = os.path.extsep.join([prefix, suffix])
  return os.path.join(script_dir, fname)
def save_to_rst(prefix, data):
  """
  Saves a RST file with the given prefix into the script file location,
  prepending the GPL header to the given data.
  """
  with open(find_full_name(prefix), "w") as rst_file:
    rst_file.writelines([full_gpl_for_rst, data])
def ks_synth(freq):
  """
  Synthesize the given frequency into a Stream by using a model based on
  Karplus-Strong.
  """
  # Filter memory: odd harmonics mixed with white and binary noise
  harmonics = sum(lz.sinusoid(mult * freq) for mult in [1, 3, 9])
  ks_mem = (harmonics + lz.white_noise() + lz.Stream(-1, 1)) / 5
  return lz.karplus_strong(freq, memory=ks_mem)
def m21_to_stream(score, synth=ks_synth, beat=90, fdur=2.,
                  pad_dur=.5, rate=lz.DEFAULT_SAMPLE_RATE):
  """
  Converts Music21 data to a Stream object.

  Parameters
  ----------
  score :
    A Music21 data, usually a music21.stream.Score instance.
  synth :
    A function that receives a frequency as input and should yield a Stream
    instance with the note being played.
  beat :
    The BPM (beats per minute) value to be used in playing.
  fdur :
    Relative duration of a fermata. For example, 1.0 ignores the fermata,
    and 2.0 (default) doubles its duration.
  pad_dur :
    Duration in seconds, but not multiplied by ``s``, to be used as a
    zero-padding ending event (avoids clicks at the end when playing).
  rate :
    The sample rate, given in samples per second.
  """
  # Configuration
  s, Hz = lz.sHz(rate)
  step = 60. / beat * s # Quarter note duration, in samples

  # Flattens the music21 data into (freq, start, dur, has_fermata) tuples
  score = reduce(operator.concat,
                 [[(pitch.frequency * Hz, # Note
                    note.offset * step, # Starting time
                    note.quarterLength * step, # Duration
                    Fermata in note.expressions) for pitch in note.pitches]
                                              for note in score.flat.notes]
                )
  # NOTE(review): ``Fermata in note.expressions`` depends on music21
  # equality between the class and its instances -- confirm it ever holds;
  # ``any(isinstance(e, Fermata) for e in note.expressions)`` may be meant

  # Mix all notes into song
  song = lz.Streamix()
  last_start = 0
  for freq, start, dur, has_fermata in score:
    delta = start - last_start
    if has_fermata:
      delta *= fdur # Bug fix: was a hard-coded "2" ignoring the parameter
    song.add(delta, synth(freq).limit(dur))
    last_start = start

  # Zero-padding and finishing
  song.add(dur + pad_dur * s, lz.Stream([]))
  return song
def pair_strings_sum_formatter(a, b):
  """
  Formats the sum of a and b.

  Note
  ----
  Both inputs are numbers already converted to strings.
  """
  if b.startswith("-"): # Show "a - b" instead of "a + -b"
    return "{0} - {1}".format(a, b[1:])
  return "{0} + {1}".format(a, b)
def float_str(value, symbol_str="", symbol_value=1, after=False,
              max_denominator=1000000):
  """
  Pretty rational string from float numbers.

  Converts a given numeric value to a string based on rational fractions
  of the given symbol, useful for labels in plots.

  Parameters
  ----------
  value :
    A float number to be converted.
  symbol_str :
    String data that will be in the output representing the data as a
    numerator multiplier, if needed. Defaults to an empty string.
  symbol_value :
    The numeric value of the symbol. Defaults to one (no effect).
  after :
    Chooses the place where the ``symbol_str`` should be written. If
    ``True``, that's the end of the string. If ``False``, that's in
    between the numerator and the denominator, before the slash.
    Defaults to ``False``.
  max_denominator :
    An int instance, used to round the float following the given limit.
    Defaults to the integer 1,000,000 (one million).

  Returns
  -------
  A string with the rational number written into as a fraction, with or
  without a multiplying symbol.
  """
  if value == 0:
    return "0"
  ratio = Fraction(value / symbol_value).limit_denominator(max_denominator)
  num, den = ratio.numerator, ratio.denominator

  parts = []
  if num < 0:
    parts.append("-")
    num = -num
  # The "1" numerator is implicit when a symbol replaces it in front
  if (num != 1) or (symbol_str == "") or after:
    parts.append(str(num))
  if not after: # Symbol in front of the slash (value == 0 handled above)
    parts.append(symbol_str)
  if den != 1:
    parts.append("/")
    parts.append(str(den))
  if after: # Symbol at the very end
    parts.append(symbol_str)
  return "".join(parts)
def small_doc(obj, indent="", max_width=80):
  """
  Finds a useful small doc representation of an object.

  Parameters
  ----------
  obj :
    Any object, which the documentation representation should be taken
    from.
  indent :
    Result indentation string to be insert in front of all lines.
  max_width :
    Each line of the result may have at most this length.

  Returns
  -------
  For classes, modules, functions, methods, properties and StrategyDict
  instances, returns the first paragraph in the doctring of the given
  object, as a list of strings, stripped at right and with indent at
  left. For other inputs, it will use themselves cast to string as their
  docstring.
  """
  # No docstring at all: fall back to the object's str() representation
  if not getattr(obj, "__doc__", False):
    data = [el.strip() for el in str(obj).splitlines()]
    if len(data) == 1:
      if data[0].startswith("<audiolazy.lazy_"): # Instance
        data = data[0].split("0x", -1)[0] + "0x...>" # Hide its address
      else: # Symbol-like: show as a RST literal
        data = "".join(["``", data[0], "``"])
    else:
      data = " ".join(data)

  # Empty docstring: emit a visible placeholder
  elif (not obj.__doc__) or (obj.__doc__.strip() == ""):
    data = "\ * * * * ...no docstring... * * * * \ "

  # Docstring: keep only its first paragraph (up to the first blank line)
  else:
    data = (el.strip() for el in obj.__doc__.strip().splitlines())
    data = " ".join(it.takewhile(lambda el: el != "", data))

  # Ensure max_width (word wrap)
  max_width -= len(indent)
  result = []
  for word in data.split():
    if len(word) <= max_width:
      if result:
        # Merge with the last line when the word (plus a space) still fits
        if len(result[-1]) + len(word) + 1 <= max_width:
          word = " ".join([result.pop(), word])
        result.append(word)
      else:
        result = [word]
    else: # Splits big words
      result.extend("".join(w) for w in blocks(word, max_width, padval=""))

  # Apply indentation and finishes
  return [indent + el for el in result]
def meta(*bases, **kwargs):
  """
  Allows unique syntax similar to Python 3 for working with metaclasses in
  both Python 2 and Python 3.

  Returns a placeholder class that, when subclassed, replaces itself by a
  class built with the ``metaclass`` keyword argument (defaults to
  ``type``) and the bases given to this function.
  """
  metaclass = kwargs.get("metaclass", type)
  if not bases:
    bases = (object,)
  class NewMeta(type):
    def __new__(mcls, name, mbases, namespace):
      if name:
        # A real subclass is being created: build it with the desired
        # metaclass and the bases given to meta(), discarding the
        # placeholder found in mbases
        return metaclass.__new__(metaclass, name, bases, namespace)
      # Nameless call (from the return below): create the placeholder
      return super(NewMeta, mcls).__new__(mcls, "", mbases, {})
  return NewMeta("", tuple(), {})
def attack(a, d, s):
  """
  Linear ADS fading attack stream generator, useful to be multiplied with
  a given stream.

  Parameters
  ----------
  a :
    "Attack" time, in number of samples.
  d :
    "Decay" time, in number of samples.
  s :
    "Sustain" amplitude level (should be based on attack amplitude).
    The sustain can be a Stream, if desired.

  Returns
  -------
  Stream instance yielding an endless envelope, or a finite envelope if
  the sustain input is a finite Stream. The attack amplitude is 1.0.
  """
  # Configure sustain possibilities (an iterable sustain ends the envelope)
  # Bug fix: collections.Iterable was removed in Python 3.10; use the
  # collections.abc location, falling back for very old Pythons
  iterable_cls = getattr(collections, "abc", collections).Iterable
  if isinstance(s, iterable_cls):
    it_s = iter(s)
    s = next(it_s)
  else:
    it_s = None
  # Attack and decay line slopes
  m_a = 1. / a
  m_d = (s - 1.) / d
  len_a = int(a + .5) # Rounded durations, in samples
  len_d = int(d + .5)
  for sample in range(len_a):
    yield sample * m_a
  for sample in range(len_d):
    yield 1. + sample * m_d
  # Sustain: endless for a scalar level, else follows the given iterable
  if it_s is None:
    while True:
      yield s
  else:
    for s in it_s:
      yield s
def ones(dur=None):
  """
  Ones stream generator.

  You may multiply your endless stream by this to enforce an end to it.

  Parameters
  ----------
  dur :
    Duration, in number of samples; endless if not given.

  Returns
  -------
  A generator that repeats "1.0" during a given time duration (if any) or
  endlessly.
  """
  endless = dur is None or (isinf(dur) and dur > 0)
  if endless:
    while True:
      yield 1.0
  for _ in range(int(dur + .5)): # Round the duration to whole samples
    yield 1.0
def adsr(dur, a, d, s, r):
  """
  Linear ADSR envelope.

  Parameters
  ----------
  dur :
    Duration, in number of samples, including the release time.
  a :
    "Attack" time, in number of samples.
  d :
    "Decay" time, in number of samples.
  s :
    "Sustain" amplitude level (should be based on attack amplitude).
  r :
    "Release" time, in number of samples.

  Returns
  -------
  A finite generator with the ADSR envelope, starting and finishing with
  0.0 and having peak value of 1.0.
  """
  # Slopes of the three linear segments
  attack_slope = 1. / a
  decay_slope = (s - 1.) / d
  release_slope = - s * 1. / r
  # Rounded segment lengths; sustain takes whatever time is left
  len_a = int(a + .5)
  len_d = int(d + .5)
  len_r = int(r + .5)
  len_s = int(dur + .5) - len_a - len_d - len_r
  for n in range(len_a):
    yield n * attack_slope
  for n in range(len_d):
    yield 1. + n * decay_slope
  for n in range(len_s):
    yield s
  for n in range(len_r):
    yield s + n * release_slope
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def white_noise(dur=None, low=-1., high=1.): """ White noise stream generator. Parameters dur : Duration, in number of samples; endless if not given (or None). low, high : Lower and higher limits. Defaults to the [-1; 1] range. Returns ------- Stream yielding random numbers between -1 and 1. """
def white_noise(dur=None, low=-1., high=1.):
    """
    White noise stream generator.

    Yields uniformly distributed random numbers in ``[low, high]``
    (defaulting to [-1; 1]); endless when ``dur`` is None or +infinity,
    otherwise ``rint(dur)`` samples long.
    """
    if dur is None or (isinf(dur) and dur > 0):
        # Endless stream of samples.
        while True:
            yield random.uniform(low, high)
    else:
        for _ in xrange(rint(dur)):
            yield random.uniform(low, high)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sinusoid(freq, phase=0.): """ Sinusoid based on the optimized math.sin """
def sinusoid(freq, phase=0.):
    """
    Sinusoid based on the optimized math.sin.

    Yields ``sin`` of a phase accumulator that advances by ``freq``
    (rad/sample) starting at ``phase``.
    """
    # The phase is kept wrapped modulo 2*pi so precision doesn't degrade
    # as the argument grows: at 44100 samples/s, 5 seconds accumulates
    # only ~8e-14 peak-to-peak error, which is fairly enough.
    phases = modulo_counter(start=phase, modulo=2 * pi, step=freq)
    for ph in phases:
        yield sin(ph)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def impulse(dur=None, one=1., zero=0.): """ Impulse stream generator. Parameters dur : Duration, in number of samples; endless if not given. Returns ------- Stream that repeats "0.0" during a given time duration (if any) or endlessly, but starts with one (and only one) "1.0". """
def impulse(dur=None, one=1., zero=0.):
    """
    Impulse stream generator.

    Yields a single ``one`` followed by ``zero`` values: endlessly when
    ``dur`` is None or +infinity, otherwise until roughly ``dur`` samples
    were yielded in total. Yields nothing when ``dur < .5``.
    """
    if dur is None or (isinf(dur) and dur > 0):
        yield one
        while True:
            yield zero
    elif dur >= .5:
        yield one
        # Total length rounds to the nearest integer, counting the "one".
        for _ in xrange(int(dur - .5)):
            yield zero
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def karplus_strong(freq, tau=2e4, memory=white_noise): """ Karplus-Strong "digitar" synthesis algorithm. Parameters freq : Frequency, in rad/sample. tau : Time decay (up to ``1/e``, or -8.686 dB), in number of samples. Defaults to 2e4. Be careful: using the default value will make duration different on each sample rate value. Use ``sHz`` if you need that independent from the sample rate and in seconds unit. memory : Memory data for the comb filter (delayed "output" data in memory). Defaults to the ``white_noise`` function. Returns ------- Stream instance with the synthesized data. Note ---- The fractional delays are solved by exponent linearization. See Also -------- sHz : Second and hertz constants from samples/second rate. white_noise : White noise stream generator. """
def karplus_strong(freq, tau=2e4, memory=white_noise):
    """
    Karplus-Strong "digitar" synthesis algorithm.

    Feeds zeros through a comb filter whose delay line is pre-filled by
    ``memory`` (defaults to white noise), with time decay ``tau`` samples
    and delay matching ``freq`` rad/sample. Fractional delays are solved
    by exponent linearization via ``linearize()``.
    """
    delay = 2 * pi / freq  # Comb delay (in samples) for the given frequency
    digitar_filter = comb.tau(delay, tau).linearize()
    return digitar_filter(zeros(), memory=memory)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def normalize(self): """ Returns a new table with values ranging from -1 to 1, reaching at least one of these, unless there's no data. """
def normalize(self):
    """
    Returns a new table with values ranging from -1 to 1, reaching at
    least one of these, unless there's no data.

    Raises ValueError when the table contains only zeros. Note the
    divisor is the (signed) element with largest magnitude, so the
    output sign may be flipped when that element is negative.
    """
    peak = max(self.table, key=abs)
    if peak == 0:
        raise ValueError("Can't normalize zeros")
    return self / peak
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def strategy(self, *names, **kwargs): """ StrategyDict wrapping method for adding a new strategy. Parameters *names : Positional arguments with all names (strings) that could be used to call the strategy to be added, to be used both as key items and as attribute names. keep_name : Boolean keyword-only parameter for choosing whether the ``__name__`` attribute of the decorated/wrapped function should be changed or kept. Defaults to False (i.e., changes the name by default). Returns ------- A decorator/wrapper function to be used once on the new strategy to be added. Example ------- Let's create a StrategyDict that knows its name: Add a first strategy ``swapcase``, using this method as a decorator factory: Let's do it again, but wrapping the strategy functions inline. First two strategies have multiple names, the last keeps the function name, which would otherwise be replaced by the first given name: We can now iterate through the strategies to call them or see their function names ['JUST A TEST', 'Just a Test', 'jUST A tEST', 'just a test'] ['<lambda>', 'lower', 'swapcase', 'upper'] Calling a single strategy: 'testing' 'TESTING' 'tEsTiNg' 'tEsTiNg' 'TESTING' Hint ---- Default strategy is the one stored as the ``default`` attribute, you can change or remove it at any time. When removing all keys that are assigned to the default strategy, the default attribute will be removed from the StrategyDict instance as well. The first strategy added afterwards is the one that will become the new default, unless the attribute is created or changed manually. """
def strategy(self, *names, **kwargs):
    """
    StrategyDict wrapping method for adding a new strategy.

    Returns a decorator that registers the decorated function under all
    of the given ``names`` (as keys and attribute names). The keyword-only
    ``keep_name`` flag (default False) controls whether the function's
    ``__name__`` is kept or replaced by the first given name. The
    decorator returns the StrategyDict itself.
    """
    def wrapper(func):
        # keep_name is validated lazily, when the decorator is applied.
        keep_name = kwargs.pop("keep_name", False)
        if kwargs:
            unknown = next(iter(kwargs))
            raise TypeError("Unknown keyword argument '{}'".format(unknown))
        if not keep_name:
            func.__name__ = str(names[0])
        self[names] = func
        return self
    return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def call_sphinx(out_type, build_dir = "build"): """ Call the ``sphinx-build`` for the given output type and the ``make`` when the target has this possibility. Parameters out_type : A builder name for ``sphinx-build``. See the full list at `<http://sphinx-doc.org/invocation.html>`_. build_dir : Directory for storing the output. Defaults to "build". """
def call_sphinx(out_type, build_dir="build"):
    """
    Call the ``sphinx-build`` for the given output type, then ``make``
    when that target has a make step configured.

    Raises RuntimeError when sphinx-build exits with a nonzero status.
    """
    build_cmd = sphinx_template.format(build_dir=build_dir,
                                       out_type=out_type)
    if sphinx.main(shlex.split(build_cmd)) != 0:
        raise RuntimeError("Something went wrong while building '{0}'"
                           .format(out_type))
    # Some builders (see make_target) need an extra "make" pass.
    if out_type in make_target:
        make_cmd = make_template.format(build_dir=build_dir,
                                        out_type=out_type,
                                        make_param=make_target[out_type])
        call(shlex.split(make_cmd))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def levinson_durbin(acdata, order=None): """ Solve the Yule-Walker linear system of equations. They're given by: .. math:: R . a = r where :math:`R` is a simmetric Toeplitz matrix where each element are lags from the given autocorrelation list. :math:`R` and :math:`r` are defined (Python indexing starts with zero and slices don't include the last element): .. math:: R[i][j] = acdata[abs(j - i)] r = acdata[1 : order + 1] Parameters acdata : Autocorrelation lag list, commonly the ``acorr`` function output. order : The order of the resulting ZFilter object. Defaults to ``len(acdata) - 1``. Returns ------- A FIR filter, as a ZFilter object. The mean squared error over the given data (variance of the white noise) is in its "error" attribute. See Also -------- acorr: Calculate the autocorrelation of a given block. lpc : Calculate the Linear Predictive Coding (LPC) coefficients. parcor : Partial correlation coefficients (PARCOR), or reflection coefficients, relative to the lattice implementation of a filter, obtained by reversing the Levinson-Durbin algorithm. Examples -------- [12, 6, 0, -3, -6, -3, 0, 2, 4, 2] 1 - 0.625 * z^-1 + 0.25 * z^-2 + 0.125 * z^-3 7.875 Notes ----- The Levinson-Durbin algorithm used to solve the equations needs :math:`O(order^2)` floating point operations. """
# Default order spans all given lags; when a higher order is requested,
# the autocorrelation data is zero-padded up to order + 1 lags.
if order is None:
    order = len(acdata) - 1
elif order >= len(acdata):
    acdata = Stream(acdata).append(0).take(order + 1)

# Inner product for filters based on above statistics
def inner(a, b): # Be careful, this depends on acdata !!!
    return sum(acdata[abs(i-j)] * ai * bj
               for i, ai in enumerate(a.numlist)
               for j, bj in enumerate(b.numlist)
              )

# Levinson-Durbin recursion: at each step the order-m filter A is
# corrected by its "backward" counterpart B (time-reversed, delayed).
try:
    A = ZFilter(1)
    for m in xrange(1, order + 1):
        B = A(1 / z) * z ** -m
        A -= inner(A, z ** -m) / inner(B, B) * B
except ZeroDivisionError:
    # inner(B, B) == 0 means the next reflection coefficient is undefined.
    raise ParCorError("Can't find next PARCOR coefficient")

# Mean squared prediction error (white noise variance) for this order.
A.error = inner(A, A)
return A
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def should_skip(app, what, name, obj, skip, options): """ Callback object chooser function for docstring documentation. """
def should_skip(app, what, name, obj, skip, options):
    """
    Callback object chooser function for docstring documentation.

    Returns True for dunder bookkeeping attributes and ABC implementation
    details (``_abc_*``), False for everything else.
    """
    hidden = ("__doc__", "__module__", "__dict__", "__weakref__",
              "__abstractmethods__")
    return name in hidden or name.startswith("_abc_")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup(app): """ Just connects the docstring pre_processor and should_skip functions to be applied on all docstrings. """
def setup(app):
    """
    Just connects the docstring pre_processor and should_skip functions
    to be applied on all docstrings.
    """
    def docstring_processor(*args):
        return pre_processor(*args, namer=audiolazy_namer)
    app.connect('autodoc-process-docstring', docstring_processor)
    app.connect('autodoc-skip-member', should_skip)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def newest_file(file_iterable): """ Returns the name of the newest file given an iterable of file names. """
def newest_file(file_iterable):
    """
    Returns the name of the newest file given an iterable of file names.

    "Newest" is decided by modification time (``os.path.getmtime``).
    """
    return max(file_iterable, key=os.path.getmtime)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def overlap_correlation(wnd, hop): """ Overlap correlation percent for the given overlap hop in samples. """
def overlap_correlation(wnd, hop):
    """
    Overlap correlation percent for the given overlap hop in samples.

    Ratio of the window's correlation with itself shifted by ``hop``
    to the window energy.
    """
    energy = sum(el ** 2 for el in wnd)
    shifted = Stream(wnd).skip(hop)
    return sum(wnd * shifted) / energy
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scalloping_loss(wnd): """ Positive number with the scalloping loss in dB. """
def scalloping_loss(wnd):
    """
    Positive number with the scalloping loss in dB.

    Magnitude loss at the worst case (half-bin) frequency, relative to
    the window's DC gain.
    """
    # Complex exponential sweeping half a bin across the window length.
    half_bin = cexp(line(len(wnd), 0, -1j * pi))
    return -dB20(abs(sum(wnd * half_bin)) / sum(wnd))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_xdb_bin(wnd, power=.5, res=1500): """ A not so fast way to find the x-dB cutoff frequency "bin" index. Parameters wnd: The window itself as an iterable. power: The power value (squared amplitude) where the x-dB value should lie, using ``x = dB10(power)``. res : Zero-padding factor. 1 for no zero-padding, 2 for twice the length, etc.. """
def find_xdb_bin(wnd, power=.5, res=1500):
    """
    A not so fast way to find the x-dB cutoff frequency "bin" index,
    where ``x = dB10(power)``, using a zero-padded DFT (factor ``res``).
    """
    dft_mag = dB20(rfft(wnd, res * len(wnd)))
    # Spectrum relative to the DC bin, shifted down by the x-dB threshold;
    # the cutoff is where this curve first crosses zero.
    root_at_xdb = dft_mag - dft_mag[0] - dB10(power)
    crossing = next(i for i, el in enumerate(zcross(root_at_xdb)) if el)
    return crossing / res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rint(x, step=1): """ Round to integer. Parameters x : Input number (integer or float) to be rounded. step : Quantization level (defaults to 1). If set to 2, the output will be the "best" even number. Result ------ The step multiple nearest to x. When x is exactly halfway between two possible outputs, it'll result the one farthest to zero. """
def rint(x, step=1):
    """
    Round to integer.

    Returns the ``step`` multiple nearest to ``x`` (as an int). Exact
    halfway values round away from zero.
    """
    div, mod = divmod(x, step)
    # Small nudge toward the sign of x to compensate float representation
    # noise before truncating with int().
    err = min(step / 10., .1)
    result = div * step
    if x > 0:
        result += err
    elif x < 0:
        result -= err
    # ">=" for non-negative x and ">" for negative x makes exact halves
    # round away from zero on both sides.
    compare = operator.ge if x >= 0 else operator.gt
    if compare(2 * mod, step):
        result += step
    return int(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def blocks(seq, size=None, hop=None, padval=0.): """ General iterable blockenizer. Generator that gets ``size`` elements from ``seq``, and outputs them in a collections.deque (mutable circular queue) sequence container. Next output starts ``hop`` elements after the first element in last output block. Last block may be appended with ``padval``, if needed to get the desired size. The ``seq`` can have hybrid / hetherogeneous data, it just need to be an iterable. You can use other type content as padval (e.g. None) to help segregate the padding at the end, if desired. Note ---- When hop is less than size, changing the returned contents will keep the new changed value in the next yielded container. """
# Initialization res = deque(maxlen=size) # Circular queue idx = 0 last_idx = size - 1 if hop is None: hop = size reinit_idx = size - hop # Yields each block, keeping last values when needed if hop <= size: for el in seq: res.append(el) if idx == last_idx: yield res idx = reinit_idx else: idx += 1 # Yields each block and skips (loses) data due to hop > size else: for el in seq: if idx < 0: # Skips data idx += 1 else: res.append(el) if idx == last_idx: yield res #res = dtype() idx = size-hop else: idx += 1 # Padding to finish if idx > max(size-hop, 0): for _ in xrange(idx,size): res.append(padval) yield res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def elementwise(name="", pos=None): """ Function auto-map decorator broadcaster. Creates an "elementwise" decorator for one input parameter. To create such, it should know the name (for use as a keyword argument and the position "pos" (input as a positional argument). Without a name, only the positional argument will be used. Without both name and position, the first positional argument will be used. """
if (name == "") and (pos is None): pos = 0 def elementwise_decorator(func): """ Element-wise decorator for functions known to have 1 input and 1 output be applied directly on iterables. When made to work with more than 1 input, all "secondary" parameters will the same in all function calls (i.e., they will not even be a copy). """ @wraps(func) def wrapper(*args, **kwargs): # Find the possibly Iterable argument positional = (pos is not None) and (pos < len(args)) arg = args[pos] if positional else kwargs[name] if isinstance(arg, Iterable) and not isinstance(arg, STR_TYPES): if positional: data = (func(*(args[:pos] + (x,) + args[pos+1:]), **kwargs) for x in arg) else: data = (func(*args, **dict(it.chain(iteritems(kwargs), [(name, x)]))) for x in arg) # Generators should still return generators if isinstance(arg, SOME_GEN_TYPES): return data # Cast to numpy array or matrix, if needed, without actually # importing its package type_arg = type(arg) try: is_numpy = type_arg.__module__ == "numpy" except AttributeError: is_numpy = False if is_numpy: np_type = {"ndarray": sys.modules["numpy"].array, "matrix": sys.modules["numpy"].mat }[type_arg.__name__] return np_type(list(data)) # If it's a Stream, let's use the Stream constructor from .lazy_stream import Stream if issubclass(type_arg, Stream): return Stream(data) # Tuple, list, set, dict, deque, etc.. all falls here return type_arg(data) return func(*args, **kwargs) # wrapper returned value return wrapper # elementwise_decorator returned value return elementwise_decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def almost_eq(a, b, bits=32, tol=1, ignore_type=True, pad=0.): """ Almost equal, based on the amount of floating point significand bits. Alternative to "a == b" for float numbers and iterables with float numbers, and tests for sequence contents (i.e., an elementwise a == b, that also works with generators, nested lists, nested generators, etc.). If the type of both the contents and the containers should be tested too, set the ignore_type keyword arg to False. Default version is based on 32 bits IEEE 754 format (23 bits significand). Could use 64 bits (52 bits significand) but needs a native float type with at least that size in bits. If a and b sizes differ, at least one will be padded with the pad input value to keep going with the comparison. Note ---- Be careful with endless generators! """
# Optionally require both containers/contents to share the same type
if not (ignore_type or type(a) == type(b)):
    return False
is_it_a = isinstance(a, Iterable)
is_it_b = isinstance(b, Iterable)
if is_it_a != is_it_b:
    return False
if is_it_a:
    # Element-wise recursion, padding the shorter input with `pad`
    return all(almost_eq.bits(ai, bi, bits, tol, ignore_type)
               for ai, bi in xzip_longest(a, b, fillvalue=pad))
# Significand (mantissa) sizes for the IEEE 754 formats
significand = {32: 23,
               64: 52,
               80: 63,
               128: 112
              }[bits] # That doesn't include the sign bit
# Relative comparison: |a - b| within `tol` units of the last
# significand bit, scaled by the magnitude of a + b
power = tol - significand - 1
return abs(a - b) <= 2 ** power * abs(a + b)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cached(func): """ Cache decorator for a function without keyword arguments You can access the cache contents using the ``cache`` attribute in the resulting function, which is a dictionary mapping the arguments tuple to the previously returned function result. """
def cached(func):
    """
    Cache decorator for a function without keyword arguments.

    The returned function memoizes results by positional-argument tuple.
    The cache contents are exposed through the ``cache`` attribute, a
    dictionary mapping the arguments tuple to the previously returned
    result (looking up an unseen key there computes and stores it).
    """
    class _Memo(dict):
        # Unseen keys trigger the wrapped call and store its result.
        def __missing__(self, key):
            value = self[key] = func(*key)
            return value
    memo = _Memo()
    @wraps(func)
    def wrapper(*key):
        return memo[key]
    wrapper.cache = memo
    return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_dates(text, source=False, index=False, strict=False, base_date=None): """ Extract datetime strings from text :param text: A string that contains one or more natural language or literal datetime strings :type text: str|unicode :param source: Return the original string segment :type source: boolean :param index: Return the indices where the datetime string was located in text :type index: boolean :param strict: Only return datetimes with complete date information. For example: `July 2016` of `Monday` will not return datetimes. `May 16, 2015` will return datetimes. :type strict: boolean :param base_date: Set a default base datetime when parsing incomplete dates :type base_date: datetime :return: Returns a generator that produces :mod:`datetime.datetime` objects, or a tuple with the source text and index, if requested """
def find_dates(text, source=False, index=False, strict=False, base_date=None):
    """
    Extract datetime strings from text.

    Thin module-level convenience wrapper: builds a DateFinder (seeded
    with ``base_date`` for incomplete dates) and delegates to its
    ``find_dates`` generator, forwarding ``source``, ``index`` and
    ``strict`` unchanged.
    """
    finder = DateFinder(base_date=base_date)
    return finder.find_dates(text, source=source, index=index, strict=strict)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_tzinfo(self, datetime_obj, tz_string): """ take a naive datetime and add dateutil.tz.tzinfo object :param datetime_obj: naive datetime object :return: datetime object with tzinfo """
if datetime_obj is None: return None tzinfo_match = tz.gettz(tz_string) return datetime_obj.replace(tzinfo=tzinfo_match)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_style(value): """ Validate a logging format style. :param value: The logging format style to validate (any value). :returns: The logging format character (a string of one character). :raises: :exc:`~exceptions.ValueError` when the given style isn't supported. On Python 3.2+ this function accepts the logging format styles ``%``, ``{`` and ``$`` while on older versions only ``%`` is accepted (because older Python versions don't support alternative logging format styles). """
def check_style(value):
    """
    Validate a logging format style.

    :param value: The logging format style to validate (any value).
    :returns: The logging format character (a string of one character).
    :raises: :exc:`~exceptions.ValueError` when the given style isn't
             supported.

    On Python 3.2+ this accepts the styles in FORMAT_STYLE_PATTERNS
    (``%``, ``{`` and ``$``); on older versions only the default ``%``
    style is accepted (alternative styles aren't supported there).
    """
    if sys.version_info[:2] >= (3, 2):
        if value not in FORMAT_STYLE_PATTERNS:
            msg = "Unsupported logging format style! (%r)"
            raise ValueError(format(msg, value))
    elif value != DEFAULT_FORMAT_STYLE:
        msg = "Format string styles other than %r require Python 3.2+!"
        # Bug fix: the template and the style were previously passed as
        # two separate ValueError arguments, so the %r placeholder was
        # never filled in; interpolate like the branch above does.
        raise ValueError(format(msg, DEFAULT_FORMAT_STYLE))
    return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def increase_verbosity(): """ Increase the verbosity of the root handler by one defined level. Understands custom logging levels like defined by my ``verboselogs`` module. """
def increase_verbosity():
    """
    Increase the verbosity of the root handler by one defined level.

    Understands custom logging levels like defined by my ``verboselogs``
    module. Already at the most verbose level is a no-op.
    """
    levels = sorted(set(find_defined_levels().values()))
    current = levels.index(get_level())
    # Lower numeric level == more verbose; clamp at the bottom.
    set_level(levels[max(0, current - 1)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decrease_verbosity(): """ Decrease the verbosity of the root handler by one defined level. Understands custom logging levels like defined by my ``verboselogs`` module. """
def decrease_verbosity():
    """
    Decrease the verbosity of the root handler by one defined level.

    Understands custom logging levels like defined by my ``verboselogs``
    module. Already at the least verbose level is a no-op.
    """
    levels = sorted(set(find_defined_levels().values()))
    current = levels.index(get_level())
    # Higher numeric level == less verbose; clamp at the top.
    set_level(levels[min(current + 1, len(levels) - 1)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_level(): """ Get the logging level of the root handler. :returns: The logging level of the root handler (an integer) or :data:`DEFAULT_LOG_LEVEL` (if no root handler exists). """
def get_level():
    """
    Get the logging level of the root handler.

    :returns: The logging level of the root handler (an integer) or
              :data:`DEFAULT_LOG_LEVEL` (if no root handler exists).
    """
    handler, _ = find_handler(logging.getLogger(), match_stream_handler)
    if handler:
        return handler.level
    return DEFAULT_LOG_LEVEL
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_level(level): """ Set the logging level of the root handler. :param level: The logging level to filter on (an integer or string). If no root handler exists yet this automatically calls :func:`install()`. """
def set_level(level):
    """
    Set the logging level of the root handler.

    :param level: The logging level to filter on (an integer or string).

    If no root handler exists yet this automatically calls
    :func:`install()`.
    """
    handler, logger = find_handler(logging.getLogger(), match_stream_handler)
    if not (handler and logger):
        # No suitable handler yet: create one with the requested level.
        install(level=level)
    else:
        # Change the level of the existing handler and widen the
        # selected logger's level if it would swallow messages.
        handler.setLevel(level_to_number(level))
        adjust_level(logger, level)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def adjust_level(logger, level): """ Increase a logger's verbosity up to the requested level. :param logger: The logger to change (a :class:`~logging.Logger` object). :param level: The log level to enable (a string or number). This function is used by functions like :func:`install()`, :func:`increase_verbosity()` and :func:`.enable_system_logging()` to adjust a logger's level so that log messages up to the requested log level are propagated to the configured output handler(s). It uses :func:`logging.Logger.getEffectiveLevel()` to check whether `logger` propagates or swallows log messages of the requested `level` and sets the logger's level to the requested level if it would otherwise swallow log messages. Effectively this function will "widen the scope of logging" when asked to do so but it will never "narrow the scope of logging". This is because I am convinced that filtering of log messages should (primarily) be decided by handlers. """
def adjust_level(logger, level):
    """
    Increase a logger's verbosity up to the requested level.

    :param logger: The logger to change (a :class:`~logging.Logger`).
    :param level: The log level to enable (a string or number).

    Only ever widens the scope of logging: the logger's level is lowered
    when its effective level would swallow messages of the requested
    level, and left alone otherwise (filtering belongs to handlers).
    """
    number = level_to_number(level)
    if logger.getEffectiveLevel() > number:
        logger.setLevel(number)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_defined_levels(): """ Find the defined logging levels. :returns: A dictionary with level names as keys and integers as values. Here's what the result looks like by default (when no custom levels or level names have been defined): {'NOTSET': 0, 'DEBUG': 10, 'INFO': 20, 'WARN': 30, 'WARNING': 30, 'ERROR': 40, 'FATAL': 50, 'CRITICAL': 50} """
def find_defined_levels():
    """
    Find the defined logging levels.

    :returns: A dictionary with level names as keys and integers as
              values, e.g. ``{'NOTSET': 0, 'DEBUG': 10, ...}``.

    Collects every upper-case integer attribute of the :mod:`logging`
    module, which picks up custom levels registered there as well.
    """
    levels = {}
    for attr_name in dir(logging):
        if not attr_name.isupper():
            continue
        attr_value = getattr(logging, attr_name)
        if isinstance(attr_value, int):
            levels[attr_name] = attr_value
    return levels
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def level_to_number(value): """ Coerce a logging level name to a number. :param value: A logging level (integer or string). :returns: The number of the log level (an integer). This function translates log level names into their numeric values. The :mod:`logging` module does this for us on Python 2.7 and 3.4 but fails to do so on Python 2.6 which :mod:`coloredlogs` still supports. """
def level_to_number(value):
    """
    Coerce a logging level name to a number.

    :param value: A logging level (integer or string).
    :returns: The number of the log level (an integer).

    Needed because the :mod:`logging` module translates names itself on
    Python 2.7 / 3.4 but not on Python 2.6, which is still supported.
    Unknown level names fall back to :data:`DEFAULT_LOG_LEVEL` instead
    of raising.
    """
    if not is_string(value):
        return value
    try:
        return find_defined_levels()[value.upper()]
    except KeyError:
        # Don't fail on unsupported log levels.
        return DEFAULT_LOG_LEVEL
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_level_aliases(): """ Find log level names which are aliases of each other. :returns: A dictionary that maps aliases to their canonical name. .. note:: Canonical names are chosen to be the alias with the longest string length so that e.g. ``WARN`` is an alias for ``WARNING`` instead of the other way around. Here's what the result looks like by default (when no custom levels or level names have been defined): {'WARN': 'WARNING', 'FATAL': 'CRITICAL'} """
def find_level_aliases():
    """
    Find log level names which are aliases of each other.

    :returns: A dictionary that maps aliases to their canonical name,
              e.g. ``{'WARN': 'WARNING', 'FATAL': 'CRITICAL'}``.

    The canonical name is the longest of the names sharing a numeric
    value, so ``WARN`` aliases ``WARNING`` and not the other way around.
    """
    by_value = collections.defaultdict(list)
    for level_name, level_value in find_defined_levels().items():
        by_value[level_value].append(level_name)
    aliases = {}
    for shared_names in by_value.values():
        if len(shared_names) > 1:
            ordered = sorted(shared_names, key=len)
            canonical = ordered.pop()
            for alias in ordered:
                aliases[alias] = canonical
    return aliases
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_encoded_styles(text, normalize_key=None): """ Parse text styles encoded in a string into a nested data structure. :param text: The encoded styles (a string). :returns: A dictionary in the structure of the :data:`DEFAULT_FIELD_STYLES` and :data:`DEFAULT_LEVEL_STYLES` dictionaries. Here's an example of how this function works: {'debug': {'color': 'green'}, 'warning': {'color': 'yellow'}, 'error': {'color': 'red'}, 'critical': {'bold': True, 'color': 'red'}} """
def parse_encoded_styles(text, normalize_key=None):
    """
    Parse text styles encoded in a string into a nested data structure.

    :param text: The encoded styles (a string), e.g.
                 ``"debug=green;critical=red,bold"``.
    :returns: A dictionary in the structure of the
              :data:`DEFAULT_FIELD_STYLES` / :data:`DEFAULT_LEVEL_STYLES`
              dictionaries.
    """
    result = {}
    for assignment in split(text, ';'):
        field_name, _, encoded = assignment.partition('=')
        styles = result.setdefault(field_name, {})
        for token in split(encoded, ','):
            # A bare color name or number sets the text color: when this
            # was written background colors weren't supported yet, so a
            # plain color implies the foreground (backwards compatible).
            if token.isdigit():
                styles['color'] = int(token)
            elif token in ANSI_COLOR_CODES:
                styles['color'] = token
            elif '=' in token:
                key, _, value = token.partition('=')
                if key in ('color', 'background'):
                    if value.isdigit():
                        styles[key] = int(value)
                    elif value in ANSI_COLOR_CODES:
                        styles[key] = value
            else:
                # Anything else ("bold", "underline", ...) is a flag.
                styles[token] = True
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_hostname(use_chroot=True): """ Find the host name to include in log messages. :param use_chroot: Use the name of the chroot when inside a chroot? (boolean, defaults to :data:`True`) :returns: A suitable host name (a string). Looks for :data:`CHROOT_FILES` that have a nonempty first line (taken to be the chroot name). If none are found then :func:`socket.gethostname()` is used as a fall back. """
def find_hostname(use_chroot=True):
    """
    Find the host name to include in log messages.

    :returns: A suitable host name (a string).

    Looks for :data:`CHROOT_FILES` that have a nonempty first line
    (taken to be the chroot name); falls back to
    :func:`socket.gethostname()` when none are readable.
    """
    for filename in CHROOT_FILES:
        try:
            with open(filename) as handle:
                name = next(handle).strip()
            if name:
                return name
        except Exception:
            # Unreadable / missing chroot files are silently skipped.
            pass
    return socket.gethostname()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_program_name(): """ Select a suitable program name to embed in log messages. :returns: One of the following strings (in decreasing order of preference): 1. The base name of the currently running Python program or script (based on the value at index zero of :data:`sys.argv`). 2. The base name of the Python executable (based on :data:`sys.executable`). 3. The string 'python'. """
def find_program_name():
    """
    Select a suitable program name to embed in log messages.

    :returns: The first nonempty candidate among: the base name of
              ``sys.argv[0]``, the base name of ``sys.executable``,
              and the literal string ``'python'``.
    """
    # Gotcha: sys.argv[0] is '-c' if Python is started with the -c option.
    candidates = (
        os.path.basename(sys.argv[0]) if sys.argv and sys.argv[0] != '-c' else '',
        os.path.basename(sys.executable) if sys.executable else '',
        'python',
    )
    return next(name for name in candidates if name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def replace_handler(logger, match_handler, reconfigure): """ Prepare to replace a handler. :param logger: Refer to :func:`find_handler()`. :param match_handler: Refer to :func:`find_handler()`. :param reconfigure: :data:`True` if an existing handler should be replaced, :data:`False` otherwise. :returns: A tuple of two values: 1. The matched :class:`~logging.Handler` object or :data:`None` if no handler was matched. 2. The :class:`~logging.Logger` to which the matched handler was attached or the logger given to :func:`replace_handler()`. """
def replace_handler(logger, match_handler, reconfigure):
    """
    Prepare to replace a handler.

    :param logger: Refer to :func:`find_handler()`.
    :param match_handler: Refer to :func:`find_handler()`.
    :param reconfigure: :data:`True` if an existing handler should be
                        replaced, :data:`False` otherwise.
    :returns: ``(matched_handler_or_None, logger)`` where the logger is
              the one the matched handler was attached to (when
              reconfiguring) or the given logger.
    """
    handler, owner = find_handler(logger, match_handler)
    if handler and owner and reconfigure:
        # Detach the matched handler so a differently-behaving one can be
        # installed, and keep working with the logger it was attached to
        # so reconfiguration doesn't narrow the scope of logging.
        owner.removeHandler(handler)
        logger = owner
    return handler, logger
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def walk_propagation_tree(logger): """ Walk through the propagation hierarchy of the given logger. :param logger: The logger whose hierarchy to walk (a :class:`~logging.Logger` object). :returns: A generator of :class:`~logging.Logger` objects. .. note:: This uses the undocumented :class:`logging.Logger.parent` attribute to find higher level loggers, however it won't raise an exception if the attribute isn't available. """
def walk_propagation_tree(logger):
    """
    Walk through the propagation hierarchy of the given logger.

    :param logger: The logger whose hierarchy to walk
                   (a :class:`~logging.Logger` object).
    :returns: A generator of :class:`~logging.Logger` objects.

    .. note:: This uses the undocumented ``logging.Logger.parent``
              attribute (guarded with :func:`getattr`, so a missing
              attribute just ends the walk).
    """
    while isinstance(logger, logging.Logger):
        yield logger
        # Stop when propagation is disabled, otherwise move one level up
        # (the RootLogger's parent is None, which ends the loop).
        logger = getattr(logger, 'parent', None) if logger.propagate else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def colorize_format(self, fmt, style=DEFAULT_FORMAT_STYLE): """ Rewrite a logging format string to inject ANSI escape sequences. :param fmt: The log format string. :param style: One of the characters ``%``, ``{`` or ``$`` (defaults to :data:`DEFAULT_FORMAT_STYLE`). :returns: The logging format string with ANSI escape sequences. This method takes a logging format string like the ones you give to :class:`logging.Formatter` and processes it as follows: 1. First the logging format string is separated into formatting directives versus surrounding text (according to the given `style`). 2. Then formatting directives and surrounding text are grouped based on whitespace delimiters (in the surrounding text). 3. For each group styling is selected as follows: 1. If the group contains a single formatting directive that has a style defined then the whole group is styled accordingly. 2. If the group contains multiple formatting directives that have styles defined then each formatting directive is styled individually and surrounding text isn't styled. As an example consider the default log format (:data:`DEFAULT_LOG_FORMAT`): : %(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s The default field styles (:data:`DEFAULT_FIELD_STYLES`) define a style for the `name` field but not for the `process` field, however because both fields are part of the same whitespace delimited token they'll be highlighted together in the style defined for the `name` field. """
result = []
parser = FormatStringParser(style=style)
# Process the format string group by group (tokens are grouped on
# whitespace in the surrounding text).
for group in parser.get_grouped_pairs(fmt):
    # One entry per named formatting directive in the group; None/empty
    # when no style is configured for that field.
    applicable_styles = [self.nn.get(self.field_styles, token.name)
                         for token in group if token.name]
    if sum(map(bool, applicable_styles)) == 1:
        # If exactly one (1) field style is available for the group of
        # tokens then all of the tokens will be styled the same way.
        # This provides a limited form of backwards compatibility with
        # the (intended) behavior of coloredlogs before the release of
        # version 10.
        result.append(ansi_wrap(
            ''.join(token.text for token in group),
            **next(s for s in applicable_styles if s)
        ))
    else:
        # Zero or multiple styles: style each directive individually and
        # leave the surrounding text unstyled.
        for token in group:
            text = token.text
            if token.name:
                field_styles = self.nn.get(self.field_styles, token.name)
                if field_styles:
                    text = ansi_wrap(text, **field_styles)
            result.append(text)
return ''.join(result)
def format(self, record):
    """
    Apply level-specific styling to log records.

    :param record: A :class:`~logging.LogRecord` object.
    :returns: The result of :func:`logging.Formatter.format()`.

    Injects ANSI escape sequences specific to the record's level (such
    logic cannot be expressed in a log format string) by formatting a
    styled shallow copy of the record, so other formatters and handlers
    never observe the modified message.
    """
    level_style = self.nn.get(self.level_styles, record.levelname)
    # `Empty' may already have been garbage collected when format() runs
    # during interpreter shutdown, hence the `Empty is not None' guard:
    # logging a message without formatting beats raising an exception.
    # https://github.com/xolox/python-coloredlogs/issues/33
    if level_style and Empty is not None:
        # Clone the record without copy.copy(), which has been reported to
        # deadlock (https://github.com/xolox/python-coloredlogs/issues/29):
        # create an empty object and re-class it as a log record instead.
        clone = Empty()
        factory = self.log_record_factory
        clone.__class__ = factory() if factory is not None else logging.LogRecord
        clone.__dict__.update(record.__dict__)
        clone.msg = ansi_wrap(coerce_string(record.msg), **level_style)
        record = clone
    # Delegate the remaining formatting to the base formatter.
    return logging.Formatter.format(self, record)
def get_pairs(self, format_string):
    """
    Tokenize a logging format string and extract field names from tokens.

    :param format_string: The logging format string.
    :returns: A generator of :class:`FormatStringToken` objects.
    """
    for text in self.get_tokens(format_string):
        # A token is either a formatting directive (which references a
        # field name) or a run of surrounding literal text (which doesn't).
        found = self.name_pattern.search(text)
        field_name = found.group(1) if found else None
        yield FormatStringToken(name=field_name, text=text)
def get_pattern(self, field_name):
    """
    Get a regular expression to match a formatting directive that
    references the given field name.

    :param field_name: The name of the field to match (a string).
    :returns: A compiled regular expression object.
    """
    # Specialize the generic pattern by substituting the concrete field
    # name for the `\w+' wildcard, then compile it in verbose mode.
    specialized = self.raw_pattern.replace(r'\w+', field_name)
    return re.compile(specialized, re.VERBOSE)
def get_tokens(self, format_string):
    """
    Tokenize a logging format string.

    :param format_string: The logging format string.
    :returns: A list of strings with formatting directives separated
              from surrounding text.
    """
    # re.split() emits empty strings around adjacent matches; drop them.
    return list(filter(None, self.tokenize_pattern.split(format_string)))
def normalize_name(self, name):
    """
    Normalize a field or level name.

    :param name: The field or level name (a string).
    :returns: The normalized name (a string).

    Transforms the name to lowercase and resolves level name aliases
    (refer to :func:`find_level_aliases()`) to their canonical name,
    e.g. ``WARN`` -> ``warning`` and ``FATAL`` -> ``critical``.
    """
    lowered = name.lower()
    # Resolve aliases to their canonical name; unknown names pass through.
    return self.aliases.get(lowered, lowered)
def main():
    """Command line interface for the ``coloredlogs`` program."""
    pending = []
    try:
        # Parse the command line arguments.
        options, arguments = getopt.getopt(sys.argv[1:], 'cdh', [
            'convert', 'to-html', 'demo', 'help',
        ])
        # Map command line options to actions.
        for option, _value in options:
            if option in ('-c', '--convert', '--to-html'):
                # Capture-and-convert consumes the remaining positional
                # arguments as the command line of the external program.
                pending.append(functools.partial(convert_command_output, *arguments))
                arguments = []
            elif option in ('-d', '--demo'):
                pending.append(demonstrate_colored_logging)
            elif option in ('-h', '--help'):
                usage(__doc__)
                return
            else:
                assert False, "Programming error: Unhandled option!"
        if not pending:
            usage(__doc__)
            return
    except Exception as e:
        warning("Error: %s", e)
        sys.exit(1)
    # Run the requested actions only after all options parsed cleanly.
    for action in pending:
        action()
def capture(command, encoding='UTF-8'):
    """
    Capture the output of an external command as if it runs in an interactive terminal.

    :param command: The command name and its arguments (a list of strings).
    :param encoding: The encoding to use to decode the output (a string).
    :returns: The output of the command (a Unicode string).

    This function runs an external command under ``script`` (emulating an
    interactive terminal) to capture the output of the command as if it was
    running in an interactive terminal (including ANSI escape sequences).
    """
    with open(os.devnull, 'wb') as dev_null:
        # We start by invoking the `script' program in a form that is supported
        # by the Linux implementation [1] but fails command line validation on
        # the MacOS (BSD) implementation [2]: The command is specified using
        # the -c option and the typescript file is /dev/null.
        #
        # [1] http://man7.org/linux/man-pages/man1/script.1.html
        # [2] https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man1/script.1.html
        command_line = ['script', '-qc', ' '.join(map(pipes.quote, command)), '/dev/null']
        script = subprocess.Popen(command_line, stdout=subprocess.PIPE, stderr=dev_null)
        stdout, stderr = script.communicate()
        if script.returncode == 0:
            # If `script' succeeded we assume the Linux implementation, so the
            # captured output is available on its standard output stream.
            output = stdout.decode(encoding)
        else:
            # Otherwise we assume the MacOS (BSD) implementation, whose command
            # line interface requires an actual typescript file, hence the
            # temporary file.
            fd, temporary_file = tempfile.mkstemp(prefix='coloredlogs-', suffix='-capture.txt')
            # Bug fix: close the descriptor returned by mkstemp() so it isn't
            # leaked (the file is reopened by name below).
            os.close(fd)
            try:
                command_line = ['script', '-q', temporary_file] + list(command)
                subprocess.Popen(command_line, stdout=dev_null, stderr=dev_null).wait()
                with codecs.open(temporary_file, 'r', encoding) as handle:
                    output = handle.read()
            finally:
                os.unlink(temporary_file)
        # On MacOS when standard input is /dev/null the captured output can
        # start with the characters '^D' (end-of-file in caret notation),
        # which is an implementation detail that callers shouldn't be
        # bothered with, so we strip it.
        #
        # Bug fix: at this point `output' is a Unicode string (both branches
        # above decode it), so the prefix must be a string as well; comparing
        # against the bytes literal b'^D' raises TypeError on Python 3 and
        # can never match on Python 2.
        if output.startswith('^D'):
            output = output[2:]
        # Clean up backspace and carriage return characters and the 'erase line'
        # ANSI escape sequence and return the output as a Unicode string.
        return u'\n'.join(clean_terminal_output(output))
def convert(text, code=True, tabsize=4):
    """
    Convert text with ANSI escape sequences to HTML.

    :param text: The text with ANSI escape sequences (a string).
    :param code: Whether to wrap the returned HTML fragment in a
                 ``<code>`` element (defaults to :data:`True`).
    :param tabsize: Refer to :func:`str.expandtabs()` for details.
    :returns: The text converted to HTML (a string).
    """
    output = []
    in_span = False
    compatible_text_styles = {
        # The following ANSI text styles have an obvious mapping to CSS.
        ANSI_TEXT_STYLES['bold']: {'font-weight': 'bold'},
        ANSI_TEXT_STYLES['strike_through']: {'text-decoration': 'line-through'},
        ANSI_TEXT_STYLES['underline']: {'text-decoration': 'underline'},
    }
    for token in TOKEN_PATTERN.split(text):
        if token.startswith(('http://', 'https://', 'www.')):
            # Turn bare URLs into hyperlinks.
            url = token if '://' in token else ('http://' + token)
            token = u'<a href="%s" style="color:inherit">%s</a>' % (html_encode(url), html_encode(token))
        elif token.startswith(ANSI_CSI):
            ansi_codes = token[len(ANSI_CSI):-1].split(';')
            if all(c.isdigit() for c in ansi_codes):
                ansi_codes = list(map(int, ansi_codes))
            # First we check for a reset code to close the previous <span>
            # element. As explained on Wikipedia [1] an absence of codes
            # implies a reset code as well: "No parameters at all in ESC[m acts
            # like a 0 reset code".
            # [1] https://en.wikipedia.org/wiki/ANSI_escape_code#CSI_sequences
            if in_span and (0 in ansi_codes or not ansi_codes):
                output.append('</span>')
                in_span = False
            # Now we're ready to generate the next <span> element (if any) in
            # the knowledge that we're emitting opening <span> and closing
            # </span> tags in the correct order.
            styles = {}
            is_faint = (ANSI_TEXT_STYLES['faint'] in ansi_codes)
            is_inverse = (ANSI_TEXT_STYLES['inverse'] in ansi_codes)
            while ansi_codes:
                number = ansi_codes.pop(0)
                # Try to match a compatible text style.
                if number in compatible_text_styles:
                    styles.update(compatible_text_styles[number])
                    continue
                # Try to extract a text and/or background color.
                text_color = None
                background_color = None
                if 30 <= number <= 37:
                    # 30-37 sets the text color from the eight color palette.
                    text_color = EIGHT_COLOR_PALETTE[number - 30]
                elif 40 <= number <= 47:
                    # 40-47 sets the background color from the eight color palette.
                    background_color = EIGHT_COLOR_PALETTE[number - 40]
                elif 90 <= number <= 97:
                    # 90-97 sets the text color from the high-intensity eight color palette.
                    text_color = BRIGHT_COLOR_PALETTE[number - 90]
                elif 100 <= number <= 107:
                    # 100-107 sets the background color from the high-intensity eight color palette.
                    background_color = BRIGHT_COLOR_PALETTE[number - 100]
                elif number in (38, 48) and len(ansi_codes) >= 2 and ansi_codes[0] == 5:
                    # 38;5;N is a text color in the 256 color mode palette,
                    # 48;5;N is a background color in the 256 color mode palette.
                    #
                    # Bug fix: this branch previously tested for 39, but per
                    # the ANSI SGR standard 39 merely resets the text color to
                    # its default; the extended *background* color selector is
                    # 48;5;N.
                    try:
                        # Consume the 5 following 38 or 48.
                        ansi_codes.pop(0)
                        # Consume the 256 color mode color index.
                        color_index = ansi_codes.pop(0)
                        # Set the variable to the corresponding HTML/CSS color.
                        if number == 38:
                            text_color = EXTENDED_COLOR_PALETTE[color_index]
                        elif number == 48:
                            background_color = EXTENDED_COLOR_PALETTE[color_index]
                    except (ValueError, IndexError):
                        pass
                # Apply the 'faint' or 'inverse' text style
                # by manipulating the selected color(s).
                if text_color and is_inverse:
                    # Use the text color as the background color and pick a
                    # text color that will be visible on the resulting
                    # background color.
                    background_color = text_color
                    text_color = select_text_color(*parse_hex_color(text_color))
                if text_color and is_faint:
                    # Emulate a faint color the way gnome-terminal does:
                    # by picking a somewhat darker color.
                    text_color = '#%02X%02X%02X' % tuple(
                        max(0, n - 40) for n in parse_hex_color(text_color)
                    )
                if text_color:
                    styles['color'] = text_color
                if background_color:
                    styles['background-color'] = background_color
            if styles:
                token = '<span style="%s">' % ';'.join(k + ':' + v for k, v in sorted(styles.items()))
                in_span = True
            else:
                token = ''
        else:
            token = html_encode(token)
        output.append(token)
    html = ''.join(output)
    html = encode_whitespace(html, tabsize)
    if code:
        html = '<code>%s</code>' % html
    return html
def encode_whitespace(text, tabsize=4):
    """
    Encode whitespace so that web browsers properly render it.

    :param text: The plain text (a string).
    :param tabsize: Refer to :func:`str.expandtabs()` for details.
    :returns: The text converted to HTML (a string).

    .. note:: The string manipulation performed here is intended not to
              corrupt the HTML generated by :func:`convert()`, but it can
              definitely corrupt HTML from other sources.
    """
    # Normalize Windows line endings, then make line breaks explicit in HTML.
    text = text.replace('\r\n', '\n').replace('\n', '<br>\n')
    # Expand tabs so that only spaces remain to be dealt with.
    text = text.expandtabs(tabsize)
    # Leading spaces (at the start of the string or directly after a line
    # ending) are simply ignored by HTML rendering engines unless they are
    # encoded as non-breaking spaces.
    text = re.sub(INDENT_PATTERN, encode_whitespace_cb, text)
    # Corner case missed above: a line starting with an HTML tag whose first
    # visible text is a space (web browsers ignore that space too).
    text = re.sub(TAG_INDENT_PATTERN, r'\1&nbsp;', text)
    # Encode runs of two or more spaces so they don't visually collapse.
    # Single spaces are deliberately left alone: replacing them would corrupt
    # the HTML markup emitted by convert(), cause awkward word wrapping and
    # bloat the output.
    return re.sub(' {2,}', encode_whitespace_cb, text)
def html_encode(text):
    """
    Encode characters with a special meaning as HTML.

    :param text: The plain text (a string).
    :returns: The text converted to HTML (a string).
    """
    # Ampersands must be escaped first, otherwise the entities introduced
    # by the later replacements would be escaped a second time.
    for plain, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'), ('"', '&quot;')):
        text = text.replace(plain, entity)
    return text
def parse_hex_color(value):
    """
    Convert a CSS color in hexadecimal notation into its R, G, B components.

    :param value: A CSS color in hexadecimal notation (a string like '#000000').
    :return: A tuple with three integers (with values between 0 and 255)
             corresponding to the R, G and B components of the color.
    :raises: :exc:`~exceptions.ValueError` on values that can't be parsed.
    """
    digits = value[1:] if value.startswith('#') else value
    if len(digits) == 3:
        # Shorthand notation: each hex digit is doubled (#abc -> #aabbcc).
        return tuple(int(d * 2, 16) for d in digits)
    if len(digits) == 6:
        # Full notation: two hex digits per component.
        return tuple(int(digits[i:i + 2], 16) for i in (0, 2, 4))
    raise ValueError()
def find_syslog_address():
    """
    Find the most suitable destination for system log messages.

    :returns: The pathname of a log device (a string) or an address/port
              tuple as supported by :class:`~logging.handlers.SysLogHandler`.

    On Mac OS X this prefers :data:`LOG_DEVICE_MACOSX`, after that
    :data:`LOG_DEVICE_UNIX` is checked for existence. If neither device
    file exists the default used by SysLogHandler is returned.
    """
    # Build the candidate device list in order of preference.
    candidates = []
    if sys.platform == 'darwin':
        candidates.append(LOG_DEVICE_MACOSX)
    candidates.append(LOG_DEVICE_UNIX)
    for device in candidates:
        if os.path.exists(device):
            return device
    # Fall back to the SysLogHandler default (UDP to localhost).
    return 'localhost', logging.handlers.SYSLOG_UDP_PORT
def generate_screenshots():
    """Generate screenshots from shell scripts."""
    this_script = os.path.abspath(__file__)
    this_directory = os.path.dirname(this_script)
    repository = os.path.join(this_directory, os.pardir)
    examples_directory = os.path.join(repository, 'docs', 'examples')
    images_directory = os.path.join(repository, 'docs', 'images')
    for shell_script in sorted(glob.glob(os.path.join(examples_directory, '*.sh'))):
        basename, extension = os.path.splitext(os.path.basename(shell_script))
        image_file = os.path.join(images_directory, '%s.png' % basename)
        logger.info("Generating %s by running %s ..",
                    format_path(image_file),
                    format_path(shell_script))
        command_line = [sys.executable, __file__, shell_script]
        random_title = random_string(25)
        # Generate the urxvt command line.
        urxvt_command = [
            'urxvt',
            # Enforce a default geometry.
            '-geometry', '98x30',
            # Set the text and background color.
            '-fg', TEXT_COLOR,
            '-bg', BACKGROUND_COLOR,
            # Set the font name and pixel size.
            '-fn', 'xft:%s:pixelsize=%i' % (FONT_NAME, FONT_SIZE),
            # Set the window title.
            '-title', random_title,
            # Hide scrollbars.
            '+sb',
        ]
        if which('qtile-run'):
            # The qtile tiling window manager requires the following special
            # handling to enable the 'urxvt' window to float, which in turn
            # enables it to respect the '--geometry' option.
            urxvt_command.insert(0, 'qtile-run')
            urxvt_command.insert(1, '-f')
        # Apply the Ubuntu color scheme to urxvt.
        for index, css_color in enumerate(EIGHT_COLOR_PALETTE):
            urxvt_command.extend(('--color%i' % index, css_color))
        # Add the command that should run inside the terminal.
        urxvt_command.extend(('-e', 'sh', '-c', 'setterm -cursor off; %s' % quote(command_line)))
        # Launch urxvt. Bug fix: `async' became a reserved keyword in Python
        # 3.7 (SyntaxError when used as a keyword argument); the executor
        # package renamed the corresponding option to `asynchronous'.
        execute(*urxvt_command, asynchronous=True)
        # Make sure we close the urxvt window.
        try:
            # Wait for urxvt to start up. If I were to improve this I could
            # instead wait for the creation of a file by interpret_script().
            time.sleep(10)
            # Take a screen shot of the window using ImageMagick.
            execute('import', '-window', random_title, image_file)
            # Auto-trim the screen shot, then give it a 5px border.
            execute('convert', image_file, '-trim',
                    '-bordercolor', BACKGROUND_COLOR, '-border', '5',
                    image_file)
        finally:
            execute('wmctrl', '-c', random_title)
def interpret_script(shell_script):
    """Make it appear as if commands are typed into the terminal."""
    with CaptureOutput() as capturer:
        # Bug fix: open the pipe in text mode (universal_newlines=True),
        # otherwise writing the `str' lines read from the shell script to
        # `shell.stdin' raises TypeError on Python 3 because the pipe
        # expects bytes by default.
        shell = subprocess.Popen(['bash', '-'], stdin=subprocess.PIPE,
                                 universal_newlines=True)
        with open(shell_script) as handle:
            for line in handle:
                # Echo the command with a fake green prompt before running it.
                sys.stdout.write(ansi_wrap('$', color='green') + ' ' + line)
                sys.stdout.flush()
                shell.stdin.write(line)
                shell.stdin.flush()
        shell.stdin.close()
        # Give the commands time to finish producing output.
        time.sleep(12)
        # Get the text that was shown in the terminal.
        captured_output = capturer.get_text()
    # Store the text that was shown in the terminal.
    filename, extension = os.path.splitext(shell_script)
    transcript_file = '%s.txt' % filename
    logger.info("Updating %s ..", format_path(transcript_file))
    with open(transcript_file, 'w') as handle:
        handle.write(ansi_strip(captured_output))
def get_version(*args):
    """Extract the version number from a Python module."""
    source_text = get_contents(*args)
    # Collect all dunder assignments (e.g. __version__ = '1.0') in one pass.
    dunder_fields = dict(re.findall('__([a-z]+)__ = [\'"]([^\'"]+)', source_text))
    return dunder_fields['version']
def have_environment_marker_support():
    """
    Check whether setuptools has support for PEP-426 environment marker support.

    Based on the ``setup.py`` script of the ``pytest`` package:
    https://bitbucket.org/pytest-dev/pytest/src/default/setup.py
    """
    try:
        import pkg_resources
        import setuptools
        # Environment markers were introduced in setuptools 0.7.2.
        minimum = pkg_resources.parse_version('0.7.2')
        installed = pkg_resources.parse_version(setuptools.__version__)
        return installed >= minimum
    except Exception:
        # Missing or ancient setuptools: assume no support.
        return False
def _log_http_event(response):
    """ It will create a log event as werkzeug but at the end of request holding the request-id Intended usage is a handler of Flask.after_request :return: The same response object """
    # Build the werkzeug-style access log line before emitting it.
    message = '{ip} - - "{method} {path} {status_code}"'.format(
        ip=request.remote_addr,
        method=request.method,
        path=request.path,
        status_code=response.status_code,
    )
    logger.info(message)
    return response
def build_valid_time(time, step):
    """
    Return dimensions and data of the valid_time corresponding to the given
    ``time`` and ``step``. The data is seconds from the same epoch as ``time``
    and may have one or two dimensions.

    :param time: given in seconds from an epoch, as returned by ``from_grib_date_time``
    :param step: given in hours, as returned by ``from_grib_step``
    """
    step_seconds = step * 3600
    time_is_scalar = len(time.shape) == 0
    step_is_scalar = len(step.shape) == 0
    if not time_is_scalar and not step_is_scalar:
        # Both inputs are arrays: broadcast to a 2-D (time, step) grid.
        return ('time', 'step'), time[:, None] + step_seconds[None, :]
    # At most one input is an array, so plain addition broadcasts correctly.
    dims = ()  # type: T.Tuple[str, ...]
    if not time_is_scalar:
        dims = ('time',)
    elif not step_is_scalar:
        dims = ('step',)
    return dims, time + step_seconds
def open_file(path, grib_errors='warn', **kwargs):
    """Open a GRIB file as a ``cfgrib.Dataset``."""
    if 'mode' in kwargs:
        # `mode' used to be accepted; warn and discard it.
        warnings.warn("the `mode` keyword argument is ignored and deprecated", FutureWarning)
        del kwargs['mode']
    file_stream = messages.FileStream(path, message_class=cfmessage.CfMessage, errors=grib_errors)
    return Dataset(*build_dataset_components(file_stream, **kwargs))
def open_dataset(path, **kwargs):
    # type: (str, T.Any) -> xr.Dataset
    """
    Return a ``xr.Dataset`` with the requested ``backend_kwargs`` from a GRIB file.
    """
    # Force the cfgrib engine, rejecting any conflicting explicit choice.
    if kwargs.setdefault('engine', 'cfgrib') != 'cfgrib':
        raise ValueError("only engine=='cfgrib' is supported")
    return xr.backends.api.open_dataset(path, **kwargs)
def open_datasets(path, backend_kwargs=None, no_warn=False, **kwargs):
    # type: (str, T.Dict[str, T.Any], bool, T.Any) -> T.List[xr.Dataset]
    """
    Open a GRIB file groupping incompatible hypercubes to different datasets
    via simple heuristics.
    """
    if backend_kwargs is None:
        # Bug fix: use a None sentinel instead of a mutable default argument
        # ({}), which is shared across calls and can leak state.
        backend_kwargs = {}
    if not no_warn:
        warnings.warn("open_datasets is an experimental API, DO NOT RELY ON IT!", FutureWarning)
    fbks = []
    datasets = []
    try:
        datasets.append(open_dataset(path, backend_kwargs=backend_kwargs, **kwargs))
    except DatasetBuildError as ex:
        fbks.extend(ex.args[2])
    # NOTE: the recursive call needs to stay out of the exception handler to avoid showing
    # to the user a confusing error message due to exception chaining
    for fbk in fbks:
        bks = backend_kwargs.copy()
        bks['filter_by_keys'] = fbk
        datasets.extend(open_datasets(path, backend_kwargs=bks, no_warn=True, **kwargs))
    return datasets
def codes_get_size(handle, key):
    # type: (cffi.FFI.CData, str) -> int
    """
    Get the number of coded values for a key. If several keys of the same
    name are present, the total sum is returned.

    :param str key: the keyword to get the size of
    :rtype: int
    """
    size_p = ffi.new('size_t *')
    _codes_get_size(handle, key.encode(ENC), size_p)
    return size_p[0]
def codes_get_string_length(handle, key):
    # type: (cffi.FFI.CData, str) -> int
    """
    Get the length of the string representation of the key. If several keys
    of the same name are present, the maximum length is returned.

    :param str key: the keyword to get the string representation size of
    :rtype: int
    """
    length_p = ffi.new('size_t *')
    _codes_get_length(handle, key.encode(ENC), length_p)
    return length_p[0]
def codes_get_bytes_array(handle, key, size):
    # type: (cffi.FFI.CData, str, int) -> T.List[int]
    """
    Get unsigned chars array values from a key.

    :param str key: the keyword whose value(s) are to be extracted
    :rtype: List(int)
    """
    buffer = ffi.new('unsigned char[]', size)
    size_p = ffi.new('size_t *', size)
    _codes_get_bytes(handle, key.encode(ENC), buffer, size_p)
    return list(buffer)
def codes_get_long_array(handle, key, size):
    # type: (cffi.FFI.CData, str, int) -> T.List[int]
    """
    Get long array values from a key.

    :param str key: the keyword whose value(s) are to be extracted
    :rtype: List(int)
    """
    buffer = ffi.new('long[]', size)
    size_p = ffi.new('size_t *', size)
    _codes_get_long_array(handle, key.encode(ENC), buffer, size_p)
    return list(buffer)
def codes_get_double_array(handle, key, size):
    # type: (cffi.FFI.CData, str, int) -> T.List[float]
    """
    Get double array values from a key.

    :param str key: the keyword whose value(s) are to be extracted
    :rtype: T.List(float)
    """
    buffer = ffi.new('double[]', size)
    size_p = ffi.new('size_t *', size)
    _codes_get_double_array(handle, key.encode(ENC), buffer, size_p)
    return list(buffer)
def codes_get_string_array(handle, key, size, length=None):
    # type: (cffi.FFI.CData, str, int, int) -> T.List[str]
    """
    Get string array values from a key.

    :param str key: the keyword whose value(s) are to be extracted
    :rtype: T.List[str]

    Documentation fix: the values are decoded before being returned, so the
    result is a list of str (not bytes); likewise ``key`` must be a str
    because it is encoded below.
    """
    if length is None:
        # Size the per-string buffers to the longest value for this key.
        length = codes_get_string_length(handle, key)
    values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
    values = ffi.new('char*[]', values_keepalive)
    size_p = ffi.new('size_t *', size)
    _codes_get_string_array(handle, key.encode(ENC), values, size_p)
    return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
def codes_get_string(handle, key, length=None):
    # type: (cffi.FFI.CData, str, int) -> str
    """
    Get string element from a key. It may or may not fail in case there are
    more than one key in a message. Outputs the last element.

    :param str key: the keyword to select the value of
    :rtype: str
    """
    if length is None:
        length = codes_get_string_length(handle, key)
    buffer = ffi.new('char[]', length)
    length_p = ffi.new('size_t *', length)
    _codes_get_string = check_return(lib.codes_get_string)
    _codes_get_string(handle, key.encode(ENC), buffer, length_p)
    return ffi.string(buffer, length_p[0]).decode(ENC)
def codes_get_api_version():
    """
    Get the API version.

    Returns the version of the API as a string in the format
    "major.minor.revision".
    """
    # The C API packs the version as major * 10000 + minor * 100 + revision.
    packed = lib.codes_get_api_version()
    packed, patch = divmod(packed, 100)
    major, minor = divmod(packed, 100)
    return "%d.%d.%d" % (major, minor, patch)
def codes_write(handle, outfile):
    # type: (cffi.FFI.CData, T.BinaryIO) -> None
    """
    Write a coded message to a file. If the file does not exist, it is
    created.

    :param outfile: a binary file-like object open for writing
    """
    message_p = ffi.new('const void **')
    length_p = ffi.new('size_t*')
    get_message = check_return(lib.codes_get_message)
    get_message(handle, message_p, length_p)
    # Expose the raw message memory as a buffer and dump it to the file.
    outfile.write(ffi.buffer(message_p[0], size=length_p[0]))