func_code_string
stringlengths
52
1.94M
func_documentation_string
stringlengths
1
47.2k
def transpose(self, semitone):
    """Shift every non-drum track up (positive `semitone`) or down (negative)
    by the given number of semitones; drum tracks are left untouched."""
    non_drums = (track for track in self.tracks if not track.is_drum)
    for track in non_drums:
        track.transpose(semitone)
Transpose the pianorolls of all tracks by a number of semitones, where positive values are for higher key, while negative values are for lower key. The drum tracks are ignored. Parameters ---------- semitone : int The number of semitones to transpose the pianorolls.
def trim_trailing_silence(self):
    """Drop the globally trailing silent time steps from every track's pianoroll."""
    keep = self.get_active_length()
    for track in self.tracks:
        track.pianoroll = track.pianoroll[:keep]
Trim the trailing silences of the pianorolls of all tracks. Trailing silences are considered globally.
def write(self, filename):
    """Export the multitrack pianoroll to a MIDI file, appending '.mid'
    when `filename` lacks a recognized MIDI extension."""
    midi_exts = ('.mid', '.midi', '.MID', '.MIDI')
    target = filename if filename.endswith(midi_exts) else filename + '.mid'
    self.to_pretty_midi().write(target)
Write the multitrack pianoroll to a MIDI file. Parameters ---------- filename : str The name of the MIDI file to which the multitrack pianoroll is written.
def check_pianoroll(arr): if not isinstance(arr, np.ndarray): raise TypeError("`arr` must be of np.ndarray type") if not (np.issubdtype(arr.dtype, np.bool_) or np.issubdtype(arr.dtype, np.number)): return False if arr.ndim != 2: return False if arr.shape[1] != 12...
Return True if the array is a standard piano-roll matrix. Otherwise, return False. Raise TypeError if the input object is not a numpy array.
def binarize(obj, threshold=0):
    """Return a binarized deep copy of `obj` (Track or Multitrack),
    leaving the original untouched. `threshold` defaults to zero."""
    _check_supported(obj)
    duplicate = deepcopy(obj)
    duplicate.binarize(threshold)
    return duplicate
Return a copy of the object with binarized piano-roll(s). Parameters ---------- threshold : int or float Threshold to binarize the piano-roll(s). Default to zero.
def clip(obj, lower=0, upper=127):
    """Return a deep copy of `obj` with its piano-roll(s) clamped to
    [`lower`, `upper`]; the original object is not modified."""
    _check_supported(obj)
    clipped = deepcopy(obj)
    clipped.clip(lower, upper)
    return clipped
Return a copy of the object with piano-roll(s) clipped by a lower bound and an upper bound specified by `lower` and `upper`, respectively. Parameters ---------- lower : int or float The lower bound to clip the piano-roll. Default to 0. upper : int or float The upper bound to clip th...
def pad(obj, pad_length):
    """Return a deep copy of `obj` zero-padded at the end along the time
    axis by `pad_length` steps; the original object is not modified."""
    _check_supported(obj)
    padded = deepcopy(obj)
    padded.pad(pad_length)
    return padded
Return a copy of the object with piano-roll padded with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad along the time axis with zeros.
def pad_to_multiple(obj, factor):
    """Return a deep copy of `obj` zero-padded along the time axis just
    enough that its piano-roll length becomes a multiple of `factor`."""
    _check_supported(obj)
    padded = deepcopy(obj)
    padded.pad_to_multiple(factor)
    return padded
Return a copy of the object with its piano-roll padded with zeros at the end along the time axis with the minimal length that make the length of the resulting piano-roll a multiple of `factor`. Parameters ---------- factor : int The value which the length of the resulting piano-roll will be...
def pad_to_same(obj):
    """Return a deep copy of the Multitrack with every shorter pianoroll
    zero-padded to the length of the longest one.

    Raises TypeError for anything that is not a Multitrack.
    """
    if not isinstance(obj, Multitrack):
        raise TypeError("Support only `pypianoroll.Multitrack` class objects")
    padded = deepcopy(obj)
    padded.pad_to_same()
    return padded
Return a copy of the object with shorter piano-rolls padded with zeros at the end along the time axis to the length of the piano-roll with the maximal length.
def parse(filepath, beat_resolution=24, name='unknown'):
    """Load a MIDI file into a :class:`pypianoroll.Multitrack`.

    Raises ValueError when `filepath` does not carry a MIDI extension
    (.mid/.midi, upper or lower case).
    """
    midi_exts = ('.mid', '.midi', '.MID', '.MIDI')
    if not filepath.endswith(midi_exts):
        raise ValueError("Only MIDI files are supported")
    return Multitrack(filepath, beat_resolution=beat_resolution, name=name)
Return a :class:`pypianoroll.Multitrack` object loaded from a MIDI (.mid, .midi, .MID, .MIDI) file. Parameters ---------- filepath : str The file path to the MIDI file.
def save(filepath, obj, compressed=True):
    """Persist a Multitrack object to an .npz file (compressed by default).

    Raises TypeError for anything that is not a Multitrack.
    """
    if not isinstance(obj, Multitrack):
        raise TypeError("Support only `pypianoroll.Multitrack` class objects")
    obj.save(filepath, compressed)
Save the object to a .npz file. Parameters ---------- filepath : str The path to save the file. obj: `pypianoroll.Multitrack` objects The object to be saved.
def transpose(obj, semitone):
    """Return a deep copy of `obj` transposed by `semitone` semitones
    (positive = up, negative = down); the original is not modified."""
    _check_supported(obj)
    shifted = deepcopy(obj)
    shifted.transpose(semitone)
    return shifted
Return a copy of the object with piano-roll(s) transposed by `semitone` semitones. Parameters ---------- semitone : int Number of semitones to transpose the piano-roll(s).
def trim_trailing_silence(obj):
    """Return a deep copy of `obj` with trailing silence trimmed.

    Fix: the original reached into ``copied.pianoroll`` directly, which
    only exists on Track objects; Multitrack (also accepted by
    ``_check_supported``, as in the sibling helpers) stores per-track
    pianorolls and has no such attribute. Delegating to the object's own
    ``trim_trailing_silence()`` handles both types correctly.
    """
    _check_supported(obj)
    copied = deepcopy(obj)
    copied.trim_trailing_silence()
    return copied
Return a copy of the object with trimmed trailing silence of the piano-roll(s).
def write(obj, filepath):
    """Write a Multitrack object out as a MIDI file at `filepath`.

    Raises TypeError for anything that is not a Multitrack.
    """
    if not isinstance(obj, Multitrack):
        raise TypeError("Support only `pypianoroll.Multitrack` class objects")
    obj.write(filepath)
Write the object to a MIDI file. Parameters ---------- filepath : str The path to write the MIDI file.
def _validate_pianoroll(pianoroll): if not isinstance(pianoroll, np.ndarray): raise TypeError("`pianoroll` must be of np.ndarray type.") if not (np.issubdtype(pianoroll.dtype, np.bool_) or np.issubdtype(pianoroll.dtype, np.number)): raise TypeError("The data type of `pianoroll` ...
Raise an error if the input array is not a standard pianoroll.
def _to_chroma(pianoroll):
    """Return the unnormalized chroma features (time x 12 pitch classes).

    Fixes two defects in the original:
    - the reshape used ``(-1, 12, 10)``, which groups pitches by
      ``pitch % 10`` and produces 10 columns rather than the 12 pitch
      classes; pitches must be grouped as (octave, pitch_class);
    - the in-place ``+=`` wrote through a view and mutated the caller's
      array.
    """
    _validate_pianoroll(pianoroll)
    # Pad 128 pitches up to 132 = 11 * 12 so every pitch maps cleanly to
    # an (octave, pitch_class) cell; pitches 120-127 land in octave 10,
    # classes 0-7. Padding also guarantees we never write into the input.
    padded = np.pad(pianoroll, ((0, 0), (0, 4)), 'constant')
    return np.sum(padded.reshape(-1, 11, 12), axis=1)
Return the unnormalized chroma features of a pianoroll.
def empty_beat_rate(pianoroll, beat_resolution):
    """Return the ratio of empty beats to the total number of beats.

    Fix: the original counted *non-empty* beats
    (``count_nonzero(reshaped.any(1))``) but returned that count as the
    empty-beat ratio; the count is now inverted to match the documented
    contract.
    """
    _validate_pianoroll(pianoroll)
    # One row per beat: beat_resolution time steps x all pitches.
    reshaped = pianoroll.reshape(-1, beat_resolution * pianoroll.shape[1])
    n_empty_beats = len(reshaped) - np.count_nonzero(reshaped.any(1))
    return n_empty_beats / len(reshaped)
Return the ratio of empty beats to the total number of beats in a pianoroll.
def n_pitche_classes_used(pianoroll):
    """Return how many distinct pitch classes are active anywhere in the pianoroll."""
    _validate_pianoroll(pianoroll)
    chroma = _to_chroma(pianoroll)
    active_classes = np.any(chroma, axis=0)
    return np.count_nonzero(active_classes)
Return the number of unique pitch classes used in a pianoroll.
def qualified_note_rate(pianoroll, threshold=2): _validate_pianoroll(pianoroll) if np.issubdtype(pianoroll.dtype, np.bool_): pianoroll = pianoroll.astype(np.uint8) padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant') diff = np.diff(padded, axis=0).reshape(-1) onsets = (diff > 0).non...
Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a pianoroll.
def polyphonic_rate(pianoroll, threshold=2):
    """Return the fraction of time steps sounding more than `threshold`
    simultaneous pitches."""
    _validate_pianoroll(pianoroll)
    pitches_per_step = np.count_nonzero(pianoroll, axis=1)
    n_polyphonic = np.count_nonzero(pitches_per_step > threshold)
    return n_polyphonic / len(pianoroll)
Return the ratio of the number of time steps where the number of pitches being played is larger than `threshold` to the total number of time steps in a pianoroll.
def drum_in_pattern_rate(pianoroll, beat_resolution, tolerance=0.1): if beat_resolution not in (4, 6, 8, 9, 12, 16, 18, 24): raise ValueError("Unsupported beat resolution. Only 4, 6, 8 ,9, 12, " "16, 18, 42 are supported.") _validate_pianoroll(pianoroll) def _drum_patte...
Return the ratio of the number of drum notes that lie on the drum pattern (i.e., at certain time steps) to the total number of drum notes.
def in_scale_rate(pianoroll, key=3, kind='major'): if not isinstance(key, int): raise TypeError("`key` must an integer.") if key > 11 or key < 0: raise ValueError("`key` must be in an integer in between 0 and 11.") if kind not in ('major', 'minor'): raise ValueError("`kind` must...
Return the ratio of the number of nonzero entries that lie in a specific scale to the total number of nonzero entries in a pianoroll. Default to C major scale.
def tonal_distance(pianoroll_1, pianoroll_2, beat_resolution, r1=1.0, r2=1.0, r3=0.5): _validate_pianoroll(pianoroll_1) _validate_pianoroll(pianoroll_2) assert len(pianoroll_1) == len(pianoroll_2), ( "Input pianorolls must have the same length.") def _tonal_matrix(r1, r2,...
Return the tonal distance [1] between the two input pianorolls. [1] Christopher Harte, Mark Sandler, and Martin Gasser. Detecting harmonic change in musical audio. In Proc. ACM Workshop on Audio and Music Computing Multimedia, 2006.
def assign_constant(self, value, dtype=None): if not self.is_binarized(): self.pianoroll[self.pianoroll.nonzero()] = value return if dtype is None: if isinstance(value, int): dtype = int elif isinstance(value, float): ...
Assign a constant value to all nonzeros in the pianoroll. If the pianoroll is not binarized, its data type will be preserved. If the pianoroll is binarized, it will be casted to the type of `value`. Arguments --------- value : int or float The constant value to be as...
def binarize(self, threshold=0):
    """Binarize the pianoroll in place (values > `threshold` become True);
    a no-op when the pianoroll is already boolean."""
    if self.is_binarized():
        return
    self.pianoroll = self.pianoroll > threshold
Binarize the pianoroll. Parameters ---------- threshold : int or float A threshold used to binarize the pianorolls. Defaults to zero.
def check_validity(self): # pianoroll if not isinstance(self.pianoroll, np.ndarray): raise TypeError("`pianoroll` must be a numpy array.") if not (np.issubdtype(self.pianoroll.dtype, np.bool_) or np.issubdtype(self.pianoroll.dtype, np.number)): ra...
Raise error if any invalid attribute found.
def clip(self, lower=0, upper=127):
    """Clamp every value in the pianoroll to the range [`lower`, `upper`]."""
    clamped = self.pianoroll.clip(lower, upper)
    self.pianoroll = clamped
Clip the pianoroll by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianoroll. Defaults to 0. upper : int or float The upper bound to clip the pianoroll. Defaults to 127.
def get_active_length(self):
    """Return the pianoroll length with trailing silence removed (time steps).

    Fix: for an all-silent pianoroll the original returned the full
    length, because ``np.argmax`` over an all-False array is 0; it now
    returns 0, matching the documented "without trailing silence" contract.
    """
    nonzero_steps = np.any(self.pianoroll, axis=1)
    if not nonzero_steps.any():
        # No active step at all -> the whole roll is trailing silence.
        return 0
    # Distance from the end to the last active step.
    inv_last_nonzero_step = np.argmax(np.flip(nonzero_steps, axis=0))
    return self.pianoroll.shape[0] - inv_last_nonzero_step
Return the active length (i.e., without trailing silence) of the pianoroll. The unit is time step. Returns ------- active_length : int The active length (i.e., without trailing silence) of the pianoroll.
def get_active_pitch_range(self): if self.pianoroll.shape[1] < 1: raise ValueError("Cannot compute the active pitch range for an " "empty pianoroll") lowest = 0 highest = 127 while lowest < highest: if np.any(self.pianoroll[:,...
Return the active pitch range as a tuple (lowest, highest). Returns ------- lowest : int The lowest active pitch in the pianoroll. highest : int The highest active pitch in the pianoroll.
def is_binarized(self):
    """Return whether the pianoroll's dtype is boolean."""
    return np.issubdtype(self.pianoroll.dtype, np.bool_)
Return True if the pianoroll is already binarized. Otherwise, return False. Returns ------- is_binarized : bool True if the pianoroll is already binarized; otherwise, False.
def pad(self, pad_length):
    """Append `pad_length` silent (zero) time steps to the end of the pianoroll."""
    pad_spec = ((0, pad_length), (0, 0))
    self.pianoroll = np.pad(self.pianoroll, pad_spec, 'constant')
Pad the pianoroll with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad with zeros along the time axis.
def pad_to_multiple(self, factor):
    """Zero-pad the pianoroll along the time axis so its length becomes a
    multiple of `factor`; a no-op when it is already aligned."""
    leftover = self.pianoroll.shape[0] % factor
    if not leftover:
        return
    self.pianoroll = np.pad(
        self.pianoroll, ((0, factor - leftover), (0, 0)), 'constant')
Pad the pianoroll with zeros at the end along the time axis with the minimum length that makes the resulting pianoroll length a multiple of `factor`. Parameters ---------- factor : int The value which the length of the resulting pianoroll will be a multip...
def transpose(self, semitone): if semitone > 0 and semitone < 128: self.pianoroll[:, semitone:] = self.pianoroll[:, :(128 - semitone)] self.pianoroll[:, :semitone] = 0 elif semitone < 0 and semitone > -128: self.pianoroll[:, :(128 + semitone)] = self.pianorol...
Transpose the pianoroll by a number of semitones, where positive values are for higher key, while negative values are for lower key. Parameters ---------- semitone : int The number of semitones to transpose the pianoroll.
def trim_trailing_silence(self):
    """Cut the pianoroll down to its active length, discarding trailing silence."""
    active = self.get_active_length()
    self.pianoroll = self.pianoroll[:active]
Trim the trailing silence of the pianoroll.
def plot_conv_weights(layer, figsize=(6, 6)): W = layer.W.get_value() shape = W.shape nrows = np.ceil(np.sqrt(shape[0])).astype(int) ncols = nrows for feature_map in range(shape[1]): figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False) for ax in axes.flatten()...
Plot the weights of a specific layer. Only really makes sense with convolutional layers. Parameters ---------- layer : lasagne.layers.Layer
def plot_conv_activity(layer, x, figsize=(6, 8)): if x.shape[0] != 1: raise ValueError("Only one sample can be plotted at a time.") # compile theano function xs = T.tensor4('xs').astype(theano.config.floatX) get_activity = theano.function([xs], get_output(layer, xs)) activity = get_acti...
Plot the activities of a specific layer. Only really makes sense with layers that work on 2D data (2D convolutional layers, 2D pooling layers ...). Parameters ---------- layer : lasagne.layers.Layer x : numpy.ndarray Only takes one sample at a time, i.e. x.shape[0] == 1.
def occlusion_heatmap(net, x, target, square_length=7): if (x.ndim != 4) or x.shape[0] != 1: raise ValueError("This function requires the input data to be of " "shape (1, c, x, y), instead got {}".format(x.shape)) if square_length % 2 == 0: raise ValueError("Square ...
An occlusion test that checks an image for its critical parts. In this function, a square part of the image is occluded (i.e. set to 0) and then the net is tested for its propensity to predict the correct label. One should expect that this propensity shrinks if critical parts of the image are occluded....
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
    """Plot which regions of each image matter most for the net to classify
    it correctly (occlusion test, Zeiler & Fergus 2013)."""
    def heat_fn(net_, X_, n):
        # Per-sample heatmap: occlude with a square_length window and
        # score against that sample's target label.
        return occlusion_heatmap(net_, X_, target[n], square_length)
    return _plot_heat_map(net, X, figsize, heat_fn)
Plot which parts of an image are particularly important for the net to classify the image correctly. See paper: Zeiler, Fergus 2013 Parameters ---------- net : NeuralNet instance The neural net to test. X : numpy.array The input data, should be of shape (b, c, 0, 1). Only makes ...
def get_hex_color(layer_type): COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B', '#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416', '#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D', '#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12'] hash...
Determines the hex color for a layer. :parameters: - layer_type : string Class name of the layer :returns: - color : string containing a hex color for filling block.
def make_pydot_graph(layers, output_shape=True, verbose=False): import pydotplus as pydot pydot_graph = pydot.Dot('Network', graph_type='digraph') pydot_nodes = {} pydot_edges = [] for i, layer in enumerate(layers): layer_name = getattr(layer, 'name', None) if layer_name is None...
:parameters: - layers : list List of the layers, as obtained from lasagne.layers.get_all_layers - output_shape: (default `True`) If `True`, the output shape of each layer will be displayed. - verbose: (default `False`) If `True`, layer attributes like filter s...
def draw_to_file(layers, filename, **kwargs): layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers') else layers) dot = make_pydot_graph(layers, **kwargs) ext = filename[filename.rfind('.') + 1:] with io.open(filename, 'wb') as fid: fid.write(dot.create(format=...
Draws a network diagram to a file :parameters: - layers : list or NeuralNet instance List of layers or the neural net to draw. - filename : string The filename to save output to - **kwargs: see docstring of make_pydot_graph for other options
def draw_to_notebook(layers, **kwargs):
    """Render a network diagram inline in an IPython notebook.

    `layers` may be a list of layers or a NeuralNet-like object exposing
    ``get_all_layers``; extra kwargs are forwarded to make_pydot_graph.
    """
    from IPython.display import Image
    if hasattr(layers, 'get_all_layers'):
        layers = layers.get_all_layers()
    dot = make_pydot_graph(layers, **kwargs)
    return Image(dot.create_png())
Draws a network diagram in an IPython notebook :parameters: - layers : list or NeuralNet instance List of layers or the neural net to draw. - **kwargs : see the docstring of make_pydot_graph for other options
def get_real_filter(layers, img_size): real_filter = np.zeros((len(layers), 2)) conv_mode = True first_conv_layer = True expon = np.ones((1, 2)) for i, layer in enumerate(layers[1:]): j = i + 1 if not conv_mode: real_filter[j] = img_size continue ...
Get the real filter sizes of each layer involved in convolution. See Xudong Cao: https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code This does not yet take into consideration feature pooling, padding, striding and similar gimmicks.
def get_receptive_field(layers, img_size): receptive_field = np.zeros((len(layers), 2)) conv_mode = True first_conv_layer = True expon = np.ones((1, 2)) for i, layer in enumerate(layers[1:]): j = i + 1 if not conv_mode: receptive_field[j] = img_size conti...
Get the real filter sizes of each layer involved in convolution. See Xudong Cao: https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code This does not yet take into consideration feature pooling, padding, striding and similar gimmicks.
def prepare_image(self, image): from decaf.util import transform # soft dep _JEFFNET_FLIP = True # first, extract the 256x256 center. image = transform.scale_and_extract(transform.as_rgb(image), 256) # convert to [0,255] float32 image = image.astype(np.float32) ...
Returns image of shape `(256, 256, 3)`, as expected by `transform` when `classify_direct = True`.
def multiclass_logloss(actual, predicted, eps=1e-15): # Convert 'actual' to a binary array if it's not already: if len(actual.shape) == 1: actual2 = np.zeros((actual.shape[0], predicted.shape[1])) for i, val in enumerate(actual): actual2[i, val] = 1 actual = actual2 ...
Multi class version of Logarithmic Loss metric. :param actual: Array containing the actual target classes :param predicted: Matrix with class predictions, one probability per class
def objective(layers, loss_function, target, aggregate=aggregate, deterministic=False, l1=0, l2=0, get_output_kw=None): if get_output_kw is None: get_output_kw = {} output_layer = layers[-1] networ...
Default implementation of the NeuralNet objective. :param layers: The underlying layers of the NeuralNetwork :param loss_function: The callable loss function to use :param target: the expected output :param aggregate: the aggregation function to use :param deterministic: Whether or not to get a de...
def initialize(self): if getattr(self, '_initialized', False): return out = getattr(self, '_output_layers', None) if out is None: self.initialize_layers() self._check_for_unused_kwargs() iter_funcs = self._create_iter_funcs( self.layer...
Initializes the network. Checks that no extra kwargs were passed to the constructor, and compiles the train, predict, and evaluation functions. Subsequent calls to this function will return without any action.
def initialize_layers(self, layers=None): if layers is not None: self.layers = layers self.layers_ = Layers() #If a Layer, or a list of Layers was passed in if isinstance(self.layers[0], Layer): for out_layer in self.layers: for i, layer i...
Sets up the Lasagne layers :param layers: The dictionary of layers, or a :class:`lasagne.Layers` instance, describing the underlying network :return: the output layer of the underlying lasagne network. :seealso: :ref:`layer-def`
def fit(self, X, y, epochs=None): if self.check_input: X, y = self._check_good_input(X, y) if self.use_label_encoder: self.enc_ = LabelEncoder() y = self.enc_.fit_transform(y).astype(np.int32) self.classes_ = self.enc_.classes_ self.initia...
Runs the training loop for a given number of epochs :param X: The input data :param y: The ground truth :param epochs: The number of epochs to run, if `None` runs for the network's :attr:`max_epochs` :return: This instance
def partial_fit(self, X, y, classes=None):
    """Train for exactly one epoch on (X, y) and return this instance.

    `classes` is accepted for scikit-learn API compatibility but is unused.
    """
    return self.fit(X, y, epochs=1)
Runs a single epoch using the provided data :return: This instance
def _register(self, defaults=None, **kwargs):
    """Fetch-or-create an instance lazily, deferring the ORM hit until the
    object is first used (so custom enums can be declared before Django
    is fully initialized)."""
    def _fetch():
        return self.update_or_create(defaults=defaults, **kwargs)[0]
    entry = SimpleLazyObject(_fetch)
    self._lazy_entries.append(entry)
    return entry
Fetch (update or create) an instance, lazily. We're doing this lazily, so that it becomes possible to define custom enums in your code, even before the Django ORM is fully initialized. Domain.objects.SHOPPING = Domain.objects.register( ref='shopping', name='Web...
def narrow(self, **kwargs): from_date = kwargs.pop('from_date', None) to_date = kwargs.pop('to_date', None) date = kwargs.pop('date', None) qs = self if from_date: qs = qs.filter(date__gte=from_date) if to_date: qs = qs.filter(date__lte=to...
Filter the queryset by the given date bounds; the upper bound `to_date` is inclusive (up to and including).
def set_environment_variables(json_file_path):
    """Read a flat JSON file and export its key/value pairs as environment
    variables; a falsy path is a no-op. Note that values read back via
    os.getenv will always be plain strings."""
    if not json_file_path:
        return
    with open(json_file_path) as json_file:
        env_vars = json.load(json_file)
    export_variables(env_vars)
Read and set environment variables from a flat json file. Bear in mind that env vars set this way and later on read using `os.getenv` function will be strings since after all env vars are just that - plain strings. Json file example: ``` { "FOO": "bar", "BAZ": true } ``...
def millis_interval(start, end):
    """Return the elapsed milliseconds between two datetime instances
    (end - start); sub-millisecond precision yields a float."""
    delta = end - start
    return (delta.days * 86400000
            + delta.seconds * 1000
            + delta.microseconds / 1000)
start and end are datetime instances
def _execute_lua(self, keys, args, client): lua, lua_globals = Script._import_lua(self.load_dependencies) lua_globals.KEYS = self._python_to_lua(keys) lua_globals.ARGV = self._python_to_lua(args) def _call(*call_args): # redis-py and native redis commands are mostly ...
Sets KEYS and ARGV along with the redis.call() function in the Lua globals and executes the Lua redis script
def _import_lua(load_dependencies=True):
    """Import the `lua` bridge module and, optionally, its redis-side
    dependencies.

    :param load_dependencies: whether to load Lua library dependencies.
    :raises RuntimeError: when the Lua bindings are not installed.
    :returns: the (lua module, lua globals) pair.
    """
    try:
        import lua
    except ImportError:
        raise RuntimeError("Lua not installed")
    globs = lua.globals()
    if load_dependencies:
        Script._import_lua_dependencies(lua, globs)
    return lua, globs
Import lua and dependencies. :param load_dependencies: should Lua library dependencies be loaded? :raises: RuntimeError if Lua is not available
def _import_lua_dependencies(lua, lua_globals): if sys.platform not in ('darwin', 'windows'): import ctypes ctypes.CDLL('liblua5.2.so', mode=ctypes.RTLD_GLOBAL) try: lua_globals.cjson = lua.eval('require "cjson"') except RuntimeError: rais...
Imports lua dependencies that are supported by redis lua scripts. The current implementation is fragile to the target platform and lua version and may be disabled if these imports are not needed. Included: - cjson lib. Pending: - base lib. - table li...
def _lua_to_python(lval, return_status=False): import lua lua_globals = lua.globals() if lval is None: # Lua None --> Python None return None if lua_globals.type(lval) == "table": # Lua table --> Python list pval = [] f...
Convert Lua object(s) into Python object(s), as at times Lua object(s) are not compatible with Python functions
def _python_to_lua(pval): import lua if pval is None: # Python None --> Lua None return lua.eval("") if isinstance(pval, (list, tuple, set)): # Python list --> Lua table # e.g.: in lrange # in Python returns: [v1, v2, v3] ...
Convert Python object(s) into Lua object(s), as at times Python object(s) are not compatible with Lua functions
def lock(self, key, timeout=0, sleep=0):
    """Emulate LOCK by handing back a MockRedisLock bound to this client."""
    mock_lock = MockRedisLock(self, key, timeout, sleep)
    return mock_lock
Emulate lock.
def keys(self, pattern='*'): # making sure the pattern is unicode/str. try: pattern = pattern.decode('utf-8') # This throws an AttributeError in python 3, or an # UnicodeEncodeError in python 2 except (AttributeError, UnicodeEncodeError): ...
Emulate keys.
def delete(self, *keys):
    """Emulate DELETE: remove each existing key (and any pending timeout)
    and return how many keys were actually deleted."""
    removed = 0
    for encoded in (self._encode(k) for k in keys):
        if encoded not in self.redis:
            continue
        del self.redis[encoded]
        removed += 1
        self.timeouts.pop(encoded, None)
    return removed
Emulate delete.
def expire(self, key, delta):
    """Emulate EXPIRE; `delta` may be a number of seconds or a timedelta."""
    if not isinstance(delta, timedelta):
        delta = timedelta(seconds=delta)
    return self._expire(self._encode(key), delta)
Emulate expire
def pexpire(self, key, milliseconds):
    """Emulate PEXPIRE: set a timeout on `key` expressed in milliseconds."""
    delta = timedelta(milliseconds=milliseconds)
    return self._expire(self._encode(key), delta)
Emulate pexpire
def expireat(self, key, when):
    """Emulate EXPIREAT: schedule `key` to expire at unix timestamp `when`;
    True when the key exists, False otherwise."""
    expire_time = datetime.fromtimestamp(when)
    encoded = self._encode(key)
    if encoded in self.redis:
        self.timeouts[encoded] = expire_time
        return True
    return False
Emulate expireat
def ttl(self, key):
    """Emulate TTL: PTTL converted to whole seconds; the None / negative
    sentinel values from pttl are passed through unchanged."""
    pttl_value = self.pttl(key)
    if pttl_value is None or pttl_value < 0:
        return pttl_value
    return pttl_value // 1000
Emulate ttl Even though the official redis commands documentation at http://redis.io/commands/ttl states "Return value: Integer reply: TTL in seconds, -2 when key does not exist or -1 when key does not have a timeout." the redis-py lib returns None for both these cases. The lib behavior...
def pttl(self, key): key = self._encode(key) if key not in self.redis: # as of redis 2.8, -2 is returned if the key does not exist return long(-2) if self.strict else None if key not in self.timeouts: # as of redis 2.8, -1 is returned if the ...
Emulate pttl :param key: key for which pttl is requested. :returns: the number of milliseconds till timeout, None if the key does not exist or if the key has no timeout(as per the redis-py lib behavior).
def do_expire(self): # Deep copy to avoid RuntimeError: dictionary changed size during iteration _timeouts = deepcopy(self.timeouts) for key, value in _timeouts.items(): if value - self.clock.now() < timedelta(0): del self.timeouts[key] # remo...
Expire objects assuming now == time
def set(self, key, value, ex=None, px=None, nx=False, xx=False): key = self._encode(key) value = self._encode(value) if nx and xx: return None mode = "nx" if nx else "xx" if xx else None if self._should_set(key, mode): expire = None if...
Set the ``value`` for the ``key`` in the context of the provided kwargs. As per the behavior of the redis-py lib: If nx and xx are both set, the function does nothing and None is returned. If px and ex are both set, the preference is given to px. If the key is not set for some reason, t...
def _should_set(self, key, mode): if mode is None or mode not in ["nx", "xx"]: return True if mode == "nx": if key in self.redis: # nx means set only if key is absent # false if the key already exists return False e...
Determine if it is okay to set a key. If the mode is None, returns True, otherwise, returns True of false based on the value of ``key`` and the ``mode`` (nx | xx).
def setex(self, name, time, value):
    """Emulate SETEX: set `name` to `value` with an expiry of `time`
    seconds. In non-strict mode the (time, value) arguments arrive
    swapped, matching the legacy redis-py signature."""
    if not self.strict:
        time, value = value, time
    return self.set(name, value, ex=time)
Set the value of ``name`` to ``value`` that expires in ``time`` seconds. ``time`` can be represented by an integer or a Python timedelta object.
def psetex(self, key, time, value):
    """Emulate PSETEX: set `key` to `value` with a `time`-millisecond expiry."""
    return self.set(key, value, px=time)
Set the value of ``key`` to ``value`` that expires in ``time`` milliseconds. ``time`` can be represented by an integer or a Python timedelta object.
def setnx(self, key, value):
    """Emulate SETNX: set `key` to `value` only when the key is absent."""
    return self.set(key, value, nx=True)
Set the value of ``key`` to ``value`` if key doesn't exist
def mset(self, *args, **kwargs): mapping = kwargs if args: if len(args) != 1 or not isinstance(args[0], dict): raise RedisError('MSET requires **kwargs or a single dict arg') mapping.update(args[0]) if len(mapping) == 0: raise Response...
Sets key/values based on a mapping. Mapping can be supplied as a single dictionary argument or as kwargs.
def msetnx(self, *args, **kwargs): if args: if len(args) != 1 or not isinstance(args[0], dict): raise RedisError('MSETNX requires **kwargs or a single dict arg') mapping = args[0] else: mapping = kwargs if len(mapping) == 0: ...
Sets key/values based on a mapping if none of the keys are already set. Mapping can be supplied as a single dictionary argument or as kwargs. Returns a boolean indicating if the operation was successful.
def setbit(self, key, offset, value): key = self._encode(key) index, bits, mask = self._get_bits_and_offset(key, offset) if index >= len(bits): bits.extend(b"\x00" * (index + 1 - len(bits))) prev_val = 1 if (bits[index] & mask) else 0 if value: bi...
Set the bit at ``offset`` in ``key`` to ``value``.
def getbit(self, key, offset):
    """Emulate GETBIT: the bit value at `offset`, 0 when the offset lies
    beyond the stored value."""
    encoded = self._encode(key)
    index, bits, mask = self._get_bits_and_offset(encoded, offset)
    if index >= len(bits):
        return 0
    return 1 if bits[index] & mask else 0
Returns the bit value at ``offset`` in ``key``.
def hexists(self, hashkey, attribute):
    """Emulate HEXISTS: True when `attribute` is a field of the hash."""
    fields = self._get_hash(hashkey, 'HEXISTS')
    return self._encode(attribute) in fields
Emulate hexists.
def hget(self, hashkey, attribute):
    """Emulate HGET: the field's value, or None when the field is absent."""
    fields = self._get_hash(hashkey, 'HGET')
    return fields.get(self._encode(attribute))
Emulate hget.
def hdel(self, hashkey, *keys): redis_hash = self._get_hash(hashkey, 'HDEL') count = 0 for key in keys: attribute = self._encode(key) if attribute in redis_hash: count += 1 del redis_hash[attribute] if not redis_has...
Emulate hdel
def hmset(self, hashkey, value):
    """Emulate HMSET: store every key/value pair of the mapping `value`
    (both encoded) into the hash, creating it if needed; always True."""
    redis_hash = self._get_hash(hashkey, 'HMSET', create=True)
    # Distinct loop names avoid shadowing the `value` parameter.
    for field, field_value in value.items():
        redis_hash[self._encode(field)] = self._encode(field_value)
    return True
Emulate hmset.
def hmget(self, hashkey, keys, *args):
    """Emulate HMGET: values for the requested fields, None for missing ones."""
    redis_hash = self._get_hash(hashkey, 'HMGET')
    wanted = self._list_or_args(keys, args)
    return [redis_hash.get(self._encode(field)) for field in wanted]
Emulate hmget.
def hset(self, hashkey, attribute, value): redis_hash = self._get_hash(hashkey, 'HSET', create=True) attribute = self._encode(attribute) attribute_present = attribute in redis_hash redis_hash[attribute] = self._encode(value) return long(0) if attribute_present else long(...
Emulate hset.
def hsetnx(self, hashkey, attribute, value): redis_hash = self._get_hash(hashkey, 'HSETNX', create=True) attribute = self._encode(attribute) if attribute in redis_hash: return long(0) else: redis_hash[attribute] = self._encode(value) return lo...
Emulate hsetnx.
def hincrby(self, hashkey, attribute, increment=1):
    """Emulate HINCRBY: integer-increment a hash field by `increment`
    (default 1) via the shared hincrby routine."""
    return self._hincrby(hashkey, attribute, 'HINCRBY', long, increment)
Emulate hincrby.
def hincrbyfloat(self, hashkey, attribute, increment=1.0):
    """Emulate HINCRBYFLOAT: float-increment a hash field by `increment`
    (default 1.0) via the shared hincrby routine."""
    return self._hincrby(hashkey, attribute, 'HINCRBYFLOAT', float, increment)
Emulate hincrbyfloat.
def _hincrby(self, hashkey, attribute, command, type_, increment): redis_hash = self._get_hash(hashkey, command, create=True) attribute = self._encode(attribute) previous_value = type_(redis_hash.get(attribute, '0')) redis_hash[attribute] = self._encode(previous_value + incremen...
Shared hincrby and hincrbyfloat routine
def lrange(self, key, start, stop):
    """Emulate LRANGE: the slice of the list between the translated
    start/stop positions, stop inclusive."""
    values = self._get_list(key, 'LRANGE')
    start, stop = self._translate_range(len(values), start, stop)
    return values[start:stop + 1]
Emulate lrange.
def lindex(self, key, index):
    """Emulate LINDEX: the element at `index`, or None for a missing key
    or an out-of-range index (redis replies nil in both cases)."""
    redis_list = self._get_list(key, 'LINDEX')
    if self._encode(key) not in self.redis:
        return None
    try:
        return redis_list[index]
    except IndexError:
        return None
Emulate lindex.
def _blocking_pop(self, pop_func, keys, timeout): if not isinstance(timeout, (int, long)): raise RuntimeError('timeout is not an integer or out of range') if timeout is None or timeout == 0: timeout = self.blocking_timeout if isinstance(keys, basestring): ...
Emulate blocking pop functionality
def blpop(self, keys, timeout=0):
    """Emulate BLPOP: blocking left-pop across `keys`, delegated to the
    shared blocking-pop helper."""
    return self._blocking_pop(self.lpop, keys, timeout)
Emulate blpop
def brpop(self, keys, timeout=0):
    """Emulate BRPOP: blocking right-pop across `keys`, delegated to the
    shared blocking-pop helper."""
    return self._blocking_pop(self.rpop, keys, timeout)
Emulate brpop
def lpush(self, key, *args): redis_list = self._get_list(key, 'LPUSH', create=True) # Creates the list at this key if it doesn't exist, and appends args to its beginning args_reversed = [self._encode(arg) for arg in args] args_reversed.reverse() updated_list = args_rever...
Emulate lpush.
def rpop(self, key): redis_list = self._get_list(key, 'RPOP') if self._encode(key) not in self.redis: return None try: value = redis_list.pop() if len(redis_list) == 0: self.delete(key) return value except (IndexErr...
Emulate rpop.
def rpush(self, key, *args):
    """Emulate RPUSH: append the encoded values to the list (creating it
    on demand) and return the list's new length."""
    redis_list = self._get_list(key, 'RPUSH', create=True)
    for arg in args:
        redis_list.append(self._encode(arg))
    return len(redis_list)
Emulate rpush.
def lrem(self, key, value, count=0): value = self._encode(value) redis_list = self._get_list(key, 'LREM') removed_count = 0 if self._encode(key) in self.redis: if count == 0: # Remove all ocurrences while redis_list.count(value): ...
Emulate lrem.
def ltrim(self, key, start, stop):
    """Emulate LTRIM: keep only the translated [start, stop] slice of the
    list (stop inclusive); returns True, as redis replies OK."""
    redis_list = self._get_list(key, 'LTRIM')
    if redis_list:
        begin, end = self._translate_range(len(redis_list), start, stop)
        self.redis[self._encode(key)] = redis_list[begin:end + 1]
    return True
Emulate ltrim.
def rpoplpush(self, source, destination):
    """Emulate RPOPLPUSH: move the tail of `source` to the head of
    `destination`; returns the moved value, or None when source is empty."""
    item = self.rpop(source)
    if item is None:
        return None
    self.lpush(destination, item)
    return item
Emulate rpoplpush
def brpoplpush(self, source, destination, timeout=0):
    """Emulate BRPOPLPUSH: blocking right-pop from `source` (brpop yields a
    (key, value) pair), then left-push the value onto `destination`."""
    popped = self.brpop(source, timeout)
    if popped is None:
        return None
    _, val = popped
    self.lpush(destination, val)
    return val
Emulate brpoplpush