_id | title | partition | text | language | meta_information
|---|---|---|---|---|---|
q272000 | login | test | def login(provider_name):
"""
Login handler, must accept both GET and POST to be able to use OpenID.
"""
# We need response object for the WerkzeugAdapter.
response = make_response()
# Log the user in, pass it the adapter and the provider name.
result = authomatic.login(
WerkzeugAdapter(
request,
response),
provider_name)
# If there is no LoginResult object, the login procedure is still pending.
if result:
if result.user:
| python | {
"resource": ""
} |
q272001 | normalize_dict | test | def normalize_dict(dict_):
"""
Replaces all values that are single-item iterables with the value at
index 0.
:param dict dict_:
Dictionary to normalize.
:returns:
Normalized dictionary.
"""
| python | {
"resource": ""
} |
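The body above is cut off by the preview. A minimal sketch of what the docstring describes, treating lists and tuples as the single-item iterables in question (a plausible reading, not the dataset's verbatim code):

def normalize_dict(dict_):
    # Replace single-item list/tuple values with the value at index 0.
    return {k: v[0] if isinstance(v, (list, tuple)) and len(v) == 1 else v
            for k, v in dict_.items()}

# e.g. normalize_dict({'a': ['x'], 'b': [1, 2]}) == {'a': 'x', 'b': [1, 2]}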
q272002 | items_to_dict | test | def items_to_dict(items):
"""
Converts list of tuples to dictionary with duplicate keys converted to
lists.
:param list items:
List of tuples.
:returns:
:class:`dict`
"""
| python | {
"resource": ""
} |
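The truncated body can be sketched directly from the docstring; this is one plausible implementation, not the dataset's verbatim code:

def items_to_dict(items):
    # Collapse a list of (key, value) tuples; duplicate keys accumulate into lists.
    result = {}
    for key, value in items:
        if key in result:
            if isinstance(result[key], list):
                result[key].append(value)
            else:
                result[key] = [result[key], value]
        else:
            result[key] = value
    return result

# e.g. items_to_dict([('a', 1), ('a', 2), ('b', 3)]) == {'a': [1, 2], 'b': 3}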
q272003 | json_qs_parser | test | def json_qs_parser(body):
"""
Parses response body from JSON, XML or query string.
:param body:
string
:returns:
:class:`dict`, :class:`list` if input is JSON or query string,
:class:`xml.etree.ElementTree.Element` if XML.
| python | {
"resource": ""
} |
q272004 | resolve_provider_class | test | def resolve_provider_class(class_):
"""
Returns a provider class.
:param class_: :class:`string` or
:class:`authomatic.providers.BaseProvider` subclass.
"""
if isinstance(class_, str):
# prepare path for authomatic.providers package
path = '.'.join([__package__, 'providers', class_])
| python | {
"resource": ""
} |
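A hedged completion of the truncated branch above: once the dotted path is built, the class can be resolved with importlib. The hard-coded 'authomatic' package name stands in for __package__, since this sketch lives outside the package:

import importlib

def resolve_provider_class(class_):
    if isinstance(class_, str):
        # Build e.g. 'authomatic.providers.oauth2.Facebook' and import it.
        path = '.'.join(['authomatic', 'providers', class_])
        module_name, class_name = path.rsplit('.', 1)
        return getattr(importlib.import_module(module_name), class_name)
    # Already a class; pass it through unchanged.
    return class_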
q272005 | Session.create_cookie | test | def create_cookie(self, delete=None):
"""
Creates the value for ``Set-Cookie`` HTTP header.
:param bool delete:
If ``True`` the cookie value will be ``deleted`` and the
Expires value will be ``Thu, 01-Jan-1970 00:00:01 GMT``.
"""
value = 'deleted' if delete else self._serialize(self.data)
split_url = parse.urlsplit(self.adapter.url)
domain = split_url.netloc.split(':')[0]
# Work-around for issue #11, failure of WebKit-based browsers to accept
# cookies set as part of a redirect response in some circumstances.
if '.' not in domain:
template = '{name}={value}; Path={path}; HttpOnly{secure}{expires}'
| python | {
"resource": ""
} |
q272006 | Session.save | test | def save(self):
"""
Adds the session cookie to headers.
"""
if self.data:
cookie = self.create_cookie()
cookie_len = len(cookie)
if cookie_len > 4093:
raise SessionError('Cookie too long! The cookie size {0} '
'is more than 4093 bytes.'
| python | {
"resource": ""
} |
q272007 | Session._get_data | test | def _get_data(self):
"""
Extracts the session data from cookie.
| python | {
"resource": ""
} |
q272008 | Session.data | test | def data(self):
"""
Gets session data lazily.
"""
if not self._data:
self._data = self._get_data()
# Always return a dict, even if deserialization | python | {
"resource": ""
} |
q272009 | Session._signature | test | def _signature(self, *parts):
"""
Creates signature for the session.
"""
signature = hmac.new(six.b(self.secret), digestmod=hashlib.sha1)
| python | {
"resource": ""
} |
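A plausible completion of the truncated method: feed the remaining parts into the HMAC and return a hex digest. The '|' delimiter is an assumption:

import hashlib
import hmac
import six

def _signature(self, *parts):
    # HMAC-SHA1 over the joined parts, keyed by the session secret.
    signature = hmac.new(six.b(self.secret), digestmod=hashlib.sha1)
    signature.update(six.b('|'.join(parts)))  # delimiter choice is assumed
    return signature.hexdigest()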
q272010 | Session._serialize | test | def _serialize(self, value):
"""
Converts the value to a signed string with timestamp.
:param value:
Object to be serialized.
:returns:
Serialized value.
"""
# data = copy.deepcopy(value)
data = value
# 1. Serialize
| python | {
"resource": ""
} |
q272011 | Credentials.valid | test | def valid(self):
"""
``True`` if credentials are valid, ``False`` if expired.
| python | {
"resource": ""
} |
q272012 | Credentials.expire_soon | test | def expire_soon(self, seconds):
"""
Returns ``True`` if credentials expire sooner than specified.
:param int seconds:
Number of seconds.
:returns:
``True`` if credentials expire sooner than specified,
| python | {
"resource": ""
} |
q272013 | Credentials.serialize | test | def serialize(self):
"""
Converts the credentials to a percent encoded string to be stored for
later use.
:returns:
:class:`string`
"""
if self.provider_id is None:
raise ConfigError(
'To serialize credentials you need to specify a '
'unique integer under the "id" key in the config '
'for each provider!')
# Get the provider type specific items.
rest = self.provider_type_class().to_tuple(self)
# Provider ID and provider type ID are always the first two items. | python | {
"resource": ""
} |
q272014 | Response.is_binary_string | test | def is_binary_string(content):
"""
Return true if string is binary data.
"""
| python | {
"resource": ""
} |
q272015 | Response.content | test | def content(self):
"""
The whole response content.
"""
if not self._content:
content = self.httplib_response.read()
| python | {
"resource": ""
} |
q272016 | OAuth1.create_request_elements | test | def create_request_elements(
cls, request_type, credentials, url, params=None, headers=None,
body='', method='GET', verifier='', callback=''
):
"""
Creates |oauth1| request elements.
"""
params = params or {}
headers = headers or {}
consumer_key = credentials.consumer_key or ''
consumer_secret = credentials.consumer_secret or ''
token = credentials.token or ''
token_secret = credentials.token_secret or ''
# separate url base and query parameters
url, base_params = cls._split_url(url)
# add extracted params to future params
params.update(dict(base_params))
if request_type == cls.USER_AUTHORIZATION_REQUEST_TYPE:
# no need for signature
if token:
params['oauth_token'] = token
else:
raise OAuth1Error(
'Credentials with valid token are required to create '
'User Authorization URL!')
else:
# signature needed
if request_type == cls.REQUEST_TOKEN_REQUEST_TYPE:
# Request Token URL
if consumer_key and consumer_secret and callback:
params['oauth_consumer_key'] = consumer_key
params['oauth_callback'] = callback
else:
raise OAuth1Error(
'Credentials with valid consumer_key, consumer_secret '
'and callback are required to create Request Token '
'URL!')
elif request_type == cls.ACCESS_TOKEN_REQUEST_TYPE:
# Access Token URL
if consumer_key and consumer_secret and token and verifier:
params['oauth_token'] = token
params['oauth_consumer_key'] = consumer_key
params['oauth_verifier'] = verifier
else:
raise OAuth1Error(
'Credentials with valid consumer_key, '
'consumer_secret, token and argument verifier'
| python | {
"resource": ""
} |
q272017 | Bitbucket._access_user_info | test | def _access_user_info(self):
"""
Email is only available from a separate endpoint, so a second request is needed.
"""
response = super(Bitbucket, self)._access_user_info()
response.data.setdefault("email", None)
email_response = self.access(self.user_email_url)
if email_response.data:
for | python | {
"resource": ""
} |
q272018 | FlaskAuthomatic.login | test | def login(self, *login_args, **login_kwargs):
"""
Decorator for Flask view functions.
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
self.response = make_response()
adapter = WerkzeugAdapter(request, self.response)
login_kwargs.setdefault('session', session)
login_kwargs.setdefault('session_saver', | python | {
"resource": ""
} |
q272019 | GAEOpenID.login | test | def login(self):
"""
Launches the OpenID authentication procedure.
"""
if self.params.get(self.identifier_param):
# =================================================================
# Phase 1 before redirect.
# =================================================================
self._log(
logging.INFO,
u'Starting OpenID authentication procedure.')
url = users.create_login_url(
dest_url=self.url, federated_identity=self.identifier)
self._log(logging.INFO, u'Redirecting user to {0}.'.format(url))
self.redirect(url)
else:
# =================================================================
# Phase 2 after redirect.
# =================================================================
self._log(
logging.INFO,
u'Continuing OpenID authentication procedure after redirect.')
user = users.get_current_user()
if user:
self._log(logging.INFO, u'Authentication successful.')
| python | {
"resource": ""
} |
q272020 | BaseProvider._session_key | test | def _session_key(self, key):
"""
Generates session key string.
:param str key:
e.g. ``"authomatic:facebook:key"``
| python | {
"resource": ""
} |
q272021 | BaseProvider._session_set | test | def _session_set(self, key, value):
"""
Saves a value to session.
"""
| python | {
"resource": ""
} |
q272022 | BaseProvider.csrf_generator | test | def csrf_generator(secret):
"""
Generates CSRF token.
Inspired by this article:
http://blog.ptsecurity.com/2012/10/random-number-security-in-python.html
:returns:
:class:`str` Random unguessable string.
"""
# Create hash from random string plus salt.
| python | {
"resource": ""
} |
q272023 | BaseProvider._log | test | def _log(cls, level, msg, **kwargs):
"""
Logs a message with pre-formatted prefix.
:param int level:
Logging level as specified in the
`logging module <http://docs.python.org/2/library/logging.html>`_ of
| python | {
"resource": ""
} |
q272024 | BaseProvider._http_status_in_category | test | def _http_status_in_category(status, category):
"""
Checks whether an HTTP status code is in the category denoted by the
hundreds digit.
"""
assert category < 10, 'HTTP status category must | python | {
"resource": ""
} |
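The cut-off comparison can be reconstructed from the docstring: the category is the hundreds digit, so integer division by 100 does the check. A sketch:

def _http_status_in_category(status, category):
    assert category < 10, 'HTTP status category must be a single digit!'
    # 404 // 100 == 4, so _http_status_in_category(404, 4) is True.
    return (status // 100) == category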
q272025 | AuthorizationProvider._split_url | test | def _split_url(url):
"""
Splits given url to url base and params converted to list of tuples.
"""
split = parse.urlsplit(url)
base = parse.urlunsplit((split.scheme, split.netloc, | python | {
"resource": ""
} |
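A sketch of the truncated remainder, using only urllib.parse calls consistent with the visible prefix (the keep_blank_values flag is an assumption):

from urllib import parse

def _split_url(url):
    split = parse.urlsplit(url)
    # Rebuild the URL without query or fragment.
    base = parse.urlunsplit((split.scheme, split.netloc, split.path, '', ''))
    # Query parameters as a list of (name, value) tuples.
    params = parse.parse_qsl(split.query, keep_blank_values=True)
    return base, params

# e.g. _split_url('http://example.com/p?a=1') == ('http://example.com/p', [('a', '1')])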
q272026 | cross_origin | test | def cross_origin(app, *args, **kwargs):
"""
This function is the decorator used to wrap a Sanic route.
In the simplest case, simply use the default parameters to allow all
origins in what is the most permissive configuration. If this method
modifies state or performs authentication which may be brute-forced, you
should add some degree of protection, such as Cross-Site Request
Forgery (CSRF) protection.
:param origins:
The origin, or list of origins to allow requests from.
The origin(s) may be regular expressions, case-sensitive strings,
or else an asterisk
Default : '*'
:type origins: list, string or regex
:param methods:
The method or list of methods which the allowed origins are allowed to
access for non-simple requests.
Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]
:type methods: list or string
:param expose_headers:
The header or list of headers which are safe to expose to the API of a CORS API
specification.
Default : None
:type expose_headers: list or string
:param allow_headers:
The header or list of header field names which can be used when this
resource is accessed by allowed origins. The header(s) may be regular
expressions, case-sensitive strings, or else an asterisk.
Default : '*', allow all headers
:type allow_headers: list, string or regex
:param supports_credentials:
Allows users to make authenticated requests. If true, injects the
`Access-Control-Allow-Credentials` header in responses. This allows
cookies and credentials to be submitted across domains.
:note: This option cannot be used in conjunction with a '*' origin
Default : False
:type supports_credentials: bool
:param max_age:
The maximum time for which this CORS request may be cached. This value
is set as the `Access-Control-Max-Age` header.
Default : None
:type max_age: timedelta, integer, string or None
:param send_wildcard: If True, and the origins parameter is `*`, a wildcard
`Access-Control-Allow-Origin` header is sent, rather than the
request's `Origin` header.
Default : False
:type send_wildcard: bool
:param vary_header:
If True, the header Vary: Origin will be returned as per the W3
implementation guidelines.
Setting this header when the `Access-Control-Allow-Origin` is
dynamically generated (e.g. when there is more than one allowed
origin, and an Origin other than '*' is returned) informs CDNs and other
| python | {
"resource": ""
} |
q272027 | set_cors_headers | test | def set_cors_headers(req, resp, context, options):
"""
Performs the actual evaluation of Sanic-CORS options and actually
modifies the response object.
This function is used both in the decorator and the after_request
callback
:param sanic.request.Request req:
"""
try:
request_context = context.request[id(req)]
except AttributeError:
LOG.debug("Cannot find the request context. Is request already finished?")
return resp
# If CORS has already been evaluated via the decorator, skip
evaluated = request_context.get(SANIC_CORS_EVALUATED, False)
if evaluated:
LOG.debug('CORS has already been evaluated, skipping')
return resp
# `resp` can be None in the case of using Websockets
# however this case should have been handled in the `extension` and `decorator` methods
# before getting here. This is a final failsafe check to | python | {
"resource": ""
} |
q272028 | get_app_kwarg_dict | test | def get_app_kwarg_dict(appInstance):
"""Returns the dictionary of CORS specific app configurations."""
# In order to support blueprints which do not have a config attribute
app_config = getattr(appInstance, 'config', {})
return | python | {
"resource": ""
} |
q272029 | flexible_str | test | def flexible_str(obj):
"""
A more flexible str function which intelligently handles stringifying
strings, lists and other iterables. The results are lexicographically sorted
to ensure generated responses are consistent when iterables such as Set
are used.
"""
if obj is None:
return | python | {
"resource": ""
} |
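One plausible shape for the truncated body, following the docstring's promise of sorted, comma-joined iterables:

from collections.abc import Iterable

def flexible_str(obj):
    if obj is None:
        return None
    # Sort non-string iterables so sets and dicts stringify deterministically.
    if not isinstance(obj, str) and isinstance(obj, Iterable):
        return ', '.join(str(item) for item in sorted(obj))
    return str(obj)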
q272030 | ensure_iterable | test | def ensure_iterable(inst):
"""
Wraps scalars or string types as a list, or returns the iterable instance.
"""
if isinstance(inst, str):
| python | {
"resource": ""
} |
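A minimal sketch of the cut-off body; the try/except probe for iterability is an assumed implementation detail:

def ensure_iterable(inst):
    if isinstance(inst, str):
        return [inst]  # strings are iterable but should be treated as scalars
    try:
        iter(inst)
    except TypeError:
        return [inst]  # plain scalar
    return inst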
q272031 | isclose | test | def isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0):
"""
Python 3.4 does not have math.isclose, so we need to steal it and add it here.
"""
try:
return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
except AttributeError:
# Running on older version of python, fall back to hand-rolled implementation
if (rel_tol < 0.0) or (abs_tol < 0.0):
raise ValueError("Tolerances must be non-negative, but are rel_tol: {} and abs_tol: {}".format(rel_tol, abs_tol))
if math.isnan(a) or math.isnan(b):
return False # NaNs are never close to anything, even other NaNs
if | python | {
"resource": ""
} |
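The hand-rolled branch is cut off above; the standard PEP 485 fallback, shown here as a self-contained sketch, finishes the remaining cases:

import math

def isclose_fallback(a, b, *, rel_tol=1e-09, abs_tol=0.0):
    if rel_tol < 0.0 or abs_tol < 0.0:
        raise ValueError("Tolerances must be non-negative")
    if math.isnan(a) or math.isnan(b):
        return False  # NaNs are never close to anything
    if a == b:
        return True   # exact equality, including same-signed infinities
    if math.isinf(a) or math.isinf(b):
        return False  # a lone infinity is never close to a finite value
    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)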
q272032 | deprecated | test | def deprecated(func):
"""
Deprecator decorator.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.warn("Call | python | {
"resource": ""
} |
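The warning call is cut mid-string; the conventional form of this decorator, as a sketch (message wording assumed):

import functools
import warnings

def deprecated(func):
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        # stacklevel=2 points the warning at the caller, not this wrapper.
        warnings.warn("Call to deprecated function {}.".format(func.__name__),
                      category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    return new_func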
q272033 | deserialize | test | def deserialize(bstr):
"""
Attempts to deserialize a bytestring into an audiosegment.
:param bstr: The bytestring serialized via an | python | {
"resource": ""
} |
q272034 | from_file | test | def from_file(path):
"""
Returns an AudioSegment object from the given file based on its file extension.
If the extension is wrong, this will raise an error.
:param path: | python | {
"resource": ""
} |
q272035 | from_numpy_array | test | def from_numpy_array(nparr, framerate):
"""
Returns an AudioSegment created from the given numpy array.
The numpy array must have shape = (num_samples, num_channels).
:param nparr: The numpy array to create an AudioSegment from.
:returns: An AudioSegment created from the given array.
"""
# interleave the audio across all channels and collapse
if nparr.dtype.itemsize not in (1, 2, 4):
raise ValueError("Numpy Array must contain 8, 16, or 32 bit values.")
if len(nparr.shape) == 1:
arrays = [nparr]
elif len(nparr.shape) == 2:
arrays = [nparr[:, i] for i in range(nparr.shape[1])]  # one array per channel, matching the documented (num_samples, num_channels) shape
else:
raise ValueError("Numpy Array must be one or two dimensional. Shape must | python | {
"resource": ""
} |
q272036 | AudioSegment._execute_sox_cmd | test | def _execute_sox_cmd(self, cmd, console_output=False):
"""
Executes a Sox command in a platform-independent manner.
`cmd` must be a format string that includes {inputfile} and {outputfile}.
"""
on_windows = platform.system().lower() == "windows"
# On Windows, a temporary file cannot be shared outside the process that creates it
# so we need to create a "permanent" file that we will use and delete afterwards
def _get_random_tmp_file():
if on_windows:
rand_string = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
tmp = self.name + "_" + rand_string
WinTempFile = collections.namedtuple("WinTempFile", "name")
tmp = WinTempFile(tmp)
else:
tmp = tempfile.NamedTemporaryFile()
return tmp
# Get a temp file to put our data and a temp file to store the result
tmp = _get_random_tmp_file()
othertmp = _get_random_tmp_file()
# Store our data in the temp file
self.export(tmp.name, format="WAV")
| python | {
"resource": ""
} |
q272037 | AudioSegment.filter_silence | test | def filter_silence(self, duration_s=1, threshold_percentage=1, console_output=False):
"""
Returns a copy of this AudioSegment, but whose silence has been removed.
.. note:: This method requires that you have the program 'sox' installed.
.. warning:: This method uses the program 'sox' to perform the task. While this is very fast for a single
function call, the IO may add up for large numbers of AudioSegment objects.
:param duration_s: The number of seconds of "silence" that must be present in a row to
be stripped.
:param threshold_percentage: Silence is defined as any samples whose absolute | python | {
"resource": ""
} |
q272038 | AudioSegment.fft | test | def fft(self, start_s=None, duration_s=None, start_sample=None, num_samples=None, zero_pad=False):
"""
Transforms the indicated slice of the AudioSegment into the frequency domain and returns the bins
and the values.
If neither `start_s` or `start_sample` is specified, the first sample of the slice will be the first sample
of the AudioSegment.
If neither `duration_s` or `num_samples` is specified, the slice will be from the specified start
to the end of the segment.
.. code-block:: python
# Example for plotting the FFT using this function
import matplotlib.pyplot as plt
import numpy as np
seg = audiosegment.from_file("furelise.wav")
# Just take the first 3 seconds
hist_bins, hist_vals = seg[1:3000].fft()
hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals)
plt.plot(hist_bins / 1000, hist_vals_real_normed)
plt.xlabel("kHz")
plt.ylabel("dB")
plt.show()
.. image:: images/fft.png
:param start_s: The start time in seconds. If this is specified, you cannot specify `start_sample`.
:param duration_s: The duration of the slice in seconds. If this is specified, you cannot specify `num_samples`.
:param start_sample: The zero-based index of the first sample to include in the slice.
If this is specified, you cannot specify `start_s`.
:param num_samples: The number of samples to include in the slice. If this is specified, you cannot
specify `duration_s`.
:param zero_pad: If True and the combination of start and duration result in running off the end of
the AudioSegment, the end is zero padded to prevent this.
:returns: np.ndarray of frequencies in Hz, np.ndarray of amount of each frequency
:raises: ValueError If `start_s` and `start_sample` are both specified and/or if both `duration_s` and
`num_samples` are specified.
"""
if start_s is not None and start_sample is not None:
raise ValueError("Only one of start_s and start_sample can be specified.")
if duration_s is not None and num_samples is not None:
raise ValueError("Only one of duration_s and num_samples can be specified.")
if start_s is | python | {
"resource": ""
} |
q272039 | AudioSegment.generate_frames | test | def generate_frames(self, frame_duration_ms, zero_pad=True):
"""
Yields self's data in chunks of frame_duration_ms.
This function adapted from pywebrtc's example [https://github.com/wiseman/py-webrtcvad/blob/master/example.py].
:param frame_duration_ms: The length of each frame in ms.
:param zero_pad: Whether or not to zero pad the end of the AudioSegment object to get all
the audio data out as frames. If not, there may be a part at the end
of the Segment that is cut off (the part will be <= `frame_duration_ms` in length).
:returns: A Frame object with properties 'bytes (the data)', 'timestamp (start time)', and 'duration'.
| python | {
"resource": ""
} |
q272040 | AudioSegment.normalize_spl_by_average | test | def normalize_spl_by_average(self, db):
"""
Normalize the values in the AudioSegment so that its `spl` property
gives `db`.
.. note:: This method is currently broken - it returns an AudioSegment whose
values are much smaller than reasonable, yet which yield an SPL value
that equals the given `db`. Such an AudioSegment will not be serializable
as a WAV file, which will also break any method that relies on SOX.
I may remove this method in the future, since the SPL of an AudioSegment is
pretty questionable to begin with.
:param db: The decibels to normalize average to.
:returns: A new AudioSegment object whose values are changed so that their
average is `db`.
:raises: ValueError if there are no samples in this AudioSegment.
"""
arr = self.to_numpy_array().copy()
if len(arr) == 0:
raise ValueError("Cannot normalize the SPL of an empty AudioSegment")
def rms(x):
return np.sqrt(np.mean(np.square(x)))
# Figure out what RMS we would like
desired_rms = P_REF_PCM * ((10 ** (db/20.0)) - 1E-9)
# Use successive approximation to solve
| python | {
"resource": ""
} |
q272041 | AudioSegment.reduce | test | def reduce(self, others):
"""
Reduces others into this one by concatenating all the others onto this one and
returning the result. Does not modify self, instead, makes a copy and returns that.
:param others: The other AudioSegment objects to append to this one.
:returns: The concatenated result.
"""
| python | {
"resource": ""
} |
q272042 | AudioSegment.resample | test | def resample(self, sample_rate_Hz=None, sample_width=None, channels=None, console_output=False):
"""
Returns a new AudioSegment whose data is the same as this one, but which has been resampled to the
specified characteristics. Any parameter left None will be unchanged.
.. note:: This method requires that you have the program 'sox' installed.
.. warning:: This method uses the program 'sox' to perform the task. While this is very fast for a single
function call, the IO may add up for large numbers of AudioSegment objects.
:param sample_rate_Hz: The new sample rate in Hz.
:param sample_width: The new sample width in bytes, so sample_width=2 would correspond to 16 bit (2 byte) width.
:param channels: The new number of channels.
:param console_output: Will print the output of sox to the console if True.
| python | {
"resource": ""
} |
q272043 | AudioSegment.serialize | test | def serialize(self):
"""
Serializes into a bytestring.
:returns: An object of type Bytes.
"""
d = self.__getstate__()
return pickle.dumps({
| python | {
"resource": ""
} |
q272044 | AudioSegment.spectrogram | test | def spectrogram(self, start_s=None, duration_s=None, start_sample=None, num_samples=None,
window_length_s=None, window_length_samples=None, overlap=0.5, window=('tukey', 0.25)):
"""
Does a series of FFTs from `start_s` or `start_sample` for `duration_s` or `num_samples`.
Effectively, transforms a slice of the AudioSegment into the frequency domain across different
time bins.
.. code-block:: python
# Example for plotting a spectrogram using this function
import audiosegment
import matplotlib.pyplot as plt
#...
seg = audiosegment.from_file("somebodytalking.wav")
freqs, times, amplitudes = seg.spectrogram(window_length_s=0.03, overlap=0.5)
amplitudes = 10 * np.log10(amplitudes + 1e-9)
# Plot
plt.pcolormesh(times, freqs, amplitudes)
plt.xlabel("Time in Seconds")
plt.ylabel("Frequency in Hz")
plt.show()
.. image:: images/spectrogram.png
:param start_s: The start time. Starts at the beginning if neither this nor `start_sample` is specified.
:param duration_s: The duration of the spectrogram in seconds. Goes to the end if neither this nor
`num_samples` is specified.
:param start_sample: The index of the first sample to use. Starts at the beginning if neither this nor
`start_s` is specified.
:param num_samples: The number of samples in the spectrogram. Goes to the end if neither this nor
`duration_s` is specified.
:param window_length_s: The length of each FFT in seconds. If the total number of samples in the spectrogram
is not a multiple of the window length in samples, the last window will be zero-padded.
:param window_length_samples: The length of each FFT in number of samples. If the total number of samples in the
spectrogram is not a multiple of the window length in samples, the last window will
be zero-padded.
:param overlap: The fraction of each window to overlap.
:param window: See Scipy's spectrogram-function_.
This parameter is passed as-is directly into the Scipy spectrogram function. Its documentation is reproduced here:
Desired | python | {
"resource": ""
} |
q272045 | _choose_front_id_from_candidates | test | def _choose_front_id_from_candidates(candidate_offset_front_ids, offset_fronts, offsets_corresponding_to_onsets):
"""
Returns a front ID which is the id of the offset front that contains the most overlap
with offsets that correspond to the given onset front ID.
"""
noverlaps = [] # will contain tuples of the form (number_overlapping, offset_front_id)
for offset_front_id in candidate_offset_front_ids:
offset_front_f_idxs, offset_front_s_idxs = np.where(offset_fronts == offset_front_id)
offset_front_idxs = [(f, i) for f, | python | {
"resource": ""
} |
q272046 | _get_offset_front_id_after_onset_sample_idx | test | def _get_offset_front_id_after_onset_sample_idx(onset_sample_idx, offset_fronts):
"""
Returns the offset_front_id which corresponds to the offset front which occurs
first entirely after the given onset sample_idx.
"""
# get all the offset_front_ids
offset_front_ids = [i for i in np.unique(offset_fronts) if i != 0]
best_id_so_far = -1
closest_offset_sample_idx = sys.maxsize
for offset_front_id in offset_front_ids:
# get all that offset front's indexes
offset_front_idxs = _get_front_idxs_from_id(offset_fronts, offset_front_id)
# get the sample indexes
offset_front_sample_idxs = [s for _f, s in offset_front_idxs]
# if each sample index is greater | python | {
"resource": ""
} |
q272047 | _get_offset_front_id_after_onset_front | test | def _get_offset_front_id_after_onset_front(onset_front_id, onset_fronts, offset_fronts):
"""
Get the ID corresponding to the offset which occurs first after the given onset_front_id.
By `first` I mean the front which contains the offset which is closest to the latest point
in the onset front. By `after`, I mean that the offset must contain only offsets which
occur after the latest onset in the onset | python | {
"resource": ""
} |
q272048 | _match_offset_front_id_to_onset_front_id | test | def _match_offset_front_id_to_onset_front_id(onset_front_id, onset_fronts, offset_fronts, onsets, offsets):
"""
Find all offset fronts which are composed of at least one offset which corresponds to one of the onsets in the
given onset front.
The offset front which contains the most of such offsets is the match.
If there are no such offset fronts, return -1.
"""
# find all offset fronts which are composed of at least one offset which corresponds to one of the onsets in the onset front
# the offset front which contains the most of such offsets is the match
# get the onsets that make up front_id
onset_idxs = _get_front_idxs_from_id(onset_fronts, onset_front_id)
# get the offsets that match the onsets in front_id
offset_idxs = [_lookup_offset_by_onset_idx(i, onsets, offsets) for i in onset_idxs]
# get all offset_fronts which contain at least one of these offsets
candidate_offset_front_ids = set([int(offset_fronts[f, i]) for f, i in offset_idxs])
# It is possible that offset_idxs | python | {
"resource": ""
} |
q272049 | _get_consecutive_and_overlapping_fronts | test | def _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, onset_front_id, offset_front_id):
"""
Gets an onset_front and an offset_front such that they both occupy at least some of the same
frequency channels, then returns the portion of each that overlaps with the other.
"""
# Get the onset front of interest
onset_front = _get_front_idxs_from_id(onset_fronts, onset_front_id)
# Get the offset front of interest
offset_front = _get_front_idxs_from_id(offset_fronts, offset_front_id)
# Keep trying consecutive portions of this onset front until we find a consecutive portion
# that overlaps with part of the offset front
consecutive_portions_of_onset_front = [c for c in _get_consecutive_portions_of_front(onset_front)]
for consecutive_portion_of_onset_front in consecutive_portions_of_onset_front:
# Only get the segment of this front that overlaps in frequencies with the onset front of interest
onset_front_frequency_indexes = [f for f, _ in consecutive_portion_of_onset_front]
| python | {
"resource": ""
} |
q272050 | _update_segmentation_mask | test | def _update_segmentation_mask(segmentation_mask, onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap):
"""
Returns an updated segmentation mask such that the input `segmentation_mask` has been updated by segmenting between
`onset_front_id` and `offset_front_id`, as found in `onset_fronts` and `offset_fronts`, respectively.
This function also returns the onset_fronts and offset_fronts matrices, updated so that any fronts that are of
less than 3 channels wide are removed.
This function also returns a boolean value indicating whether the onset channel went to completion.
Specifically, segments by doing the following:
- Going across frequencies in the onset_front,
- add the segment mask ID (the onset front ID) to all samples between the onset_front and the offset_front,
if the offset_front is in that frequency.
Possible scenarios:
Fronts line up completely:
::
| | S S S
| | => S S S
| | S S S
| | S S S
Onset front starts before offset front:
::
| |
| | S S S
| | => S S S
| | S S S
Onset front ends after offset front:
::
| | S S S
| | => S S S
| | S S S
| |
Onset front starts before and ends after offset front:
::
| |
| | => S S S
| | S S S
| |
The above three options in reverse:
::
| |S S| |
|S S| |S S| |S S|
|S S| |S S| |S S|
|S S| | |
There is one last scenario:
::
| |
\ /
\ /
/ \
| |
Where the offset and onset fronts cross one another. If this happens, we simply
reverse the indices and accept:
::
|sss|
\sss/
\s/
/s\
|sss|
The other option would be to destroy the offset front from the crossover point on, and
then search for a new offset front for the rest of the onset front.
"""
# Get the portions of the onset and offset fronts that overlap and are consecutive
onset_front_overlap, offset_front_overlap = _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap)
onset_front = _get_front_idxs_from_id(onset_fronts, onset_front_id)
offset_front = _get_front_idxs_from_id(offset_fronts, offset_front_id_most_overlap)
msg = "Onset front {} and offset front {} result in consecutive overlapping portions of (on) {} and (off) {}, one of which is empty".format(
onset_front, offset_front, onset_front_overlap, offset_front_overlap
)
assert onset_front_overlap, msg
assert offset_front_overlap, msg
onset_front = onset_front_overlap
offset_front = offset_front_overlap
# Figure out which frequencies will go in the segment
flow_on, _slow_on = onset_front[0]
fhigh_on, _shigh_on = onset_front[-1]
flow_off, _slow_off = offset_front[0]
fhigh_off, _shigh_off = offset_front[-1]
flow = max(flow_on, flow_off)
fhigh = min(fhigh_on, fhigh_off)
# Update all the masks with the segment
for fidx, _freqchan in enumerate(segmentation_mask[flow:fhigh + 1, :], start=flow):
assert fidx >= flow, "Frequency index is {}, but we should have started at {}".format(fidx, flow)
assert (fidx - flow) < len(onset_front), "Frequency index {} minus starting frequency {} is too large for nfrequencies {} in onset front {}".format(
fidx, flow, len(onset_front), onset_front
| python | {
"resource": ""
} |
q272051 | _front_id_from_idx | test | def _front_id_from_idx(front, index):
"""
Returns the front ID found in `front` at the given `index`.
:param front: An onset or offset front array of shape [nfrequencies, nsamples]
:index: A tuple of the form (frequency index, sample index)
:returns: | python | {
"resource": ""
} |
q272052 | _get_front_ids_one_at_a_time | test | def _get_front_ids_one_at_a_time(onset_fronts):
"""
Yields one onset front ID at a time until they are gone. All the onset fronts from a
frequency channel are yielded, then all of the next channel's, etc., though one at a time.
"""
yielded_so_far = set()
for row in onset_fronts:
| python | {
"resource": ""
} |
q272053 | _get_corresponding_offsets | test | def _get_corresponding_offsets(onset_fronts, onset_front_id, onsets, offsets):
"""
Gets the offsets that occur as close as possible to the onsets in the given onset-front.
"""
corresponding_offsets = []
for index in _get_front_idxs_from_id(onset_fronts, onset_front_id):
| python | {
"resource": ""
} |
q272054 | _remove_overlaps | test | def _remove_overlaps(segmentation_mask, fronts):
"""
Removes all points in the fronts that overlap with the | python | {
"resource": ""
} |
q272055 | _remove_fronts_that_are_too_small | test | def _remove_fronts_that_are_too_small(fronts, size):
"""
Removes all fronts from `fronts` which are strictly smaller than
`size` consecutive frequencies in length.
"""
ids = np.unique(fronts)
for id in ids:
if id == 0 or id == -1:
| python | {
"resource": ""
} |
q272056 | _break_poorly_matched_fronts | test | def _break_poorly_matched_fronts(fronts, threshold=0.1, threshold_overlap_samples=3):
"""
For each onset front, for each frequency in that front, break the onset front if the signals
between this frequency's onset and the next frequency's onset are not similar enough.
Specifically:
If we have the following two frequency channels, and the two O's are part of the same onset front,
::
[ . O . . . . . . . . . . ]
[ . . . . O . . . . . . . ]
We compare the signals x and y:
::
[ . x x x x . . . . . . . ]
[ . y y y y . . . . . . . ]
And if they are not sufficiently similar (via a DSP correlation algorithm), we break the onset
front between these two channels.
Once this is done, remove any onset fronts that are less than 3 channels wide.
"""
assert threshold_overlap_samples > 0, "Number of samples of overlap must be greater than zero"
breaks_after = {}
for front_id in _get_front_ids_one_at_a_time(fronts):
front = _get_front_idxs_from_id(fronts, front_id)
for i, (f, s) in enumerate(front):
if i < len(front) - 1:
# Get the signal from f, s to f, s+1 and the signal from f+1, s to f+1, s+1
next_f, next_s = front[i + 1]
low_s = min(s, next_s)
high_s = max(s, next_s)
sig_this_f = fronts[f, low_s:high_s]
sig_next_f = fronts[next_f, low_s:high_s]
assert len(sig_next_f) == len(sig_this_f)
if len(sig_next_f) > threshold_overlap_samples:
# If these two signals are not sufficiently close in form, this front should be broken up
correlation = signal.correlate(sig_this_f, sig_next_f, mode='same')
assert len(correlation) > 0
correlation = correlation / max(correlation + 1E-9)
similarity = | python | {
"resource": ""
} |
q272057 | _merge_adjacent_segments | test | def _merge_adjacent_segments(mask):
"""
Merges all segments in `mask` which are touching.
"""
mask_ids = [id for id in np.unique(mask) if id != 0]
for id in mask_ids:
myfidxs, mysidxs = np.where(mask == id)
for other in mask_ids: # Ugh, brute force O(N^2) algorithm.. gross..
if id == other:
continue
else:
| python | {
"resource": ""
} |
q272058 | _separate_masks | test | def _separate_masks(mask, threshold=0.025):
"""
Returns a list of segmentation masks each of the same dimension as the input one,
but where they each have exactly one segment in them and all other samples in them
are zeroed.
Only bothers to return segments that are larger in total area than `threshold * mask.size`.
"""
try:
ncpus = multiprocessing.cpu_count()
except NotImplementedError:
ncpus = 2
with multiprocessing.Pool(processes=ncpus) as pool:
mask_ids = [id for id in np.unique(mask) if id != 0]
| python | {
"resource": ""
} |
q272059 | _downsample_one_or_the_other | test | def _downsample_one_or_the_other(mask, mask_indexes, stft, stft_indexes):
"""
Takes the given `mask` and `stft`, which must be matrices of shape `frequencies, times`
and downsamples one of them into the other one's times, so that the time dimensions
are equal. Leaves the frequency dimension untouched.
"""
assert len(mask.shape) == 2, "Expected a two-dimensional `mask`, but got one of {} dimensions.".format(len(mask.shape))
| python | {
"resource": ""
} |
q272060 | _asa_task | test | def _asa_task(q, masks, stft, sample_width, frame_rate, nsamples_for_each_fft):
"""
Worker for the ASA algorithm's multiprocessing step.
"""
# Convert each mask to (1 or 0) rather than (ID or 0)
for mask in masks:
mask = np.where(mask > 0, 1, 0)
# Multiply the masks against STFTs
masks = [mask * stft for mask in masks]
nparrs = []
dtype_dict = {1: | python | {
"resource": ""
} |
q272061 | bandpass_filter | test | def bandpass_filter(data, low, high, fs, order=5):
"""
Does a bandpass filter over the given data.
:param data: The data (numpy array) to be filtered.
:param low: The low cutoff in Hz.
:param high: The high cutoff in Hz.
:param fs: The sample rate (in Hz) of the data.
:param order: The order of the filter. The higher the order, the tighter the roll-off.
| python | {
"resource": ""
} |
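The body is cut off entirely; a common way to satisfy this docstring is a Butterworth design via scipy.signal (the filter family is an assumption):

from scipy import signal

def bandpass_filter(data, low, high, fs, order=5):
    nyq = 0.5 * fs  # Nyquist frequency
    # Normalize cutoffs to [0, 1] of Nyquist, as scipy expects.
    b, a = signal.butter(order, [low / nyq, high / nyq], btype='band')
    return signal.lfilter(b, a, data)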
q272062 | lowpass_filter | test | def lowpass_filter(data, cutoff, fs, order=5):
"""
Does a lowpass filter over the given data.
:param data: The data (numpy array) to be filtered.
:param cutoff: The high cutoff in Hz.
:param fs: The sample rate in Hz of the data.
:param order: The order of the filter. The higher the order, the tighter the roll-off.
:returns: Filtered data (numpy array).
| python | {
"resource": ""
} |
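As with the bandpass case above, a Butterworth sketch fits the docstring (again an assumption about the filter family):

from scipy import signal

def lowpass_filter(data, cutoff, fs, order=5):
    nyq = 0.5 * fs  # Nyquist frequency
    b, a = signal.butter(order, cutoff / nyq, btype='low')
    return signal.lfilter(b, a, data)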
q272063 | list_to_tf_input | test | def list_to_tf_input(data, response_index, num_outcomes):
"""
Separates the outcome feature from the data and creates the onehot vector for each row.
"""
matrix = np.matrix([row[:response_index] + | python | {
"resource": ""
} |
q272064 | expand_and_standardize_dataset | test | def expand_and_standardize_dataset(response_index, response_header, data_set, col_vals, headers, standardizers, feats_to_ignore, columns_to_expand, outcome_trans_dict):
"""
Standardizes continuous features and expands categorical features.
"""
# expand and standardize
modified_set = []
for row_index, row in enumerate(data_set):
new_row = []
for col_index, val in enumerate(row):
header = headers[col_index]
# Outcome feature -> index outcome
if col_index == response_index:
new_outcome = outcome_trans_dict[val]
new_row.append(new_outcome)
# Ignored feature -> pass
elif header in feats_to_ignore:
pass
# Categorical feature -> create new binary column for each possible value of the column
elif header in columns_to_expand:
for poss_val in col_vals[header]:
if val == poss_val:
new_cat_val = 1.0
else:
new_cat_val = -1.0
new_row.append(new_cat_val)
# Continuous feature -> standardize value with respect to its column
else:
new_cont_val = | python | {
"resource": ""
} |
q272065 | equal_ignore_order | test | def equal_ignore_order(a, b):
"""
Used to check whether the two edge lists have the same edges
when elements are neither hashable nor sortable.
"""
unmatched = list(b)
for element in a:
| python | {
"resource": ""
} |
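The classic recipe for this problem completes the truncated loop: remove each matched element from a working copy, relying on list.remove's ==-based lookup, which needs neither hashing nor ordering. A sketch:

def equal_ignore_order(a, b):
    unmatched = list(b)
    for element in a:
        try:
            unmatched.remove(element)
        except ValueError:
            return False  # element of a has no match left in b
    return not unmatched  # equal only if nothing in b was left unmatched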
q272066 | group_audit_ranks | test | def group_audit_ranks(filenames, measurer, similarity_bound=0.05):
"""
Given a list of audit files, rank them using the `measurer` and
return the features that never deviate more than `similarity_bound`
across repairs.
"""
def _partition_groups(feature_scores):
groups = []
for feature, score in feature_scores:
added_to_group = False
# Check to see if the feature belongs in a group with any other features.
for i, group in enumerate(groups):
mean_score, group_feature_scores = group
if abs(mean_score - score) < similarity_bound:
groups[i][1].append( (feature, score) )
# Recalculate the representative mean.
groups[i][0] = sum([s for _, s in group_feature_scores])/len(group_feature_scores)
added_to_group = True
| python | {
"resource": ""
} |
q272067 | load_audit_confusion_matrices | test | def load_audit_confusion_matrices(filename):
"""
Loads a confusion matrix in a two-level dictionary format.
For example, the confusion matrix of a 75%-accurate model
that predicted 15 values (and mis-classified 5) may look like:
{"A": {"A":10, "B": 5}, "B": {"B":5}}
Note that raw boolean values are translated into strings, such that
a value that was the boolean True will be returned as the string "True".
"""
with open(filename) as audit_file:
audit_file.next() # Skip the first line.
# Extract the confusion matrices and repair levels from the audit file.
confusion_matrices = []
for line in audit_file:
separator = ":"
separator_index = line.index(separator)
comma_index = line.index(',')
| python | {
"resource": ""
} |
q272068 | list_to_tf_input | test | def list_to_tf_input(data, response_index, num_outcomes):
"""
Separates the outcome feature from the data.
"""
matrix = np.matrix([row[:response_index] + row[response_index+1:] for row in data])
| python | {
"resource": ""
} |
q272069 | PackagesStatusDetector._update_index_url_from_configs | test | def _update_index_url_from_configs(self):
""" Checks for alternative index-url in pip.conf """
if 'VIRTUAL_ENV' in os.environ:
self.pip_config_locations.append(os.path.join(os.environ['VIRTUAL_ENV'], 'pip.conf'))
self.pip_config_locations.append(os.path.join(os.environ['VIRTUAL_ENV'], 'pip.ini'))
if site_config_files:
self.pip_config_locations.extend(site_config_files)
index_url = None
custom_config = None
if 'PIP_INDEX_URL' in os.environ and os.environ['PIP_INDEX_URL']:
# environ variable takes priority
index_url = os.environ['PIP_INDEX_URL']
custom_config = 'PIP_INDEX_URL environment variable'
else:
for pip_config_filename in self.pip_config_locations:
if pip_config_filename.startswith('~'):
pip_config_filename = os.path.expanduser(pip_config_filename)
if os.path.isfile(pip_config_filename):
config = ConfigParser()
config.read([pip_config_filename])
| python | {
"resource": ""
} |
q272070 | RequirementsDetector.autodetect_files | test | def autodetect_files(self):
""" Attempt to detect requirements files in the current working directory """
if self._is_valid_requirements_file('requirements.txt'):
self.filenames.append('requirements.txt')
if self._is_valid_requirements_file('requirements.pip'): # pragma: nocover
self.filenames.append('requirements.pip')
if os.path.isdir('requirements'):
| python | {
"resource": ""
} |
q272071 | resolve_streams | test | def resolve_streams(wait_time=1.0):
"""Resolve all streams on the network.
This function returns all currently available streams from any outlet on
the network. The network is usually the subnet specified at the local
router, but may also include a group of machines visible to each other via
multicast packets (given that the network supports it), or list of
hostnames. These details may optionally be customized by the experimenter
in a configuration file (see Network Connectivity in the LSL wiki).
| python | {
"resource": ""
} |
q272072 | resolve_byprop | test | def resolve_byprop(prop, value, minimum=1, timeout=FOREVER):
"""Resolve all streams with a specific value for a given property.
If the goal is to resolve a specific stream, this method is preferred over
resolving all streams and then selecting the desired one.
Keyword arguments:
prop -- The StreamInfo property that should have a specific value (e.g.,
"name", "type", "source_id", or "desc/manufaturer").
value -- The string value that the property should have (e.g., "EEG" as
the type property).
minimum -- Return at least this many streams. (default 1)
timeout -- Optionally a timeout of the operation, in seconds. If the
timeout expires, less than the desired number of streams
(possibly none) will be returned. (default FOREVER)
| python | {
"resource": ""
} |
q272073 | resolve_bypred | test | def resolve_bypred(predicate, minimum=1, timeout=FOREVER):
"""Resolve all streams that match a given predicate.
Advanced query that allows to impose more conditions on the retrieved
streams; the given string is an XPath 1.0 predicate for the <description>
node (omitting the surrounding []'s), see also
http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951.
Keyword arguments:
predicate -- The predicate string, e.g. "name='BioSemi'" or
"type='EEG' and starts-with(name,'BioSemi') and
count(description/desc/channels/channel)=32"
minimum -- Return at least this many streams. (default 1)
timeout -- Optionally a timeout of the operation, in seconds. If the
timeout expires, less than the desired number of streams
(possibly none) | python | {
"resource": ""
} |
q272074 | handle_error | test | def handle_error(errcode):
"""Error handler function. Translates an error code into an exception."""
if type(errcode) is c_int:
errcode = errcode.value
if errcode == 0:
pass # no error
elif errcode == -1:
raise TimeoutError("the operation failed due to a timeout.")
elif errcode == -2:
raise LostError("the stream has been lost.")
elif errcode == -3: | python | {
"resource": ""
} |
q272075 | StreamOutlet.push_sample | test | def push_sample(self, x, timestamp=0.0, pushthrough=True):
"""Push a sample into the outlet.
Each entry in the list corresponds to one channel.
Keyword arguments:
x -- A list of values to push (one per channel).
timestamp -- Optionally the capture time of the sample, in agreement
with local_clock(); if omitted, the current
time is used. (default 0.0)
pushthrough -- Whether to push the sample through to the receivers
instead of buffering it with subsequent samples.
Note that the chunk_size, if specified at outlet
construction, takes precedence over the pushthrough flag.
(default True)
"""
if len(x) == self.channel_count:
| python | {
"resource": ""
} |
q272076 | StreamOutlet.push_chunk | test | def push_chunk(self, x, timestamp=0.0, pushthrough=True):
"""Push a list of samples into the outlet.
samples -- A list of samples, either as a list of lists or a list of
multiplexed values.
timestamp -- Optionally the capture time of the most recent sample, in
agreement with local_clock(); if omitted, the current
time is used. The time stamps of other samples are
automatically derived according to the sampling rate of
the stream. (default 0.0)
pushthrough -- Whether to push the chunk through to the receivers instead
of buffering it with subsequent samples. Note that the
chunk_size, if specified at outlet construction, takes
precedence over the pushthrough flag. (default True)
"""
try:
n_values = self.channel_count * len(x)
data_buff = (self.value_type * n_values).from_buffer(x)
handle_error(self.do_push_chunk(self.obj, data_buff,
c_long(n_values),
c_double(timestamp),
c_int(pushthrough)))
except TypeError:
if len(x):
if type(x[0]) is list:
x = [v for sample in x for v in sample]
| python | {
"resource": ""
} |
q272077 | StreamInlet.info | test | def info(self, timeout=FOREVER):
"""Retrieve the complete information of the given stream.
This includes the extended description. Can be invoked at any time of
the stream's lifetime.
Keyword arguments:
timeout -- Timeout of the operation. (default | python | {
"resource": ""
} |
q272078 | StreamInlet.open_stream | test | def open_stream(self, timeout=FOREVER):
"""Subscribe to the data stream.
All samples pushed in at the other end from this moment onwards will be
queued and eventually be delivered in response to pull_sample() or
pull_chunk() calls. Pulling a sample without some preceding open_stream
is permitted (the stream will then be opened implicitly).
Keyword arguments:
timeout -- Optional timeout of the operation (default FOREVER).
Throws | python | {
"resource": ""
} |
q272079 | StreamInlet.time_correction | test | def time_correction(self, timeout=FOREVER):
"""Retrieve an estimated time correction offset for the given stream.
The first call to this function takes several milliseconds until a
reliable first estimate is obtained. Subsequent calls are instantaneous
(and rely on periodic background updates). The precision of these
estimates should be below 1 ms (empirically within +/-0.2 ms).
Keyword arguments:
timeout -- Timeout to acquire the first time-correction estimate
(default FOREVER).
Returns the current time correction estimate. This is the number that | python | {
"resource": ""
} |
q272080 | XMLElement.child | test | def child(self, name):
"""Get a child with a specified name."""
| python | {
"resource": ""
} |
q272081 | XMLElement.next_sibling | test | def next_sibling(self, name=None):
"""Get the next sibling in the children list of the parent node.
If a name is provided, the next sibling with the given name is returned.
"""
if name is None:
| python | {
"resource": ""
} |
q272082 | XMLElement.previous_sibling | test | def previous_sibling(self, name=None):
"""Get the previous sibling in the children list of the parent node.
If a name is provided, the previous sibling with the given name is
returned.
"""
if name is None:
| python | {
"resource": ""
} |
q272083 | XMLElement.set_name | test | def set_name(self, name):
"""Set the element's name. Returns False | python | {
"resource": ""
} |
q272084 | XMLElement.set_value | test | def set_value(self, value):
"""Set the element's value. Returns False | python | {
"resource": ""
} |
q272085 | XMLElement.append_child | test | def append_child(self, name):
"""Append a child element with the specified name."""
| python | {
"resource": ""
} |
q272086 | XMLElement.prepend_child | test | def prepend_child(self, name):
"""Prepend a child element with the specified name."""
| python | {
"resource": ""
} |
q272087 | XMLElement.append_copy | test | def append_copy(self, elem):
"""Append a copy of the specified element as a child."""
| python | {
"resource": ""
} |
q272088 | XMLElement.prepend_copy | test | def prepend_copy(self, elem):
"""Prepend a copy of the specified element as a child."""
| python | {
"resource": ""
} |
q272089 | XMLElement.remove_child | test | def remove_child(self, rhs):
"""Remove a given child element, specified by name or as element."""
if type(rhs) is XMLElement:
| python | {
"resource": ""
} |
q272090 | ContinuousResolver.results | test | def results(self):
"""Obtain the set of currently present streams on the network.
Returns a list of matching StreamInfo objects (with empty desc
field), any of which can subsequently be used to open an inlet.
"""
# noinspection PyCallingNonCallable
| python | {
"resource": ""
} |
q272091 | pair | test | def pair(cmd, word):
"""See all token associated with a given token.
PAIR lilas"""
word = list(preprocess_query(word))[0]
key = pair_key(word)
tokens = [t.decode() for t in DB.smembers(key)] | python | {
"resource": ""
} |
q272092 | do_AUTOCOMPLETE | test | def do_AUTOCOMPLETE(cmd, s):
"""Shows autocomplete results for a given token."""
s = list(preprocess_query(s))[0]
keys = [k.decode() for k | python | {
"resource": ""
} |
q272093 | compute_edge_ngrams | test | def compute_edge_ngrams(token, min=None):
"""Compute edge ngram of token from min. Does not include token itself."""
if min is None:
min = | python | {
"resource": ""
} |
q272094 | iter_pipe | test | def iter_pipe(pipe, processors):
"""Allow for iterators to return either an item or an iterator of items."""
if isinstance(pipe, str):
pipe | python | {
"resource": ""
} |
q272095 | ChunkedPool.imap_unordered | test | def imap_unordered(self, func, iterable, chunksize):
"""Customized version of imap_unordered.
Directly send chunks to func, instead of iterating in each process and
sending one by one.
Original:
https://hg.python.org/cpython/file/tip/Lib/multiprocessing/pool.py#l271
Other tried options:
- map_async: makes a list(iterable), so it loads all the data for each
process into RAM
- apply_async: needs manual chunking
"""
assert self._state == RUN
| python | {
"resource": ""
} |
q272096 | make_fuzzy | test | def make_fuzzy(word, max=1):
"""Naive neighborhoods algo."""
# inversions
neighbors = []
for i in range(0, len(word) - 1):
neighbor = list(word)
neighbor[i], neighbor[i+1] = neighbor[i+1], neighbor[i]
neighbors.append(''.join(neighbor))
# substitutions
for letter in string.ascii_lowercase:
for i in range(0, len(word)):
neighbor = list(word)
if letter != neighbor[i]:
neighbor[i] = letter
neighbors.append(''.join(neighbor))
# insertions
for letter in string.ascii_lowercase:
| python | {
"resource": ""
} |
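The insertions branch is cut off above; a self-contained sketch of just that step (the full original may also handle deletions and recursion for max > 1):

import string

def make_fuzzy_insertions(word):
    neighbors = []
    for letter in string.ascii_lowercase:
        # Insert the letter at every position, including both ends.
        for i in range(0, len(word) + 1):
            neighbor = list(word)
            neighbor.insert(i, letter)
            neighbors.append(''.join(neighbor))
    return neighbors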
q272097 | do_fuzzy | test | def do_fuzzy(self, word):
"""Compute fuzzy extensions of word.
FUZZY lilas"""
| python | {
"resource": ""
} |
q272098 | do_fuzzyindex | test | def do_fuzzyindex(self, word):
"""Compute fuzzy extensions of word that exist in index.
FUZZYINDEX lilas"""
word = list(preprocess_query(word))[0]
token = Token(word)
neighbors = make_fuzzy(token)
neighbors = [(n, DB.zcard(dbkeys.token_key(n))) for n in | python | {
"resource": ""
} |
q272099 | extend_results_extrapoling_relations | test | def extend_results_extrapoling_relations(helper):
"""Try to extract the bigger group of interlinked tokens.
Should generally be used at last in the collectors chain.
"""
if not helper.bucket_dry:
return # No need.
tokens = set(helper.meaningful + helper.common)
for relation in _extract_manytomany_relations(tokens):
| python | {
"resource": ""
} |