_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q272000 | login | test | def login(provider_name):
"""
Login handler, must accept both GET and POST to be able to use OpenID.
"""
# We need response object for the WerkzeugAdapter.
response = make_response()
# Log the user in, pass it the adapter and the provider name.
result = authomatic.login(
WerkzeugAdapter(
request,
response),
provider_name)
# If there is no LoginResult object, the login procedure is still pending.
if result:
if result.user:
# We need to update the user to get more info.
result.user.update()
# The rest happens inside the template.
return render_template('login.html', result=result)
# Don't forget to return the response.
return response | python | {
"resource": ""
} |
q272001 | normalize_dict | test | def normalize_dict(dict_):
"""
Replaces all values that are single-item iterables with the value of its
index 0.
:param dict dict_:
Dictionary to normalize.
:returns:
Normalized dictionary.
"""
return dict([(k, v[0] if not isinstance(v, str) and len(v) == 1 else v)
for k, v in list(dict_.items())]) | python | {
"resource": ""
} |
q272002 | items_to_dict | test | def items_to_dict(items):
"""
Converts list of tuples to dictionary with duplicate keys converted to
lists.
:param list items:
List of tuples.
:returns:
:class:`dict`
"""
res = collections.defaultdict(list)
for k, v in items:
res[k].append(v)
return normalize_dict(dict(res)) | python | {
"resource": ""
} |
q272003 | json_qs_parser | test | def json_qs_parser(body):
"""
Parses response body from JSON, XML or query string.
:param body:
string
:returns:
:class:`dict`, :class:`list` if input is JSON or query string,
:class:`xml.etree.ElementTree.Element` if XML.
"""
try:
# Try JSON first.
return json.loads(body)
except (OverflowError, TypeError, ValueError):
pass
try:
# Then XML.
return ElementTree.fromstring(body)
except (ElementTree.ParseError, TypeError, ValueError):
pass
# Finally query string.
return dict(parse.parse_qsl(body)) | python | {
"resource": ""
} |
q272004 | resolve_provider_class | test | def resolve_provider_class(class_):
"""
Returns a provider class.
:param class_name: :class:`string` or
:class:`authomatic.providers.BaseProvider` subclass.
"""
if isinstance(class_, str):
# prepare path for authomatic.providers package
path = '.'.join([__package__, 'providers', class_])
# try to import class by string from providers module or by fully
# qualified path
return import_string(class_, True) or import_string(path)
else:
return class_ | python | {
"resource": ""
} |
q272005 | Session.create_cookie | test | def create_cookie(self, delete=None):
"""
Creates the value for ``Set-Cookie`` HTTP header.
:param bool delete:
If ``True`` the cookie value will be ``deleted`` and the
Expires value will be ``Thu, 01-Jan-1970 00:00:01 GMT``.
"""
value = 'deleted' if delete else self._serialize(self.data)
split_url = parse.urlsplit(self.adapter.url)
domain = split_url.netloc.split(':')[0]
# Work-around for issue #11, failure of WebKit-based browsers to accept
# cookies set as part of a redirect response in some circumstances.
if '.' not in domain:
template = '{name}={value}; Path={path}; HttpOnly{secure}{expires}'
else:
template = ('{name}={value}; Domain={domain}; Path={path}; '
'HttpOnly{secure}{expires}')
return template.format(
name=self.name,
value=value,
domain=domain,
path=split_url.path,
secure='; Secure' if self.secure else '',
expires='; Expires=Thu, 01-Jan-1970 00:00:01 GMT' if delete else ''
) | python | {
"resource": ""
} |
q272006 | Session.save | test | def save(self):
"""
Adds the session cookie to headers.
"""
if self.data:
cookie = self.create_cookie()
cookie_len = len(cookie)
if cookie_len > 4093:
raise SessionError('Cookie too long! The cookie size {0} '
'is more than 4093 bytes.'
.format(cookie_len))
self.adapter.set_header('Set-Cookie', cookie)
# Reset data
self._data = {} | python | {
"resource": ""
} |
q272007 | Session._get_data | test | def _get_data(self):
"""
Extracts the session data from cookie.
"""
cookie = self.adapter.cookies.get(self.name)
return self._deserialize(cookie) if cookie else {} | python | {
"resource": ""
} |
q272008 | Session.data | test | def data(self):
"""
Gets session data lazily.
"""
if not self._data:
self._data = self._get_data()
# Always return a dict, even if deserialization returned nothing
if self._data is None:
self._data = {}
return self._data | python | {
"resource": ""
} |
q272009 | Session._signature | test | def _signature(self, *parts):
"""
Creates signature for the session.
"""
signature = hmac.new(six.b(self.secret), digestmod=hashlib.sha1)
signature.update(six.b('|'.join(parts)))
return signature.hexdigest() | python | {
"resource": ""
} |
q272010 | Session._serialize | test | def _serialize(self, value):
"""
Converts the value to a signed string with timestamp.
:param value:
Object to be serialized.
:returns:
Serialized value.
"""
# data = copy.deepcopy(value)
data = value
# 1. Serialize
serialized = pickle.dumps(data).decode('latin-1')
# 2. Encode
# Percent encoding produces smaller result than urlsafe base64.
encoded = parse.quote(serialized, '')
# 3. Concatenate
timestamp = str(int(time.time()))
signature = self._signature(self.name, encoded, timestamp)
concatenated = '|'.join([encoded, timestamp, signature])
return concatenated | python | {
"resource": ""
} |
q272011 | Credentials.valid | test | def valid(self):
"""
``True`` if credentials are valid, ``False`` if expired.
"""
if self.expiration_time:
return self.expiration_time > int(time.time())
else:
return True | python | {
"resource": ""
} |
q272012 | Credentials.expire_soon | test | def expire_soon(self, seconds):
"""
Returns ``True`` if credentials expire sooner than specified.
:param int seconds:
Number of seconds.
:returns:
``True`` if credentials expire sooner than specified,
else ``False``.
"""
if self.expiration_time:
return self.expiration_time < int(time.time()) + int(seconds)
else:
return False | python | {
"resource": ""
} |
q272013 | Credentials.serialize | test | def serialize(self):
"""
Converts the credentials to a percent encoded string to be stored for
later use.
:returns:
:class:`string`
"""
if self.provider_id is None:
raise ConfigError(
'To serialize credentials you need to specify a '
'unique integer under the "id" key in the config '
'for each provider!')
# Get the provider type specific items.
rest = self.provider_type_class().to_tuple(self)
# Provider ID and provider type ID are always the first two items.
result = (self.provider_id, self.provider_type_id) + rest
# Make sure that all items are strings.
stringified = [str(i) for i in result]
# Concatenate by newline.
concatenated = '\n'.join(stringified)
# Percent encode.
return parse.quote(concatenated, '') | python | {
"resource": ""
} |
q272014 | Response.is_binary_string | test | def is_binary_string(content):
"""
Return true if string is binary data.
"""
textchars = (bytearray([7, 8, 9, 10, 12, 13, 27]) +
bytearray(range(0x20, 0x100)))
return bool(content.translate(None, textchars)) | python | {
"resource": ""
} |
q272015 | Response.content | test | def content(self):
"""
The whole response content.
"""
if not self._content:
content = self.httplib_response.read()
if self.is_binary_string(content):
self._content = content
else:
self._content = content.decode('utf-8')
return self._content | python | {
"resource": ""
} |
q272016 | OAuth1.create_request_elements | test | def create_request_elements(
cls, request_type, credentials, url, params=None, headers=None,
body='', method='GET', verifier='', callback=''
):
"""
Creates |oauth1| request elements.
"""
params = params or {}
headers = headers or {}
consumer_key = credentials.consumer_key or ''
consumer_secret = credentials.consumer_secret or ''
token = credentials.token or ''
token_secret = credentials.token_secret or ''
# separate url base and query parameters
url, base_params = cls._split_url(url)
# add extracted params to future params
params.update(dict(base_params))
if request_type == cls.USER_AUTHORIZATION_REQUEST_TYPE:
# no need for signature
if token:
params['oauth_token'] = token
else:
raise OAuth1Error(
'Credentials with valid token are required to create '
'User Authorization URL!')
else:
# signature needed
if request_type == cls.REQUEST_TOKEN_REQUEST_TYPE:
# Request Token URL
if consumer_key and consumer_secret and callback:
params['oauth_consumer_key'] = consumer_key
params['oauth_callback'] = callback
else:
raise OAuth1Error(
'Credentials with valid consumer_key, consumer_secret '
'and callback are required to create Request Token '
'URL!')
elif request_type == cls.ACCESS_TOKEN_REQUEST_TYPE:
# Access Token URL
if consumer_key and consumer_secret and token and verifier:
params['oauth_token'] = token
params['oauth_consumer_key'] = consumer_key
params['oauth_verifier'] = verifier
else:
raise OAuth1Error(
'Credentials with valid consumer_key, '
'consumer_secret, token and argument verifier'
' are required to create Access Token URL!')
elif request_type == cls.PROTECTED_RESOURCE_REQUEST_TYPE:
# Protected Resources URL
if consumer_key and consumer_secret and token and token_secret:
params['oauth_token'] = token
params['oauth_consumer_key'] = consumer_key
else:
raise OAuth1Error(
'Credentials with valid consumer_key, ' +
'consumer_secret, token and token_secret are required '
'to create Protected Resources URL!')
# Sign request.
# http://oauth.net/core/1.0a/#anchor13
# Prepare parameters for signature base string
# http://oauth.net/core/1.0a/#rfc.section.9.1
params['oauth_signature_method'] = cls._signature_generator.method
params['oauth_timestamp'] = str(int(time.time()))
params['oauth_nonce'] = cls.csrf_generator(str(uuid.uuid4()))
params['oauth_version'] = '1.0'
# add signature to params
params['oauth_signature'] = cls._signature_generator.create_signature( # noqa
method, url, params, consumer_secret, token_secret)
request_elements = core.RequestElements(
url, method, params, headers, body)
return cls._x_request_elements_filter(
request_type, request_elements, credentials) | python | {
"resource": ""
} |
q272017 | Bitbucket._access_user_info | test | def _access_user_info(self):
"""
Email is available in separate method so second request is needed.
"""
response = super(Bitbucket, self)._access_user_info()
response.data.setdefault("email", None)
email_response = self.access(self.user_email_url)
if email_response.data:
for item in email_response.data:
if item.get("primary", False):
response.data.update(email=item.get("email", None))
return response | python | {
"resource": ""
} |
q272018 | FlaskAuthomatic.login | test | def login(self, *login_args, **login_kwargs):
"""
Decorator for Flask view functions.
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
self.response = make_response()
adapter = WerkzeugAdapter(request, self.response)
login_kwargs.setdefault('session', session)
login_kwargs.setdefault('session_saver', self.session_saver)
self.result = super(FlaskAuthomatic, self).login(
adapter,
*login_args,
**login_kwargs)
return f(*args, **kwargs)
return decorated
return decorator | python | {
"resource": ""
} |
q272019 | GAEOpenID.login | test | def login(self):
"""
Launches the OpenID authentication procedure.
"""
if self.params.get(self.identifier_param):
# =================================================================
# Phase 1 before redirect.
# =================================================================
self._log(
logging.INFO,
u'Starting OpenID authentication procedure.')
url = users.create_login_url(
dest_url=self.url, federated_identity=self.identifier)
self._log(logging.INFO, u'Redirecting user to {0}.'.format(url))
self.redirect(url)
else:
# =================================================================
# Phase 2 after redirect.
# =================================================================
self._log(
logging.INFO,
u'Continuing OpenID authentication procedure after redirect.')
user = users.get_current_user()
if user:
self._log(logging.INFO, u'Authentication successful.')
self._log(logging.INFO, u'Creating user.')
self.user = core.User(self,
id=user.federated_identity(),
email=user.email(),
gae_user=user)
# =============================================================
# We're done
# =============================================================
else:
raise FailureError(
'Unable to authenticate identifier "{0}"!'.format(
self.identifier)) | python | {
"resource": ""
} |
q272020 | BaseProvider._session_key | test | def _session_key(self, key):
"""
Generates session key string.
:param str key:
e.g. ``"authomatic:facebook:key"``
"""
return '{0}:{1}:{2}'.format(self.settings.prefix, self.name, key) | python | {
"resource": ""
} |
q272021 | BaseProvider._session_set | test | def _session_set(self, key, value):
"""
Saves a value to session.
"""
self.session[self._session_key(key)] = value | python | {
"resource": ""
} |
q272022 | BaseProvider.csrf_generator | test | def csrf_generator(secret):
"""
Generates CSRF token.
Inspired by this article:
http://blog.ptsecurity.com/2012/10/random-number-security-in-python.html
:returns:
:class:`str` Random unguessable string.
"""
# Create hash from random string plus salt.
hashed = hashlib.md5(uuid.uuid4().bytes + six.b(secret)).hexdigest()
# Each time return random portion of the hash.
span = 5
shift = random.randint(0, span)
return hashed[shift:shift - span - 1] | python | {
"resource": ""
} |
q272023 | BaseProvider._log | test | def _log(cls, level, msg, **kwargs):
"""
Logs a message with pre-formatted prefix.
:param int level:
Logging level as specified in the
`logging module <http://docs.python.org/2/library/logging.html>`_ of
Python standard library.
:param str msg:
The actual message.
"""
logger = getattr(cls, '_logger', None) or authomatic.core._logger
logger.log(
level, ': '.join(
('authomatic', cls.__name__, msg)), **kwargs) | python | {
"resource": ""
} |
q272024 | BaseProvider._http_status_in_category | test | def _http_status_in_category(status, category):
"""
Checks whether a HTTP status code is in the category denoted by the
hundreds digit.
"""
assert category < 10, 'HTTP status category must be a one-digit int!'
cat = category * 100
return status >= cat and status < cat + 100 | python | {
"resource": ""
} |
q272025 | AuthorizationProvider._split_url | test | def _split_url(url):
"""
Splits given url to url base and params converted to list of tuples.
"""
split = parse.urlsplit(url)
base = parse.urlunsplit((split.scheme, split.netloc, split.path, 0, 0))
params = parse.parse_qsl(split.query, True)
return base, params | python | {
"resource": ""
} |
q272026 | cross_origin | test | def cross_origin(app, *args, **kwargs):
"""
This function is the decorator which is used to wrap a Sanic route with.
In the simplest case, simply use the default parameters to allow all
origins in what is the most permissive configuration. If this method
modifies state or performs authentication which may be brute-forced, you
should add some degree of protection, such as Cross Site Forgery
Request protection.
:param origins:
The origin, or list of origins to allow requests from.
The origin(s) may be regular expressions, case-sensitive strings,
or else an asterisk
Default : '*'
:type origins: list, string or regex
:param methods:
The method or list of methods which the allowed origins are allowed to
access for non-simple requests.
Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]
:type methods: list or string
:param expose_headers:
The header or list which are safe to expose to the API of a CORS API
specification.
Default : None
:type expose_headers: list or string
:param allow_headers:
The header or list of header field names which can be used when this
resource is accessed by allowed origins. The header(s) may be regular
expressions, case-sensitive strings, or else an asterisk.
Default : '*', allow all headers
:type allow_headers: list, string or regex
:param supports_credentials:
Allows users to make authenticated requests. If true, injects the
`Access-Control-Allow-Credentials` header in responses. This allows
cookies and credentials to be submitted across domains.
:note: This option cannot be used in conjunction with a '*' origin
Default : False
:type supports_credentials: bool
:param max_age:
The maximum time for which this CORS request may be cached. This value
is set as the `Access-Control-Max-Age` header.
Default : None
:type max_age: timedelta, integer, string or None
:param send_wildcard: If True, and the origins parameter is `*`, a wildcard
`Access-Control-Allow-Origin` header is sent, rather than the
request's `Origin` header.
Default : False
:type send_wildcard: bool
:param vary_header:
If True, the header Vary: Origin will be returned as per the W3
implementation guidelines.
Setting this header when the `Access-Control-Allow-Origin` is
dynamically generated (e.g. when there is more than one allowed
origin, and an Origin other than '*' is returned) informs CDNs and other
caches that the CORS headers are dynamic, and cannot be cached.
If False, the Vary header will never be injected or altered.
Default : True
:type vary_header: bool
:param automatic_options:
Only applies to the `cross_origin` decorator. If True, Sanic-CORS will
override Sanic's default OPTIONS handling to return CORS headers for
OPTIONS requests.
Default : True
:type automatic_options: bool
"""
_options = kwargs
_real_decorator = cors.decorate(app, *args, run_middleware=False, with_context=False, **kwargs)
def wrapped_decorator(f):
spf = SanicPluginsFramework(app) # get the singleton from the app
try:
plugin = spf.register_plugin(cors, skip_reg=True)
except ValueError as e:
# this is normal, if this plugin has been registered previously
assert e.args and len(e.args) > 1
plugin = e.args[1]
context = cors.get_context_from_spf(spf)
log = context.log
log(logging.DEBUG, "Enabled {:s} for cross_origin using options: {}".format(str(f), str(_options)))
return _real_decorator(f)
return wrapped_decorator | python | {
"resource": ""
} |
q272027 | set_cors_headers | test | def set_cors_headers(req, resp, context, options):
"""
Performs the actual evaluation of Sanic-CORS options and actually
modifies the response object.
This function is used both in the decorator and the after_request
callback
:param sanic.request.Request req:
"""
try:
request_context = context.request[id(req)]
except AttributeError:
LOG.debug("Cannot find the request context. Is request already finished?")
return resp
# If CORS has already been evaluated via the decorator, skip
evaluated = request_context.get(SANIC_CORS_EVALUATED, False)
if evaluated:
LOG.debug('CORS have been already evaluated, skipping')
return resp
# `resp` can be None in the case of using Websockets
# however this case should have been handled in the `extension` and `decorator` methods
# before getting here. This is a final failsafe check to prevent crashing
if resp is None:
return None
if resp.headers is None:
resp.headers = CIMultiDict()
headers_to_set = get_cors_headers(options, req.headers, req.method)
LOG.debug('Settings CORS headers: %s', str(headers_to_set))
# dict .extend() does not work on CIDict so
# iterate over them and add them individually.
try:
resp.headers.extend(headers_to_set)
except Exception as e1:
for k, v in headers_to_set.items():
try:
resp.headers.add(k, v)
except Exception as e2:
resp.headers[k] = v
return resp | python | {
"resource": ""
} |
q272028 | get_app_kwarg_dict | test | def get_app_kwarg_dict(appInstance):
"""Returns the dictionary of CORS specific app configurations."""
# In order to support blueprints which do not have a config attribute
app_config = getattr(appInstance, 'config', {})
return dict(
(k.lower().replace('cors_', ''), app_config.get(k))
for k in CONFIG_OPTIONS
if app_config.get(k) is not None
) | python | {
"resource": ""
} |
q272029 | flexible_str | test | def flexible_str(obj):
"""
A more flexible str function which intelligently handles stringifying
strings, lists and other iterables. The results are lexographically sorted
to ensure generated responses are consistent when iterables such as Set
are used.
"""
if obj is None:
return None
elif(not isinstance(obj, str)
and isinstance(obj, collections.abc.Iterable)):
return ', '.join(str(item) for item in sorted(obj))
else:
return str(obj) | python | {
"resource": ""
} |
q272030 | ensure_iterable | test | def ensure_iterable(inst):
"""
Wraps scalars or string types as a list, or returns the iterable instance.
"""
if isinstance(inst, str):
return [inst]
elif not isinstance(inst, collections.abc.Iterable):
return [inst]
else:
return inst | python | {
"resource": ""
} |
q272031 | isclose | test | def isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0):
"""
Python 3.4 does not have math.isclose, so we need to steal it and add it here.
"""
try:
return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
except AttributeError:
# Running on older version of python, fall back to hand-rolled implementation
if (rel_tol < 0.0) or (abs_tol < 0.0):
raise ValueError("Tolerances must be non-negative, but are rel_tol: {} and abs_tol: {}".format(rel_tol, abs_tol))
if math.isnan(a) or math.isnan(b):
return False # NaNs are never close to anything, even other NaNs
if (a == b):
return True
if math.isinf(a) or math.isinf(b):
return False # Infinity is only close to itself, and we already handled that case
diff = abs(a - b)
return (diff <= rel_tol * abs(b)) or (diff <= rel_tol * abs(a)) or (diff <= abs_tol) | python | {
"resource": ""
} |
q272032 | deprecated | test | def deprecated(func):
"""
Deprecator decorator.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.warn("Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return new_func | python | {
"resource": ""
} |
q272033 | deserialize | test | def deserialize(bstr):
"""
Attempts to deserialize a bytestring into an audiosegment.
:param bstr: The bytestring serialized via an audiosegment's serialize() method.
:returns: An AudioSegment object deserialized from `bstr`.
"""
d = pickle.loads(bstr)
seg = pickle.loads(d['seg'])
return AudioSegment(seg, d['name']) | python | {
"resource": ""
} |
q272034 | from_file | test | def from_file(path):
"""
Returns an AudioSegment object from the given file based on its file extension.
If the extension is wrong, this will throw some sort of error.
:param path: The path to the file, including the file extension.
:returns: An AudioSegment instance from the file.
"""
_name, ext = os.path.splitext(path)
ext = ext.lower()[1:]
seg = pydub.AudioSegment.from_file(path, ext)
return AudioSegment(seg, path) | python | {
"resource": ""
} |
q272035 | from_numpy_array | test | def from_numpy_array(nparr, framerate):
"""
Returns an AudioSegment created from the given numpy array.
The numpy array must have shape = (num_samples, num_channels).
:param nparr: The numpy array to create an AudioSegment from.
:returns: An AudioSegment created from the given array.
"""
# interleave the audio across all channels and collapse
if nparr.dtype.itemsize not in (1, 2, 4):
raise ValueError("Numpy Array must contain 8, 16, or 32 bit values.")
if len(nparr.shape) == 1:
arrays = [nparr]
elif len(nparr.shape) == 2:
arrays = [nparr[i,:] for i in range(nparr.shape[0])]
else:
raise ValueError("Numpy Array must be one or two dimensional. Shape must be: (num_samples, num_channels).")
interleaved = np.vstack(arrays).reshape((-1,), order='F')
dubseg = pydub.AudioSegment(interleaved.tobytes(),
frame_rate=framerate,
sample_width=interleaved.dtype.itemsize,
channels=len(interleaved.shape)
)
return AudioSegment(dubseg, "") | python | {
"resource": ""
} |
q272036 | AudioSegment._execute_sox_cmd | test | def _execute_sox_cmd(self, cmd, console_output=False):
"""
Executes a Sox command in a platform-independent manner.
`cmd` must be a format string that includes {inputfile} and {outputfile}.
"""
on_windows = platform.system().lower() == "windows"
# On Windows, a temporary file cannot be shared outside the process that creates it
# so we need to create a "permanent" file that we will use and delete afterwards
def _get_random_tmp_file():
if on_windows:
rand_string = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
tmp = self.name + "_" + rand_string
WinTempFile = collections.namedtuple("WinTempFile", "name")
tmp = WinTempFile(tmp)
else:
tmp = tempfile.NamedTemporaryFile()
return tmp
# Get a temp file to put our data and a temp file to store the result
tmp = _get_random_tmp_file()
othertmp = _get_random_tmp_file()
# Store our data in the temp file
self.export(tmp.name, format="WAV")
# Write the command to sox
stdout = stderr = subprocess.PIPE if console_output else subprocess.DEVNULL
command = cmd.format(inputfile=tmp.name, outputfile=othertmp.name)
res = subprocess.call(command.split(' '), stdout=stdout, stderr=stderr)
assert res == 0, "Sox did not work as intended, or perhaps you don't have Sox installed?"
# Create a new AudioSegment from the other temp file (where Sox put the result)
other = AudioSegment(pydub.AudioSegment.from_wav(othertmp.name), self.name)
# Clean up the temp files
if on_windows:
os.remove(tmp.name)
os.remove(othertmp.name)
else:
tmp.close()
othertmp.close()
return other | python | {
"resource": ""
} |
q272037 | AudioSegment.filter_silence | test | def filter_silence(self, duration_s=1, threshold_percentage=1, console_output=False):
"""
Returns a copy of this AudioSegment, but whose silence has been removed.
.. note:: This method requires that you have the program 'sox' installed.
.. warning:: This method uses the program 'sox' to perform the task. While this is very fast for a single
function call, the IO may add up for large numbers of AudioSegment objects.
:param duration_s: The number of seconds of "silence" that must be present in a row to
be stripped.
:param threshold_percentage: Silence is defined as any samples whose absolute value is below
`threshold_percentage * max(abs(samples in this segment))`.
:param console_output: If True, will pipe all sox output to the console.
:returns: A copy of this AudioSegment, but whose silence has been removed.
"""
command = "sox {inputfile} -t wav {outputfile} silence -l 1 0.1 "\
+ str(threshold_percentage) + "% -1 " + str(float(duration_s)) + " " + str(threshold_percentage) + "%"
try:
result = self._execute_sox_cmd(command)
except pydub.exceptions.CouldntDecodeError:
warnings.warn("After silence filtering, the resultant WAV file is corrupted, and so its data cannot be retrieved. Perhaps try a smaller threshold value.", stacklevel=2)
# Return a copy of us
result = AudioSegment(self.seg, self.name)
return result | python | {
"resource": ""
} |
q272038 | AudioSegment.fft | test | def fft(self, start_s=None, duration_s=None, start_sample=None, num_samples=None, zero_pad=False):
"""
Transforms the indicated slice of the AudioSegment into the frequency domain and returns the bins
and the values.
If neither `start_s` or `start_sample` is specified, the first sample of the slice will be the first sample
of the AudioSegment.
If neither `duration_s` or `num_samples` is specified, the slice will be from the specified start
to the end of the segment.
.. code-block:: python
# Example for plotting the FFT using this function
import matplotlib.pyplot as plt
import numpy as np
seg = audiosegment.from_file("furelise.wav")
# Just take the first 3 seconds
hist_bins, hist_vals = seg[1:3000].fft()
hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals)
plt.plot(hist_bins / 1000, hist_vals_real_normed)
plt.xlabel("kHz")
plt.ylabel("dB")
plt.show()
.. image:: images/fft.png
:param start_s: The start time in seconds. If this is specified, you cannot specify `start_sample`.
:param duration_s: The duration of the slice in seconds. If this is specified, you cannot specify `num_samples`.
:param start_sample: The zero-based index of the first sample to include in the slice.
If this is specified, you cannot specify `start_s`.
:param num_samples: The number of samples to include in the slice. If this is specified, you cannot
specify `duration_s`.
:param zero_pad: If True and the combination of start and duration result in running off the end of
the AudioSegment, the end is zero padded to prevent this.
:returns: np.ndarray of frequencies in Hz, np.ndarray of amount of each frequency
:raises: ValueError If `start_s` and `start_sample` are both specified and/or if both `duration_s` and
`num_samples` are specified.
"""
if start_s is not None and start_sample is not None:
raise ValueError("Only one of start_s and start_sample can be specified.")
if duration_s is not None and num_samples is not None:
raise ValueError("Only one of duration_s and num_samples can be specified.")
if start_s is None and start_sample is None:
start_sample = 0
if duration_s is None and num_samples is None:
num_samples = len(self.get_array_of_samples()) - int(start_sample)
if duration_s is not None:
num_samples = int(round(duration_s * self.frame_rate))
if start_s is not None:
start_sample = int(round(start_s * self.frame_rate))
end_sample = start_sample + num_samples # end_sample is excluded
if end_sample > len(self.get_array_of_samples()) and not zero_pad:
raise ValueError("The combination of start and duration will run off the end of the AudioSegment object.")
elif end_sample > len(self.get_array_of_samples()) and zero_pad:
arr = np.array(self.get_array_of_samples())
zeros = np.zeros(end_sample - len(arr))
arr = np.append(arr, zeros)
else:
arr = np.array(self.get_array_of_samples())
audioslice = np.array(arr[start_sample:end_sample])
fft_result = np.fft.fft(audioslice)[range(int(round(num_samples/2)) + 1)]
step_size = self.frame_rate / num_samples
bins = np.arange(0, int(round(num_samples/2)) + 1, 1.0) * step_size
return bins, fft_result | python | {
"resource": ""
} |
q272039 | AudioSegment.generate_frames | test | def generate_frames(self, frame_duration_ms, zero_pad=True):
"""
Yields self's data in chunks of frame_duration_ms.
This function adapted from pywebrtc's example [https://github.com/wiseman/py-webrtcvad/blob/master/example.py].
:param frame_duration_ms: The length of each frame in ms.
:param zero_pad: Whether or not to zero pad the end of the AudioSegment object to get all
the audio data out as frames. If not, there may be a part at the end
of the Segment that is cut off (the part will be <= `frame_duration_ms` in length).
:returns: A Frame object with properties 'bytes (the data)', 'timestamp (start time)', and 'duration'.
"""
Frame = collections.namedtuple("Frame", "bytes timestamp duration")
# (samples/sec) * (seconds in a frame) * (bytes/sample)
bytes_per_frame = int(self.frame_rate * (frame_duration_ms / 1000) * self.sample_width)
offset = 0 # where we are so far in self's data (in bytes)
timestamp = 0.0 # where we are so far in self (in seconds)
# (bytes/frame) * (sample/bytes) * (sec/samples)
frame_duration_s = (bytes_per_frame / self.frame_rate) / self.sample_width
while offset + bytes_per_frame < len(self.raw_data):
yield Frame(self.raw_data[offset:offset + bytes_per_frame], timestamp, frame_duration_s)
timestamp += frame_duration_s
offset += bytes_per_frame
if zero_pad:
rest = self.raw_data[offset:]
zeros = bytes(bytes_per_frame - len(rest))
yield Frame(rest + zeros, timestamp, frame_duration_s) | python | {
"resource": ""
} |
def normalize_spl_by_average(self, db):
    """
    Normalize the values in the AudioSegment so that its `spl` property
    gives `db`.

    .. note:: This method is currently broken - it returns an AudioSegment whose
              values are much smaller than reasonable, yet which yield an SPL value
              that equals the given `db`. Such an AudioSegment will not be serializable
              as a WAV file, which will also break any method that relies on SOX.
              I may remove this method in the future, since the SPL of an AudioSegment is
              pretty questionable to begin with.

    :param db: The decibels to normalize average to.
    :returns: A new AudioSegment object whose values are changed so that their
              average is `db`.
    :raises: ValueError if there are no samples in this AudioSegment.
    """
    # Work on a copy so this segment's sample data is left untouched.
    arr = self.to_numpy_array().copy()
    if len(arr) == 0:
        raise ValueError("Cannot normalize the SPL of an empty AudioSegment")
    def rms(x):
        # Root-mean-square of the samples.
        return np.sqrt(np.mean(np.square(x)))
    # Figure out what RMS we would like
    # NOTE(review): P_REF_PCM is a module-level reference value not visible in
    # this chunk; the -1E-9 nudges the target slightly below the exact value,
    # presumably to avoid overshooting it -- confirm intent.
    desired_rms = P_REF_PCM * ((10 ** (db/20.0)) - 1E-9)
    # Use successive approximation (bisection on the scale factor) to solve
    ## Keep trying different multiplication factors until we get close enough or run out of time
    max_ntries = 50
    res_rms = 0.0
    ntries = 0
    factor = 0.1
    # NOTE(review): the right bisection bound is initialized to desired_rms
    # itself, which is an RMS value rather than a scale factor; this looks
    # related to the brokenness described in the docstring.
    left = 0.0
    right = desired_rms
    while (ntries < max_ntries) and not util.isclose(res_rms, desired_rms, abs_tol=0.1):
        res_rms = rms(arr * factor)
        if res_rms < desired_rms:
            left = factor
        else:
            right = factor
        factor = 0.5 * (left + right)
        ntries += 1
    # Rebuild an AudioSegment of the same sample width from the scaled samples.
    dtype_dict = {1: np.int8, 2: np.int16, 4: np.int32}
    dtype = dtype_dict[self.sample_width]
    new_seg = from_numpy_array(np.array(arr * factor, dtype=dtype), self.frame_rate)
    return new_seg
def reduce(self, others):
    """
    Concatenates all of `others` onto a copy of this AudioSegment and returns
    the result; `self` is not modified.

    :param others: The other AudioSegment objects to append to this one.
    :returns: The concatenated result.
    """
    combined = AudioSegment(self.seg, self.name)
    # Gather the raw byte payloads: ours first, then each of the others in order.
    chunks = [self.seg._data]
    chunks.extend(other.seg._data for other in others)
    combined.seg._data = b''.join(chunks)
    return combined
def resample(self, sample_rate_Hz=None, sample_width=None, channels=None, console_output=False):
    """
    Returns a new AudioSegment with the same audio resampled to the requested
    sample rate, sample width, and/or channel count. Any parameter left None
    keeps this segment's current value.

    .. note:: This method requires that you have the program 'sox' installed.

    .. warning:: This method uses the program 'sox' to perform the task. While this is
                 very fast for a single function call, the IO may add up for large
                 numbers of AudioSegment objects.

    :param sample_rate_Hz: The new sample rate in Hz.
    :param sample_width: The new sample width in bytes, so sample_width=2 would correspond
                         to 16 bit (2 byte) width.
    :param channels: The new number of channels.
    :param console_output: Will print the output of sox to the console if True.
    :returns: The newly sampled AudioSegment.
    """
    # Default every unspecified parameter to this segment's current value.
    sample_rate_Hz = self.frame_rate if sample_rate_Hz is None else sample_rate_Hz
    sample_width = self.sample_width if sample_width is None else sample_width
    channels = self.channels if channels is None else channels
    # TODO: Replace this with librosa's implementation to remove SOX dependency here
    # {inputfile}/{outputfile} are literal placeholders filled in by _execute_sox_cmd.
    command = "".join(("sox {inputfile} -b ", str(sample_width * 8),
                       " -r ", str(sample_rate_Hz),
                       " -t wav {outputfile} channels ", str(channels)))
    return self._execute_sox_cmd(command, console_output=console_output)
def serialize(self):
    """
    Serializes into a bytestring.

    :returns: An object of type Bytes.
    """
    # Double-pickle: the inner pydub segment is pickled on its own so it can be
    # restored independently when deserializing.
    state = self.__getstate__()
    payload = {
        'name': state['name'],
        'seg': pickle.dumps(state['seg'], protocol=-1),
    }
    return pickle.dumps(payload, protocol=-1)
def spectrogram(self, start_s=None, duration_s=None, start_sample=None, num_samples=None,
                window_length_s=None, window_length_samples=None, overlap=0.5, window=('tukey', 0.25)):
    """
    Transforms a slice of this AudioSegment into the frequency domain across
    successive time bins (a spectrogram).

    The slice may be given either in seconds (`start_s`/`duration_s`) or in
    samples (`start_sample`/`num_samples`) -- at most one of each pair. The FFT
    window length must be given, either in seconds (`window_length_s`) or in
    samples (`window_length_samples`), but not both.

    :param start_s: Start time in seconds; defaults to the beginning.
    :param duration_s: Duration in seconds; defaults to the rest of the segment.
    :param start_sample: Index of the first sample; defaults to the beginning.
    :param num_samples: Number of samples; defaults to the rest of the segment.
    :param window_length_s: Length of each FFT in seconds.
    :param window_length_samples: Length of each FFT in samples.
    :param overlap: Fraction of each window that overlaps the next.
    :param window: Window specification, passed through unchanged to
                   scipy.signal.spectrogram. Defaults to a Tukey window with
                   shape parameter 0.25.
    :returns: Three np.ndarrays: frequency values in Hz, time values, and the
              magnitude of each time/frequency bin with shape
              [len(frequencies), len(times)].
    :raises ValueError: If both members of a start/duration/window-length pair
                        are specified, if no window length is specified, or if
                        the requested slice runs off the end of the segment.
    """
    # Reject ambiguous or missing specifications up front.
    if start_s is not None and start_sample is not None:
        raise ValueError("Only one of start_s and start_sample may be specified.")
    if duration_s is not None and num_samples is not None:
        raise ValueError("Only one of duration_s and num_samples may be specified.")
    if window_length_s is not None and window_length_samples is not None:
        raise ValueError("Only one of window_length_s and window_length_samples may be specified.")
    if window_length_s is None and window_length_samples is None:
        raise ValueError("You must specify a window length, either in window_length_s or in window_length_samples.")

    # Resolve everything into sample units.
    if start_s is None and start_sample is None:
        start_sample = 0
    elif start_s is not None:
        start_sample = int(round(start_s * self.frame_rate))

    if duration_s is None and num_samples is None:
        num_samples = len(self.get_array_of_samples()) - int(start_sample)
    elif duration_s is not None:
        num_samples = int(round(duration_s * self.frame_rate))

    if window_length_s is not None:
        window_length_samples = int(round(window_length_s * self.frame_rate))

    if start_sample + num_samples > len(self.get_array_of_samples()):
        raise ValueError("The combination of start and duration will run off the end of the AudioSegment object.")

    # Hand the selected slice to scipy and return its (freqs, times, magnitudes).
    samples = self.to_numpy_array()[start_sample:start_sample + num_samples]
    return signal.spectrogram(samples, self.frame_rate, scaling='spectrum',
                              nperseg=window_length_samples,
                              noverlap=int(round(overlap * window_length_samples)),
                              mode='magnitude', window=window)
q272045 | _choose_front_id_from_candidates | test | def _choose_front_id_from_candidates(candidate_offset_front_ids, offset_fronts, offsets_corresponding_to_onsets):
"""
Returns a front ID which is the id of the offset front that contains the most overlap
with offsets that correspond to the given onset front ID.
"""
noverlaps = [] # will contain tuples of the form (number_overlapping, offset_front_id)
for offset_front_id in candidate_offset_front_ids:
offset_front_f_idxs, offset_front_s_idxs = np.where(offset_fronts == offset_front_id)
offset_front_idxs = [(f, i) for f, i in zip(offset_front_f_idxs, offset_front_s_idxs)]
noverlap_this_id = len(set(offset_front_idxs).symmetric_difference(set(offsets_corresponding_to_onsets)))
noverlaps.append((noverlap_this_id, offset_front_id))
_overlapped, chosen_offset_front_id = max(noverlaps, key=lambda t: t[0])
return int(chosen_offset_front_id) | python | {
"resource": ""
} |
def _get_offset_front_id_after_onset_sample_idx(onset_sample_idx, offset_fronts):
    """
    Returns the ID of the offset front that lies entirely after `onset_sample_idx`
    and whose earliest sample is closest to it, or -1 if no front qualifies.
    """
    best_id = -1
    best_start = sys.maxsize
    # Scan every front ID present in the matrix (0 means "no front here").
    for front_id in (i for i in np.unique(offset_fronts) if i != 0):
        front = _get_front_idxs_from_id(offset_fronts, front_id)
        # Earliest sample at which this front appears, across all frequencies.
        front_start = min(s for _f, s in front)
        # The entire front must begin strictly after the onset sample;
        # among those, keep the one that begins soonest.
        if onset_sample_idx < front_start < best_start:
            best_start = front_start
            best_id = front_id
    assert best_id > 1 or best_id == -1
    return best_id
def _get_offset_front_id_after_onset_front(onset_front_id, onset_fronts, offset_fronts):
    """
    Returns the ID of the offset front that occurs first after the given onset
    front: the front whose offsets all lie beyond the latest onset in the onset
    front and which begins closest to it. Returns -1 if no such front exists.
    """
    # Latest sample index touched by this onset front, across all frequencies.
    latest_onset = max(s for _f, s in _get_front_idxs_from_id(onset_fronts, onset_front_id))
    return int(_get_offset_front_id_after_onset_sample_idx(latest_onset, offset_fronts))
def _match_offset_front_id_to_onset_front_id(onset_front_id, onset_fronts, offset_fronts, onsets, offsets):
    """
    Finds the offset front that best matches the given onset front: the offset
    front containing the most offsets that correspond to this front's onsets.
    If no offset front contains any such offset, falls back to the first offset
    front occurring entirely after the onset front (or -1 if none exists).
    """
    # The offsets that pair up with each onset in this onset front.
    onset_idxs = _get_front_idxs_from_id(onset_fronts, onset_front_id)
    matched_offsets = [_lookup_offset_by_onset_idx(idx, onsets, offsets) for idx in onset_idxs]
    # Offset-front IDs owning at least one of those offsets. An ID of 0 means the
    # offset never got formed into a front, so it is discarded.
    candidates = {int(offset_fronts[f, s]) for f, s in matched_offsets}
    candidates.discard(0)
    if candidates:
        return _choose_front_id_from_candidates(list(candidates), offset_fronts, matched_offsets)
    return _get_offset_front_id_after_onset_front(onset_front_id, onset_fronts, offset_fronts)
def _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, onset_front_id, offset_front_id):
    """
    Finds a consecutive run of the given onset front and a consecutive run of the
    given offset front that occupy some of the same frequency channels, and
    returns those two overlapping portions. Returns ([], []) if no overlap exists.
    """
    onset_front = _get_front_idxs_from_id(onset_fronts, onset_front_id)
    offset_front = _get_front_idxs_from_id(offset_fronts, offset_front_id)
    # Try each consecutive run of the onset front until one overlaps the offset front.
    for onset_run in list(_get_consecutive_portions_of_front(onset_front)):
        frequencies = {f for f, _ in onset_run}
        # Restrict the offset front to the frequencies covered by this onset run...
        restricted = [(f, s) for f, s in offset_front if f in frequencies]
        # ...and keep only an actually-consecutive stretch of that restriction.
        for offset_run in _get_consecutive_portions_of_front(restricted):
            if offset_run:
                # The first non-empty consecutive overlap found wins.
                return onset_run, offset_run
    return [], []
def _update_segmentation_mask(segmentation_mask, onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap):
    """
    Returns a boolean indicating whether the given onset front was matched to
    completion, after updating `segmentation_mask` IN PLACE by segmenting between
    `onset_front_id` and `offset_front_id_most_overlap`, as found in
    `onset_fronts` and `offset_fronts`, respectively.

    The `onset_fronts` and `offset_fronts` matrices are also modified in place:
    the used portions of both fronts are zeroed out.

    Specifically, segments by doing the following:

    - Going across frequencies in the onset_front,
    - add the segment mask ID (the onset front ID) to all samples between the onset_front and the offset_front,
      if the offset_front is in that frequency.

    Possible scenarios:

    Fronts line up completely:
    ::

        |   |      S S S
        |   |  =>  S S S
        |   |      S S S
        |   |      S S S

    Onset front starts before offset front:
    ::

        |          |
        |   |  =>  S S S
        |   |      S S S
        |   |      S S S

    Onset front ends after offset front:
    ::

        |   |      S S S
        |   |  =>  S S S
        |   |      S S S
        |          |

    Onset front starts before and ends after offset front:
    ::

        |          |
        |   |  =>  S S S
        |   |      S S S
        |          |

    The above three options in reverse:
    ::

            |          |S S|      |
        |S S|      |S S|      |S S|
        |S S|      |S S|      |S S|
        |S S|          |          |

    There is one last scenario:
    ::

        |   |
         \\ /
          \\
         / \\
        |   |

    Where the offset and onset fronts cross one another. If this happens, we simply
    reverse the indices and accept:
    ::

        |sss|
         \\sss/
          \\s/
          /s\\
        |sss|

    The other option would be to destroy the offset front from the crossover point on, and
    then search for a new offset front for the rest of the onset front.
    """
    # Get the portions of the onset and offset fronts that overlap and are consecutive
    onset_front_overlap, offset_front_overlap = _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap)
    onset_front = _get_front_idxs_from_id(onset_fronts, onset_front_id)
    offset_front = _get_front_idxs_from_id(offset_fronts, offset_front_id_most_overlap)
    msg = "Onset front {} and offset front {} result in consecutive overlapping portions of (on) {} and (off) {}, one of which is empty".format(
        onset_front, offset_front, onset_front_overlap, offset_front_overlap
    )
    assert onset_front_overlap, msg
    assert offset_front_overlap, msg
    # From here on, operate only on the overlapping consecutive portions.
    onset_front = onset_front_overlap
    offset_front = offset_front_overlap
    # Figure out which frequencies will go in the segment:
    # the intersection of the frequency ranges of the two portions.
    flow_on, _slow_on = onset_front[0]
    fhigh_on, _shigh_on = onset_front[-1]
    flow_off, _slow_off = offset_front[0]
    fhigh_off, _shigh_off = offset_front[-1]
    flow = max(flow_on, flow_off)
    fhigh = min(fhigh_on, fhigh_off)
    # Update all the masks with the segment
    for fidx, _freqchan in enumerate(segmentation_mask[flow:fhigh + 1, :], start=flow):
        assert fidx >= flow, "Frequency index is {}, but we should have started at {}".format(fidx, flow)
        assert (fidx - flow) < len(onset_front), "Frequency index {} minus starting frequency {} is too large for nfrequencies {} in onset front {}".format(
            fidx, flow, len(onset_front), onset_front
        )
        assert (fidx - flow) < len(offset_front), "Frequency index {} minus starting frequency {} is too large for nfrequencies {} in offset front {}".format(
            fidx, flow, len(offset_front), offset_front
        )
        # Sample span for this frequency channel: from the onset to the offset.
        _, beg = onset_front[fidx - flow]
        _, end = offset_front[fidx - flow]
        if beg > end:
            # Crossing fronts (last docstring scenario): swap so the slice is valid.
            end, beg = beg, end
        assert end >= beg
        # Stamp the segment with the onset front's ID and clear any front points
        # strictly inside the claimed span.
        segmentation_mask[fidx, beg:end + 1] = onset_front_id
        onset_fronts[fidx, (beg + 1):(end + 1)] = 0
        offset_fronts[fidx, (beg + 1):(end + 1)] = 0
        # Running count of channels consumed; holds its final value after the loop.
        # NOTE(review): if the loop body never ran this name would be unbound, but
        # the asserts above guarantee at least one iteration (flow <= fhigh).
        nfreqs_used_in_onset_front = (fidx - flow) + 1
    # Update the other masks to delete fronts that have been used
    indexes = np.arange(flow, fhigh + 1, 1, dtype=np.int64)
    onset_front_sample_idxs_across_freqs = np.array([s for _, s in onset_front])
    onset_front_sample_idxs_across_freqs_up_to_break = onset_front_sample_idxs_across_freqs[:nfreqs_used_in_onset_front]
    offset_front_sample_idxs_across_freqs = np.array([s for _, s in offset_front])
    offset_front_sample_idxs_across_freqs_up_to_break = offset_front_sample_idxs_across_freqs[:nfreqs_used_in_onset_front]
    ## Remove the offset front from where we started to where we ended
    offset_fronts[indexes[:nfreqs_used_in_onset_front], offset_front_sample_idxs_across_freqs_up_to_break] = 0
    ## Remove the onset front from where we started to where we ended
    onset_fronts[indexes[:nfreqs_used_in_onset_front], onset_front_sample_idxs_across_freqs_up_to_break] = 0
    # Determine if we matched the entire onset front by checking if there is any more of this onset front in onset_fronts
    whole_onset_front_matched = onset_front_id not in np.unique(onset_fronts)
    return whole_onset_front_matched
q272051 | _front_id_from_idx | test | def _front_id_from_idx(front, index):
"""
Returns the front ID found in `front` at the given `index`.
:param front: An onset or offset front array of shape [nfrequencies, nsamples]
:index: A tuple of the form (frequency index, sample index)
:returns: The ID of the front or -1 if not found in `front` and the item at `onsets_or_offsets[index]`
is not a 1.
"""
fidx, sidx = index
id = front[fidx, sidx]
if id == 0:
return -1
else:
return id | python | {
"resource": ""
} |
q272052 | _get_front_ids_one_at_a_time | test | def _get_front_ids_one_at_a_time(onset_fronts):
"""
Yields one onset front ID at a time until they are gone. All the onset fronts from a
frequency channel are yielded, then all of the next channel's, etc., though one at a time.
"""
yielded_so_far = set()
for row in onset_fronts:
for id in row:
if id != 0 and id not in yielded_so_far:
yield id
yielded_so_far.add(id) | python | {
"resource": ""
} |
def _get_corresponding_offsets(onset_fronts, onset_front_id, onsets, offsets):
    """
    Returns, for each onset in the given onset front, the (frequency, sample)
    index of the offset that occurs as close as possible to it, as a list of
    tuples.
    """
    front = _get_front_idxs_from_id(onset_fronts, onset_front_id)
    return [tuple(_lookup_offset_by_onset_idx(onset_idx, onsets, offsets))
            for onset_idx in front]
q272054 | _remove_overlaps | test | def _remove_overlaps(segmentation_mask, fronts):
"""
Removes all points in the fronts that overlap with the segmentation mask.
"""
fidxs, sidxs = np.where((segmentation_mask != fronts) & (segmentation_mask != 0) & (fronts != 0))
fronts[fidxs, sidxs] = 0 | python | {
"resource": ""
} |
def _remove_fronts_that_are_too_small(fronts, size):
    """
    Zeroes out (in place) every front in `fronts` that is strictly smaller than
    `size` consecutive frequencies in length.
    """
    for front_id in np.unique(fronts):
        # 0 means "no front" and -1 is a sentinel; neither is a real front.
        if front_id == 0 or front_id == -1:
            continue
        front = _get_front_idxs_from_id(fronts, front_id)
        if len(front) < size:
            rows = [f for f, _ in front]
            cols = [s for _, s in front]
            fronts[rows, cols] = 0
def _break_poorly_matched_fronts(fronts, threshold=0.1, threshold_overlap_samples=3):
    """
    For each onset front, for each frequency in that front, break the onset front if the signals
    between this frequency's onset and the next frequency's onset are not similar enough.

    Specifically:

    If we have the following two frequency channels, and the two O's are part of the same onset front,
    ::

        [ . O . . . . . . . . . . ]
        [ . . . . O . . . . . . . ]

    We compare the signals x and y:
    ::

        [ . x x x x . . . . . . . ]
        [ . y y y y . . . . . . . ]

    And if they are not sufficiently similar (via a DSP correlation algorithm), we break the onset
    front between these two channels.

    Once this is done, remove any onset fronts that are less than 3 channels wide.

    :param fronts: 2D array [nfrequencies, nsamples] of front IDs; modified in place.
    :param threshold: Similarity score below which the front is broken.
    :param threshold_overlap_samples: Minimum overlap length (in samples) required
                                      before a similarity comparison is attempted.
    """
    assert threshold_overlap_samples > 0, "Number of samples of overlap must be greater than zero"
    # Maps front ID -> list of (frequency, sample) points after which to break it.
    breaks_after = {}
    for front_id in _get_front_ids_one_at_a_time(fronts):
        front = _get_front_idxs_from_id(fronts, front_id)
        for i, (f, s) in enumerate(front):
            if i < len(front) - 1:
                # Get the signal from f, s to f, s+1 and the signal from f+1, s to f+1, s+1
                next_f, next_s = front[i + 1]
                low_s = min(s, next_s)
                high_s = max(s, next_s)
                # NOTE(review): these slices are taken from the `fronts` ID matrix,
                # not from the underlying audio -- so the "correlation" below
                # compares front-ID patterns rather than waveforms. This looks
                # questionable and matches the TODO a few lines down; confirm intent.
                sig_this_f = fronts[f, low_s:high_s]
                sig_next_f = fronts[next_f, low_s:high_s]
                assert len(sig_next_f) == len(sig_this_f)
                if len(sig_next_f) > threshold_overlap_samples:
                    # If these two signals are not sufficiently close in form, this front should be broken up
                    correlation = signal.correlate(sig_this_f, sig_next_f, mode='same')
                    assert len(correlation) > 0
                    # Normalize; +1E-9 guards against division by zero.
                    correlation = correlation / max(correlation + 1E-9)
                    similarity = np.sum(correlation) / len(correlation)
                    # TODO: the above stuff probably needs to be figured out
                    if similarity < threshold:
                        if front_id in breaks_after:
                            breaks_after[front_id].append((f, s))
                        else:
                            # NOTE(review): the first break point found for a front is
                            # discarded here (an empty list is stored instead of
                            # [(f, s)]) -- possibly a bug; confirm intent.
                            breaks_after[front_id] = []
    # Now update the fronts matrix by breaking up any fronts at the points we just identified
    # and assign the newly created fronts new IDs
    taken_ids = sorted(np.unique(fronts))
    next_id = taken_ids[-1] + 1  # first ID not already in use
    for id in breaks_after.keys():
        for f, s in breaks_after[id]:
            fidxs, sidxs = np.where(fronts == id)
            # Everything in this front strictly below frequency f becomes a new front.
            idxs_greater_than_f = [fidx for fidx in fidxs if fidx > f]
            start = len(sidxs) - len(idxs_greater_than_f)
            indexes = (idxs_greater_than_f, sidxs[start:])
            fronts[indexes] = next_id
            next_id += 1
    # Finally, discard any front narrower than 3 frequency channels.
    _remove_fronts_that_are_too_small(fronts, 3)
def _merge_adjacent_segments(mask):
    """
    Merges all segments in `mask` which are touching, by relabeling each
    adjacent segment with the other's ID. `mask` is modified in place.
    """
    segment_ids = [sid for sid in np.unique(mask) if sid != 0]
    # Brute-force O(N^2) pairwise comparison of all segments.
    for sid in segment_ids:
        mine = np.where(mask == sid)
        for other_id in segment_ids:
            if other_id == sid:
                continue
            theirs = np.where(mask == other_id)
            if _segments_are_adjacent(mine, theirs):
                # Absorb the other segment into this one.
                mask[theirs] = sid
def _separate_masks(mask, threshold=0.025):
    """
    Splits `mask` into one mask per segment -- each returned mask has the same
    shape as the input but is zero everywhere outside its single segment.
    Segments whose total area does not exceed `threshold * mask.size` are
    dropped. The per-segment work is farmed out to a process pool.
    """
    try:
        nworkers = multiprocessing.cpu_count()
    except NotImplementedError:
        nworkers = 2
    segment_ids = [sid for sid in np.unique(mask) if sid != 0]
    # One (id, area threshold, mask) argument tuple per segment.
    task_args = [(sid, threshold * mask.size, mask) for sid in segment_ids]
    with multiprocessing.Pool(processes=nworkers) as pool:
        separated = pool.starmap(_separate_masks_task, task_args)
    return [m for m in separated if m is not None]
def _downsample_one_or_the_other(mask, mask_indexes, stft, stft_indexes):
    """
    Downsamples whichever of `mask`/`stft` (both shaped [frequencies, times])
    has more time bins so that the two share the same time dimension. The
    frequency dimension is left untouched. Returns the (possibly replaced)
    mask, mask_indexes, stft, and stft_indexes.
    """
    assert len(mask.shape) == 2, "Expected a two-dimensional `mask`, but got one of {} dimensions.".format(len(mask.shape))
    assert len(stft.shape) == 2, "Expected a two-dimensional `stft`, but got one of {} dimensions.".format(len(stft.shape))
    ntimes_mask = mask.shape[1]
    ntimes_stft = stft.shape[1]
    if ntimes_mask > ntimes_stft:
        # The mask has the finer time resolution: thin it out to match the STFT.
        keep = _get_downsampled_indexes(mask, ntimes_mask / ntimes_stft)
        mask = mask[:, keep]
        mask_indexes = np.array(keep)
    elif ntimes_mask < ntimes_stft:
        # The STFT has the finer time resolution: thin it out to match the mask.
        keep = _get_downsampled_indexes(stft, ntimes_stft / ntimes_mask)
        stft = stft[:, keep]
        stft_indexes = np.array(keep)
    return mask, mask_indexes, stft, stft_indexes
q272060 | _asa_task | test | def _asa_task(q, masks, stft, sample_width, frame_rate, nsamples_for_each_fft):
"""
Worker for the ASA algorithm's multiprocessing step.
"""
# Convert each mask to (1 or 0) rather than (ID or 0)
for mask in masks:
mask = np.where(mask > 0, 1, 0)
# Multiply the masks against STFTs
masks = [mask * stft for mask in masks]
nparrs = []
dtype_dict = {1: np.int8, 2: np.int16, 4: np.int32}
dtype = dtype_dict[sample_width]
for m in masks:
_times, nparr = signal.istft(m, frame_rate, nperseg=nsamples_for_each_fft)
nparr = nparr.astype(dtype)
nparrs.append(nparr)
for m in nparrs:
q.put(m)
q.put("DONE") | python | {
"resource": ""
} |
def bandpass_filter(data, low, high, fs, order=5):
    """
    Applies a Butterworth band-pass filter to the given data.

    :param data: The data (numpy array) to be filtered.
    :param low: The low cutoff in Hz.
    :param high: The high cutoff in Hz.
    :param fs: The sample rate (in Hz) of the data.
    :param order: The order of the filter. The higher the order, the tighter the roll-off.
    :returns: Filtered data (numpy array).
    """
    # scipy.signal.butter expects cutoffs normalized to the Nyquist frequency.
    nyquist = 0.5 * fs
    numerator, denominator = signal.butter(order, [low / nyquist, high / nyquist], btype='band')
    return signal.lfilter(numerator, denominator, data)
def lowpass_filter(data, cutoff, fs, order=5):
    """
    Applies a Butterworth low-pass filter to the given data.

    :param data: The data (numpy array) to be filtered.
    :param cutoff: The high cutoff in Hz.
    :param fs: The sample rate in Hz of the data.
    :param order: The order of the filter. The higher the order, the tighter the roll-off.
    :returns: Filtered data (numpy array).
    """
    # scipy.signal.butter expects the cutoff normalized to the Nyquist frequency.
    nyquist = 0.5 * fs
    numerator, denominator = signal.butter(order, cutoff / nyquist, btype='low', analog=False)
    return signal.lfilter(numerator, denominator, data)
def list_to_tf_input(data, response_index, num_outcomes):
    """
    Splits the outcome column out of `data`, returning the remaining features as
    a matrix and the outcomes as one-hot rows.

    :param data: List of rows (lists) of features including one outcome column.
    :param response_index: Column index of the outcome within each row.
    :param num_outcomes: Total number of distinct outcome classes.
    :returns: (feature matrix, one-hot outcome array of shape [nrows, num_outcomes])
    """
    features = np.matrix([row[:response_index] + row[response_index + 1:] for row in data])
    labels = np.asarray([row[response_index] for row in data], dtype=np.uint8)
    # Compare each label against 0..num_outcomes-1 to form the one-hot rows.
    onehot = (np.arange(num_outcomes) == labels[:, None]).astype(np.float32)
    return features, onehot
def expand_and_standardize_dataset(response_index, response_header, data_set, col_vals, headers, standardizers, feats_to_ignore, columns_to_expand, outcome_trans_dict):
    """
    Standardizes continuous features and expands categorical features.

    :param response_index: Column index of the outcome feature.
    :param response_header: Header name of the outcome feature.
    :param data_set: List of rows to transform.
    :param col_vals: Mapping of categorical header -> list of its possible values.
    :param headers: Column headers, parallel to each row of `data_set`.
    :param standardizers: Mapping of continuous header -> {'mean': m, 'std_dev': s}.
    :param feats_to_ignore: Headers to drop entirely.
    :param columns_to_expand: Headers treated as categorical (expanded to +/-1 columns).
    :param outcome_trans_dict: Mapping of raw outcome value -> outcome index.
    :returns: (transformed rows, expanded header list)
    """
    # expand and standardize
    modified_set = []
    for row in data_set:
        new_row = []
        for col_index, val in enumerate(row):
            header = headers[col_index]
            if col_index == response_index:
                # Outcome feature -> index outcome
                new_row.append(outcome_trans_dict[val])
            elif header in feats_to_ignore:
                # Ignored feature -> drop it
                pass
            elif header in columns_to_expand:
                # Categorical feature -> one binary (+1/-1) column per possible value
                for poss_val in col_vals[header]:
                    new_row.append(1.0 if val == poss_val else -1.0)
            else:
                # Continuous feature -> standardize value with respect to its column
                stats = standardizers[header]
                new_row.append(float((val - stats['mean']) / stats['std_dev']))
        modified_set.append(new_row)
    # update headers to reflect column expansion
    expanded_headers = []
    for header in headers:
        if header in feats_to_ignore:
            pass
        # BUGFIX: compare header strings with != rather than `is not`; identity
        # comparison only worked when CPython happened to intern both strings.
        elif (header in columns_to_expand) and (header != response_header):
            for poss_val in col_vals[header]:
                expanded_headers.append('{}_{}'.format(header, poss_val))
        else:
            expanded_headers.append(header)
    return modified_set, expanded_headers
def equal_ignore_order(a, b):
    """
    Checks whether two lists contain the same elements, ignoring order. Works
    even when the elements are neither hashable nor sortable.
    """
    remaining = list(b)
    for item in a:
        # `in`/`remove` both use ==, so unhashable elements are fine.
        if item not in remaining:
            return False
        remaining.remove(item)
    # Equal only if nothing in b was left unmatched.
    return not remaining
def group_audit_ranks(filenames, measurer, similarity_bound=0.05):
    """
    Given a list of audit files, rank them using the `measurer` and
    return the features that never deviate more than `similarity_bound`
    across repairs.

    :param filenames: Audit files; each file's first line ends in
                      ":<feature name>" and the rest is loadable by
                      `load_audit_confusion_matrices`.
    :param measurer: Callable mapping a confusion matrix to a numeric score.
    :param similarity_bound: Maximum score distance for two features to remain
                             in the same group.
    :returns: A list of groups, each a list of feature names.
    """
    def _partition_groups(feature_scores):
        # Greedy clustering. Each group is [representative mean, [(feature, score), ...]].
        groups = []
        for feature, score in feature_scores:
            added_to_group = False
            # Check to see if the feature belongs in a group with any other features.
            for i, group in enumerate(groups):
                mean_score, group_feature_scores = group
                if abs(mean_score - score) < similarity_bound:
                    groups[i][1].append( (feature, score) )
                    # Recalculate the representative mean.
                    # (group_feature_scores is the same list object just appended
                    # to, so the new score is included in the mean.)
                    groups[i][0] = sum([s for _, s in group_feature_scores])/len(group_feature_scores)
                    added_to_group = True
                    break
            # If this feature did not match with the current groups, create another group.
            if not added_to_group:
                groups.append( [score, [(feature,score)]] )
        # Return just the features.
        return [[feature for feature, score in group] for _, group in groups]
    # score_dict maps repair level -> {feature name -> score}.
    score_dict = {}
    features = []
    for filename in filenames:
        with open(filename) as audit_file:
            header_line = audit_file.readline()[:-1] # Remove the trailing endline.
            feature = header_line[header_line.index(":")+1:]
            features.append(feature)
            confusion_matrices = load_audit_confusion_matrices(filename)
            for rep_level, matrix in confusion_matrices:
                score = measurer(matrix)
                if rep_level not in score_dict:
                    score_dict[rep_level] = {}
                score_dict[rep_level][feature] = score
    # Sort by repair level increasing repair level.
    score_keys = sorted(score_dict.keys())
    # Start with all features in one group and repeatedly split, working from
    # the highest repair level downward, wherever scores diverge.
    groups = [features]
    while score_keys:
        key = score_keys.pop()
        new_groups = []
        for group in groups:
            group_features = [(f, score_dict[key][f]) for f in group]
            sub_groups = _partition_groups(group_features)
            new_groups.extend(sub_groups)
        groups = new_groups
    return groups
def load_audit_confusion_matrices(filename):
    """
    Loads a confusion matrix in a two-level dictionary format.
    For example, the confusion matrix of a 75%-accurate model
    that predicted 15 values (and mis-classified 5) may look like:
    {"A": {"A":10, "B": 5}, "B": {"B":5}}
    Note that raw boolean values are translated into strings, such that
    a value that was the boolean True will be returned as the string "True".

    Returns a list of (repair_level, confusion_matrix) tuples sorted by
    increasing repair level.
    """
    with open(filename) as audit_file:
        # BUGFIX: file objects have no .next() method on Python 3; use the
        # builtin next() (works on Python 2.6+ as well) to skip the header.
        next(audit_file)
        # Extract the confusion matrices and repair levels from the audit file.
        confusion_matrices = []
        for line in audit_file:
            separator = ":"
            separator_index = line.index(separator)
            comma_index = line.index(',')
            repair_level = float(line[separator_index+2:comma_index])
            raw_confusion_matrix = line[comma_index+2:-2]
            # The matrices are stored with single quotes; JSON needs doubles.
            confusion_matrix = json.loads( raw_confusion_matrix.replace("'","\"") )
            confusion_matrices.append( (repair_level, confusion_matrix) )
    # Sort the repair levels in case they are out of order for whatever reason.
    confusion_matrices.sort(key = lambda pair: pair[0])
    return confusion_matrices
def list_to_tf_input(data, response_index, num_outcomes):
    """
    Separates the outcome feature from the data.

    data -- list of rows; the column at `response_index` is the outcome.
    num_outcomes -- accepted for interface compatibility; not used here.

    Returns (numpy.matrix of the remaining features, uint8 array of outcomes).
    """
    features = []
    outcomes = []
    for row in data:
        trimmed = list(row)
        outcomes.append(trimmed.pop(response_index))
        features.append(trimmed)
    return np.matrix(features), np.asarray(outcomes, dtype=np.uint8)
def _update_index_url_from_configs(self):
    """ Checks for alternative index-url in pip.conf """
    # Virtualenv config files and site-wide files are added to the list of
    # candidate locations before scanning.
    if 'VIRTUAL_ENV' in os.environ:
        venv = os.environ['VIRTUAL_ENV']
        self.pip_config_locations.append(os.path.join(venv, 'pip.conf'))
        self.pip_config_locations.append(os.path.join(venv, 'pip.ini'))
    if site_config_files:
        self.pip_config_locations.extend(site_config_files)

    index_url = None
    custom_config = None
    env_url = os.environ.get('PIP_INDEX_URL')
    if env_url:
        # The environment variable outranks every config file.
        index_url = env_url
        custom_config = 'PIP_INDEX_URL environment variable'
    else:
        for location in self.pip_config_locations:
            if location.startswith('~'):
                location = os.path.expanduser(location)
            if not os.path.isfile(location):
                continue
            parser = ConfigParser()
            parser.read([location])
            try:
                index_url = parser.get('global', 'index-url')
            except (NoOptionError, NoSectionError):  # pragma: nocover
                continue
            custom_config = location
            break  # stop on first detected, because config locations have a priority
    if index_url:
        self.PYPI_API_URL = self._prepare_api_url(index_url)
        print(Color('Setting API url to {{autoyellow}}{}{{/autoyellow}} as found in {{autoyellow}}{}{{/autoyellow}}'
                    '. Use --default-index-url to use pypi default index'.format(self.PYPI_API_URL, custom_config)))
def autodetect_files(self):
    """ Attempt to detect requirements files in the current working directory """
    # Conventional top-level filenames, checked in priority order.
    for candidate in ('requirements.txt', 'requirements.pip'):
        if self._is_valid_requirements_file(candidate):
            self.filenames.append(candidate)
    # Any valid file inside a ./requirements directory also counts.
    if os.path.isdir('requirements'):
        for entry in os.listdir('requirements'):
            candidate = os.path.join('requirements', entry)
            if self._is_valid_requirements_file(candidate):
                self.filenames.append(candidate)
    self._check_inclusions_recursively()
def resolve_streams(wait_time=1.0):
    """Resolve every stream currently visible on the network.

    The network is usually the subnet specified at the local router, but may
    also include multicast groups or explicit host lists configured in the
    LSL configuration file (see Network Connectivity in the LSL wiki).

    wait_time -- seconds to spend searching; values below ~0.5s may return
                 only a subset (or none) of the outlets present. (default 1.0)

    Returns a list of StreamInfo objects with empty desc fields, any of which
    can be used to open an inlet; the full description is then available from
    the inlet.
    """
    # noinspection PyCallingNonCallable
    handles = (c_void_p * 1024)()
    count = lib.lsl_resolve_all(byref(handles), 1024, c_double(wait_time))
    return [StreamInfo(handle=handles[i]) for i in range(count)]
def resolve_byprop(prop, value, minimum=1, timeout=FOREVER):
    """Resolve all streams whose given property has a specific value.

    Preferred over resolving everything and filtering when one specific
    stream is wanted.

    prop -- StreamInfo property to match (e.g., "name", "type",
            "source_id", or "desc/manufaturer").
    value -- required string value of that property (e.g., "EEG").
    minimum -- return at least this many streams. (default 1)
    timeout -- optional timeout in seconds; on expiry fewer streams
               (possibly none) are returned. (default FOREVER)

    Returns a list of matching StreamInfo objects (with empty desc field),
    any of which can subsequently be used to open an inlet.

    Example: results = resolve_byprop("type", "EEG")
    """
    # noinspection PyCallingNonCallable
    handles = (c_void_p * 1024)()
    count = lib.lsl_resolve_byprop(byref(handles), 1024,
                                   c_char_p(str.encode(prop)),
                                   c_char_p(str.encode(value)),
                                   minimum,
                                   c_double(timeout))
    return [StreamInfo(handle=handles[i]) for i in range(count)]
def resolve_bypred(predicate, minimum=1, timeout=FOREVER):
    """Resolve all streams matching an XPath 1.0 predicate.

    Advanced query that imposes arbitrary conditions on the retrieved
    streams; the string is a predicate for the <description> node (without
    the surrounding []'s), see
    http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951.

    predicate -- e.g. "name='BioSemi'" or "type='EEG' and
                 starts-with(name,'BioSemi') and
                 count(description/desc/channels/channel)=32"
    minimum -- return at least this many streams. (default 1)
    timeout -- optional timeout in seconds; on expiry fewer streams
               (possibly none) are returned. (default FOREVER)

    Returns a list of matching StreamInfo objects (with empty desc field),
    any of which can subsequently be used to open an inlet.
    """
    # noinspection PyCallingNonCallable
    handles = (c_void_p * 1024)()
    count = lib.lsl_resolve_bypred(byref(handles), 1024,
                                   c_char_p(str.encode(predicate)),
                                   minimum,
                                   c_double(timeout))
    return [StreamInfo(handle=handles[i]) for i in range(count)]
def handle_error(errcode):
    """Translate a liblsl error code into the matching Python exception.

    Accepts either a plain int or a ctypes c_int.  Zero and positive codes
    indicate success (nothing happens); negative codes raise.
    """
    code = errcode.value if type(errcode) is c_int else errcode
    if code >= 0:
        return  # no error
    known = {
        -1: (TimeoutError, "the operation failed due to a timeout."),
        -2: (LostError, "the stream has been lost."),
        -3: (InvalidArgumentError, "an argument was incorrectly specified."),
        -4: (InternalError, "an internal error has occurred."),
    }
    exc_type, message = known.get(code, (RuntimeError,
                                         "an unknown error has occurred."))
    raise exc_type(message)
def push_sample(self, x, timestamp=0.0, pushthrough=True):
    """Push a single sample (one value per channel) into the outlet.

    x -- list of values to push, one per channel.
    timestamp -- optional capture time of the sample in agreement with
                 local_clock(); 0.0 means "use the current time".
                 (default 0.0)
    pushthrough -- push the sample through to receivers immediately instead
                   of buffering it with subsequent samples; a chunk_size
                   given at outlet construction takes precedence.
                   (default True)

    Raises ValueError when len(x) differs from the stream's channel count.
    """
    if len(x) != self.channel_count:
        raise ValueError("length of the data must correspond to the "
                         "stream's channel count.")
    if self.channel_format == cf_string:
        # String channels are handed to liblsl as UTF-8 bytes.
        x = [v.encode('utf-8') for v in x]
    handle_error(self.do_push_sample(self.obj, self.sample_type(*x),
                                     c_double(timestamp),
                                     c_int(pushthrough)))
q272076 | StreamOutlet.push_chunk | test | def push_chunk(self, x, timestamp=0.0, pushthrough=True):
    """Push a list of samples into the outlet.
    samples -- A list of samples, either as a list of lists or a list of
               multiplexed values.
    timestamp -- Optionally the capture time of the most recent sample, in
                 agreement with local_clock(); if omitted, the current
                 time is used. The time stamps of other samples are
                 automatically derived according to the sampling rate of
                 the stream. (default 0.0)
    pushthrough Whether to push the chunk through to the receivers instead
                of buffering it with subsequent samples. Note that the
                chunk_size, if specified at outlet construction, takes
                precedence over the pushthrough flag. (default True)
    """
    # Fast path: treat x as a buffer-compatible object and wrap its memory
    # as a ctypes array via from_buffer (shares the buffer, no copy).
    # NOTE(review): n_values assumes x holds len(x) full samples — confirm
    # callers pass sample-major buffers.
    try:
        n_values = self.channel_count * len(x)
        data_buff = (self.value_type * n_values).from_buffer(x)
        handle_error(self.do_push_chunk(self.obj, data_buff,
                                        c_long(n_values),
                                        c_double(timestamp),
                                        c_int(pushthrough)))
    # Slow path: from_buffer raised TypeError, so x is a plain Python list
    # (flat or nested). An empty chunk is silently ignored.
    except TypeError:
        if len(x):
            # Flatten a list-of-lists into one multiplexed value list.
            if type(x[0]) is list:
                x = [v for sample in x for v in sample]
            # String channels are handed to liblsl as UTF-8 bytes.
            if self.channel_format == cf_string:
                x = [v.encode('utf-8') for v in x]
            # The flat list must contain a whole number of samples.
            if len(x) % self.channel_count == 0:
                constructor = self.value_type*len(x)
                # noinspection PyCallingNonCallable
                handle_error(self.do_push_chunk(self.obj, constructor(*x),
                                                c_long(len(x)),
                                                c_double(timestamp),
                                                c_int(pushthrough)))
            else:
                raise ValueError("each sample must have the same number of "
                                 "channels.") | python | {
"resource": ""
} |
def info(self, timeout=FOREVER):
    """Retrieve the complete StreamInfo of this stream, including the
    extended description.  Can be invoked at any time of the stream's
    lifetime.

    timeout -- timeout of the operation in seconds. (default FOREVER)

    Raises TimeoutError when the timeout expires, or LostError when the
    stream source has been lost.
    """
    status = c_int()
    handle = lib.lsl_get_fullinfo(self.obj, c_double(timeout),
                                  byref(status))
    handle_error(status)
    return StreamInfo(handle=handle)
def open_stream(self, timeout=FOREVER):
    """Subscribe to the data stream.

    From this moment on, pushed samples are queued and delivered by
    pull_sample()/pull_chunk().  Pulling without a preceding open_stream
    is permitted (the stream is then opened implicitly).

    timeout -- optional timeout of the operation. (default FOREVER)

    Raises TimeoutError when the timeout expires, or LostError when the
    stream source has been lost.
    """
    status = c_int()
    lib.lsl_open_stream(self.obj, c_double(timeout), byref(status))
    handle_error(status)
def time_correction(self, timeout=FOREVER):
    """Retrieve an estimated time correction offset for this stream.

    The first call takes several milliseconds until a reliable estimate is
    obtained; subsequent calls are instantaneous (periodic background
    updates).  Estimate precision should be below 1 ms (empirically within
    +/-0.2 ms).

    timeout -- timeout to acquire the first estimate. (default FOREVER)

    Returns the number to add to a remotely generated local_clock()
    timestamp to map it into this machine's clock domain.

    Raises TimeoutError when the timeout expires, or LostError when the
    stream source has been lost.
    """
    status = c_int()
    offset = lib.lsl_time_correction(self.obj, c_double(timeout),
                                     byref(status))
    handle_error(status)
    return offset
q272080 | XMLElement.child | test | def child(self, name):
"""Get a child with a specified name."""
return XMLElement(lib.lsl_child(self.e, str.encode(name))) | python | {
"resource": ""
} |
def next_sibling(self, name=None):
    """Return the next sibling in the parent's children list.

    When `name` is given, the next sibling carrying that name is returned
    instead of the immediate one.
    """
    if name is not None:
        return XMLElement(lib.lsl_next_sibling_n(self.e, str.encode(name)))
    return XMLElement(lib.lsl_next_sibling(self.e))
def previous_sibling(self, name=None):
    """Return the previous sibling in the parent's children list.

    When `name` is given, the previous sibling carrying that name is
    returned instead of the immediate one.
    """
    if name is not None:
        return XMLElement(lib.lsl_previous_sibling_n(self.e,
                                                     str.encode(name)))
    return XMLElement(lib.lsl_previous_sibling(self.e))
def set_name(self, name):
    """Rename the element; returns False when the node is empty."""
    result = lib.lsl_set_name(self.e, str.encode(name))
    return bool(result)
def set_value(self, value):
    """Assign the element's value; returns False when the node is empty."""
    result = lib.lsl_set_value(self.e, str.encode(value))
    return bool(result)
def append_child(self, name):
    """Create a child element with the given name, appended last."""
    new_child = lib.lsl_append_child(self.e, str.encode(name))
    return XMLElement(new_child)
def prepend_child(self, name):
    """Create a child element with the given name, inserted first."""
    new_child = lib.lsl_prepend_child(self.e, str.encode(name))
    return XMLElement(new_child)
def append_copy(self, elem):
    """Insert a copy of `elem` as the last child."""
    copied = lib.lsl_append_copy(self.e, elem.e)
    return XMLElement(copied)
def prepend_copy(self, elem):
    """Insert a copy of `elem` as the first child."""
    copied = lib.lsl_prepend_copy(self.e, elem.e)
    return XMLElement(copied)
def remove_child(self, rhs):
    """Remove a child, specified either as an XMLElement or by name."""
    if type(rhs) is not XMLElement:
        lib.lsl_remove_child_n(self.e, rhs)
    else:
        lib.lsl_remove_child(self.e, rhs.e)
def results(self):
    """Obtain the set of streams currently present on the network.

    Returns a list of matching StreamInfo objects (with empty desc field),
    any of which can subsequently be used to open an inlet.
    """
    # noinspection PyCallingNonCallable
    handles = (c_void_p * 1024)()
    count = lib.lsl_resolver_results(self.obj, byref(handles), 1024)
    return [StreamInfo(handle=handles[i]) for i in range(count)]
def pair(cmd, word):
    """See all token associated with a given token.
    PAIR lilas"""
    token = list(preprocess_query(word))[0]
    members = DB.smembers(pair_key(token))
    decoded = sorted(m.decode() for m in members)
    print(white(decoded))
    print(magenta('(Total: {})'.format(len(decoded))))
def do_AUTOCOMPLETE(cmd, s):
    """Shows autocomplete results for a given token."""
    token = list(preprocess_query(s))[0]
    members = [m.decode() for m in DB.smembers(edge_ngram_key(token))]
    print(white(members))
    print(magenta('({} elements)'.format(len(members))))
def compute_edge_ngrams(token, min=None):
    """Compute edge ngram of token from min. Does not include token itself."""
    # Fall back to the configured minimum when no explicit one is given.
    lower = config.MIN_EDGE_NGRAMS if min is None else min
    truncated = token[:config.MAX_EDGE_NGRAMS + 1]
    return [truncated[:size] for size in range(lower, len(truncated))]
def iter_pipe(pipe, processors):
    """Feed `pipe` through each processor in turn and yield the results.

    A bare string is wrapped in a list first; each processor may return
    either a single item or an iterator of items.
    """
    stream = [pipe] if isinstance(pipe, str) else pipe
    for processor in processors:
        stream = processor(stream)
    yield from stream
q272095 | ChunkedPool.imap_unordered | test | def imap_unordered(self, func, iterable, chunksize):
    """Customized version of imap_unordered.
    Directly send chunks to func, instead of iterating in each process and
    sending one by one.
    Original:
    https://hg.python.org/cpython/file/tip/Lib/multiprocessing/pool.py#l271
    Other tried options:
    - map_async: makes a list(iterable), so it loads all the data for each
      process into RAM
    - apply_async: needs manual chunking
    """
    # NOTE(review): relies on multiprocessing.Pool internals (_state,
    # _get_tasks, _cache, _taskqueue, IMapUnorderedIterator) — verify
    # against the target CPython version.
    assert self._state == RUN
    task_batches = Pool._get_tasks(func, iterable, chunksize)
    result = IMapUnorderedIterator(self._cache)
    # Unlike the stock implementation, each task passes the whole chunk as
    # func's single argument rather than mapping func over the chunk's
    # items.  The generator is consumed lazily by the pool's task handler.
    tasks = ((result._job, i, func, chunk, {})
             for i, (_, chunk) in enumerate(task_batches))
    self._taskqueue.put((tasks, result._set_length))
    return result | python | {
"resource": ""
} |
def make_fuzzy(word, max=1):
    """Naive edit-distance-1 neighborhood of `word`.

    Generates, in order: adjacent transpositions, single-character
    substitutions, single-character insertions, and (for words longer than
    three characters) single-character deletions.  `max` is accepted for
    interface compatibility but is not used by this implementation.
    """
    letters = string.ascii_lowercase
    results = []
    # adjacent transpositions
    for pos in range(len(word) - 1):
        chars = list(word)
        chars[pos], chars[pos + 1] = chars[pos + 1], chars[pos]
        results.append(''.join(chars))
    # single-character substitutions (skip the identity case)
    for letter in letters:
        for pos in range(len(word)):
            if word[pos] != letter:
                results.append(word[:pos] + letter + word[pos + 1:])
    # single-character insertions
    for letter in letters:
        for pos in range(len(word) + 1):
            results.append(word[:pos] + letter + word[pos:])
    # single-character deletions, longer words only
    if len(word) > 3:
        for pos in range(len(word)):
            results.append(word[:pos] + word[pos + 1:])
    return results
def do_fuzzy(self, word):
    """Compute fuzzy extensions of word.
    FUZZY lilas"""
    token = list(preprocess_query(word))[0]
    print(white(make_fuzzy(token)))
def do_fuzzyindex(self, word):
    """Compute fuzzy extensions of word that exist in index.
    FUZZYINDEX lilas"""
    token = Token(list(preprocess_query(word))[0])
    # Pair each neighbor with its index frequency, most frequent first.
    candidates = [(c, DB.zcard(dbkeys.token_key(c))) for c in make_fuzzy(token)]
    candidates.sort(key=lambda item: item[1], reverse=True)
    for candidate, freq in candidates:
        if freq == 0:
            break  # the list is sorted, so all remaining are absent too
        print(white(candidate), blue(freq))
def extend_results_extrapoling_relations(helper):
    """Try to extract the bigger group of interlinked tokens.
    Should generally be used at last in the collectors chain.
    """
    if not helper.bucket_dry:
        return  # Bucket already has enough results.
    candidates = set(helper.meaningful + helper.common)
    overflowed = False
    for relation in _extract_manytomany_relations(candidates):
        helper.add_to_bucket([token.db_key for token in relation])
        if helper.bucket_overflow:
            overflowed = True
            break
    if not overflowed:
        helper.debug('No relation extrapolated.')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.