_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def read_struct_array(fd, endian, header):
    """Read a struct array.
    Returns a dict with fields of the struct array.

    Args:
        fd: file-like object positioned at the struct array data.
        endian: byte-order prefix for the struct module ('<' or '>').
        header: parsed variable header; ``header['dims']`` holds
            (rows, cols) of the struct array.
    """
    # read field name length (unused, as strings are null terminated)
    field_name_length = read_elements(fd, endian, ['miINT32'])
    # MATLAB limits field names to 31 characters + NUL terminator
    if field_name_length > 32:
        raise ParseError('Unexpected field name length: {}'.format(
            field_name_length))
    # read field names
    fields = read_elements(fd, endian, ['miINT8'], is_name=True)
    # a single field comes back as a bare string; normalize to a list
    if isinstance(fields, basestring):
        fields = [fields]
    # read rows and columns of each field
    # empty() builds one empty per-row bucket list per struct row
    empty = lambda: [list() for i in range(header['dims'][0])]
    array = {}
    for row in range(header['dims'][0]):
        for col in range(header['dims'][1]):
            for field in fields:
                # read the matrix header and array
                vheader, next_pos, fd_var = read_var_header(fd, endian)
                data = read_var_array(fd_var, endian, vheader)
                if field not in array:
                    array[field] = empty()
                array[field][row].append(data)
                # move on to next field
                fd.seek(next_pos)
    # pack the nested arrays: collapse singleton dimensions per row,
    # then over the whole field
    for field in fields:
        rows = array[field]
        for i in range(header['dims'][0]):
            rows[i] = squeeze(rows[i])
        array[field] = squeeze(array[field])
    return array
"resource": ""
} |
def eof(fd):
    """Return True when file object *fd* is at end-of-file.

    Probes by reading one byte; if a byte was available it is pushed
    back by seeking, leaving the stream position unchanged.
    """
    probe = fd.read(1)
    if probe:
        # Not at EOF: rewind past the probe byte.
        fd.seek(fd.tell() - 1)
        return False
    return True
"resource": ""
} |
def write_elements(fd, mtp, data, is_name=False):
    """Write data element tag and data.
    The tag contains the array type and the number of
    bytes the array data will occupy when written to file.
    If data occupies 4 bytes or less, it is written immediately
    as a Small Data Element (SDE).

    Args:
        fd: writable binary file-like object.
        mtp: MAT-file element type key into ``etypes``.
        data: scalar, byte string, or sequence of values to pack.
        is_name(bool): treat *data* as a variable/field name
            (max. 31 characters).
    """
    # struct format character for this element type
    fmt = etypes[mtp]['fmt']
    if isinstance(data, Sequence):
        if fmt == 's' or is_name:
            if isinstance(data, bytes):
                # a single byte string (e.g. a variable name)
                if is_name and len(data) > 31:
                    raise ValueError(
                        'Name "{}" is too long (max. 31 '
                        'characters allowed)'.format(data))
                fmt = '{}s'.format(len(data))
                data = (data,)
            else:
                # a sequence of byte strings: concatenate their formats
                fmt = ''.join('{}s'.format(len(s)) for s in data)
        else:
            l = len(data)
            if l == 0:
                # empty array
                fmt = ''
            if l > 1:
                # more than one element to be written
                fmt = '{}{}'.format(l, fmt)
    else:
        # scalar: wrap in a tuple so struct.pack(*data) works below
        data = (data,)
    num_bytes = struct.calcsize(fmt)
    if num_bytes <= 4:
        # write SDE: type and byte count share the first 4 tag bytes
        if num_bytes < 4:
            # add pad bytes so the SDE occupies a full 8 bytes
            fmt += '{}x'.format(4 - num_bytes)
        fd.write(struct.pack('hh' + fmt, etypes[mtp]['n'],
                 *chain([num_bytes], data)))
        return
    # write tag: element type and number of bytes
    fd.write(struct.pack('b3xI', etypes[mtp]['n'], num_bytes))
    # add pad bytes to fmt, if needed (data must end on an 8-byte boundary)
    mod8 = num_bytes % 8
    if mod8:
        fmt += '{}x'.format(8 - mod8)
    # write data
    fd.write(struct.pack(fmt, *data))
"resource": ""
} |
def write_var_header(fd, header):
    """Write variable header

    Writes the miMATRIX sub-elements (array flags, dimensions and
    variable name) expected at the start of a MAT-file variable.

    Args:
        fd: writable binary file-like object.
        header(dict): must provide 'mclass', 'dims' and 'name'.
    """
    # write tag bytes,
    # and array flags + class and nzmax (null bytes)
    fd.write(struct.pack('b3xI', etypes['miUINT32']['n'], 8))
    fd.write(struct.pack('b3x4x', mclasses[header['mclass']]))
    # write dimensions array
    write_elements(fd, 'miINT32', header['dims'])
    # write var name
    write_elements(fd, 'miINT8', asbytes(header['name']), is_name=True)
"resource": ""
} |
def write_var_data(fd, data):
    """Write variable data to file.

    Emits an miMATRIX element tag (type + payload byte count)
    followed by the already-serialized payload bytes.
    """
    # Tag first: element type and total payload length.
    tag = struct.pack('b3xI', etypes['miMATRIX']['n'], len(data))
    fd.write(tag)
    # Then the raw, pre-packed variable bytes.
    fd.write(data)
"resource": ""
} |
def write_compressed_var_array(fd, array, name):
    """Write compressed variable data to file.

    Serializes *array* into an in-memory buffer, deflates it with
    zlib, then writes an miCOMPRESSED element wrapping the result.
    """
    buffer = BytesIO()
    write_var_array(buffer, array, name)
    payload = zlib.compress(buffer.getvalue())
    buffer.close()
    # Tag: miCOMPRESSED element type plus compressed byte count.
    fd.write(struct.pack('b3xI', etypes['miCOMPRESSED']['n'], len(payload)))
    # Then the deflated variable bytes.
    fd.write(payload)
"resource": ""
} |
def write_numeric_array(fd, header, array):
    """Write the numeric array

    The header and data are serialized into a memory buffer first so
    the total byte count is known before writing to the real file.
    """
    # make a memory file for writing array data
    bd = BytesIO()
    # write matrix header to memory file
    write_var_header(bd, header)
    if not isinstance(array, basestring) and header['dims'][0] > 1:
        # list array data in column major order (MAT-file layout)
        # NOTE(review): `izip`/`basestring` are Python 2 names --
        # presumably compat aliases are defined elsewhere in this
        # module; confirm before running on Python 3.
        array = list(chain.from_iterable(izip(*array)))
    # write matrix data to memory file
    write_elements(bd, header['mtp'], array)
    # write the variable to disk file
    data = bd.getvalue()
    bd.close()
    write_var_data(fd, data)
"resource": ""
} |
def isarray(array, test, dim=2):
    """Returns True if test is True for all array elements.
    Otherwise, returns False.

    Recurses through *dim* nesting levels before applying *test*
    to the innermost elements. Empty arrays yield True.
    """
    if dim <= 1:
        # Innermost level: apply the predicate to each element.
        return all(test(element) for element in array)
    # Recurse one dimension down for every sub-array.
    return all(isarray(sub, test, dim - 1) for sub in array)
"resource": ""
} |
def _execute(self, command, data=None, unpack=True):
    """ Private method to execute command.
    Args:
        command(Command): The defined command.
        data(dict): The uri variable and body.
        unpack(bool): If True, return only the unwrapped ``value``
            field of the result; otherwise return the whole
            WebDriverResult.
    Returns:
        The unwrapped value field in the json response.
    """
    if not data:
        data = {}
    if self.session_id is not None:
        # attach the current session id to every command by default
        data.setdefault('session_id', self.session_id)
    # convert WebElement arguments into wire-protocol references
    data = self._wrap_el(data)
    res = self.remote_invoker.execute(command, data)
    ret = WebDriverResult.from_object(res)
    # raises WebDriverException on a non-zero status
    ret.raise_for_status()
    # convert wire-protocol element references back into WebElements
    ret.value = self._unwrap_el(ret.value)
    if not unpack:
        return ret
    return ret.value
"resource": ""
} |
def init(self):
    """Create Session by desiredCapabilities
    Support:
        Android iOS Web(WebView)
    Returns:
        WebDriver Object.
    """
    # unpack=False: the whole result is needed to read the session id
    resp = self._execute(Command.NEW_SESSION, {
        'desiredCapabilities': self.desired_capabilities
    }, False)
    # NOTE(review): _execute already calls raise_for_status on the
    # result, so this second call looks redundant -- confirm.
    resp.raise_for_status()
    self.session_id = str(resp.session_id)
    self.capabilities = resp.value
"resource": ""
} |
def switch_to_window(self, window_name):
    """Switch focus to the given window.
    Support:
        Web(WebView)
    Args:
        window_name(str): The window to change focus to.
    Returns:
        WebDriver Object.
    """
    self._execute(Command.SWITCH_TO_WINDOW, {'name': window_name})
"resource": ""
} |
def set_window_size(self, width, height, window_handle='current'):
    """Set the width and height of the given window.
    Support:
        Web(WebView)
    Args:
        width(int): the width in pixels.
        height(int): the height in pixels.
        window_handle(str): Identifier of window_handle,
            default to 'current'.
    Returns:
        WebDriver Object.
    """
    payload = {
        'width': int(width),
        'height': int(height),
        'window_handle': window_handle,
    }
    self._execute(Command.SET_WINDOW_SIZE, payload)
"resource": ""
} |
def set_window_position(self, x, y, window_handle='current'):
    """Set the x, y position of the given window.
    Support:
        Web(WebView)
    Args:
        x(int): the x-coordinate in pixels.
        y(int): the y-coordinate in pixels.
        window_handle(str): Identifier of window_handle,
            default to 'current'.
    Returns:
        WebDriver Object.
    """
    payload = {
        'x': int(x),
        'y': int(y),
        'window_handle': window_handle,
    }
    self._execute(Command.SET_WINDOW_POSITION, payload)
"resource": ""
} |
def switch_to_frame(self, frame_reference=None):
    """Switches focus to the specified frame, by index, name, or webelement.
    Support:
        Web(WebView)
    Args:
        frame_reference(None|int|WebElement):
            The identifier of the frame to switch to.
            None means to set to the default context.
            An integer representing the index.
            A webelement means that is an (i)frame to switch to.
            Otherwise throw an error.
    Returns:
        WebDriver Object.
    """
    # Exact-type check (not isinstance) so e.g. bool is rejected.
    valid = frame_reference is None or type(frame_reference) in (int, WebElement)
    if not valid:
        raise TypeError('Type of frame_reference must be None or int or WebElement')
    self._execute(Command.SWITCH_TO_FRAME, {'id': frame_reference})
"resource": ""
} |
def execute_script(self, script, *args):
    """Execute JavaScript Synchronously in current context.
    Support:
        Web(WebView)
    Args:
        script: The JavaScript to execute.
        *args: Arguments for your JavaScript.
    Returns:
        Returns the return value of the function.
    """
    payload = {'script': script, 'args': list(args)}
    return self._execute(Command.EXECUTE_SCRIPT, payload)
"resource": ""
} |
def execute_async_script(self, script, *args):
    """Execute JavaScript Asynchronously in current context.
    Support:
        Web(WebView)
    Args:
        script: The JavaScript to execute.
        *args: Arguments for your JavaScript.
    Returns:
        Returns the return value of the function.
    """
    payload = {'script': script, 'args': list(args)}
    return self._execute(Command.EXECUTE_ASYNC_SCRIPT, payload)
"resource": ""
} |
def add_cookie(self, cookie_dict):
    """Set a cookie.
    Support:
        Web(WebView)
    Args:
        cookie_dict: A dictionary contain keys: "name", "value",
            ["path"], ["domain"], ["secure"], ["httpOnly"], ["expiry"].
    Returns:
        WebElement Object.
    Raises:
        TypeError: If cookie_dict is not a dict.
        KeyError: If "name" or "value" is missing.
    """
    if not isinstance(cookie_dict, dict):
        raise TypeError('Type of the cookie must be a dict.')
    # Check key *presence* rather than truthiness: a cookie with a
    # legal empty-string value ({'name': 'x', 'value': ''}) used to
    # be rejected by the old falsiness check.
    if 'name' not in cookie_dict or 'value' not in cookie_dict:
        raise KeyError('Missing required keys, \'name\' and \'value\' must be provided.')
    self._execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
"resource": ""
} |
def save_screenshot(self, filename, quietly=False):
    """Save the screenshot to local.
    Support:
        Android iOS Web(WebView)
    Args:
        filename(str): The path to save the image.
        quietly(bool): If True, swallow the IOError raised when
            the image cannot be written.
    Returns:
        WebElement Object.
    Raises:
        WebDriverException.
        IOError.
    """
    encoded = self.take_screenshot()
    try:
        with open(filename, "wb") as image_file:
            # screenshot arrives base64-encoded; decode before writing
            image_file.write(b64decode(encoded.encode('ascii')))
    except IOError:
        if not quietly:
            raise
"resource": ""
} |
def element(self, using, value):
    """Find an element in the current context.
    Support:
        Android iOS Web(WebView)
    Args:
        using(str): The element location strategy.
        value(str): The value of the location strategy.
    Returns:
        WebElement Object.
    Raises:
        WebDriverException.
    """
    locator = {'using': using, 'value': value}
    return self._execute(Command.FIND_ELEMENT, locator)
"resource": ""
} |
def elements(self, using, value):
    """Find elements in the current context.
    Support:
        Android iOS Web(WebView)
    Args:
        using(str): The element location strategy.
        value(str): The value of the location strategy.
    Returns:
        Return a List<Element | None>, if no element matched, the list is empty.
    Raises:
        WebDriverException.
    """
    locator = {'using': using, 'value': value}
    return self._execute(Command.FIND_ELEMENTS, locator)
"resource": ""
} |
def wait_for(
    self, timeout=10000, interval=1000,
    asserter=lambda x: x):
    """Wait for driver till satisfy the given condition
    Support:
        Android iOS Web(WebView)
    Args:
        timeout(int): How long we should be retrying stuff,
            in milliseconds.
        interval(int): How long between retries, in milliseconds.
        asserter(callable): The asserter func to determine the result;
            it should raise WebDriverException while unsatisfied.
    Returns:
        Return the driver.
    Raises:
        WebDriverException.
    """
    if not callable(asserter):
        raise TypeError('Asserter must be callable.')
    # retry while the asserter raises WebDriverException, for at most
    # `timeout` ms total, pausing `interval` ms between attempts
    @retry(
        retry_on_exception=lambda ex: isinstance(ex, WebDriverException),
        stop_max_delay=timeout,
        wait_fixed=interval
    )
    def _wait_for(driver):
        asserter(driver)
        return driver
    return _wait_for(self)
"resource": ""
} |
def wait_for_element(
    self, using, value, timeout=10000,
    interval=1000, asserter=is_displayed):
    """Wait for element till satisfy the given condition
    Support:
        Android iOS Web(WebView)
    Args:
        using(str): The element location strategy.
        value(str): The value of the location strategy.
        timeout(int): How long we should be retrying stuff,
            in milliseconds.
        interval(int): How long between retries, in milliseconds.
        asserter(callable): The asserter func to determine the result;
            it should raise WebDriverException while unsatisfied.
    Returns:
        Return the Element.
    Raises:
        WebDriverException.
    """
    if not callable(asserter):
        raise TypeError('Asserter must be callable.')
    # retried helper: locate the element, then let the asserter raise
    # WebDriverException until the condition holds
    @retry(
        retry_on_exception=lambda ex: isinstance(ex, WebDriverException),
        stop_max_delay=timeout,
        wait_fixed=interval
    )
    def _wait_for_element(ctx, using, value):
        el = ctx.element(using, value)
        asserter(el)
        return el
    return _wait_for_element(self, using, value)
"resource": ""
} |
def wait_for_elements(
    self, using, value, timeout=10000,
    interval=1000, asserter=is_displayed):
    """Wait for elements till satisfy the given condition
    Support:
        Android iOS Web(WebView)
    Args:
        using(str): The element location strategy.
        value(str): The value of the location strategy.
        timeout(int): How long we should be retrying stuff,
            in milliseconds.
        interval(int): How long between retries, in milliseconds.
        asserter(callable): The asserter func to determine the result;
            it should raise WebDriverException while unsatisfied.
    Returns:
        Return the list of Element if any of them satisfy the condition.
    Raises:
        WebDriverException.
    """
    if not callable(asserter):
        raise TypeError('Asserter must be callable.')
    @retry(
        retry_on_exception=lambda ex: isinstance(ex, WebDriverException),
        stop_max_delay=timeout,
        wait_fixed=interval
    )
    def _wait_for_elements(ctx, using, value):
        els = ctx.elements(using, value)
        if not len(els):
            # raising here triggers another retry attempt
            raise WebDriverException('no such element')
        else:
            # only the first element is checked by the asserter
            el = els[0]
            asserter(el)
        return els
    return _wait_for_elements(self, using, value)
"resource": ""
} |
def from_object(cls, obj):
    """The factory method to create WebDriverResult from JSON Object.
    Args:
        obj(dict): The JSON Object returned by server.
    """
    session_id = obj.get('sessionId')
    status = obj.get('status', 0)
    value = obj.get('value')
    return cls(session_id, status, value)
"resource": ""
} |
def raise_for_status(self):
    """Raise WebDriverException if returned status is not zero."""
    if not self.status:
        # zero status means success; nothing to raise
        return
    error = find_exception_by_code(self.status)
    message = screen = stacktrace = None
    if isinstance(self.value, str):
        message = self.value
    elif isinstance(self.value, dict):
        # structured error payload from the server
        message = self.value.get('message')
        screen = self.value.get('screen')
        stacktrace = self.value.get('stacktrace')
    raise WebDriverException(error, message, screen, stacktrace)
"resource": ""
} |
def fluent(func):
    """Fluent interface decorator to return self if method return None."""
    @wraps(func)
    def wrapper(instance, *args, **kwargs):
        result = func(instance, *args, **kwargs)
        # fall back to the instance so calls can be chained
        return instance if result is None else result
    return wrapper
"resource": ""
} |
def check_unused_args(self, used_args, args, kwargs):
    """Implement the check_unused_args in superclass.

    Splits keyword arguments into the used/unused bookkeeping dicts
    depending on whether the formatter consumed them.
    """
    for key, val in kwargs.items():
        bucket = self._used_kwargs if key in used_args else self._unused_kwargs
        bucket[key] = val
"resource": ""
} |
def vformat(self, format_string, args, kwargs):
    """Reset the used/unused bookkeeping, then delegate formatting."""
    # fresh dicts per formatting run so results don't leak across calls
    self._used_kwargs, self._unused_kwargs = {}, {}
    return super(MemorizeFormatter, self).vformat(format_string, args, kwargs)
"resource": ""
} |
def format_map(self, format_string, mapping):
    """format a string by a map
    Args:
        format_string(str): A format string
        mapping(dict): A map to format the string
    Returns:
        A formatted string.
    Raises:
        KeyError: if key is not provided by the given map.
    """
    # no positional args are involved, only the mapping
    return self.vformat(format_string, None, mapping)
"resource": ""
} |
def find_exception_by_code(code):
    """Find name of exception by WebDriver defined error code.
    Args:
        code(str): Error code defined in protocol.
    Returns:
        The error name defined in protocol, or None when no entry
        matches the given code.
    """
    return next((err for err in WebDriverError if err.value.code == code), None)
"resource": ""
} |
def execute(self, command, data=None):
    """Format the endpoint url by data and then request the remote server.
    Args:
        command(Command): WebDriver command to be executed.
        data(dict): Data fulfill the uri template and json body.
    Returns:
        A dict represent the json body from server response.
    Raises:
        KeyError: Data cannot fulfill the variable which command needed.
        ConnectionError: Meet network problem (e.g. DNS failure,
            refused connection, etc).
        Timeout: A request times out.
        HTTPError: HTTP request returned an unsuccessful status code.
    """
    # `None` default instead of a mutable `{}` literal: the old
    # default dict was shared across every call of this method.
    if data is None:
        data = {}
    method, uri = command
    try:
        # fill uri template variables; leftovers become the body
        path = self._formatter.format_map(uri, data)
        body = self._formatter.get_unused_kwargs()
        url = "{0}{1}".format(self._url, path)
        return self._request(method, url, body)
    except KeyError as err:
        LOGGER.debug(
            'Endpoint {0} is missing argument {1}'.format(uri, err))
        raise
"resource": ""
} |
def _request(self, method, url, body):
    """Internal method to send request to the remote server.
    Args:
        method(str): HTTP Method(GET/POST/PUT/DELET/HEAD).
        url(str): The request url.
        body(dict): The JSON object to be sent.
    Returns:
        A dict represent the json body from server response.
    Raises:
        ConnectionError: Meet network problem (e.g. DNS failure,
            refused connection, etc).
        Timeout: A request times out.
        HTTPError: HTTP request returned an unsuccessful status code.
    """
    if method not in ('POST', 'PUT'):
        # only POST and PUT carry a JSON body
        body = None
    session = Session()
    LOGGER.debug(
        'Method: {0}, Url: {1}, Body: {2}.'.format(method, url, body))
    prepared = session.prepare_request(Request(method, url, json=body))
    response = session.send(prepared, timeout=self._timeout or None)
    response.raise_for_status()
    # TODO try catch
    return response.json()
"resource": ""
} |
q261432 | WebElement._execute | validation | def _execute(self, command, data=None, unpack=True):
"""Private method to execute command with data.
Args:
command(Command): The defined command.
data(dict): The uri variable and body.
Returns:
The unwrapped value field in the json response.
"""
if not data:
data = {}
data.setdefault('element_id', self.element_id)
return self._driver._execute(command, data, unpack) | python | {
"resource": ""
} |
def element(self, using, value):
    """find an element in the current element.
    Support:
        Android iOS Web(WebView)
    Args:
        using(str): The element location strategy.
        value(str): The value of the location strategy.
    Returns:
        WebElement Object.
    Raises:
        WebDriverException.
    """
    locator = {'using': using, 'value': value}
    return self._execute(Command.FIND_CHILD_ELEMENT, locator)
"resource": ""
} |
def element_or_none(self, using, value):
    """Check if an element in the current element.
    Support:
        Android iOS Web(WebView)
    Args:
        using(str): The element location strategy.
        value(str): The value of the location strategy.
    Returns:
        Return Element if the element does exists and return None otherwise.
    """
    try:
        return self._execute(Command.FIND_CHILD_ELEMENT, {
            'using': using,
            'value': value
        })
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `Exception` keeps the "return None when
        # not found" contract while letting process-level interrupts
        # propagate.
        return None
"resource": ""
} |
def elements(self, using, value):
    """find elements in the current element.
    Support:
        Android iOS Web(WebView)
    Args:
        using(str): The element location strategy.
        value(str): The value of the location strategy.
    Returns:
        Return a List<Element | None>, if no element matched, the list is empty.
    Raises:
        WebDriverException.
    """
    locator = {'using': using, 'value': value}
    return self._execute(Command.FIND_CHILD_ELEMENTS, locator)
"resource": ""
} |
def is_displayed(target):
    """Assert whether the target is displayed.
    Args:
        target(WebElement): WebElement Object.
    Returns:
        True if the element is displayed.
    Raises:
        TypeError: If target has no callable `is_displayed` attribute.
        WebDriverException: If the element is not displayed.
    """
    # renamed local so it no longer shadows this function's own name
    predicate = getattr(target, 'is_displayed', None)
    if not predicate or not callable(predicate):
        raise TypeError('Target has no attribute \'is_displayed\' or not callable')
    if not predicate():
        raise WebDriverException('element not visible')
    # The old code returned None despite the docstring promising a
    # bool; return True so the documented contract holds (callers that
    # ignored the return value are unaffected).
    return True
"resource": ""
} |
def PlugIn(self):
    """Take next available controller id and plug in to Virtual USB Bus

    Raises:
        MaxInputsReachedError: If no controller slot is free.
    """
    ids = self.available_ids()
    if len(ids) == 0:
        raise MaxInputsReachedError('Max Inputs Reached')
    self.id = ids[0]
    _xinput.PlugIn(self.id)
    # busy-wait until the bus no longer reports our id as available
    while self.id in self.available_ids():
        pass
} |
def UnPlug(self, force=False):
    """Unplug controller from Virtual USB Bus and free up ID

    Args:
        force(bool): If True, use UnPlugForce to detach the device.
    """
    if force:
        _xinput.UnPlugForce(c_uint(self.id))
    else:
        _xinput.UnPlug(c_uint(self.id))
    # busy-wait until the bus reports the id as available again;
    # id 0 is bailed out of immediately
    while self.id not in self.available_ids():
        if self.id == 0:
            break
} |
def set_value(self, control, value=None):
    """Set a value on the controller
    If percent is True all controls will accept a value between -1.0 and 1.0
    If not then:
        Triggers are 0 to 255
        Axis are -32768 to 32767
    Control List:
        AxisLx          , Left Stick X-Axis
        AxisLy          , Left Stick Y-Axis
        AxisRx          , Right Stick X-Axis
        AxisRy          , Right Stick Y-Axis
        BtnBack         , Menu/Back Button
        BtnStart        , Start Button
        BtnA            , A Button
        BtnB            , B Button
        BtnX            , X Button
        BtnY            , Y Button
        BtnThumbL       , Left Thumbstick Click
        BtnThumbR       , Right Thumbstick Click
        BtnShoulderL    , Left Shoulder Button
        BtnShoulderR    , Right Shoulder Button
        Dpad            , Set Dpad Value (0 = Off, Use DPAD_### Constants)
        TriggerL        , Left Trigger
        TriggerR        , Right Trigger

    Raises:
        ValueError: If the control name matches no known control group.
    """
    # local import keeps the fix self-contained without touching the
    # module's ctypes import list
    from ctypes import c_ubyte
    func = getattr(_xinput, 'Set' + control)
    if 'Axis' in control:
        target_type = c_short
        target_value = int(32767 * value) if self.percent else value
    elif 'Btn' in control:
        target_type = c_bool
        target_value = bool(value)
    elif 'Trigger' in control:
        # Triggers are unsigned 0-255. c_byte is *signed*, so 255
        # would silently wrap to -1; c_ubyte carries the full range.
        target_type = c_ubyte
        target_value = int(255 * value) if self.percent else value
    elif 'Dpad' in control:
        target_type = c_int
        target_value = int(value)
    else:
        # previously fell through with target_type/target_value unbound
        raise ValueError('Unknown control: {}'.format(control))
    func(c_uint(self.id), target_type(target_value))
"resource": ""
} |
def main():
    """Test the functionality of the rController object"""
    import time
    print('Testing controller in position 1:')
    print('Running 3 x 3 seconds tests')
    # Initialise Controller
    controller = rController(1)
    # Poll state/button snapshots three times, roughly 3 seconds apart.
    for _ in range(3):
        print('Waiting...')
        time.sleep(2.5)
        print('State: ', controller.gamepad)
        print('Buttons: ', controller.buttons)
        time.sleep(0.5)
    print('Done!')
"resource": ""
} |
def gamepad(self):
    """Returns the current gamepad state. Buttons pressed is shown as a raw integer value.
    Use rController.buttons for a list of buttons pressed.
    """
    state = _xinput_state()
    # ControllerID is 1-based for callers; the XInput API is 0-based
    _xinput.XInputGetState(self.ControllerID - 1, pointer(state))
    # remember the packet number so callers can detect state changes
    self.dwPacketNumber = state.dwPacketNumber
    return state.XINPUT_GAMEPAD
"resource": ""
} |
def buttons(self):
    """Returns a list of buttons currently pressed.

    The gamepad state is sampled once up front: the ``gamepad``
    property issues a fresh XInputGetState call on every access, so
    evaluating it inside the loop queried the driver once per button
    mask and could see inconsistent state mid-iteration.
    """
    pressed = self.gamepad.wButtons
    return [name for name, mask in rController._buttons.items()
            if pressed & mask == mask]
"resource": ""
} |
def maybe_decode_header(header):
    """
    Decodes an encoded 7-bit ASCII header value into it's actual value.

    Values that carry no charset are returned unchanged.
    """
    value, charset = decode_header(header)[0]
    return value.decode(charset) if charset else value
} |
def autodiscover():
    """
    Imports all available previews classes.

    Walks every installed app and imports its ``emails.previews``
    submodule (when present) so preview classes register themselves.
    """
    from django.conf import settings
    for application in settings.INSTALLED_APPS:
        module = import_module(application)
        if module_has_submodule(module, 'emails'):
            emails = import_module('%s.emails' % application)
            try:
                import_module('%s.emails.previews' % application)
            except ImportError:
                # Only raise the exception if this module contains previews and
                # there was a problem importing them. (An emails module that
                # does not contain previews is not an error.)
                if module_has_submodule(emails, 'previews'):
                    raise
"resource": ""
} |
def register(self, cls):
    """
    Adds a preview to the index.
    """
    # instantiate the preview bound to this site
    preview = cls(site=self)
    logger.debug('Registering %r with %r', preview, self)
    self.__previews.setdefault(preview.module, {})[cls.__name__] = preview
"resource": ""
} |
def detail_view(self, request, module, preview):
    """
    Looks up a preview in the index, returning a detail view response.
    """
    index = self.__previews.get(module, {})
    if preview not in index:
        # The provided module/preview does not exist in the index.
        raise Http404
    return index[preview].detail_view(request)
"resource": ""
} |
def url(self):
    """
    The URL to access this preview.
    """
    kwargs = {
        'module': self.module,
        'preview': type(self).__name__,
    }
    return reverse('%s:detail' % URL_NAMESPACE, kwargs=kwargs)
} |
def detail_view(self, request):
    """
    Renders the message view to a response.
    """
    context = {
        'preview': self,
    }
    kwargs = {}
    if self.form_class:
        if request.GET:
            form = self.form_class(data=request.GET)
        else:
            form = self.form_class()
        context['form'] = form
        # an unbound or invalid form short-circuits to the plain
        # detail page without rendering a message
        if not form.is_bound or not form.is_valid():
            return render(request, 'mailviews/previews/detail.html', context)
        kwargs.update(form.get_message_view_kwargs())
    message_view = self.get_message_view(request, **kwargs)
    message = message_view.render_to_message()
    raw = message.message()
    # decode RFC 2047 encoded header values for display
    headers = OrderedDict((header, maybe_decode_header(raw[header])) for header in self.headers)
    context.update({
        'message': message,
        'subject': message.subject,
        'body': message.body,
        'headers': headers,
        'raw': raw.as_string(),
    })
    # expose the HTML alternative, if any, base64-encoded for embedding
    alternatives = getattr(message, 'alternatives', [])
    try:
        html = next(alternative[0] for alternative in alternatives
                    if alternative[1] == 'text/html')
        context.update({
            'html': html,
            'escaped_html': b64encode(html.encode('utf-8')),
        })
    except StopIteration:
        # no text/html alternative attached; render without it
        pass
    return render(request, self.template_name, context)
"resource": ""
} |
def split_docstring(value):
    """
    Splits the docstring of the given value into it's summary and body.
    :returns: a 2-tuple of the format ``(summary, body)``, or ``None``
        when the value has no docstring.
    """
    # ``__doc__`` exists but is None on objects without a docstring, so
    # getattr's default alone is not enough -- dedent(None) would raise
    # AttributeError. Coalesce None to ''.
    docstring = textwrap.dedent(getattr(value, '__doc__', None) or '')
    if not docstring:
        return None
    pieces = docstring.strip().split('\n\n', 1)
    try:
        body = pieces[1]
    except IndexError:
        # single-paragraph docstring: summary only
        body = None
    return Docstring(pieces[0], body)
"resource": ""
} |
def render_to_message(self, extra_context=None, **kwargs):
    """
    Renders and returns an unsent message with the provided context.
    Any extra keyword arguments passed will be passed through as keyword
    arguments to the message constructor.
    :param extra_context: Any additional context to use when rendering the
        templated content.
    :type extra_context: :class:`dict`
    :returns: A message instance.
    :rtype: :attr:`.message_class`
    """
    if extra_context is None:
        extra_context = {}
    # Ensure our custom headers are added to the underlying message class.
    kwargs.setdefault('headers', {}).update(self.headers)
    context = self.get_context_data(**extra_context)
    # subject and body are rendered from the same context
    return self.message_class(
        subject=self.render_subject(context),
        body=self.render_body(context),
        **kwargs)
"resource": ""
} |
def send(self, extra_context=None, **kwargs):
    """
    Renders and sends an email message.
    All keyword arguments other than ``extra_context`` are passed through
    as keyword arguments when constructing a new :attr:`message_class`
    instance for this message.
    This method exists primarily for convenience, and the proper
    rendering of your message should not depend on the behavior of this
    method. To alter how a message is created, override
    :meth:``render_to_message`` instead, since that should always be
    called, even if a message is not sent.
    :param extra_context: Any additional context data that will be used
        when rendering this message.
    :type extra_context: :class:`dict`
    """
    # build the message, then delegate delivery to it
    return self.render_to_message(extra_context=extra_context, **kwargs).send()
"resource": ""
} |
def render_subject(self, context):
    """
    Renders the message subject for the given context.
    The context data is automatically unescaped to avoid rendering HTML
    entities in ``text/plain`` content.
    :param context: The context to use when rendering the subject template.
    :type context: :class:`~django.template.Context`
    :returns: A rendered subject.
    :rtype: :class:`str`
    """
    # strip surrounding whitespace the template may have introduced
    return self.subject_template.render(unescape(context)).strip()
"resource": ""
} |
def render_to_message(self, extra_context=None, *args, **kwargs):
    """
    Renders and returns an unsent message with the given context.
    Any extra keyword arguments passed will be passed through as keyword
    arguments to the message constructor.
    :param extra_context: Any additional context to use when rendering
        templated content.
    :type extra_context: :class:`dict`
    :returns: A message instance.
    :rtype: :attr:`.message_class`
    """
    # build the plain-text message via the parent implementation first
    message = super(TemplatedHTMLEmailMessageView, self)\
        .render_to_message(extra_context, *args, **kwargs)
    if extra_context is None:
        extra_context = {}
    # render the HTML body from the same context and attach it as a
    # text/html alternative
    context = self.get_context_data(**extra_context)
    content = self.render_html_body(context)
    message.attach_alternative(content, mimetype='text/html')
    return message
"resource": ""
} |
def numeric(_, n):
    """
    Serialize a Decimal as PostgreSQL's binary ``numeric`` wire format.

    NBASE = 1000
    ndigits = total number of base-NBASE digits
    weight = base-NBASE weight of first digit
    sign = 0x0000 if positive, 0x4000 if negative, 0xC000 if nan
    dscale = decimal digits after decimal place

    Returns a (struct format, values) pair; the leading int32 is the
    payload length in bytes.
    """
    # Only Decimal-like objects expose as_tuple(); reject anything else.
    try:
        nt = n.as_tuple()
    except AttributeError:
        raise TypeError('numeric field requires Decimal value (got %r)' % n)
    digits = []
    if isinstance(nt.exponent, str):
        # NaN, Inf, -Inf (Decimal.as_tuple() uses a string exponent here)
        ndigits = 0
        weight = 0
        sign = 0xC000
        dscale = 0
    else:
        # Zero-pad so the decimal digit count is a multiple of 4 (one
        # base-10000 group each), reversed so the least significant
        # group comes first.
        decdigits = list(reversed(nt.digits + (nt.exponent % 4) * (0,)))
        weight = 0
        # Strip trailing all-zero groups, counting each into the weight.
        while decdigits:
            if any(decdigits[:4]):
                break
            weight += 1
            del decdigits[:4]
        # Pack each remaining group of four decimal digits into a single
        # base-10000 digit.  NOTE(review): `ndig` is defined elsewhere in
        # this module; presumably it combines 4 decimal digits into one
        # base-10000 value -- confirm.
        while decdigits:
            digits.insert(0, ndig(decdigits[:4]))
            del decdigits[:4]
        ndigits = len(digits)
        weight += nt.exponent // 4 + ndigits - 1
        sign = nt.sign * 0x4000
        dscale = -min(0, nt.exponent)
    # Header fields followed by the digits; each field is 2 bytes wide.
    data = [ndigits, weight, sign, dscale] + digits
    return ('ihhHH%dH' % ndigits, [2 * len(data)] + data)
"resource": ""
} |
def execute_from_command_line(argv=None):
    """
    Parse the command line and run the requested SMA action.

    :param argv: full argument vector (``argv[0]`` is the program name).
        When ``None``, argparse falls back to ``sys.argv`` itself.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--monitors-dir', default=MONITORS_DIR)
    parser.add_argument('--alerts-dir', default=ALERTS_DIR)
    parser.add_argument('--config', default=SMA_INI_FILE)
    parser.add_argument('--warning', help='set logging to warning', action='store_const', dest='loglevel',
                        const=logging.WARNING, default=logging.INFO)
    parser.add_argument('--quiet', help='set logging to ERROR', action='store_const', dest='loglevel',
                        const=logging.ERROR, default=logging.INFO)
    parser.add_argument('--debug', help='set logging to DEBUG',
                        action='store_const', dest='loglevel',
                        const=logging.DEBUG, default=logging.INFO)
    parser.add_argument('--verbose', help='set logging to COMM',
                        action='store_const', dest='loglevel',
                        const=5, default=logging.INFO)
    # Sub-commands; 'one-shot' is the default when none is given.
    parser.sub = parser.add_subparsers()
    parse_service = parser.sub.add_parser('service', help='Run SMA as service (daemon).')
    parse_service.set_defaults(which='service')
    parse_oneshot = parser.sub.add_parser('one-shot', help='Run SMA once and exit')
    parse_oneshot.set_defaults(which='one-shot')
    parse_alerts = parser.sub.add_parser('alerts', help='Alerts options.')
    parse_alerts.set_defaults(which='alerts')
    parse_alerts.add_argument('--test', help='Test alert', action='store_true')
    parse_alerts.add_argument('alert_section', nargs='?', help='Alert section to see')
    parse_results = parser.sub.add_parser('results', help='Monitors results')
    parse_results.set_defaults(which='results')
    parser.set_default_subparser('one-shot')
    # BUG FIX: argv defaults to None, but the original sliced it
    # unconditionally (None[1:] -> TypeError).  Passing None lets
    # argparse use sys.argv[1:] itself.
    args = parser.parse_args(argv[1:] if argv is not None else None)
    create_logger('sma', args.loglevel)
    if not getattr(args, 'which', None) or args.which == 'one-shot':
        sma = SMA(args.monitors_dir, args.alerts_dir, args.config)
        sma.evaluate_and_alert()
    elif args.which == 'service':
        sma = SMAService(args.monitors_dir, args.alerts_dir, args.config)
        sma.start()
    elif args.which == 'alerts' and args.test:
        sma = SMA(args.monitors_dir, args.alerts_dir, args.config)
        sma.alerts.test()
    elif args.which == 'results':
        print(SMA(args.monitors_dir, args.alerts_dir, args.config).results)
"resource": ""
} |
q261456 | _getCallingContext | validation | def _getCallingContext():
"""
Utility function for the RedisLogRecord.
Returns the module, function, and lineno of the function
that called the logger.
We look way up in the stack. The stack at this point is:
[0] logger.py _getCallingContext (hey, that's me!)
[1] logger.py __init__
[2] logger.py makeRecord
[3] _log
[4] <logging method>
[5] caller of logging method
"""
frames = inspect.stack()
if len(frames) > 4:
context = frames[5]
else:
context = frames[0]
modname = context[1]
lineno = context[2]
if context[3]:
funcname = context[3]
else:
funcname = ""
# python docs say you don't want references to
# frames lying around. Bad things can happen.
del context
del frames
return modname, funcname, lineno | python | {
"resource": ""
} |
def format(self, record):
    """
    JSON-encode a record for serializing through redis.

    The ``time`` value is converted to an ISO-8601 string and any
    traceback is rendered to text, since neither is JSON-serializable.
    """
    payload = dict(record._raw)
    # serialize the datetime date as utc string
    payload['time'] = payload['time'].isoformat()
    # stringify exception data
    traceback_data = payload.get('traceback')
    if traceback_data:
        payload['traceback'] = self.formatException(traceback_data)
    return json.dumps(payload)
"resource": ""
} |
def emit(self, record):
    """
    Publish *record* on the configured redis pub/sub channel.

    Redis errors are deliberately swallowed so a logging failure can
    never take the application down.
    """
    try:
        payload = self.format(record)
        self.redis_client.publish(self.channel, payload)
    except redis.RedisError:
        pass
"resource": ""
} |
def emit(self, record):
    """
    Append *record* to the redis logging list.

    When ``max_messages`` is set, the push and the trim to that bound
    run together in a single pipeline.  Redis errors are swallowed so
    logging failures cannot crash callers.
    """
    try:
        entry = self.format(record)
        if not self.max_messages:
            self.redis_client.rpush(self.key, entry)
        else:
            pipe = self.redis_client.pipeline()
            pipe.rpush(self.key, entry)
            pipe.ltrim(self.key, -self.max_messages, -1)
            pipe.execute()
    except redis.RedisError:
        pass
"resource": ""
} |
def require_template_debug(f):
    """
    Decorator: the wrapped template tag is a no-op (renders ``''``)
    unless the ``TEMPLATE_DEBUG`` setting is truthy.

    The setting is read at call time, not at decoration time, so it can
    change between renders.
    """
    from functools import wraps

    # FIX: preserve __name__/__doc__ of the wrapped tag so Django's
    # template-tag registration and introspection see the real function.
    @wraps(f)
    def _(*args, **kwargs):
        TEMPLATE_DEBUG = getattr(settings, 'TEMPLATE_DEBUG', False)
        return f(*args, **kwargs) if TEMPLATE_DEBUG else ''
    return _
"resource": ""
} |
q261461 | _display_details | validation | def _display_details(var_data):
"""
Given a dictionary of variable attribute data from get_details display the
data in the terminal.
"""
meta_keys = (key for key in list(var_data.keys())
if key.startswith('META_'))
for key in meta_keys:
display_key = key[5:].capitalize()
pprint('{0}: {1}'.format(display_key, var_data.pop(key)))
pprint(var_data) | python | {
"resource": ""
} |
def set_trace(context):
    """
    Start a pdb set_trace inside of the template with the context available as
    'context'. Uses ipdb if available.
    """
    try:
        import ipdb as pdb
    except ImportError:
        import pdb
        print("For best results, pip install ipdb.")
    print("Variables that are available in the current context:")
    # Helper meant for use at the debugger prompt: render an ad-hoc
    # template string against the current context.
    render = lambda s: template.Template(s).render(context)
    availables = get_variables(context)
    pprint(availables)
    print('Type `availables` to show this list.')
    print('Type <variable_name> to access one.')
    print('Use render("template string") to test template rendering')
    # Cram context variables into the local scope
    # NOTE(review): assigning into locals() is not guaranteed to create
    # real local variables in CPython; the names may only be visible via
    # the debugger's frame inspection -- confirm before relying on it.
    for var in availables:
        locals()[var] = context[var]
    pdb.set_trace()
    # Template tags must return a string; render nothing.
    return ''
"resource": ""
} |
def pydevd(context):
    """
    Start a pydev settrace
    """
    global pdevd_not_available
    # Once pydevd failed to import or connect, skip the attempt on every
    # subsequent template render instead of retrying each time.
    if pdevd_not_available:
        return ''
    try:
        import pydevd
    except ImportError:
        pdevd_not_available = True
        return ''
    # Helper meant for use inside the debugger: render a template string
    # against the current context.
    render = lambda s: template.Template(s).render(context)
    availables = get_variables(context)
    # NOTE(review): assigning into locals() may not create real local
    # variables in CPython; the names may only be visible through the
    # debugger's frame inspection -- confirm.
    for var in availables:
        locals()[var] = context[var]
    # Catch the case where no debug client is listening.
    try:
        pydevd.settrace()
    except socket.error:
        pdevd_not_available = True
    # Template tags must return a string; render nothing.
    return ''
"resource": ""
} |
q261464 | _flatten | validation | def _flatten(iterable):
"""
Given an iterable with nested iterables, generate a flat iterable
"""
for i in iterable:
if isinstance(i, Iterable) and not isinstance(i, string_types):
for sub_i in _flatten(i):
yield sub_i
else:
yield i | python | {
"resource": ""
} |
q261465 | _get_detail_value | validation | def _get_detail_value(var, attr):
"""
Given a variable and one of its attributes that are available inside of
a template, return its 'method' if it is a callable, its class name if it
is a model manager, otherwise return its value
"""
value = getattr(var, attr)
# Rename common Django class names
kls = getattr(getattr(value, '__class__', ''), '__name__', '')
if kls in ('ManyRelatedManager', 'RelatedManager', 'EmptyManager'):
return kls
if callable(value):
return 'routine'
return value | python | {
"resource": ""
} |
def get_attributes(var):
    """
    Return the list of attribute names of *var* that are accessible
    from inside a template.
    """
    return [attr for attr in dir(var) if is_valid_in_template(var, attr)]
"resource": ""
} |
def is_valid_in_template(var, attr):
    """
    Given a variable and one of its attributes, determine if the attribute is
    accessible inside of a Django template and return True or False accordingly
    """
    # Remove private variables or methods
    if attr.startswith('_'):
        return False
    # Remove any attributes that raise an exception when read.
    # FIX: catch Exception instead of a bare except so KeyboardInterrupt
    # and SystemExit still propagate.
    try:
        value = getattr(var, attr)
    except Exception:
        return False
    if isroutine(value):
        # Remove any routines that are flagged with 'alters_data'
        if getattr(value, 'alters_data', False):
            return False
        else:
            # Remove any routines that require arguments
            try:
                argspec = getargspec(value)
                num_args = len(argspec.args) if argspec.args else 0
                num_defaults = len(argspec.defaults) if argspec.defaults else 0
                # More than one required arg (beyond self) -> not callable
                # from a template.
                if num_args - num_defaults > 1:
                    return False
            except TypeError:
                # C extension callables are routines, but getargspec fails with
                # a TypeError when these are passed.
                pass
    return True
"resource": ""
} |
def parse_log_messages(self, text):
    """
    Parse ``git log`` output in the 'short' format.

    Returns a list of (abbreviated commit hash, author name, message)
    tuples; the author's email address, if present, is stripped.
    """
    pattern = r"commit ([0-9a-f]+)\nAuthor: (.*?)\n\n(.*?)(?:\n\n|$)"
    parsed = []
    for sha, author, message in re.findall(pattern, text, re.DOTALL):
        author_name = re.sub(r"\s*<.*?>", "", author)  # drop email address
        parsed.append((sha[:10], author_name, message.strip()))
    return parsed
"resource": ""
} |
def determine_paths(self, package_name=None, create_package_dir=False, dry_run=False):
    """Determine paths automatically and a little intelligently.

    Sets ``self.project_dir``, ``self.project_name``,
    ``self.package_name`` and ``self.package_dir``.  When *package_name*
    is not given it is guessed from the project name via fuzzy matching
    against the directories under the project (or ``src``) directory.

    Raises CommandError when the package cannot be guessed or its
    directory does not exist (unless *create_package_dir* is set).
    """
    # Give preference to the environment variable here as it will not
    # dereference sym links
    self.project_dir = Path(os.getenv('PWD') or os.getcwd())
    # Try and work out the project name
    distribution = self.get_distribution()
    if distribution:
        # Get name from setup.py
        self.project_name = distribution.get_name()
    else:
        # ...failing that, use the current directory name
        self.project_name = self.project_dir.name
    # Descend into the 'src' directory to find the package
    # if necessary
    if os.path.isdir(self.project_dir / "src"):
        package_search_dir = self.project_dir / "src"
    else:
        package_search_dir = self.project_dir
    created_package_dir = False
    if not package_name:
        # Lets try and work out the package_name from the project_name
        package_name = self.project_name.replace("-", "_")
        # Now do some fuzzy matching
        def get_matches(name):
            # Candidates are the sub-directories of the search dir.
            possibles = [n for n in os.listdir(package_search_dir) if os.path.isdir(package_search_dir / n)]
            return difflib.get_close_matches(name, possibles, n=1, cutoff=0.8)
        close = get_matches(package_name)
        # If no matches, try removing the first part of the package name
        # (e.g. django-guardian becomes guardian)
        if not close and "_" in package_name:
            short_package_name = "_".join(package_name.split("_")[1:])
            close = get_matches(short_package_name)
        if not close:
            if create_package_dir:
                package_dir = package_search_dir / package_name
                # Gets set to true even during dry run
                created_package_dir = True
                if not dry_run:
                    print("Creating package directory at %s" % package_dir)
                    os.mkdir(package_dir)
                else:
                    print("Would have created package directory at %s" % package_dir)
            else:
                raise CommandError("Could not guess the package name. Specify it using --name.")
        else:
            package_name = close[0]
    self.package_name = package_name
    self.package_dir = package_search_dir / package_name
    # During a dry run the directory was never created, so skip the
    # existence check in that case too (created_package_dir is set).
    if not os.path.exists(self.package_dir) and not created_package_dir:
        raise CommandError("Package directory did not exist at %s. Perhaps specify it using --name" % self.package_dir)
"resource": ""
} |
def check_integrity(sakefile, settings):
    """
    Checks the format of the sakefile dictionary
    to ensure it conforms to specification

    Args:
        A dictionary that is the parsed Sakefile (from sake.py)
        The setting dictionary (for print functions)
    Returns:
        True if the Sakefile is conformant
        False if not
    """
    sprint = settings["sprint"]
    error = settings["error"]
    sprint("Call to check_integrity issued", level="verbose")
    if not sakefile:
        error("Sakefile is empty")
        return False
    # checking for duplicate targets
    # NOTE(review): dict keys are necessarily unique, so this condition
    # can never be true; it looks like a holdover from a pre-dict
    # representation of the parsed Sakefile -- confirm.
    if len(sakefile.keys()) != len(set(sakefile.keys())):
        error("Sakefile contains duplicate targets")
        return False
    for target in sakefile:
        if target == "all":
            # 'all' has its own schema, validated with all=True.
            if not check_target_integrity(target, sakefile["all"], all=True):
                error("Failed to accept target 'all'")
                return False
            continue
        if "formula" not in sakefile[target]:
            # No formula means this is a meta-target containing
            # atomic sub-targets; validate it and then each child.
            if not check_target_integrity(target, sakefile[target],
                                          meta=True):
                errmes = "Failed to accept meta-target '{}'".format(target)
                error(errmes)
                return False
            for atom_target in sakefile[target]:
                if atom_target == "help":
                    continue
                if not check_target_integrity(atom_target,
                                              sakefile[target][atom_target],
                                              parent=target):
                    errmes = "Failed to accept target '{}'\n".format(
                        atom_target)
                    error(errmes)
                    return False
            continue
        # Plain (atomic) target.
        if not check_target_integrity(target, sakefile[target]):
            errmes = "Failed to accept target '{}'\n".format(target)
            error(errmes)
            return False
    return True
"resource": ""
} |
def check_shastore_version(from_store, settings):
    """
    Guard against .shastore files written by older, incompatible sake
    versions: if the store is empty or lacks a 'sake version' entry,
    report an error and exit the process.
    """
    sprint = settings["sprint"]
    error = settings["error"]
    sprint("checking .shastore version for potential incompatibilities",
           level="verbose")
    if from_store and 'sake version' in from_store:
        return
    parts = ["Since you've used this project last, a new version of ",
             "sake was installed that introduced backwards incompatible",
             " changes. Run 'sake clean', and rebuild before continuing\n"]
    error(" ".join(parts))
    sys.exit(1)
"resource": ""
} |
def get_sha(a_file, settings=None):
    """
    Return the sha1 hex digest of *a_file*'s contents.

    Reads in 64 KiB blocks so large files are never loaded whole.
    On any read failure, reports through the error function (from
    *settings*, or the module-level ERROR_FN fallback) and exits.
    """
    error = settings["error"] if settings else ERROR_FN
    try:
        hasher = hashlib.sha1()
        with io.open(a_file, "rb") as handle:
            for block in iter(lambda: handle.read(65536), b""):
                hasher.update(block)
        the_hash = hasher.hexdigest()
    except IOError:
        error("File '{}' could not be read! Exiting!".format(a_file))
        sys.exit(1)
    except:
        error("Unspecified error returning sha1 hash. Exiting!")
        sys.exit(1)
    return the_hash
"resource": ""
} |
def write_shas_to_shastore(sha_dict):
    """
    Dump *sha_dict* (the in-memory sha1 mapping) to the ``.shastore``
    YAML file in the current directory, stamped with the sake version.
    """
    # io.open gives consistent text handling on Python 3; plain open is
    # kept for Python 2.
    opener = open if sys.version_info[0] < 3 else io.open
    with opener(".shastore", "w") as store:
        store.write("---\n")
        store.write('sake version: {}\n'.format(constants.VERSION))
        if sha_dict:
            store.write(yaml.dump(sha_dict))
        store.write("...")
"resource": ""
} |
def take_shas_of_all_files(G, settings):
    """
    Takes sha1 hash of all dependencies and outputs of all targets

    Args:
        The graph we are going to build
        The settings dictionary
    Returns:
        A dictionary where the keys are the filenames and the
        value is the sha1 hash
        (implicitly None when there is nothing to hash)
    """
    global ERROR_FN
    sprint = settings["sprint"]
    error = settings["error"]
    # get_sha() falls back to this module-level error function when it is
    # called without a settings dict (as the worker pool below does).
    ERROR_FN = error
    sha_dict = {}
    all_files = []
    for target in G.nodes(data=True):
        sprint("About to take shas of files in target '{}'".format(target[0]),
               level="verbose")
        if 'dependencies' in target[1]:
            sprint("It has dependencies", level="verbose")
            deplist = []
            # Expand any glob patterns in the dependency list; the
            # expansion is written back into the node's data in place.
            for dep in target[1]['dependencies']:
                glist = glob.glob(dep)
                if glist:
                    for oneglob in glist:
                        deplist.append(oneglob)
                else:
                    deplist.append(dep)
            target[1]['dependencies'] = list(deplist)
            for dep in target[1]['dependencies']:
                sprint(" - {}".format(dep), level="verbose")
                all_files.append(dep)
        if 'output' in target[1]:
            sprint("It has outputs", level="verbose")
            for out in acts.get_all_outputs(target[1]):
                sprint(" - {}".format(out), level="verbose")
                all_files.append(out)
    if len(all_files):
        sha_dict['files'] = {}
        # check if files exist and de-dupe
        extant_files = []
        for item in all_files:
            if item not in extant_files and os.path.isfile(item):
                extant_files.append(item)
        # Hash the files in a worker pool; map() preserves input order so
        # results line up with extant_files below.
        pool = Pool()
        results = pool.map(get_sha, extant_files)
        pool.close()
        pool.join()
        for fn, sha in zip(extant_files, results):
            sha_dict['files'][fn] = {'sha': sha}
        return sha_dict
    # Nothing to hash: falls through and returns None.
    sprint("No dependencies", level="verbose")
"resource": ""
} |
def run_commands(commands, settings):
    """
    Runs the commands supplied as an argument
    It will exit the program if the commands return a
    non-zero code

    Args:
        the commands to run
        The settings dictionary
    """
    sprint = settings["sprint"]
    quiet = settings["quiet"]
    error = settings["error"]
    enhanced_errors = True
    the_shell = None
    if settings["no_enhanced_errors"]:
        enhanced_errors = False
    if "shell" in settings:
        the_shell = settings["shell"]
    windows_p = sys.platform == "win32"
    # In quiet mode, capture output so it can be reported only on failure.
    STDOUT = None
    STDERR = None
    if quiet:
        STDOUT = PIPE
        STDERR = PIPE
    commands = commands.rstrip()
    sprint("About to run commands '{}'".format(commands), level="verbose")
    if not quiet:
        sprint(commands)
    if the_shell:
        # A custom shell may carry its own flags (e.g. "bash -o pipefail");
        # split it so the executable and its arguments are separated.
        tmp = shlex.split(the_shell)
        the_shell = tmp[0]
        tmp = tmp[1:]
        if enhanced_errors and not windows_p:
            # '-e' makes the shell abort on the first failing command.
            tmp.append("-e")
        tmp.append(commands)
        commands = tmp
    else:
        if enhanced_errors and not windows_p:
            commands = ["-e", commands]
    # NOTE(review): with shell=True and a *list*, the extra list items are
    # passed as additional arguments to the shell itself -- this is how
    # '-e' reaches the shell here; confirm behavior on all platforms.
    p = Popen(commands, shell=True, stdout=STDOUT, stderr=STDERR,
              executable=the_shell)
    out, err = p.communicate()
    if p.returncode:
        if quiet:
            # Surface the captured stderr that the user never saw.
            error(err.decode(locale.getpreferredencoding()))
        error("Command failed to run")
        sys.exit(1)
"resource": ""
} |
def get_the_node_dict(G, name):
    """
    Return the attribute dict of the node called *name*
    (None if no such node exists).
    """
    for node_name, node_data in G.nodes(data=True):
        if node_name == name:
            return node_data
"resource": ""
} |
def get_direct_ancestors(G, list_of_nodes):
    """
    Collect the immediate predecessors (parents) of every node in
    *list_of_nodes*, duplicates included.
    Used by the parallel topological sort.
    """
    parents = []
    for node in list_of_nodes:
        parents.extend(G.predecessors(node))
    return parents
"resource": ""
} |
def get_sinks(G):
    """
    Return every sink of *G* -- a node with no successors (children).
    Sinks are the end of the line and run last in the topological sort.
    """
    return [node for node in G if not len(list(G.successors(node)))]
"resource": ""
} |
def get_levels(G):
    """
    Group the graph into dependency 'levels' for the parallel topo
    sort: each returned list holds targets with no dependency relation
    among themselves.  Levels are ordered from roots to sinks.
    """
    current = get_sinks(G)
    levels = [current]
    while True:
        parents = get_direct_ancestors(G, current)
        if not parents:
            break
        current = parents
        levels.append(current)
    levels.reverse()
    return levels
"resource": ""
} |
def merge_from_store_and_in_mems(from_store, in_mem_shas, dont_update_shas_of):
    """
    Merge the on-disk sha store into the freshly computed shas.

    Without this merge, building a subgraph would write a .shastore
    containing only the subgraph's files, forcing the rest of the graph
    to rebuild.  Files listed in *dont_update_shas_of* are dropped from
    the result.  Mutates and returns *in_mem_shas*.
    """
    if from_store:
        target = in_mem_shas['files']
        for name in from_store['files']:
            if name not in target and name not in dont_update_shas_of:
                target[name] = from_store['files'][name]
    for name in dont_update_shas_of:
        if name in in_mem_shas['files']:
            del in_mem_shas['files'][name]
    return in_mem_shas
"resource": ""
} |
def find_standard_sakefile(settings):
    """
    Locate the Sakefile to use.

    A user-specified file ('customsake' in *settings*) wins and must
    exist; otherwise the default names are tried in order.  Exits the
    process when nothing usable is found.
    """
    error = settings["error"]
    custom = settings["customsake"]
    if custom:
        if not os.path.isfile(custom):
            error("Specified sakefile '{}' doesn't exist", custom)
            sys.exit(1)
        return custom
    # no custom specified, going over defaults in order
    for candidate in ("Sakefile", "Sakefile.yaml", "Sakefile.yml"):
        if os.path.isfile(candidate):
            return candidate
    error("Error: there is no Sakefile to read")
    sys.exit(1)
"resource": ""
} |
def get_ties(G):
    """
    Find groups of targets that share a dependency ('ties').

    Running only one member of a tie would refresh the shared
    dependency's sha and wrongly convince sake that the other members
    are up to date, so tied targets must always be run together.
    """
    # Map each dependency to every target that consumes it; any
    # dependency with more than one distinct consumer forms a tie.
    users_of = {}
    for name, data in G.nodes(data=True):
        for dep in data.get('dependencies', []):
            users_of.setdefault(dep, []).append(name)
    ties = []
    for users in users_of.values():
        distinct = list(set(users))
        if len(distinct) > 1:
            ties.append(distinct)
    return ties
"resource": ""
} |
def get_tied_targets(original_targets, the_ties):
    """
    Expand *original_targets* with any 'tied' targets that must run
    alongside them.

    Returns (targets, message); the message explains the expansion and
    is empty when no ties apply.
    """
    tied = set()
    for target in original_targets:
        for group in the_ties:
            if target in group:
                tied.update(group)
    if tied:
        lines = ["The following targets share dependencies and must be run together:"]
        lines.extend(" - {}".format(name) for name in sorted(tied))
        message = "\n".join(lines)
        return list(tied.union(original_targets)), message
    return original_targets, ""
"resource": ""
} |
def construct_graph(sakefile, settings):
    """
    Takes the sakefile dictionary and builds a NetworkX graph

    Args:
        A dictionary that is the parsed Sakefile (from sake.py)
        The settings dictionary
    Returns:
        A NetworkX graph
    """
    verbose = settings["verbose"]
    sprint = settings["sprint"]
    G = nx.DiGraph()
    sprint("Going to construct Graph", level="verbose")
    # Pass 1: one node per atomic target (meta-targets are flattened,
    # with each child recording its parent).
    for target in sakefile:
        if target == "all":
            # we don't want this node
            continue
        if "formula" not in sakefile[target]:
            # that means this is a meta target
            for atomtarget in sakefile[target]:
                if atomtarget == "help":
                    continue
                sprint("Adding '{}'".format(atomtarget), level="verbose")
                data_dict = sakefile[target][atomtarget]
                data_dict["parent"] = target
                G.add_node(atomtarget, **data_dict)
        else:
            sprint("Adding '{}'".format(target), level="verbose")
            G.add_node(target, **sakefile[target])
    sprint("Nodes are built\nBuilding connections", level="verbose")
    # Pass 2: normalize node data in place (None -> [], clean paths).
    for node in G.nodes(data=True):
        sprint("checking node {} for dependencies".format(node[0]),
               level="verbose")
        # normalize all paths in output
        for k, v in node[1].items():
            if v is None: node[1][k] = []
        if "output" in node[1]:
            for index, out in enumerate(node[1]['output']):
                node[1]['output'][index] = clean_path(node[1]['output'][index])
        if "dependencies" not in node[1]:
            continue
        sprint("it has dependencies", level="verbose")
        connects = []
        # normalize all paths in dependencies
        # NOTE(review): the normpath result assigned to `dep` and the
        # `connects` list above are never used in this pass -- confirm
        # they are leftovers rather than intended behavior.
        for index, dep in enumerate(node[1]['dependencies']):
            dep = os.path.normpath(dep)
            shrt = "dependencies"
            node[1]['dependencies'][index] = clean_path(node[1][shrt][index])
    # Pass 3: add an edge producer -> consumer for every dependency that
    # matches another target's output.
    for node in G.nodes(data=True):
        connects = []
        if "dependencies" not in node[1]:
            continue
        for dep in node[1]['dependencies']:
            matches = check_for_dep_in_outputs(dep, verbose, G)
            if not matches:
                continue
            for match in matches:
                sprint("Appending {} to matches".format(match), level="verbose")
                connects.append(match)
        if connects:
            for connect in connects:
                G.add_edge(connect, node[0])
    return G
"resource": ""
} |
def clean_all(G, settings):
    """
    Removes all the output files from all targets. Takes
    the graph as the only argument

    Args:
        The networkx graph object
        The settings dictionary
    Returns:
        0 if successful
        1 if removing even one file failed
    """
    # NOTE(review): `quiet` is read but never used below -- confirm
    # whether quiet mode was meant to suppress the per-file output.
    quiet = settings["quiet"]
    recon = settings["recon"]
    sprint = settings["sprint"]
    error = settings["error"]
    all_outputs = []
    for node in G.nodes(data=True):
        if "output" in node[1]:
            for item in get_all_outputs(node[1]):
                all_outputs.append(item)
    # The sha store is regenerated on the next build; remove it as well.
    all_outputs.append(".shastore")
    retcode = 0
    for item in sorted(all_outputs):
        if os.path.isfile(item):
            if recon:
                # Recon (dry-run) mode: report what would happen only.
                sprint("Would remove file: {}".format(item))
                continue
            # NOTE(review): this verbose message never formats the
            # filename into the '{}' placeholder -- likely a missing
            # .format(item).
            sprint("Attempting to remove file '{}'", level="verbose")
            try:
                os.remove(item)
                sprint("Removed file", level="verbose")
            except:
                errmes = "Error: file '{}' failed to be removed"
                error(errmes.format(item))
                retcode = 1
    if not retcode and not recon:
        sprint("All clean", color=True)
    return retcode
"resource": ""
} |
def write_dot_file(G, filename):
    """
    Writes the graph G in dot file format for graphviz visualization.

    Nodes that participate in an edge are drawn via their edge lines;
    only isolated nodes are emitted as standalone entries.

    Args:
        a Networkx graph
        A filename to name the dot files
    """
    with io.open(filename, "w") as fh:
        fh.write("strict digraph DependencyDiagram {\n")
        edge_list = G.edges()
        node_list = set(G.nodes())
        if edge_list:
            for edge in sorted(edge_list):
                source, targ = edge
                # BUG FIX: the old code subtracted set(source)/set(targ),
                # which are sets of *characters*, so any multi-character
                # node with edges was also written as a standalone node.
                node_list.discard(source)
                node_list.discard(targ)
                line = '"{}" -> "{}";\n'
                fh.write(line.format(source, targ))
        # draw nodes with no links
        if node_list:
            for node in sorted(node_list):
                line = '"{}"\n'.format(node)
                fh.write(line)
        fh.write("}")
"resource": ""
} |
def itertable(table):
    """
    Iterate over the rows of a data table.

    Keys are lower-cased and text values NFD-normalized; any entries in
    a row's 'extra' list of the form ``key: value`` are folded into the
    row as additional fields.
    """
    for row in table:
        normalized = {}
        for key, value in row.items():
            normalized[key.lower()] = nfd(value) if isinstance(value, text_type) else value
        for entry in normalized.pop('extra', []):
            key, _, value = entry.partition(':')
            normalized[key.strip()] = value.strip()
        yield normalized
"resource": ""
} |
def _make_package(args):  # pragma: no cover
    """Prepare transcriptiondata from the transcription sources.

    For every 'td' source, writes a <NAME>.tsv mapping source graphemes
    to BIPA, then writes the lingpy sound-class table.
    """
    from lingpy.sequence.sound_classes import token2class
    from lingpy.data import Model
    columns = ['LATEX', 'FEATURES', 'SOUND', 'IMAGE', 'COUNT', 'NOTE']
    bipa = TranscriptionSystem('bipa')
    for src, rows in args.repos.iter_sources(type='td'):
        args.log.info('TranscriptionData {0} ...'.format(src['NAME']))
        uritemplate = URITemplate(src['URITEMPLATE']) if src['URITEMPLATE'] else None
        out = [['BIPA_GRAPHEME', 'CLTS_NAME', 'GENERATED', 'EXPLICIT',
                'GRAPHEME', 'URL'] + columns]
        graphemes = set()
        for row in rows:
            # First occurrence of a grapheme wins; later duplicates are
            # skipped with a warning.
            if row['GRAPHEME'] in graphemes:
                args.log.warn('skipping duplicate grapheme: {0}'.format(row['GRAPHEME']))
                continue
            graphemes.add(row['GRAPHEME'])
            # An explicit BIPA mapping ('+') takes precedence over
            # looking the raw grapheme up in the BIPA system.
            if not row['BIPA']:
                bipa_sound = bipa[row['GRAPHEME']]
                explicit = ''
            else:
                bipa_sound = bipa[row['BIPA']]
                explicit = '+'
            generated = '+' if bipa_sound.generated else ''
            if is_valid_sound(bipa_sound, bipa):
                bipa_grapheme = bipa_sound.s
                bipa_name = bipa_sound.name
            else:
                bipa_grapheme, bipa_name = '<NA>', '<NA>'
            url = uritemplate.expand(**row) if uritemplate else row.get('URL', '')
            out.append(
                [bipa_grapheme, bipa_name, generated, explicit, row['GRAPHEME'],
                 url] + [
                    row.get(c, '') for c in columns])
        # NOTE(review): `out` still contains the header row here, so both
        # the found-count and the percentage include it -- confirm whether
        # that is intended.
        found = len([o for o in out if o[0] != '<NA>'])
        args.log.info('... {0} of {1} graphemes found ({2:.0f}%)'.format(
            found, len(out), found / len(out) * 100))
        with UnicodeWriter(
                pkg_path('transcriptiondata', '{0}.tsv'.format(src['NAME'])), delimiter='\t'
        ) as writer:
            writer.writerows(out)
    count = 0
    with UnicodeWriter(pkg_path('soundclasses', 'lingpy.tsv'), delimiter='\t') as writer:
        writer.writerow(['CLTS_NAME', 'BIPA_GRAPHEME'] + SOUNDCLASS_SYSTEMS)
        # Aliases are skipped so each canonical sound is written once.
        for grapheme, sound in sorted(bipa.sounds.items()):
            if not sound.alias:
                writer.writerow(
                    [sound.name, grapheme] + [token2class(
                        grapheme, Model(cls)) for cls in SOUNDCLASS_SYSTEMS])
                count += 1
    args.log.info('SoundClasses: {0} written to file.'.format(count))
"resource": ""
} |
def is_valid_sound(sound, ts):
    """Check that a sound converts consistently through transcription system ts.

    Markers and unknown sounds are never valid; otherwise the sound is valid
    when looking it up by name and by grapheme yields the same name and the
    same grapheme.
    """
    if isinstance(sound, (Marker, UnknownSound)):
        return False
    by_name = ts[sound.name]
    by_grapheme = ts[sound.s]
    return by_name.name == by_grapheme.name and by_name.s == by_grapheme.s
"resource": ""
} |
def normalize(self, string):
    """Apply the system's normalization table to the NFD-decomposed string."""
    table = self._normalize
    return ''.join(table.get(char, char) for char in nfd(string))
"resource": ""
} |
def _from_name(self, string):
    """Parse a sound from its feature name.

    The name is a space-separated feature bundle whose last token is the
    sound class.  Diphthongs and clusters use the form
    ``from <sound name> to <sound name> <class>`` and are resolved
    recursively.  Raises ValueError for malformed names, unknown features,
    or components that cannot be found or generated.
    """
    components = string.split(' ')
    # Fast path: the exact feature bundle is already known to the system.
    if frozenset(components) in self.features:
        return self.features[frozenset(components)]
    rest, sound_class = components[:-1], components[-1]
    if sound_class in ['diphthong', 'cluster']:
        if string.startswith('from ') and 'to ' in string:
            # Complex sounds are composed of two simple sounds; the simple
            # class is 'vowel' for diphthongs, 'consonant' for clusters.
            extension = {'diphthong': 'vowel', 'cluster': 'consonant'}[sound_class]
            # Strip the leading 'from' and trailing class token, then split
            # the remainder into the two component names.
            string_ = ' '.join(string.split(' ')[1:-1])
            from_, to_ = string_.split(' to ')
            v1, v2 = frozenset(from_.split(' ') + [extension]), frozenset(
                to_.split(' ') + [extension])
            if v1 in self.features and v2 in self.features:
                s1, s2 = (self.features[v1], self.features[v2])
                if sound_class == 'diphthong':
                    return Diphthong.from_sounds(s1 + s2, s1, s2, self) # noqa: F405
                else:
                    return Cluster.from_sounds(s1 + s2, s1, s2, self) # noqa: F405
            else:
                # try to generate the sounds if they are not there
                s1, s2 = self._from_name(from_ + ' ' + extension), self._from_name(
                    to_ + ' ' + extension)
                if not (isinstance(
                        s1, UnknownSound) or isinstance(s2, UnknownSound)): # noqa: F405
                    if sound_class == 'diphthong':
                        return Diphthong.from_sounds( # noqa: F405
                            s1 + s2, s1, s2, self)
                    return Cluster.from_sounds(s1 + s2, s1, s2, self) # noqa: F405
                raise ValueError('components could not be found in system')
        else:
            raise ValueError('name string is erroneously encoded')
    if sound_class not in self.sound_classes:
        raise ValueError('no sound class specified')
    # Map each remaining token to its feature slot; unknown tokens collapse
    # onto the '?' key, which flags the error below.
    args = {self._feature_values.get(comp, '?'): comp for comp in rest}
    if '?' in args:
        raise ValueError('string contains unknown features')
    args['grapheme'] = ''
    args['ts'] = self
    sound = self.sound_classes[sound_class](**args)
    # Sounds not present in the system's feature table are marked as
    # generated on the fly.
    if sound.featureset not in self.features:
        sound.generated = True
        return sound
    return self.features[sound.featureset]
"resource": ""
} |
def iteration(self):
    """Run the iterative proportional fitting loop.

    Dispatches to the DataFrame or ndarray implementation based on the type
    of the original input, iterates until the convergence rate (or its
    change between iterations) drops below the configured thresholds or the
    iteration budget is exhausted, and returns the fitted data plus
    diagnostics depending on ``self.verbose``.
    """
    m = self.original
    # Pick the implementation matching the input container type.
    if isinstance(self.original, pd.DataFrame):
        run_step = self.ipfn_df
    elif isinstance(self.original, np.ndarray):
        run_step = self.ipfn_np
        self.original = self.original.astype('float64')
    else:
        print('Data input instance not recognized')
        sys.exit(0)
    step = 0
    conv = np.inf
    prev_conv = -np.inf
    history = []
    # Stop once either convergence criterion is met or the budget runs out.
    while (step <= self.max_itr and conv > self.conv_rate
            and abs(conv - prev_conv) > self.rate_tolerance):
        prev_conv = conv
        m, conv = run_step(m, self.aggregates, self.dimensions, self.weight_col)
        history.append(conv)
        step += 1
    converged = 1
    if step <= self.max_itr:
        if not conv > self.conv_rate:
            print('ipfn converged: convergence_rate below threshold')
        elif not abs(conv - prev_conv) > self.rate_tolerance:
            print('ipfn converged: convergence_rate not updating or below rate_tolerance')
    else:
        print('Maximum iterations reached')
        converged = 0
    # Shape the return value according to the requested verbosity.
    if self.verbose == 0:
        return m
    if self.verbose == 1:
        return m, converged
    if self.verbose == 2:
        trace = pd.DataFrame({'iteration': range(step), 'conv': history}).set_index('iteration')
        return m, converged, trace
    print('wrong verbose input, return None')
    sys.exit(0)
"resource": ""
} |
def resolve(self, context, quiet=True):
    """
    Traverse ``context`` along ``self.levels`` and return the value found.

    Each level is applied as a dict key, a sequence index, or an attribute.
    Callable attributes are invoked (falling back to the plain attribute on
    KeyError); for model fields with a choice set the ``get_<name>_display``
    accessor is preferred.  With ``quiet`` any failure yields ''.
    """
    try:
        current = context
        for part in self.levels:
            if isinstance(current, dict):
                current = current[part]
            elif isinstance(current, (list, tuple)):
                current = current[int(part)]
            elif callable(getattr(current, part)):
                try:
                    current = getattr(current, part)()
                except KeyError:
                    current = getattr(current, part)
            else:
                # Model fields with a choice set expose get_<name>_display.
                display = 'get_%s_display' % part
                if hasattr(current, display):
                    current = getattr(current, display)()
                else:
                    current = getattr(current, part)
            if not current:
                # A falsy intermediate value short-circuits the walk.
                break
        return current
    except Exception as exc:
        if quiet:
            return ''
        else:
            raise exc
"resource": ""
} |
def get_days_span(self, month_index):
    """
    Return how many of the column's days fall into the month at month_index.
    """
    first = month_index == 0
    last = month_index == self.__len__() - 1
    # Offset the start month by month_index, rolling the year forward when
    # the sum passes December (month 12 maps back via the `or 12` trick).
    year = int(self.start_date.year + (self.start_date.month + month_index) / 13)
    month = int((self.start_date.month + month_index) % 12 or 12)
    days_in_month = calendar.monthrange(year, month)[1]
    if first and last:
        # The whole range lives inside a single month.
        return (self.end_date - self.start_date).days + 1
    if first:
        # Partial first month: from start_date to the month's end.
        return days_in_month - self.start_date.day + 1
    if last:
        # Partial last month: from the month's start to end_date.
        return self.end_date.day
    return days_in_month
"resource": ""
} |
q261495 | _OPC._calculate_float | validation | def _calculate_float(self, byte_array):
"""Returns an IEEE 754 float from an array of 4 bytes
:param byte_array: Expects an array of 4 bytes
:type byte_array: array
:rtype: float
"""
if len(byte_array) != 4:
return None
return struct.unpack('f', struct.pack('4B', *byte_array))[0] | python | {
"resource": ""
} |
q261496 | _OPC._calculate_period | validation | def _calculate_period(self, vals):
''' calculate the sampling period in seconds '''
if len(vals) < 4:
return None
if self.firmware['major'] < 16:
return ((vals[3] << 24) | (vals[2] << 16) | (vals[1] << 8) | vals[0]) / 12e6
else:
return self._calculate_float(vals) | python | {
"resource": ""
} |
def calculate_bin_boundary(self, bb):
    """Return the ADC value whose lookup-table diameter is closest to bb.

    :param bb: Bin boundary in microns
    :type bb: float
    :rtype: int
    """
    closest = min(enumerate(OPC_LOOKUP), key=lambda pair: abs(pair[1] - bb))
    return closest[0]
"resource": ""
} |
def read_info_string(self):
    """Read the OPC's firmware information string over SPI.

    :rtype: string
    :Example:
    >>> alpha.read_info_string()
    'OPC-N2 FirmwareVer=OPC-018.2....................BD'
    """
    # Issue the "read info string" command byte, then give the device 9 ms.
    self.cnxn.xfer([0x3F])
    sleep(9e-3)
    # The device clocks the 60-character string out one byte per transfer
    # of an empty (0x00) byte.
    chars = [chr(self.cnxn.xfer([0x00])[0]) for _ in range(60)]
    sleep(0.1)
    return ''.join(chars)
"resource": ""
} |
def ping(self):
    """Check the connection between the host and the OPC.

    :rtype: Boolean
    """
    # Send the check-status command byte; 0xF3 signals a healthy device.
    response = self.cnxn.xfer([0xCF])[0]
    sleep(0.1)
    return response == 0xF3
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.