code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def is_same_transform(matrix0, matrix1):
    """Return True if two matrices perform the same transformation.

    Both inputs are copied, promoted to float64 and normalized by their
    [3, 3] element before comparison, so homogeneous matrices differing
    only by a scalar factor compare equal.

    >>> is_same_transform(np.identity(4), np.identity(4))
    True
    >>> is_same_transform(np.identity(4), random_rotation_matrix())
    False
    """
    first = np.array(matrix0, dtype=np.float64, copy=True)
    second = np.array(matrix1, dtype=np.float64, copy=True)
    first /= first[3, 3]
    second /= second[3, 3]
    return np.allclose(first, second)
def AddATR(self, readernode, atr):
capchild = self.AppendItem(readernode, atr)
self.SetPyData(capchild, None)
self.SetItemImage(
capchild, self.cardimageindex, wx.TreeItemIcon_Normal)
self.SetItemImage(
capchild, self.cardimageindex, wx.TreeItemIcon_Expanded)
... | Add an ATR to a reader node. |
def decrease_step(self) -> str:
    """Decrease the jog resolution without overrunning the list of values.

    Moves the step index one position down unless it is already at the
    first entry, then reports the (possibly unchanged) current step.
    """
    if self._steps_index > 0:
        self._steps_index -= 1
    return 'step: {}'.format(self.current_step())
def authenticate_admin(self, transport, account_name, password):
Authenticator.authenticate_admin(self, transport, account_name, password)
auth_token = AuthToken()
auth_token.account_name = account_name
params = {sconstant.E_NAME: account_name,
sconstant.E_PASSWORD: pas... | Authenticates administrator using username and password. |
def AIC_compare(aic_list):
    """Calculate delta AIC and Akaike weights from a list of AIC values.

    Parameters
    ----------
    aic_list : iterable
        AIC values from a set of candidate models.

    Returns
    -------
    tuple
        First element contains the delta AIC values, second element
        contains the relative AIC weights.
    """
    scores = np.array(aic_list)
    delta = scores - scores.min()
    # Relative likelihood of each model given its delta AIC.
    likelihoods = np.exp(-delta / 2)
    weights = likelihoods / likelihoods.sum()
    return delta, weights
def getCachedOrUpdatedValue(self, key, channel=None):
    """Get the value stored under `key`, querying the host on a cache miss.

    If `channel` is given, the lookup is delegated to that channel's own
    getCachedOrUpdatedValue. A value fetched from the host via `getValue`
    is stored back into the cache before being returned.
    """
    if channel:
        return self._hmchannels[channel].getCachedOrUpdatedValue(key)
    if key in self._VALUES:
        return self._VALUES[key]
    fetched = self._VALUES[key] = self.getValue(key)
    return fetched
async def get(self) -> InfoDict:
    """Wait for the next dictionary of information from the engine and
    return it.

    It might be more convenient to use ``async for info in analysis: ...``.

    :raises: :exc:`chess.engine.AnalysisComplete` if the analysis is
        complete (or has been stopped) and all information has been
        consumed.
    """
    if self._seen_kork:
        raise AnalysisComplete()
    info = await self._queue.get()
    if info:
        return info
    # A falsy item is the sentinel ("kork") marking the end of the stream.
    self._seen_kork = True
    await self._finished
    raise AnalysisComplete()
def reload(script, input, output):
    """Reload the generator script whenever the script file or any of the
    input files changes.

    :param script: path of the generator script
    :param input: a single input path or a list/tuple of input paths
    :param output: output directory; created if it does not already exist
    """
    script_path = Path(script).expand().abspath()
    output_path = Path(output).expand().abspath()
    inputs = input if isinstance(input, (list, tuple)) else [input]
    output_path.makedirs_p()
    _script_reload(script_path, inputs, output_path)
def as_completed(jobs):
jobs = tuple(jobs)
event = threading.Event()
callback = lambda f, ev: event.set()
[job.add_listener(Job.SUCCESS, callback, once=True) for job in jobs]
[job.add_listener(Job.ERROR, callback, once=True) for job in jobs]
while jobs:
event.wait()
event.clear()
jobs, finished ... | Generator function that yields the jobs in order of their
completion. Attaches a new listener to each job. |
def read(self, pos, size, **kwargs):
    """Read some data from the file, storing it into memory.

    :param pos:  The address to write the read data into memory
    :param size: The requested length of the read
    :return:     The real length of the read
    """
    content, real_len = self.read_data(size, **kwargs)
    nothing_read = self.state.solver.is_true(real_len == 0)
    if not nothing_read:
        self.state.memory.store(pos, content, size=real_len)
    return real_len
def _clear(self):
    """Helper that clears the composition by painting the device's
    bounding box black on the background image."""
    painter = ImageDraw.Draw(self._background_image)
    painter.rectangle(self._device.bounding_box, fill="black")
    del painter
def dtstr_to_datetime(dtstr, to_tz=None, fail_silently=True):
try:
dt = datetime.datetime.utcfromtimestamp(int(dtstr, 36) / 1e3)
if to_tz:
dt = timezone.make_aware(dt, timezone=pytz.UTC)
if to_tz != pytz.UTC:
dt = dt.astimezone(to_tz)
return dt
exc... | Convert result from datetime_to_dtstr to datetime in timezone UTC0. |
def download_data_dictionary(request, dataset_id):
dataset = Dataset.objects.get(pk=dataset_id)
dataDict = dataset.data_dictionary
fields = DataDictionaryField.objects.filter(
parent_dict=dataDict
).order_by('columnIndex')
response = HttpResponse(content_type='text/csv')
csvName = slugif... | Generates and returns compiled data dictionary from database.
Returned as a CSV response. |
def adam7_generate(width, height):
    """Generate the coordinates for the reduced scanlines of an Adam7
    interlaced image of size `width` by `height` pixels.

    Yields a generator for each pass, and each pass generator yields a
    series of (x, y, xstep) triples, each one identifying a reduced
    scanline consisting of pixels starting at (x, y).
    """
    for xstart, ystart, xstep, ystep in adam7:
        # A pass contributes no pixels when its first column lies
        # outside the image.
        if xstart >= width:
            continue
        yield ((xstart, row, xstep) for row in range(ystart, height, ystep))
def calculate_equinoxes(self, year, timezone='UTC'):
tz = pytz.timezone(timezone)
d1 = ephem.next_equinox(str(year))
d = ephem.Date(str(d1))
equinox1 = d.datetime() + tz.utcoffset(d.datetime())
d2 = ephem.next_equinox(d1)
d = ephem.Date(str(d2))
equinox2 = d.datet... | calculate equinox with time zone |
def segment_to_vector(self, seg):
    """Given a Unicode IPA segment, return a list of feature
    specifications in canonical order.

    Args:
        seg (unicode): IPA consonant or vowel

    Returns:
        list: feature specifications ('+'/'-'/'0') in the order from
        `FeatureTable.names`
    """
    spec_by_feature = dict((feature, spec) for (spec, feature) in self.fts(seg))
    return [spec_by_feature[feature] for feature in self.names]
def get_prefixes(self, query):
try:
res = Prefix.smart_search(query, {})
except socket.error:
print >> sys.stderr, "Connection refused, please check hostname & port"
sys.exit(1)
except xmlrpclib.ProtocolError:
print >> sys.stderr, "Authentication f... | Get prefix data from NIPAP |
def to_paginated_list(self, result, _ns, _operation, **kwargs):
items, context = self.parse_result(result)
headers = dict()
paginated_list = PaginatedList(
items=items,
_page=self,
_ns=_ns,
_operation=_operation,
_context=context,
... | Convert a controller result to a paginated list.
The result format is assumed to meet the contract of this page class's `parse_result` function. |
def _create_job(self, mapping):
job_id = self.bulk.create_insert_job(mapping["sf_object"], contentType="CSV")
self.logger.info(" Created bulk job {}".format(job_id))
local_ids_for_batch = {}
for batch_file, local_ids in self._get_batches(mapping):
batch_id = self.bulk.post_b... | Initiate a bulk insert and upload batches to run in parallel. |
def get_entry_view(self, key):
    """Return the EntryView for the specified key.

    **Warning:** this method returns a clone of the original mapping;
    modifying the returned value does not change the actual value in the
    map. Put the modified value back to make changes visible to all nodes.

    :param key: the key whose entry view is requested; must not be None
    """
    check_not_none(key, "key can't be None")
    key_data = self._to_data(key)
    return self._encode_invoke_on_key(
        map_get_entry_view_codec, key_data, key=key_data,
        thread_id=thread_id())
def font_extents(self):
    """Return the extents of the currently selected font.

    Values are given in the current user-space coordinate system.

    :return: tuple ``(ascent, descent, height, max_x_advance,
        max_y_advance)``
    """
    result = ffi.new('cairo_font_extents_t *')
    cairo.cairo_font_extents(self._pointer, result)
    self._check_status()
    return (result.ascent, result.descent, result.height,
            result.max_x_advance, result.max_y_advance)
def get_item_query_session_for_bank(self, bank_id):
    """Get the ``OsidSession`` associated with the item query service for
    the given bank.

    arg:    bank_id (osid.id.Id): the ``Id`` of the bank
    return: (osid.assessment.ItemQuerySession) - an ``ItemQuerySession``
    raise:  NotFound - ``bank_id`` not found
    raise:  Unimplemented - item querying is not supported
    """
    if self.supports_item_query():
        return sessions.ItemQuerySession(bank_id, runtime=self._runtime)
    raise errors.Unimplemented()
def init_layer(self):
    """Initialize the layer object from the underlying vector dataset and
    allocate an (initially empty) feature cache with one slot per feature.

    Returns
    -------
    None
    """
    self.layer = self.vector.GetLayer()
    self.__features = [None for _ in range(self.nfeatures)]
def _create_dataset(
self, group, chunk_size, compression, compression_opts):
if chunk_size == 'auto':
chunks = True
else:
per_chunk = (
nb_per_chunk(20, 1, chunk_size) if self.dtype == np.dtype('O')
else nb_per_chunk(
... | Create an empty dataset in a group. |
def update(self, item):
if item.matrix not in self.data:
self.data[item.matrix] = []
result = Select(self.data[item.matrix]).where(
lambda entry: entry.stage == item.stage).build()
if len(result) > 0:
stage = result[0]
stage.status = item.status
... | Add a collector item.
Args:
item (CollectorUpdate): event data like stage, timestampe and status. |
def get_mimetype(path):
    """Guess the mime type of a file. If the mime type cannot be detected,
    plain text is assumed.

    :param path: path of the file
    :return: the corresponding mime type.
    """
    filename = os.path.split(path)[1]
    guessed = mimetypes.guess_type(filename)[0]
    # Fall back to plain text when the extension is unknown.
    mimetype = 'text/x-plain' if guessed is None else guessed
    _logger().debug('mimetype detected: %s', mimetype)
    return mimetype
def ssl_server_options():
cafile = options.ssl_ca_cert
keyfile = options.ssl_key
certfile = options.ssl_cert
verify_mode = options.ssl_cert_reqs
try:
context = ssl.create_default_context(
purpose=ssl.Purpose.CLIENT_AUTH, cafile=cafile)
context.load_cert_chain(certfile=cer... | ssl options for tornado https server
these options are defined in each application's default.conf file
if left empty, use the self generated keys and certificates included
in this package.
this function is backward compatible with python version lower than
2.7.9 where ssl.SSLContext is not available... |
def stop_polling(self):
    """Break the long-polling process.

    Safe to call even when polling was never started.
    :return: None
    """
    if getattr(self, '_polling', False):
        log.info('Stop polling...')
        self._polling = False
def click_element_at_coordinates(self, coordinate_X, coordinate_Y):
    """Click (press and release) at a certain screen coordinate.

    :param coordinate_X: x position in pixels
    :param coordinate_Y: y position in pixels
    """
    self._info("Pressing at (%s, %s)." % (coordinate_X, coordinate_Y))
    touch = TouchAction(self._current_application())
    touch.press(x=coordinate_X, y=coordinate_Y).release().perform()
def make_relationship(self, relator,
direction=
RELATIONSHIP_DIRECTIONS.BIDIRECTIONAL):
if IEntity.providedBy(relator):
rel = DomainRelationship(relator, self,
direction=direction)
elif IResource.provi... | Create a relationship object for this attribute from the given
relator and relationship direction. |
def get_resource_retriever(url):
    """Get the appropriate retriever object for the specified url based on
    its scheme.

    For HTTP(S) urls returns an ``HttpResourceRetriever``; assumes HTTP
    urls do not require any special authorization.

    NOTE(review): the original docstring also mentions s3:// urls, but no
    S3 branch exists here -- confirm whether S3 support lives elsewhere.

    :param url: url of the resource to be retrieved
    :return: a retriever instance for the url
    :raises ValueError: if the url scheme is not supported
    """
    if url.startswith(('http://', 'https://')):
        return HttpResourceRetriever(url)
    raise ValueError('Unsupported scheme in url: %s' % url)
def union(self, x, y):
repr_x = self.find(x)
repr_y = self.find(y)
if repr_x == repr_y:
return False
if self.rank[repr_x] == self.rank[repr_y]:
self.rank[repr_x] += 1
self.up[repr_y] = repr_x
elif self.rank[repr_x] > self.rank[repr_y]:
... | Merges part that contain x and part containing y
:returns: False if x, y are already in same part
:complexity: O(inverse_ackerman(n)) |
def _publish_date(self, item):
url = item['url']
html = deepcopy(item['spider_response'].body)
publish_date = None
try:
if html is None:
request = urllib2.Request(url)
html = urllib2.build_opener().open(request).read()
html = Beauti... | Returns the publish_date of the extracted article. |
def get_base(self):
    """Get the single base at this position.

    :returns: base, taken from the query when this is a query-type
        observable, otherwise from the target
    :rtype: char
    """
    source = self._observable
    if self._type == 'query':
        return source.get_query_base()
    return source.get_target_base()
def check_key(key, allowed):
    """Validate that the specified key is allowed according to the
    provided list of patterns.

    :param key: the key to validate
    :param allowed: iterable of literal keys and/or fnmatch-style patterns
    :return: True when the key matches a literal entry or a pattern
    """
    return key in allowed or any(fnmatch(key, pattern) for pattern in allowed)
def _gen_ticket(prefix=None, lg=settings.CAS_TICKET_LEN):
    """Generate a ticket with prefix ``prefix`` and length ``lg``.

    CAS tickets (ST, PT, PGT, PGTIOU) are bearer credentials, so the
    random part is drawn from a cryptographically secure source
    (``random.SystemRandom``, backed by ``os.urandom``) instead of the
    default, predictable Mersenne Twister generator.

    NOTE(review): when ``prefix`` is None the generated ticket has length
    ``lg - 1`` (the slot reserved for the dash is never filled) -- kept
    as-is for compatibility, but confirm this is intended.

    :param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU)
    :param int lg: The length of the generated ticket (with the prefix)
    :return: A randomly generated ticket of length ``lg``
    :rtype: unicode
    """
    secure_random = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    # Reserve room for the prefix and the joining dash.
    random_part = u''.join(
        secure_random.choice(alphabet)
        for _ in range(lg - len(prefix or "") - 1)
    )
    if prefix is not None:
        return u'%s-%s' % (prefix, random_part)
    else:
        return random_part
def by_occupied_housing_units(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.occupied_housing_units.name,
asc... | Search zipcode information by occupied house of units. |
def add_bookmark(self, time):
if self.annot is None:
msg = 'No score file loaded'
lg.debug(msg)
error_dialog = QErrorMessage()
error_dialog.setWindowTitle('Error adding bookmark')
error_dialog.showMessage(msg)
error_dialog.exec()
... | Run this function when user adds a new bookmark.
Parameters
----------
time : tuple of float
start and end of the new bookmark, in s |
def fetch_cvparams_values_from_subel(base, subelname, paramnames, ns):
sub_el = basereader.find_element_xpath(base, subelname, ns)
cvparams = get_all_cvparams(sub_el, ns)
output = []
for param in paramnames:
output.append(fetch_cvparam_value_by_name(cvparams, param))
return output | Searches a base element for subelement by name, then takes the
cvParams of that subelement and returns the values as a list
for the paramnames that match. Value order in list equals input
paramnames order. |
def query_filter(query):
try:
return {'operation': int(query)}
except ValueError:
pass
if isinstance(query, string_types):
query = query.strip()
for operation in KNOWN_OPERATIONS:
if query.startswith(operation):
query = "%s %s" % (operation, query[... | Translate a query-style string to a 'filter'.
Query can be the following formats:
Case Insensitive
'value' OR '*= value' Contains
'value*' OR '^= value' Begins with value
'*value' OR '$= value' Ends with value
'*value*' OR '_= value' Contains value
Case Sensitive
'~ ... |
def midPoint(self, point):
x = (self.x + point.x)/2.0
y = (self.y + point.y)/2.0
z = (self.z + point.z)/2.0
return MapPoint(x,y,z) | identify the midpoint between two mapPoints |
def legacy_signature(**kwargs_mapping):
def signature_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
redirected_kwargs = {
kwargs_mapping[k] if k in kwargs_mapping else k: v
for k, v in kwargs.items()
}
return f(*args, **redi... | This decorator makes it possible to call a function using old argument names
when they are passed as keyword arguments.
@legacy_signature(old_arg1='arg1', old_arg2='arg2')
def func(arg1, arg2=1):
return arg1 + arg2
func(old_arg1=1) == 2
func(old_arg1=1, old_arg2=2) == 3 |
def on_hover(self, callback, remove=False):
    """Register (or unregister) a hover callback.

    The hover callback takes an unpacked set of keyword arguments.

    :param callback: callable invoked on hover events
    :param remove: when True, unregister the callback instead
    """
    registry = self._hover_callbacks
    registry.register_callback(callback, remove=remove)
def longitude(self, longitude):
    """Setter for longitude.

    :param longitude: value in degrees; must lie in [-180, 180]
    :raises ValueError: if the value is out of range
    """
    if -180 <= longitude <= 180:
        self._longitude = longitude
    else:
        raise ValueError('longitude was {}, but has to be in [-180, 180]'
                         .format(longitude))
def get_invocation_command_nodefault(
toolset, tool, user_provided_command=[], additional_paths=[], path_last=False):
assert isinstance(toolset, basestring)
assert isinstance(tool, basestring)
assert is_iterable_typed(user_provided_command, basestring)
assert is_iterable_typed(additional_paths, base... | A helper rule to get the command to invoke some tool. If
'user-provided-command' is not given, tries to find binary named 'tool' in
PATH and in the passed 'additional-path'. Otherwise, verifies that the first
element of 'user-provided-command' is an existing program.
This rule returns t... |
def setDefaultColorRamp(self, colorRampEnum=ColorRampEnum.COLOR_RAMP_HUE):
    """Set the color ramp to one of the default generated ramps.

    NOTE(review): the previous docstring said this *returns* the ramp as
    a list of RGB tuples, but the method only stores the generated ramp
    on ``self._colorRamp``.

    :param colorRampEnum: which predefined ramp to generate
        (defaults to the HUE ramp)
    """
    self._colorRamp = ColorRampGenerator.generateDefaultColorRamp(colorRampEnum)
def unshare_project(project_id, usernames,**kwargs):
user_id = kwargs.get('user_id')
proj_i = _get_project(project_id)
proj_i.check_share_permission(user_id)
for username in usernames:
user_i = _get_user(username)
proj_i.unset_owner(user_i.id, write=write, share=share)
db.DBSession.f... | Un-share a project with a list of users, identified by their usernames. |
def close_multicast_socket(sock, address):
if sock is None:
return
if address:
mreq = make_mreq(sock.family, address)
if sock.family == socket.AF_INET:
sock.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, mreq)
elif sock.family == socket.AF_INET6:
... | Cleans up the given multicast socket.
Unregisters it of the multicast group.
Parameters should be the result of create_multicast_socket
:param sock: A multicast socket
:param address: The multicast address used by the socket |
def get_source_by_name(self, name):
srcs = self.get_sources_by_name(name)
if len(srcs) == 1:
return srcs[0]
elif len(srcs) == 0:
raise Exception('No source matching name: ' + name)
elif len(srcs) > 1:
raise Exception('Multiple sources matching name: ' ... | Return a single source in the ROI with the given name. The
input name string can match any of the strings in the names
property of the source object. Case and whitespace are
ignored when matching name strings. If no sources are found
or multiple sources then an exception is thrown.
... |
def flush(self):
if self.triggered and len(self.buffer) > 0:
text = []
for record in self.buffer:
terminator = getattr(record, 'terminator', '\n')
s = self.format(record)
if terminator is not None:
text.append(s + termin... | Send messages by e-mail.
The sending of messages is suppressed if a trigger severity
level has been set and none of the received messages was at
that level or above. In that case the messages are
discarded. Empty e-mails are discarded. |
def reset_default_props(**kwargs):
global _DEFAULT_PROPS
pcycle = plt.rcParams['axes.prop_cycle']
_DEFAULT_PROPS = {
'color': itertools.cycle(_get_standard_colors(**kwargs))
if len(kwargs) > 0 else itertools.cycle([x['color'] for x in pcycle]),
'marker': itertools.cycle(['o', 'x', '.... | Reset properties to initial cycle point |
def reset(self):
    """Reset the style cycle.

    Rebuilds a fresh ``itertools.cycle`` iterator for every key so that
    iteration starts over from the beginning of each value list.

    :return: self, to allow chaining
    """
    for name in list(self.keys()):
        self.iterators[name] = _itertools.cycle(self[name])
    return self
def get_object(self, view_kwargs, qs=None):
self.before_get_object(view_kwargs)
id_field = getattr(self, 'id_field', inspect(self.model).primary_key[0].key)
try:
filter_field = getattr(self.model, id_field)
except Exception:
raise Exception("{} has no attribute {}... | Retrieve an object through sqlalchemy
:params dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object from sqlalchemy |
def stop_workers(self, _join_arbiter=True):
    """Stop the workers and wait for them to terminate.

    Shutdown proceeds in dependency order: signal the stop event, stop
    the worker pool, wait for the result notifier thread, then stop the
    broker. Finally the arbiter thread is joined and internal state is
    reset so the instance can be started again.

    :param _join_arbiter: skip joining the arbiter when False --
        presumably so the arbiter can call this itself without
        deadlocking on its own join; confirm against callers.
    """
    self._must_stop.set()
    self._workers.stop()
    self._result_notifier.join()
    self._broker.stop()
    if _join_arbiter:
        self._arbiter.join()
    self._reset()
def resume_trial(self, trial):
    """Resume a PAUSED trial. This is a blocking call.

    :param trial: trial to resume; must currently be in ``Trial.PAUSED``
        status (enforced with an assert, so not checked under ``-O``)
    """
    assert trial.status == Trial.PAUSED, trial.status
    self.start_trial(trial)
def RemoveEventHandler(self, wb):
    """Remove an event handler (watch block).

    Logs a UCS warning instead of raising when the handler is unknown.
    """
    from UcsBase import WriteUcsWarning
    if wb not in self._wbs:
        WriteUcsWarning("Event handler not found")
        return
    self._remove_watch_block(wb)
async def shutdown(self):
if self.log_output:
logging.info('Shutting down ...')
else:
print('Shutting down ...')
await self.send_reset()
try:
self.loop.stop()
except:
pass
try:
self.loop.close()
except:
... | This method attempts an orderly shutdown
If any exceptions are thrown, just ignore them.
:returns: No return value |
def clone_repo(self):
tempdir_path = tempfile.mkdtemp()
if self.args.git:
self.log.debug('Cloning git source repository from %s to %s',
self.source, tempdir_path)
self.sh('git clone', self.source, tempdir_path)
else:
raise NotImpleme... | Clone a repository containing the dotfiles source. |
def create_pipe(backend_p):
    """Create a pipe, which consists of two PAIR sockets connected over
    inproc. The pipe is configured to use the zsys_pipehwm setting.
    Returns the frontend socket if successful, NULL if failed.

    The backend socket is written into ``backend_p``, which is passed by
    reference to the underlying C call.
    """
    return Zsock(lib.zsys_create_pipe(byref(zsock_p.from_param(backend_p))), False)
def config():
    """Load system configuration.

    Reads ``config/ipsv.conf`` from the installed ips_vagrant package
    directory.

    @rtype: ConfigParser
    """
    parser = ConfigParser()
    package_dir = os.path.dirname(os.path.realpath(ips_vagrant.__file__))
    parser.read(os.path.join(package_dir, 'config/ipsv.conf'))
    return parser
async def sound(dev: Device, target, value):
    """Get or set sound settings.

    When both ``target`` and ``value`` are given the setting is updated
    first; the current sound settings are always printed afterwards.
    """
    if target and value:
        click.echo("Setting %s to %s" % (target, value))
        result = await dev.set_sound_settings(target, value)
        click.echo(result)
    current = await dev.get_sound_settings()
    print_settings(current)
def Gaussian_filter(x, sigma, norm=True):
    r"""Gaussian filter.

    This method implements a Gaussian filter.

    Parameters
    ----------
    x : float
        Input data point
    sigma : float
        Standard deviation (filter scale)
    norm : bool
        Option to return normalised data. Default (norm=True)

    Returns
    -------
    float
        Gaussian filtered data point
    """
    x = check_float(x)
    sigma = check_float(sigma)
    kernel = np.exp(-0.5 * (x / sigma) ** 2)
    if norm:
        # Normalise so the filter integrates to one.
        return kernel / (np.sqrt(2 * np.pi) * sigma)
    return kernel
def sample(self, bqm, beta_range=None, num_reads=10, num_sweeps=1000):
if not isinstance(num_reads, int):
raise TypeError("'samples' should be a positive integer")
if num_reads < 1:
raise ValueError("'samples' should be a positive integer")
h, J, offset = bqm.to_ising()
... | Sample from low-energy spin states using simulated annealing.
Args:
bqm (:obj:`.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
beta_range (tuple, optional): Beginning and end of the beta schedule
(beta is the inverse temperature) as a... |
def getPWMFrequency(self, device=DEFAULT_DEVICE_ID, message=True):
    """Get the PWM frequency stored on the hardware device.

    NOTE(review): the previous docstring described the "motor shutdown on
    error status" -- that appears copy-pasted from a sibling method; this
    one delegates to ``_getPWMFrequency``.

    :Keywords:
      device : `int`
        The device is the integer number of the hardware device's ID and
        is only used with the Pololu Protocol. Defaults to the hardware's
        default value.
      message : `bool`
        Presumably toggles returning a human-readable message rather
        than the raw value -- confirm against ``_getPWMFrequency``.
    """
    return self._getPWMFrequency(device, message)
def parse_pdb_ligand_info(self, pdb_ligand_info):
mtchs = re.findall('(<ligand.*?</ligand>)', pdb_ligand_info, re.DOTALL)
for m in mtchs:
if m.upper().find('CHEMICALID="{0}"'.format(self.PDBCode.upper())) != -1:
ligand_type = re.match('<ligand.*?\stype="(.*?)".*?>', m, re.DOT... | This only parses the ligand type as all the other information should be in the .cif file. The XML file has
proper capitalization whereas the .cif file uses all caps for the ligand type. |
def edit_imagefindpars():
    """Allow the user to edit the imagefindpars configObj in a TEAL GUI.

    The GUI auto-closes on exit; the task is loaded for editing only and
    cannot be executed from the GUI.
    """
    teal.teal(imagefindpars.__taskname__, returnAs=None,
              autoClose=True, loadOnly=False, canExecute=False)
def count(self, path):
try:
res = self.get_bite().count(self.list_path(path)).next()
dir_count = res['directoryCount']
file_count = res['fileCount']
content_size = res['spaceConsumed']
except StopIteration:
dir_count = file_count = content_size... | Use snakebite.count, if available.
:param path: directory to count the contents of
:type path: string
:return: dictionary with content_size, dir_count and file_count keys |
def _boosted_value(name, action, key, value, boost):
    """Boost a value if we should in _process_queries.

    Returns ``{name: value}`` unchanged when no boost is requested;
    otherwise wraps the value with the boost factor, using the ``query``
    key for match-type actions and ``value`` for the rest.
    """
    if boost is None:
        return {name: value}
    value_key = 'query' if action in MATCH_ACTIONS else 'value'
    return {name: {'boost': boost, value_key: value}}
def search_file(search_root, search_filename,
instance_relative_root=False):
if instance_relative_root:
search_root = os.path.join(current_app.instance_path, search_root)
file_path = None
file_ext = None
for file in os.listdir(search_root):
fil... | Search for a filename in a specific search root dir.
:param search_root: root dir to search
:param search_filename: filename to search (no extension)
:param instance_relative_root: search root is relative to instance path
:return: tuple(full_file_path, extension without heading dot) |
def map_tree(visitor, tree):
    """Apply ``visitor`` to every node of the tree, bottom-up.

    Each node's children (found on ``node.nodes``) are transformed first;
    the visitor then receives the original node together with the list of
    transformed children and returns the replacement node.

    :param visitor: callable ``(node, new_children) -> new_node``
    :param tree: root node of the tree
    :return: the visitor's result for the root
    """
    transformed = [map_tree(visitor, child) for child in tree.nodes]
    return visitor(tree, transformed)
def get_initial(self, form, name):
    """Return the initial data that was passed into the superform for
    this composite field.

    Returns ``None`` when the form carries no ``initial`` mapping or the
    mapping has no entry for ``name``.
    """
    if not hasattr(form, 'initial'):
        return None
    return form.initial.get(name, None)
def tidy(fnames):
for fname in fnames:
try:
node = nrml.read(fname)
except ValueError as err:
print(err)
return
with open(fname + '.bak', 'wb') as f:
f.write(open(fname, 'rb').read())
with open(fname, 'wb') as f:
nrml.write(... | Reformat a NRML file in a canonical form. That also means reducing the
precision of the floats to a standard value. If the file is invalid,
a clear error message is shown. |
def children(self):
    """This browse node's children in the browse node tree.

    :return:
        A list of this browse node's children in the browse node tree.
    """
    child_nodes = getattr(self.parsed_response, 'Children')
    return [AmazonBrowseNode(node)
            for node in getattr(child_nodes, 'BrowseNode', [])]
def add_pr_curve(self, tag, labels, predictions, num_thresholds,
global_step=None, weights=None):
if num_thresholds < 2:
raise ValueError('num_thresholds must be >= 2')
labels = _make_numpy_array(labels)
predictions = _make_numpy_array(predictions)
self._... | Adds precision-recall curve.
Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs.
Since `asnumpy()` is a blocking function call, this function would block the main
thread till it returns. It may consequently affect the performance of async execution
of the MXNet ... |
def get_folders(self):
    """Return a list of all folders for this account.

    Returns:
        List[:class:`Folder <pyOutlook.core.folder.Folder>`]

    NOTE(review): implicitly returns ``None`` when ``check_response`` is
    falsy -- presumably check_response raises on failure; confirm.
    """
    endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/'
    response = requests.get(endpoint, headers=self._headers)
    if check_response(response):
        return Folder._json_to_folders(self, response.json())
def error(message, code=1):
    """Print an error message to stderr and exit.

    :param message: text to print; a falsy message prints a blank line
    :param code: process exit status (defaults to 1)
    """
    text = 'ERROR: {0}'.format(message) if message else ''
    print(text, file=sys.stderr)
    sys.exit(code)
def open(self, page, parms=None, payload=None, HTTPrequest=None):
    """Open a page from the server with optional content and return the
    string response body.
    """
    raw_response = self.open_raw(page, parms, payload, HTTPrequest)
    return raw_response.read()
def default_preference_list(self, prefs):
    """Set the default preference list.

    :param str prefs: A string containing the default preferences for
        ciphers, digests, and compression algorithms. Ignored when
        validation yields None.
    """
    validated = _check_preferences(prefs)
    if validated is not None:
        self._prefs = validated
def createStaticLibBuilder(env):
try:
static_lib = env['BUILDERS']['StaticLibrary']
except KeyError:
action_list = [ SCons.Action.Action("$ARCOM", "$ARCOMSTR") ]
if env.get('RANLIB',False) or env.Detect('ranlib'):
ranlib_action = SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMS... | This is a utility function that creates the StaticLibrary
Builder in an Environment if it is not there already.
If it is already there, we return the existing one. |
def raise_for_missing_namespace(self, line: str, position: int, namespace: str, name: str) -> None:
    """Raise an exception if the namespace is not defined."""
    if self.has_namespace(namespace):
        return
    raise UndefinedNamespaceWarning(self.get_line_number(), line, position, namespace, name)
def is_empty(value, msg=None, except_=None, inc_zeros=True):
if hasattr(value, 'empty'):
value = not bool(value.empty)
elif inc_zeros and value in ZEROS:
value = True
else:
pass
_is_null = is_null(value, except_=False)
result = bool(_is_null or not value)
if except_:
... | is defined, but null or empty like value |
def update(self, other):
if not isinstance(other, CtsTextgroupMetadata):
raise TypeError("Cannot add %s to CtsTextgroupMetadata" % type(other))
elif str(self.urn) != str(other.urn):
raise InvalidURN("Cannot add CtsTextgroupMetadata %s to CtsTextgroupMetadata %s " % (self.urn, oth... | Merge two Textgroup Objects.
- Original (left Object) keeps his parent.
- Added document merges with work if it already exists
:param other: Textgroup object
:type other: CtsTextgroupMetadata
:return: Textgroup Object
:rtype: CtsTextgroupMetadata |
def discover() -> List[Tuple[str, str]]:
if IS_ROBOT and os.path.isdir('/dev/modules'):
devices = os.listdir('/dev/modules')
else:
devices = []
discovered_modules = []
module_port_regex = re.compile('|'.join(MODULE_TYPES.keys()), re.I)
for port in devices:
match = module_port... | Scan for connected modules and instantiate handler classes |
def run_pipeline(pipeline,
context,
pipeline_context_input=None,
parse_input=True):
logger.debug("starting")
try:
if parse_input:
logger.debug("executing context_parser")
prepare_context(pipeline=pipeline,
... | Run the specified pypyr pipeline.
This function runs the actual pipeline. If you are running another
pipeline from within a pipeline, call this, not main(). Do call main()
instead for your 1st pipeline if there are pipelines calling pipelines.
Pipeline and context should be already loaded.
Args:
... |
def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs):
plt = pretty_plot(**kwargs)
pp = np.polyfit(x, y, deg)
xp = np.linspace(min(x), max(x), 200)
plt.plot(xp, np.polyval(pp, xp), 'k--', x, y, 'o')
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
... | Convenience method to plot data with trend lines based on polynomial fit.
Args:
x: Sequence of x data.
y: Sequence of y data.
deg (int): Degree of polynomial. Defaults to 1.
xlabel (str): Label for x-axis.
ylabel (str): Label for y-axis.
\\*\\*kwargs: Keyword args pa... |
def DEFAULT_NULLVALUE(test):
    """Return a null value for each of various kinds of test values.

    **Parameters**

        **test** : bool, int, float or string
            Value to test.

    **Returns**

        **null** : element in `[False, 0, 0.0, '']`
            Null value corresponding to the given test value's type.
            bool is checked before int because bool is an int subclass;
            any unrecognised type falls through to the empty string.
    """
    if isinstance(test, bool):
        return False
    if isinstance(test, int):
        return 0
    if isinstance(test, float):
        return 0.0
    return ''
def fit(self, sequences, y=None):
    """Fit a BACE lumping model using a sequence of cluster assignments.

    Parameters
    ----------
    sequences : list(np.ndarray(dtype='int'))
        List of arrays of cluster assignments
    y : None
        Unused, present for sklearn compatibility only.

    Returns
    -------
    self
    """
    super(BACE, self).fit(sequences, y=y)
    if self.n_macrostates is None:
        raise RuntimeError('n_macrostates must not be None to fit')
    self._do_lumping()
    return self
def _handle_join_dags(self, request):
if request.payload['names'] is None:
send_response = len(self._dags_running) <= 1
else:
send_response = all([name not in self._dags_running.keys()
for name in request.payload['names']])
if send_respons... | The handler for the join_dags request.
If dag names are given in the payload only return a valid Response if none of
the dags specified by the names are running anymore. If no dag names are given,
wait for all dags except one, which by design is the one that issued the request,
to be fi... |
def as_dict(self):
out = {}
for prop in self:
propval = getattr(self, prop)
if hasattr(propval, 'for_json'):
out[prop] = propval.for_json()
elif isinstance(propval, list):
out[prop] = [getattr(x, 'for_json', lambda:x)() for x in propval... | Return a dictionary containing the current values
of the object.
Returns:
(dict): The object represented as a dictionary |
def list_udfs(self, database=None, like=None):
if not database:
database = self.current_database
statement = ddl.ListFunction(database, like=like, aggregate=False)
with self._execute(statement, results=True) as cur:
result = self._get_udfs(cur, udf.ImpalaUDF)
retu... | Lists all UDFs associated with given database
Parameters
----------
database : string
like : string for searching (optional) |
def get_query_cache_key(compiler):
    """Generate a cache key from a SQLCompiler.

    The key is derived from the rendered SQL, its parameters, and the
    database alias in use, so the same query against the same database
    always yields the same key while different databases yield
    different keys.

    :arg compiler: A SQLCompiler that will generate the SQL query
    :return: hex SHA-1 digest string identifying the query + context
    """
    sql, params = compiler.as_sql()
    check_parameter_types(params)
    rendered_params = [text_type(p) for p in params]
    raw_key = '%s:%s:%s' % (compiler.using, sql, rendered_params)
    return sha1(raw_key.encode('utf-8')).hexdigest()
This cache key is specific to the SQL query and its context
(which database is used). The same query in the same context
(= the same database) must generate the same cache key.
:arg compiler: A SQLCompiler that will generate the SQL query
:type compiler: ... |
def extract_feature_dependent_feature(self, extractor, force_extraction=False, verbose=0, add_args=None,
custom_name=None):
if self._prepopulated is False:
raise errors.EmptyDatabase(self.dbpath)
else:
return extract_feature_dependent_fea... | Extracts a feature which may be dependent on other features and stores it in the database
Parameters
----------
extractor : function, which takes the path of a data point, a dictionary of all other features and *args as
parameters and returns a feature
force_extraction : boolean... |
def empty_bar_plot(ax):
    """Remove every x-axis tick and tick label from *ax* and return it."""
    plt.sca(ax)
    current_axes = plt.gca()
    plt.setp(current_axes, xticks=[], xticklabels=[])
    return ax
def register_file(self, filepath, creator, status=FileStatus.no_file, flags=FileFlags.no_flags):
try:
file_handle = self.get_handle(filepath)
raise KeyError("File %s already exists in archive" % filepath)
except KeyError:
pass
localpath = self._get_localpath(f... | Register a file in the archive.
If the file already exists, this raises a `KeyError`
Parameters
----------
filepath : str
The path to the file
creator : int
A unique key for the job that created this file
status : `FileStatus`
Enu... |
def describe(self):
    """Describe the method.

    Collects the public metadata attributes of this object into a
    plain dictionary.

    :return: Description
    :rtype: dict[str, object]
    """
    description = {}
    for key in ("name", "params", "returns", "description"):
        description[key] = getattr(self, key)
    return description
:return: Description
:rtype: dict[str, object] |
def calc(self, x:Image, *args:Any, **kwargs:Any)->Image:
    "Apply to image `x`, wrapping it if necessary."
    wrapper = self._wrap
    if not wrapper:
        return self.func(x, *args, **kwargs)
    # `_wrap` names a method on the image; dispatch through it.
    return getattr(x, wrapper)(self.func, *args, **kwargs)
def _verify_credentials(self):
r = requests.get(self.apiurl + "account/verify_credentials.xml",
auth=HTTPBasicAuth(self._username, self._password),
headers=self.header)
if r.status_code != 200:
raise UserLoginFailed("Username or Password inco... | An internal method that verifies the credentials given at instantiation.
:raises: :class:`Pymoe.errors.UserLoginFailed` |
def ConnectNoSSL(host='localhost', port=443, user='root', pwd='',
service="hostd", adapter="SOAP", namespace=None, path="/sdk",
version=None, keyFile=None, certFile=None, thumbprint=None,
b64token=None, mechanism='userpass'):
if hasattr(ssl, '_create_unverified_cont... | Provides a standard method for connecting to a specified server without SSL
verification. Useful when connecting to servers with self-signed certificates
or when you wish to ignore SSL altogether. Will attempt to create an unverified
SSL context and then connect via the Connect method. |
def load_and_print_resfile(filename, info_dict=None):
if info_dict is None:
info_dict = dict()
info_dict["mass"] = 1.23
info_dict["nom_cap"] = 3600
info_dict["tot_mass"] = 2.33
d = CellpyData()
print("filename:", filename)
print("info_dict in:", end=' ')
print(info_di... | Load a raw data file and print information.
Args:
filename (str): name of the resfile.
info_dict (dict):
Returns:
info (str): string describing something. |
def is_templatetags_module_valid_constant(node):
    """Suppress warnings for valid constants in templatetags module.

    Returns True only for a node named ``register`` whose enclosing
    module lives under a ``templatetags.`` package path.
    """
    if node.name != 'register':
        return False
    # Walk up the AST until the enclosing Module node is reached.
    ancestor = node.parent
    while not isinstance(ancestor, Module):
        ancestor = ancestor.parent
    return "templatetags." in ancestor.name
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.