code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def covered_interval(bin):
if bin < 0 or bin > MAX_BIN:
raise OutOfRangeError(
'Invalid bin number %d (maximum bin number is %d)'
% (bin, MAX_BIN))
shift = SHIFT_FIRST
for offset in BIN_OFFSETS:
if offset <= bin:
return bin - offset << shift, bin + 1 - offset << shift
shift += SHIFT_NEXT | Given a bin number `bin`, return the interval covered by this bin.
:arg int bin: Bin number.
:return: Tuple of `start, stop` being the zero-based, open-ended interval
covered by `bin`.
:rtype: tuple(int)
:raise OutOfRangeError: If bin number `bin` exceeds the maximum bin
number. |
def MP(candidate, references, n):
counts = Counter(ngrams(candidate, n))
if not counts:
return 0
max_counts = {}
for reference in references:
reference_counts = Counter(ngrams(reference, n))
for ngram in counts:
max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items())
return sum(clipped_counts.values()) / sum(counts.values()) | calculate modified precision |
def _get_managed_files(self):
if self.grains_core.os_data().get('os_family') == 'Debian':
return self.__get_managed_files_dpkg()
elif self.grains_core.os_data().get('os_family') in ['Suse', 'redhat']:
return self.__get_managed_files_rpm()
return list(), list(), list() | Build a in-memory data of all managed files. |
def get_all_units(self, params=None):
if not params:
params = {}
return self._iterate_through_pages(self.get_units_per_page, resource=UNITS, **{'params': params}) | Get all units
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list |
def _define_jco_args(cmd_parser):
jo_group = cmd_parser.add_argument_group('Job options', 'Job configuration options')
jo_group.add_argument('--job-name', help='Job name')
jo_group.add_argument('--preload', action='store_true', help='Preload job onto all resources in the instance')
jo_group.add_argument('--trace', choices=['error', 'warn', 'info', 'debug', 'trace'], help='Application trace level')
jo_group.add_argument('--submission-parameters', '-p', nargs='+', action=_SubmitParamArg, help="Submission parameters as name=value pairs")
jo_group.add_argument('--job-config-overlays', help="Path to file containing job configuration overlays JSON. Overrides any job configuration set by the application." , metavar='file')
return jo_group, | Define job configuration arguments.
Returns groups defined, currently one. |
def _process_docstrings(self, doc, members, add=True):
if ((doc.doctype == "member" or doc.doctype == "local") and
doc.pointsto is not None and
doc.pointsto in members):
if add:
members[doc.pointsto].docstring.append(doc)
else:
members[doc.pointsto].overwrite_docs(doc)
return True
else:
return False | Adds the docstrings from the list of DocElements to their
respective members.
Returns true if the doc element belonged to a member. |
def set_set_point(self, param):
if self.temperature_low_limit <= param <= self.temperature_high_limit:
self.set_point_temperature = param
return "" | Sets the target temperature.
:param param: The new temperature in C. Must be positive.
:return: Empty string. |
def check_path(path, credentials=None):
from thunder.readers import get_file_reader
reader = get_file_reader(path)(credentials=credentials)
existing = reader.list(path, directories=True)
if existing:
raise ValueError('Path %s appears to already exist. Specify a new directory, '
'or call with overwrite=True to overwrite.' % path) | Check that specified output path does not already exist
The ValueError message will suggest calling with overwrite=True;
this function is expected to be called from the various output methods
that accept an 'overwrite' keyword argument. |
def get_os_filename (path):
if os.name == 'nt':
path = prepare_urlpath_for_nt(path)
res = urllib.url2pathname(fileutil.pathencode(path))
if os.name == 'nt' and res.endswith(':') and len(res) == 2:
res += os.sep
return res | Return filesystem path for given URL path. |
def metadata(self):
if self.__metadata__ is None:
metadata = {}
for test_run in self.test_runs.defer(None).all():
for key, value in test_run.metadata.items():
metadata.setdefault(key, [])
if value not in metadata[key]:
metadata[key].append(value)
for key in metadata.keys():
if len(metadata[key]) == 1:
metadata[key] = metadata[key][0]
else:
metadata[key] = sorted(metadata[key], key=str)
self.__metadata__ = metadata
return self.__metadata__ | The build metadata is the union of the metadata in its test runs.
Common keys with different values are transformed into a list with each
of the different values. |
def linear_trend(x, param):
linReg = linregress(range(len(x)), x)
return [("attr_\"{}\"".format(config["attr"]), getattr(linReg, config["attr"]))
for config in param] | Calculate a linear least-squares regression for the values of the time series versus the sequence from 0 to
length of the time series minus one.
This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model.
The parameters control which of the characteristics are returned.
Possible extracted attributes are "pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of
linregress for more information.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"attr": x} with x an string, the attribute name of the regression model
:type param: list
:return: the different feature values
:return type: pandas.Series |
def _process_response(self, response: Response):
_logger.debug('Handling response')
self._redirect_tracker.load(response)
if self._redirect_tracker.is_redirect():
self._process_redirect()
self._loop_type = LoopType.redirect
elif response.status_code == http.client.UNAUTHORIZED and self._next_request.password:
self._process_authentication(response)
else:
self._next_request = None
self._loop_type = LoopType.normal
if self._cookie_jar:
self._extract_cookies(response)
if self._next_request:
self._add_cookies(self._next_request) | Handle the response and update the internal state. |
def detect_nexson_version(blob):
n = get_nexml_el(blob)
assert isinstance(n, dict)
return n.get('@nexml2json', BADGER_FISH_NEXSON_VERSION) | Returns the nexml2json attribute or the default code for badgerfish |
def saveToFile(self,imageObjectList):
virtual = imageObjectList[0].inmemory
for key in self.masklist.keys():
filename = self.masknames[key]
newHDU = fits.PrimaryHDU()
newHDU.data = self.masklist[key]
if virtual:
for img in imageObjectList:
img.saveVirtualOutputs({filename:newHDU})
else:
try:
newHDU.writeto(filename, overwrite=True)
log.info("Saving static mask to disk: %s" % filename)
except IOError:
log.error("Problem saving static mask file: %s to "
"disk!\n" % filename)
raise IOError | Saves the static mask to a file
it uses the signatures associated with each
mask to contruct the filename for the output mask image. |
def add_all_from_dict(self, dictionary, **kwargs):
for name, procedure in dictionary.items():
self.add(name, procedure, **kwargs) | Batch-add function implementations to the library.
:param dictionary: A mapping from name to procedure class, i.e. the first two arguments to add()
:param kwargs: Any additional kwargs will be passed to the constructors of _each_ procedure class |
def ReadCronJobs(self, cronjob_ids=None, cursor=None):
query = ("SELECT job, UNIX_TIMESTAMP(create_time), enabled, "
"forced_run_requested, last_run_status, "
"UNIX_TIMESTAMP(last_run_time), current_run_id, state, "
"UNIX_TIMESTAMP(leased_until), leased_by "
"FROM cron_jobs")
if cronjob_ids is None:
cursor.execute(query)
return [self._CronJobFromRow(row) for row in cursor.fetchall()]
query += " WHERE job_id IN (%s)" % ", ".join(["%s"] * len(cronjob_ids))
cursor.execute(query, cronjob_ids)
res = []
for row in cursor.fetchall():
res.append(self._CronJobFromRow(row))
if len(res) != len(cronjob_ids):
missing = set(cronjob_ids) - set([c.cron_job_id for c in res])
raise db.UnknownCronJobError("CronJob(s) with id(s) %s not found." %
missing)
return res | Reads all cronjobs from the database. |
def apply(self, df):
if hasattr(self.definition, '__call__'):
r = self.definition(df)
elif self.definition in df.columns:
r = df[self.definition]
elif not isinstance(self.definition, string_types):
r = pd.Series(self.definition, index=df.index)
else:
raise ValueError("Invalid column definition: %s" % str(self.definition))
return r.astype(self.astype) if self.astype else r | Takes a pd.DataFrame and returns the newly defined column, i.e.
a pd.Series that has the same index as `df`. |
def stop_deployment(awsclient, deployment_id):
log.info('Deployment: %s - stopping active deployment.', deployment_id)
client_codedeploy = awsclient.get_client('codedeploy')
response = client_codedeploy.stop_deployment(
deploymentId=deployment_id,
autoRollbackEnabled=True
) | stop tenkai deployment.
:param awsclient:
:param deployment_id: |
def rows(self):
for line in self.text.splitlines():
yield tuple(self.getcells(line)) | Returns an iterator of row tuples. |
def image(self):
if self.bands == 1:
return self.data.squeeze()
elif self.bands == 3:
return numpy.dstack(self.data) | An Image like array of ``self.data`` convenient for image processing tasks
* 2D array for single band, grayscale image data
* 3D array for three band, RGB image data
Enables working with ``self.data`` as if it were a PIL image.
See https://planetaryimage.readthedocs.io/en/latest/usage.html to see
how to open images to view them and make manipulations. |
def _get_matching_dns_entry_ids(self, identifier=None, rtype=None,
name=None, content=None):
record_ids = []
if not identifier:
records = self._list_records(rtype, name, content)
record_ids = [record['id'] for record in records]
else:
record_ids.append(identifier)
return record_ids | Return a list of DNS entries that match the given criteria. |
def is_namedtuple(type_: Type[Any]) -> bool:
return _issubclass(type_, tuple) and hasattr(type_, '_field_types') and hasattr(type_, '_fields') | Generated with typing.NamedTuple |
def get_pan_rect(self):
wd, ht = self.get_window_size()
win_pts = np.asarray([(0, 0), (wd, 0), (wd, ht), (0, ht)])
arr_pts = self.tform['data_to_window'].from_(win_pts)
return arr_pts | Get the coordinates in the actual data corresponding to the
area shown in the display for the current zoom level and pan.
Returns
-------
points : list
Coordinates in the form of
``[(x0, y0), (x1, y1), (x2, y2), (x3, y3)]``
from lower-left to lower-right. |
def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv:
envs = DummyVecEnv([self._creation_function(i, seed, preset) for i in range(parallel_envs)])
if self.frame_history is not None:
envs = VecFrameStack(envs, self.frame_history)
return envs | Create vectorized environments |
def do_version():
v = ApiPool.ping.model.Version(
name=ApiPool().current_server_name,
version=ApiPool().current_server_api.get_version(),
container=get_container_version(),
)
log.info("/version: " + pprint.pformat(v))
return v | Return version details of the running server api |
def load(store):
store = normalize_store_arg(store)
if contains_array(store, path=None):
return Array(store=store, path=None)[...]
elif contains_group(store, path=None):
grp = Group(store=store, path=None)
return LazyLoader(grp) | Load data from an array or group into memory.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
Returns
-------
out
If the store contains an array, out will be a numpy array. If the store contains
a group, out will be a dict-like object where keys are array names and values
are numpy arrays.
See Also
--------
save, savez
Notes
-----
If loading data from a group of arrays, data will not be immediately loaded into
memory. Rather, arrays will be loaded into memory as they are requested. |
def wait_send(self, timeout = None):
self._send_queue_cleared.clear()
self._send_queue_cleared.wait(timeout = timeout) | Wait until all queued messages are sent. |
def from_jd(jd):
jd += 0.5
z = trunc(jd)
a = z
b = a + 1524
c = trunc((b - 122.1) / 365.25)
d = trunc(365.25 * c)
e = trunc((b - d) / 30.6001)
if trunc(e < 14):
month = e - 1
else:
month = e - 13
if trunc(month > 2):
year = c - 4716
else:
year = c - 4715
day = b - d - trunc(30.6001 * e)
return (year, month, day) | Calculate Julian calendar date from Julian day |
def download_directory(self, bucket, key, directory, transfer_config=None, subscribers=None):
check_io_access(directory, os.W_OK)
return self._queue_task(bucket, [FilePair(key, directory)], transfer_config,
subscribers, enumAsperaDirection.RECEIVE) | download a directory using Aspera |
def ping():
try:
curl_couchdb('/cozy/')
ping = True
except requests.exceptions.ConnectionError, error:
print error
ping = False
return ping | Ping CozyDB with existing credentials |
def send_mass_user_template_mail(subject_template, body_template, users, context):
message_tuples = []
for user in users:
context['user'] = user
subject, body = render_mail_template(subject_template, body_template, context)
message_tuples.append((subject, body, conf.get('DEFAULT_FROM_EMAIL'), [user.email]))
if message_tuples:
send_mass_mail(message_tuples) | Similar to `send_mass_template_mail` this function renders the given templates
into email subjects and bodies.
The emails are send one-by-one. |
def _complete_cases(self, text, line, istart, iend):
if text == "":
return list(self.live.keys())
else:
return [c for c in self.live if c.startswith(text)] | Returns the completion list of possible test cases for the active unit test. |
def setup_remoteckan(self, remoteckan=None, **kwargs):
if remoteckan is None:
self._remoteckan = self.create_remoteckan(self.get_hdx_site_url(), full_agent=self.get_user_agent(),
**kwargs)
else:
self._remoteckan = remoteckan | Set up remote CKAN from provided CKAN or by creating from configuration
Args:
remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration.
Returns:
None |
def create_lbaas_l7rule(self, l7policy, body=None):
return self.post(self.lbaas_l7rules_path % l7policy, body=body) | Creates rule for a certain L7 policy. |
def scale(self, image, size, crop, options):
original_size = self.get_image_size(image)
factor = self._calculate_scaling_factor(original_size, size, crop is not None)
if factor < 1 or options['scale_up']:
width = int(original_size[0] * factor)
height = int(original_size[1] * factor)
image = self.engine_scale(image, width, height)
return image | Wrapper for ``engine_scale``, checks if the scaling factor is below one or that scale_up
option is set to True before calling ``engine_scale``.
:param image:
:param size:
:param crop:
:param options:
:return: |
def _get_expiration(self, headers: dict) -> int:
expiration_str = headers.get('expires')
if not expiration_str:
return 0
expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')
delta = (expiration - datetime.utcnow()).total_seconds()
return math.ceil(abs(delta)) | Gets the expiration time of the data from the response headers.
Args:
headers: dictionary of headers from ESI
Returns:
value of seconds from now the data expires |
def get_hist(rfile, histname, get_overflow=False):
import root_numpy as rnp
rfile = open_rfile(rfile)
hist = rfile[histname]
xlims = np.array(list(hist.xedges()))
bin_values = rnp.hist2array(hist, include_overflow=get_overflow)
rfile.close()
return bin_values, xlims | Read a 1D Histogram. |
def _read_data(self):
while True:
try:
data = yield from self._socket.recv()
except asyncio.CancelledError:
break
except ConnectionClosed:
break
self._push_packet(data)
self._loop.call_soon(self.close) | Reads data from the connection and adds it to _push_packet,
until the connection is closed or the task in cancelled. |
def getTypedValueNoExceptions(self, row):
return wrapply(self.type, wrapply(self.getValue, row)) | Returns the properly-typed value for the given row at this column.
Returns the type's default value if either the getter or the type conversion fails. |
def assert_is_lat(val):
assert type(val) is float or type(val) is int, "Value must be a number"
if val < -90.0 or val > 90.0:
raise ValueError("Latitude value must be between -90 and 90") | Checks it the given value is a feasible decimal latitude
:param val: value to be checked
:type val: int of float
:returns: `None`
:raises: *ValueError* if value is out of latitude boundaries, *AssertionError* if type is wrong |
def set_widgets(self):
last_layer = self.parent.layer and self.parent.layer.id() or None
self.lblDescribeCanvasHazLayer.clear()
self.list_compatible_canvas_layers()
self.auto_select_one_item(self.lstCanvasHazLayers)
if last_layer:
layers = []
for index in range(self.lstCanvasHazLayers.count()):
item = self.lstCanvasHazLayers.item(index)
layers += [item.data(Qt.UserRole)]
if last_layer in layers:
self.lstCanvasHazLayers.setCurrentRow(layers.index(last_layer))
hazard = self.parent.step_fc_functions1.selected_value(
layer_purpose_hazard['key'])
icon_path = get_image_path(hazard)
self.lblIconIFCWHazardFromCanvas.setPixmap(QPixmap(icon_path)) | Set widgets on the Hazard Layer From TOC tab. |
def latexpdf():
rc = latex()
print('Running LaTeX files through pdflatex...')
builddir = os.path.join(BUILDDIR, 'latex')
subprocess.call(['make', '-C', builddir, 'all-pdf'])
print('pdflatex finished; the PDF files are in {}.'.format(builddir)) | make LaTeX files and run them through pdflatex |
def modInv(a, m):
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0 | returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1 |
def rgb_to_cmy(r, g=None, b=None):
if type(r) in [list,tuple]:
r, g, b = r
return (1-r, 1-g, 1-b) | Convert the color from RGB coordinates to CMY.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (c, m, y) tuple in the range:
c[0...1],
m[0...1],
y[0...1]
>>> rgb_to_cmy(1, 0.5, 0)
(0, 0.5, 1) |
def swaplevel(self, i=-2, j=-1, copy=True):
new_index = self.index.swaplevel(i, j)
return self._constructor(self._values, index=new_index,
copy=copy).__finalize__(self) | Swap levels i and j in a MultiIndex.
Parameters
----------
i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
Series
Series with levels swapped in MultiIndex.
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index. |
def easeOutBounce(n):
_checkRange(n)
if n < (1/2.75):
return 7.5625 * n * n
elif n < (2/2.75):
n -= (1.5/2.75)
return 7.5625 * n * n + 0.75
elif n < (2.5/2.75):
n -= (2.25/2.75)
return 7.5625 * n * n + 0.9375
else:
n -= (2.65/2.75)
return 7.5625 * n * n + 0.984375 | A bouncing tween function that hits the destination and then bounces to rest.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). |
def view_package_path(self, package: str) -> _PATH:
if package not in self.view_packgets_list():
raise NoSuchPackageException(
f'There is no such package {package!r}.')
output, _ = self._execute(
'-s', self.device_sn, 'shell', 'pm', 'path', package)
return output[8:-1] | Print the path to the APK of the given. |
def _parse_state_file(state_file_path='terraform.tfstate'):
ret = {}
with salt.utils.files.fopen(state_file_path, 'r') as fh_:
tfstate = salt.utils.json.load(fh_)
modules = tfstate.get('modules')
if not modules:
log.error('Malformed tfstate file. No modules found')
return ret
for module in modules:
resources = module.get('resources', [])
for resource_name, resource in salt.ext.six.iteritems(resources):
roster_entry = None
if resource['type'] == 'salt_host':
roster_entry = _handle_salt_host_resource(resource)
if not roster_entry:
continue
minion_id = roster_entry.get(MINION_ID, resource.get('id'))
if not minion_id:
continue
if MINION_ID in roster_entry:
del roster_entry[MINION_ID]
_add_ssh_key(roster_entry)
ret[minion_id] = roster_entry
return ret | Parses the terraform state file passing different resource types to the right handler |
def check_if_numbers_are_consecutive(list_):
return all((True if second - first == 1 else False
for first, second in zip(list_[:-1], list_[1:]))) | Returns True if numbers in the list are consecutive
:param list_: list of integers
:return: Boolean |
def read_ascii_catalog(filename, format_, unit=None):
catalog = ascii.read(filename, format=format_)
columns = catalog.columns
if 'RA' in columns and 'Dec' in columns:
if unit is None:
unit = (hour, degree)
coords = SkyCoord(catalog['RA'],
catalog['Dec'],
unit=unit,
frame='icrs')
elif 'Lat' in columns and 'Lon' in columns:
if unit is None:
unit = (degree, degree)
coords = SkyCoord(catalog['Lon'],
catalog['Lat'],
unit=unit,
frame='galactic')
else:
raise Exception('columns RA,Dec or Lon,Lat not found')
return coords | Read an ASCII catalog file using Astropy.
This routine is used by pymoctool to load coordinates from a
catalog file in order to generate a MOC representation. |
def get_option_float(self, name, section=None, vars=None, expect=None):
val = self.get_option(name, section, vars, expect)
if val:
return float(val) | Just like ``get_option`` but parse as a float. |
def setdefault(self, key, default=None):
self._wlock.acquire()
try:
try:
return self[key]
except KeyError:
self[key] = default
return default
finally:
self._wlock.release() | Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key. |
def _display_status(normalized_data, stream):
if 'Pull complete' in normalized_data['status'] or 'Download complete' in normalized_data['status']:
stream.write("\n")
if 'id' in normalized_data:
stream.write("%s - " % normalized_data['id'])
stream.write("{0}\n".format(normalized_data['status'])) | print status message from docker-py stream. |
def add_attachment(self, attachment):
self._attachments = self._ensure_append(attachment, self._attachments) | Add an attachment to this email
:param attachment: Add an attachment to this email
:type attachment: Attachment |
def copy_dir(self, path):
for directory in path:
if os.path.isdir(path):
full_path = os.path.join(self.archive_dir, directory.lstrip('/'))
logger.debug("Copying %s to %s", directory, full_path)
shutil.copytree(directory, full_path)
else:
logger.debug("Not a directory: %s", directory)
return path | Recursively copy directory |
def background_chart(chart, foreground, colors):
def convert_background(entry):
try:
attr = urwid.AttrSpec(foreground, entry, colors)
except urwid.AttrSpecError:
return None
if colors > 16 and attr.background_basic and \
attr.background_number >= 8:
entry = 'h%d'%attr.background_number
attr = urwid.AttrSpec(foreground, entry, colors)
return attr, entry
return parse_chart(chart, convert_background) | Create text markup for a background colour chart
chart -- palette chart as string
foreground -- colour to use for foreground of chart
colors -- number of colors (88 or 256)
This will remap 8 <= colour < 16 to high-colour versions
in the hopes of greater compatibility |
def _add_list_row(self, feature_list, key=None):
if len(feature_list) > len(self._column_name_list):
raise IndexError("Input list must have %s columns or less" % len(self._column_name_list))
self._row_memo = {}
if key is not None:
if key in self._row_name_idx:
self._rows[self._row_name_idx[key]] = feature_list
return
else:
self._row_name_idx[key] = len(self._rows)
self._row_name_list.append(key)
self._rows.append(feature_list) | Add a list row to the matrix
:param str key: key used when rows is a dict rather than an array
:param feature_list: a list of features in the same order as column_names
:raise IndexError: if the list doesnt match the expected number of columns |
def crop_bbox_by_coords(bbox, crop_coords, crop_height, crop_width, rows, cols):
bbox = denormalize_bbox(bbox, rows, cols)
x_min, y_min, x_max, y_max = bbox
x1, y1, x2, y2 = crop_coords
cropped_bbox = [x_min - x1, y_min - y1, x_max - x1, y_max - y1]
return normalize_bbox(cropped_bbox, crop_height, crop_width) | Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop. |
def ParseFileObject(self, parser_mediator, file_object):
filename = parser_mediator.GetFilename()
if not filename.startswith('INFO2'):
return
file_header_map = self._GetDataTypeMap('recycler_info2_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
'Unable to parse Windows Recycler INFO2 file header with '
'error: {0!s}').format(exception))
if file_header.unknown1 != 5:
parser_mediator.ProduceExtractionWarning('unsupported format signature.')
return
file_entry_size = file_header.file_entry_size
if file_entry_size not in (280, 800):
parser_mediator.ProduceExtractionWarning(
'unsupported file entry size: {0:d}'.format(file_entry_size))
return
file_offset = file_object.get_offset()
file_size = file_object.get_size()
while file_offset < file_size:
self._ParseInfo2Record(
parser_mediator, file_object, file_offset, file_entry_size)
file_offset += file_entry_size | Parses a Windows Recycler INFO2 file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. |
def parse_event_xml(self, event_data) -> dict:
event = {}
event_xml = event_data.decode()
message = MESSAGE.search(event_xml)
if not message:
return {}
event[EVENT_OPERATION] = message.group(EVENT_OPERATION)
topic = TOPIC.search(event_xml)
if topic:
event[EVENT_TOPIC] = topic.group(EVENT_TOPIC)
source = SOURCE.search(event_xml)
if source:
event[EVENT_SOURCE] = source.group(EVENT_SOURCE)
event[EVENT_SOURCE_IDX] = source.group(EVENT_SOURCE_IDX)
data = DATA.search(event_xml)
if data:
event[EVENT_TYPE] = data.group(EVENT_TYPE)
event[EVENT_VALUE] = data.group(EVENT_VALUE)
_LOGGER.debug(event)
return event | Parse metadata xml. |
def _has_nested(self, relations, operator='>=', count=1, boolean='and', extra=None):
relations = relations.split('.')
def closure(q):
if len(relations) > 1:
q.where_has(relations.pop(0), closure)
else:
q.has(relations.pop(0), operator, count, boolean, extra)
return self.where_has(relations.pop(0), closure) | Add nested relationship count conditions to the query.
:param relations: nested relations
:type relations: str
:param operator: The operator
:type operator: str
:param count: The count
:type count: int
:param boolean: The boolean value
:type boolean: str
:param extra: The extra query
:type extra: Builder or callable
:rtype: Builder |
def _send(self, prepared_request):
session = Session()
response = session.send(prepared_request)
return Response(response) | Send a PreparedRequest to the server.
Parameters
prepared_request (requests.PreparedRequest)
Returns
(Response)
A Response object, whichcontains a server's
response to an HTTP request. |
def set_permission(self, permission, value, parent=False, admin=False):
try:
if not getattr(self, 'parent_{}'.format(permission)) and not parent and not admin:
return False
level = 'parent' if parent else 'self'
setattr(self, '{}_{}'.format(level, permission), value)
if parent and not value:
setattr(self, 'self_{}'.format(permission), False)
self.save()
return True
except Exception as e:
logger.error("Error occurred setting permission {} to {}: {}".format(permission, value, e))
return False | Sets permission for personal information.
Returns False silently if unable to set permission.
Returns True if successful. |
def _NewMatchSection(self, val):
section = {"criterion": val, "config": {}}
self.matches.append(section)
self.section = section["config"]
self.processor = self._ParseMatchGrp | Create a new configuration section for each match clause.
Each match clause is added to the main config, and the criterion that will
trigger the match is recorded, as is the configuration.
Args:
val: The value following the 'match' keyword. |
def console_set_char_background(
con: tcod.console.Console,
x: int,
y: int,
col: Tuple[int, int, int],
flag: int = BKGND_SET,
) -> None:
lib.TCOD_console_set_char_background(_console(con), x, y, col, flag) | Change the background color of x,y to col using a blend mode.
Args:
con (Console): Any Console instance.
x (int): Character x position from the left.
y (int): Character y position from the top.
col (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
flag (int): Blending mode to use, defaults to BKGND_SET. |
def full_restapi_key_transformer(key, attr_desc, value):
keys = _FLATTEN.split(attr_desc['key'])
return ([_decode_attribute_map_key(k) for k in keys], value) | A key transformer that returns the full RestAPI key path.
:param str _: The attribute name
:param dict attr_desc: The attribute metadata
:param object value: The value
:returns: A list of keys using RestAPI syntax. |
def append(self, path, data, **kwargs):
metadata_response = self._post(
path, 'APPEND', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
data_response = self._requests_session.post(
metadata_response.headers['location'], data=data, **self._requests_kwargs)
_check_response(data_response)
assert not data_response.content | Append to the given file.
:param data: ``bytes`` or a ``file``-like object
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int |
def _format_value(self, value):
value, unit = self.py3.format_units(value, unit=self.unit, si=self.si_units)
return self.py3.safe_format(self.format_value, {"value": value, "unit": unit}) | Return formatted string |
def get(self, server):
if not isinstance(server, six.binary_type):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound(
'No matching credentials in {}'.format(self.program)
)
return result | Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised. |
def gravatar(self, size=20):
default = "mm"
gravatar_url = "//www.gravatar.com/avatar/" + hashlib.md5(self.email.lower()).hexdigest() + "?"
gravatar_url += urllib.urlencode({'d': default, 's': str(size)})
return gravatar_url | Construct a gravatar image address for the user |
def atlas_peer_download_zonefile_inventory( my_hostport, peer_hostport, maxlen, bit_offset=0, timeout=None, peer_table={} ):
if timeout is None:
timeout = atlas_inv_timeout()
interval = 524288
peer_inv = ""
log.debug("Download zonefile inventory %s-%s from %s" % (bit_offset, maxlen, peer_hostport))
if bit_offset > maxlen:
return peer_inv
for offset in xrange( bit_offset, maxlen, interval):
next_inv = atlas_peer_get_zonefile_inventory_range( my_hostport, peer_hostport, offset, interval, timeout=timeout, peer_table=peer_table )
if next_inv is None:
log.debug("Failed to sync inventory for %s from %s to %s" % (peer_hostport, offset, offset+interval))
break
peer_inv += next_inv
if len(next_inv) < interval:
break
return peer_inv | Get the zonefile inventory from the remote peer
Start from the given bit_offset
NOTE: this doesn't update the peer table health by default;
you'll have to explicitly pass in a peer table (i.e. setting
to {} ensures that nothing happens). |
def add(self, child):
if isinstance(child, Include):
self.add_include(child)
elif isinstance(child, Dimension):
self.add_dimension(child)
elif isinstance(child, Unit):
self.add_unit(child)
elif isinstance(child, ComponentType):
self.add_component_type(child)
elif isinstance(child, Component):
self.add_component(child)
elif isinstance(child, FatComponent):
self.add_fat_component(child)
elif isinstance(child, Constant):
self.add_constant(child)
else:
raise ModelError('Unsupported child element') | Adds a typed child object to the model.
@param child: Child object to be added. |
def UnicodeFromCodePage(string):
codepage = ctypes.windll.kernel32.GetOEMCP()
try:
return string.decode("cp%s" % codepage)
except UnicodeError:
try:
return string.decode("utf16", "ignore")
except UnicodeError:
return string.decode("utf8", "ignore") | Attempt to coerce string into a unicode object. |
def compress(data, compresslevel=9):
buf = io.BytesIO()
with GzipFile(fileobj=buf, mode='wb', compresslevel=compresslevel) as f:
f.write(data)
return buf.getvalue() | Compress data in one shot and return the compressed string.
Optional argument is the compression level, in range of 0-9. |
def _u_distance_covariance_sqr_naive(x, y, exponent=1):
a = _u_distance_matrix(x, exponent=exponent)
b = _u_distance_matrix(y, exponent=exponent)
return u_product(a, b) | Naive unbiased estimator for distance covariance.
Computes the unbiased estimator for distance covariance between two
matrices, using an :math:`O(N^2)` algorithm. |
def quote_names(db, names):
c = db.cursor()
c.execute("SELECT pg_catalog.quote_ident(n) FROM pg_catalog.unnest(%s::text[]) n", [list(names)])
return [name for (name,) in c] | psycopg2 doesn't know how to quote identifier names, so we ask the server |
def enableHook(self, msgObj):
self.killListIdx = len(qte_global.kill_list) - 2
self.qteMain.qtesigKeyseqComplete.connect(self.disableHook) | Enable yank-pop.
This method is connected to the 'yank-qtmacs_text_edit' hook
(triggered by the yank macro) to ensure that yank-pop only
gets activated afterwards. |
def get_default_library_patters():
python_version = platform.python_version_tuple()
python_implementation = platform.python_implementation()
system = platform.system()
if python_implementation == "PyPy":
if python_version[0] == "2":
return ["*/lib-python/%s.%s/*" % python_version[:2], "*/site-packages/*"]
else:
return ["*/lib-python/%s/*" % python_version[0], "*/site-packages/*"]
else:
if system == "Windows":
return [r"*\lib\*"]
return ["*/lib/python%s.%s/*" % python_version[:2], "*/lib64/python%s.%s/*" % python_version[:2]] | Returns library paths depending on the used platform.
:return: a list of glob paths |
def gettext(message):
global _default
_default = _default or translation(DEFAULT_LANGUAGE)
translation_object = getattr(_active, 'value', _default)
result = translation_object.gettext(message)
return result | Translate the 'message' string. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object. |
def filter_service_by_servicegroup_name(group):
def inner_filter(items):
service = items["service"]
if service is None:
return False
return group in [items["servicegroups"][g].servicegroup_name for g in service.servicegroups]
return inner_filter | Filter for service
Filter on group
:param group: group to filter
:type group: str
:return: Filter
:rtype: bool |
def p_next(self):
"Consume and return the next char"
try:
self.pos += 1
return self.input[self.pos - 1]
except IndexError:
self.pos -= 1
return None | Consume and return the next char |
def get_roles_by_type(resource_root, service_name, role_type,
cluster_name="default", view=None):
roles = get_all_roles(resource_root, service_name, cluster_name, view)
return [ r for r in roles if r.type == role_type ] | Get all roles of a certain type in a service
@param resource_root: The root Resource object.
@param service_name: Service name
@param role_type: Role type
@param cluster_name: Cluster name
@return: A list of ApiRole objects. |
def notify_observers(table, kind, primary_key=None):
if IN_MIGRATIONS:
return
if not Observer.objects.filter(dependencies__table=table).exists():
return
def handler():
try:
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_ORM_NOTIFY,
'table': table,
'kind': kind,
'primary_key': str(primary_key),
},
)
except ChannelFull:
logger.exception("Unable to notify workers.")
batcher = PrioritizedBatcher.global_instance()
if batcher.is_started:
batcher.add(
'rest_framework_reactive', handler, group_by=(table, kind, primary_key)
)
else:
handler() | Transmit ORM table change notification.
:param table: Name of the table that has changed
:param kind: Change type
:param primary_key: Primary key of the affected instance |
def convert_2_utc(self, datetime_, timezone):
datetime_ = self.tz_mapper[timezone].localize(datetime_)
return datetime_.astimezone(pytz.UTC) | convert to datetime to UTC offset. |
def _main_loop(self):
self.logger.debug("Running main loop")
old_time = 0
while True:
for plugin_key in self.plugins_dict:
obj = self.plugins_dict[plugin_key]
self._process_plugin(obj)
if self.settings['STATS_DUMP'] != 0:
new_time = int(old_div(time.time(), self.settings['STATS_DUMP']))
if new_time != old_time:
self._dump_stats()
if self.settings['STATS_DUMP_CRAWL']:
self._dump_crawl_stats()
if self.settings['STATS_DUMP_QUEUE']:
self._dump_queue_stats()
old_time = new_time
self._report_self()
time.sleep(self.settings['SLEEP_TIME']) | The internal while true main loop for the redis monitor |
def appendChild(self, param):
Symbol.appendChild(self, param)
if param.offset is None:
param.offset = self.size
self.size += param.size | Overrides base class. |
def marker(self):
if not self._marker:
assert markers, 'Package packaging is needed for environment markers'
self._marker = markers.Marker(self.raw)
return self._marker | Return environment marker. |
def render_pdf_file_to_image_files_pdftoppm_pgm(pdf_file_name, root_output_file_path,
res_x=150, res_y=150):
comm_output = render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name,
root_output_file_path, res_x, res_y, ["-gray"])
return comm_output | Same as renderPdfFileToImageFile_pdftoppm_ppm but with -gray option for pgm. |
def writePlist(dataObject, filepath):
plistData, error = (
NSPropertyListSerialization.
dataFromPropertyList_format_errorDescription_(
dataObject, NSPropertyListXMLFormat_v1_0, None))
if plistData is None:
if error:
error = error.encode('ascii', 'ignore')
else:
error = "Unknown error"
raise NSPropertyListSerializationException(error)
else:
if plistData.writeToFile_atomically_(filepath, True):
return
else:
raise NSPropertyListWriteException(
"Failed to write plist data to %s" % filepath) | Write 'rootObject' as a plist to filepath. |
def calculate_weighted_avg(bonds):
minimum_bond = min(bonds)
weighted_sum = 0.0
total_sum = 0.0
for entry in bonds:
weighted_sum += entry * exp(1 - (entry / minimum_bond) ** 6)
total_sum += exp(1 - (entry / minimum_bond) ** 6)
return weighted_sum / total_sum | Returns the weighted average bond length given by
Hoppe's effective coordination number formula.
Args:
bonds (list): list of floats that are the
bond distances between a cation and its
peripheral ions |
def add_dipole_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8,
fhigh=1e-5, gwAmp=1e-20, alpha=-0.66,
logspacing=True, dipoleamps=None,
dipoledir=None, dipolemag=None):
gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing,
dipoleamps, dipoledir, dipolemag)
gwb.add_gwb(psr,dist)
return gwb | Add a stochastic background from inspiraling binaries distributed
according to a pure dipole distribution, using the tempo2
code that underlies the GWdipolebkgrd plugin.
The basic use is identical to that of 'add_gwb':
Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries,
'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number-generator,
'flow' and 'fhigh' [Hz] determine the background band, 'gwAmp' and 'alpha'
determine its amplitude and exponent, and setting 'logspacing' to False
will use linear spacing for the individual sources.
Additionally, the dipole component can be specified by using one of two
methods:
1) Specify the dipole direction as three dipole amplitudes, in the vector
dipoleamps
2) Specify the direction of the dipole as a magnitude dipolemag, and a vector
dipoledir=[dipolephi, dipoletheta]
It is also possible to create a background object with
gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing)
then call the method gwb.add_gwb(pulsar[i],dist) repeatedly to get a
consistent background for multiple pulsars.
Returns the GWB object |
def parse_cobol(lines):
output = []
intify = ["level", "occurs"]
for row in lines:
match = CobolPatterns.row_pattern.match(row.strip())
if not match:
_logger().warning("Found unmatched row %s" % row.strip())
continue
match = match.groupdict()
for i in intify:
match[i] = int(match[i]) if match[i] is not None else None
if match['pic'] is not None:
match['pic_info'] = parse_pic_string(match['pic'])
output.append(match)
return output | Parses the COBOL
- converts the COBOL line into a dictionary containing the information
- parses the pic information into type, length, precision
- ~~handles redefines~~ -> our implementation does not do that anymore
because we want to display item that was redefined. |
def install_monitor(self, mon):
assert self.binded
for module in self._modules:
module.install_monitor(mon) | Installs monitor on all executors. |
def read_tabular(filepath):
_, fn, ext = splitext2(filepath)
if ext == '.h5':
return _read_tabular_h5(filepath)
elif ext == '.pkl':
return _read_tabular_pickle(filepath)
else:
raise NotImplementedError | Read tabular object in HDF5 or pickle format
Args:
filepath (path-like): path to read to; must end in '.h5' or '.pkl' |
def calculate_svd(data):
if (not isinstance(data, np.ndarray)) or (data.ndim != 2):
raise TypeError('Input data must be a 2D np.ndarray.')
return svd(data, check_finite=False, lapack_driver='gesvd',
full_matrices=False) | Calculate Singular Value Decomposition
This method calculates the Singular Value Decomposition (SVD) of the input
data using SciPy.
Parameters
----------
data : np.ndarray
Input data array, 2D matrix
Returns
-------
tuple of left singular vector, singular values and right singular vector
Raises
------
TypeError
For invalid data type |
def _branch_name(cls, version):
suffix = version.public[len(version.base_version):]
components = version.base_version.split('.') + [suffix]
if suffix == '' or suffix.startswith('rc'):
return '{}.{}.x'.format(*components[:2])
elif suffix.startswith('.dev'):
return 'master'
else:
raise ValueError('Unparseable pants version number: {}'.format(version)) | Defines a mapping between versions and branches.
In particular, `-dev` suffixed releases always live on master. Any other (modern) release
lives in a branch. |
def set_ip_port(self, ip, port):
self.address = ip
self.port = port
self.stop()
self.start() | set ip and port |
def get_lock_request(name, version, patch_lock, weak=True):
ch = '~' if weak else ''
if patch_lock == PatchLock.lock:
s = "%s%s==%s" % (ch, name, str(version))
return PackageRequest(s)
elif (patch_lock == PatchLock.no_lock) or (not version):
return None
version_ = version.trim(patch_lock.rank)
s = "%s%s-%s" % (ch, name, str(version_))
return PackageRequest(s) | Given a package and patch lock, return the equivalent request.
For example, for object 'foo-1.2.1' and lock type 'lock_3', the equivalent
request is '~foo-1.2'. This restricts updates to foo to patch-or-lower
version changes only.
For objects not versioned down to a given lock level, the closest possible
lock is applied. So 'lock_3' applied to 'foo-1' would give '~foo-1'.
Args:
name (str): Package name.
version (Version): Package version.
patch_lock (PatchLock): Lock type to apply.
Returns:
`PackageRequest` object, or None if there is no equivalent request. |
def get_meta_attributes(self, **kwargs):
superuser = kwargs.get('superuser', False)
if (self.untl_object.qualifier == 'recordStatus'
or self.untl_object.qualifier == 'system'):
if superuser:
self.editable = True
self.repeatable = True
else:
self.editable = False
self.view_type = 'qualified-input'
elif self.untl_object.qualifier == 'hidden':
self.label = 'Object Hidden'
self.view_type = 'radio'
else:
self.editable = False
self.view_type = 'qualified-input' | Determine the form attributes for the meta field. |
def terminal_title(self):
result = self.application.get_title()
assert result is None or isinstance(result, six.text_type)
return result | Return the current title to be displayed in the terminal.
When this in `None`, the terminal title remains the original. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.