code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def get_image(self, image_id, **kwargs):
    """Get details about an image.

    :param int image_id: The ID of the image.
    :param dict kwargs: response-level options (mask, limit, etc.)
    """
    if 'mask' not in kwargs:
        # Fall back to the module-level default image mask.
        kwargs['mask'] = IMAGE_MASK
    return self.vgbdtg.getObject(id=image_id, **kwargs)
def remove_profile(name, s3=False):
    """Removes a profile from your config.

    :param name: profile name
    :param s3: if True remove the S3 profile, otherwise the db profile
    :raises Exception: if the profile file does not exist or cannot be removed
    """
    home = os.path.expanduser("~")
    prefix = S3_PROFILE_ID if s3 else DBPY_PROFILE_ID
    f = os.path.join(home, prefix + name)
    try:
        # Check existence explicitly instead of opening (and leaking) a file handle.
        if not os.path.isfile(f):
            raise Exception("Profile '{0}' does not exist. Could not find file {1}".format(name, f))
        os.remove(f)
    except Exception as e:
        # Typo fix: "Excpetion" -> "Exception"
        raise Exception("Could not remove profile {0}! Exception: {1}".format(name, e))
def name_for(obj):
    """Get a name for something.

    Allows overriding of default names using the `__alias__` attribute.
    """
    if isinstance(obj, str):
        return obj
    cls = obj if isclass(obj) else obj.__class__
    source = cls.__alias__ if hasattr(cls, "__alias__") else cls.__name__
    return underscore(source)
def _height_and_width(self):
    """Override for blessings.Terminal._height_and_width, adding caching."""
    cache = self._cache
    if 'height_and_width' not in cache:
        # Compute once and remember the result for later calls.
        cache['height_and_width'] = super(Terminal, self)._height_and_width()
    return cache['height_and_width']
def WriteFileHash(self, path, hash_value):
    """Writes the file path and hash to file.

    Args:
      path (str): path of the file.
      hash_value (str): message digest hash calculated over the file data.
    """
    line = '{0:s}\t{1:s}\n'.format(hash_value, path)
    self._file_object.write(self._EncodeString(line))
def from_wif_hex(cls: Type[SigningKeyType], wif_hex: str) -> SigningKeyType:
    """Return SigningKey instance from Duniter WIF in hexadecimal format.

    :param wif_hex: WIF string in hexadecimal format
    :raises Exception: on bad size, bad format version, or bad checksum
    """
    wif_bytes = Base58Encoder.decode(wif_hex)
    if len(wif_bytes) != 35:
        raise Exception("Error: the size of WIF is invalid")
    # Layout: 1-byte format id | 32-byte seed | 2-byte checksum.
    format_id = wif_bytes[0:1]
    seed = wif_bytes[1:-2]
    checksum_from_wif = wif_bytes[-2:]
    if format_id != b"\x01":
        raise Exception("Error: bad format version, not WIF")
    payload = wif_bytes[0:-2]
    expected = libnacl.crypto_hash_sha256(libnacl.crypto_hash_sha256(payload))[0:2]
    if checksum_from_wif != expected:
        raise Exception("Error: bad checksum of the WIF")
    return cls(seed)
def upgrade(self, using=None, **kwargs):
    """Upgrade the index to the latest format.

    Any additional keyword arguments will be passed to
    ``Elasticsearch.indices.upgrade`` unchanged.
    """
    connection = self._get_connection(using)
    return connection.indices.upgrade(index=self._name, **kwargs)
def re_tab(s):
    """Return a tabbed string from an expanded one.

    Walks the 8-column tab stops; where the two characters just before a
    stop are both spaces, the trailing run of spaces is collapsed into a
    single tab.
    """
    l = []
    p = 0
    for i in range(8, len(s), 8):
        # Two-character slice must be two spaces; a one-space literal can
        # never equal a length-2 slice, which made the check dead code.
        if s[i - 2:i] == "  ":
            l.append(s[p:i].rstrip() + "\t")
            p = i
    if p == 0:
        return s
    l.append(s[p:])
    return "".join(l)
def git_tag2eups_tag(git_tag):
    """Convert a git tag into an acceptable eups tag.

    eups does not accept semantic-versioning markup: tags beginning with a
    digit are prefixed with ``v`` and ``.``/``-`` become underscores.

    Parameters
    ----------
    git_tag : str
        literal git tag string

    Returns
    -------
    str
        A string suitable for use as an eups tag name
    """
    tag = git_tag
    if re.match(r'\d', tag):
        tag = 'v{0}'.format(tag)
    return tag.translate(str.maketrans('.-', '__'))
def targets_by_file(self, targets):
    """Returns a map from abs path of source, class or jar file to an OrderedSet of targets.

    The value is usually a singleton, because a source or class file belongs
    to a single target. However a single jar may be provided by multiple
    JarLibrary targets; a target that depends on the jar directly comes first.
    """
    mapping = defaultdict(OrderedSet)
    for target in targets:
        for path in self.files_for_target(target):
            mapping[path].add(target)
    return mapping
def evaluate_perceptron(ctx, model, corpus):
    """Evaluate performance of Averaged Perceptron POS Tagger."""
    click.echo('chemdataextractor.pos.evaluate')
    if corpus == 'wsj':
        evaluation = wsj_evaluation
        sents = list(evaluation.tagged_sents())
        # Drop -NONE- (trace) tokens from every sentence.
        for i, sent in enumerate(sents):
            sents[i] = [tok for tok in sent if tok[1] != u'-NONE-']
    elif corpus == 'genia':
        evaluation = genia_evaluation
        sents = list(evaluation.tagged_sents())
        # Normalize bracket tags to PTB-style -LRB-/-RRB-.
        bracket_map = {u'(': u'-LRB-', u')': u'-RRB-'}
        for i, sent in enumerate(sents):
            for j, (token, tag) in enumerate(sent):
                if tag in bracket_map:
                    sents[i][j] = (token, bracket_map[tag])
    else:
        raise click.ClickException('Invalid corpus')
    tagger = ChemApPosTagger(model=model)
    accuracy = tagger.evaluate(sents)
    click.echo('%s on %s: %s' % (model, evaluation, accuracy))
def remove_file(filename, recursive=False, force=False):
    """Removes a file or directory.

    :param filename: path to remove
    :param recursive: if True, remove directory contents recursively
    :param force: if True, ignore errors and always return True
    :return: True on success (always True when force is set), False otherwise
    """
    import os
    try:
        mode = os.stat(filename)[0]
        if mode & 0x4000 != 0:  # stat.S_IFDIR bit: path is a directory
            if recursive:
                for entry in os.listdir(filename):
                    success = remove_file(filename + '/' + entry, recursive, force)
                    if not success and not force:
                        return False
                os.rmdir(filename)
            elif not force:
                return False
        else:
            os.remove(filename)
    except OSError:
        # Narrowed from a bare except: only filesystem errors are best-effort.
        if not force:
            return False
    return True
def to_pixel(self, wcs, mode='all'):
    """Convert the aperture to an `EllipticalAperture` object defined in
    pixel coordinates.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The world coordinate system (WCS) transformation to use.
    mode : {'all', 'wcs'}, optional
        Whether to include distortions (``'all'``; default) or only the
        core WCS transformation (``'wcs'``).

    Returns
    -------
    aperture : `EllipticalAperture` object
        An `EllipticalAperture` object.
    """
    return EllipticalAperture(**self._to_pixel_params(wcs, mode=mode))
def collect_video_streams(self):
    """Return a list of video streams in this timeline and its children.

    The streams are returned in order with respect to the timeline. Each
    stream is a list: the first element is the tag which introduced the
    stream; subsequent elements are the tags making up its body (if any).
    """
    streams = []
    streams_by_id = {}
    for tag in self.all_tags_of_type(TagDefineVideoStream):
        entry = [tag]
        streams_by_id[tag.characterId] = entry
        streams.append(entry)
    for tag in self.all_tags_of_type(TagVideoFrame):
        assert tag.streamId in streams_by_id
        streams_by_id[tag.streamId].append(tag)
    return streams
def copy_and_update(dictionary, update):
    """Returns an updated copy of the dictionary without modifying the original."""
    merged = dictionary.copy()
    merged.update(update)
    return merged
def cwd():
    """Return the be current working directory."""
    custom_cwd = os.environ.get("BE_CWD")
    if custom_cwd and not os.path.isdir(custom_cwd):
        sys.stderr.write("ERROR: %s is not a directory" % custom_cwd)
        sys.exit(lib.USER_ERROR)
    if custom_cwd:
        return custom_cwd
    # Normalize Windows separators for consistency.
    return os.getcwd().replace("\\", "/")
def get_aligned_adjacent_coords(x, y):
    """Return the eight clockwise-adjacent coordinates on a keypad where each
    row is vertically aligned, starting from the left neighbor.
    """
    return [
        (x - 1, y), (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),
        (x + 1, y), (x + 1, y + 1), (x, y + 1), (x - 1, y + 1),
    ]
def disconnect(sid=None, namespace=None, silent=False):
    """Disconnect the client.

    This function terminates the connection with the client; as a result the
    client will receive a disconnect event.

    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    :param silent: this option is deprecated.
    """
    socketio = flask.current_app.extensions['socketio']
    sid = sid or flask.request.sid
    namespace = namespace or flask.request.namespace
    return socketio.server.disconnect(sid, namespace=namespace)
def parse_exiobase1(path):
    """Parse the exiobase1 raw data files.

    Works with the pxp/ixi 44-region coefficient archives from
    www.exiobase.eu, compressed (zip) or unpacked.

    Parameters
    ----------
    path : pathlib.Path or string
        Path of the exiobase 1 data

    Returns
    -------
    pymrio.IOSystem with exio1 data
    """
    path = os.path.abspath(os.path.normpath(str(path)))
    exio_files = get_exiobase_files(path)
    if not exio_files:
        raise ParserError("No EXIOBASE files found at {}".format(path))
    system = _get_MRIO_system(path)
    if not system:
        # Parsing continues; the caller can set the system manually.
        logging.warning("Could not determine system (pxp or ixi)"
                        " set system parameter manually")
    return generic_exiobase12_parser(exio_files, system=system)
def _generate_processed_key_name(process_to, upload_name):
timestamp = datetime.now().strftime('%Y%m%d%H%M%S%f')
name, extension = os.path.splitext(upload_name)
digest = md5(''.join([timestamp, upload_name])).hexdigest()
return os.path.join(process_to, '{0}.{1}'.format(digest, extension)) | Returns a key name to use after processing based on timestamp and
upload key name. |
def recipients(self):
    """A list of all recipients for this message."""
    extras = (self._cc or []) + (self._bcc or [])
    return self._to + extras
def do_loadmacros(parser, token):
    """Parse a loadmacros tag: load the macros defined in the given template
    file into the parser and return a LoadMacrosNode.
    """
    try:
        tag_name, filename = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            "'{0}' tag requires exactly one argument (filename)".format(
                token.contents.split()[0]))
    quoted = filename[0] in ('"', "'") and filename[-1] == filename[0]
    if not quoted:
        raise template.TemplateSyntaxError(
            "Malformed argument to the {0} template tag."
            " Argument must be in quotes.".format(tag_name)
        )
    filename = filename[1:-1]
    loaded = get_template(filename)
    # Newer Django wraps the template; fall back for older versions.
    try:
        nodelist = loaded.template.nodelist
    except AttributeError:
        nodelist = loaded.nodelist
    macros = nodelist.get_nodes_by_type(DefineMacroNode)
    _setup_macros_dict(parser)
    for macro in macros:
        parser._macros[macro.name] = macro
    return LoadMacrosNode(macros)
def _http_call(self, url, method, **kwargs):
    """Makes a http call. Logs response information.

    :param url: request URL
    :param method: HTTP verb
    :param kwargs: passed through to ``requests.request``
    :return: the ``requests`` response object
    """
    logging.debug("Request[{0}]: {1}".format(method, url))
    start_time = datetime.datetime.now()
    # Use .get() so calls without headers/data don't raise KeyError.
    logging.debug("Header: {0}".format(kwargs.get('headers')))
    logging.debug("Params: {0}".format(kwargs.get('data')))
    # SECURITY: verify=False disables TLS certificate validation; kept for
    # backward compatibility but should be revisited.
    response = requests.request(method, url, verify=False, **kwargs)
    duration = datetime.datetime.now() - start_time
    logging.debug("Response[{0:d}]: {1}, Duration: {2}.{3}s.".format(
        response.status_code, response.reason, duration.seconds,
        duration.microseconds))
    return response
def _construct_timeseries(self, timeseries, constraints={}):
self.response_from(timeseries, constraints)
if self.response == None:
return None
return {'data':self.response['data'],
'period':self.response['period'],
'start time':datetime.datetime.fromtimestamp(self.response['start_time']),
'end time':datetime.datetime.fromtimestamp(self.response['end_time'])} | wraps response_from for timeseries calls, returns the resulting dict |
def _to_span(x, idx=0):
    """Convert a Candidate, Mention, or Span to a span."""
    if isinstance(x, Candidate):
        return x[idx].context
    if isinstance(x, Mention):
        return x.context
    if isinstance(x, TemporarySpanMention):
        return x
    raise ValueError(f"{type(x)} is an invalid argument type")
def checksums(self, install):
    """Checksums before install."""
    expected = pkg_checksum(install, self.repo)
    check_md5(expected, self.tmp_path + install)
def compress_array(str_list, withHC=LZ4_HIGH_COMPRESSION):
    """Compress an array of strings.

    Parameters
    ----------
    str_list: `list[str]`
        The input list of strings which need to be compressed.
    withHC: `bool`
        This flag controls whether lz4HC will be used.

    Returns
    -------
    `list[str]`
        The list of the compressed strings.
    """
    global _compress_thread_pool
    if not str_list:
        # Nothing to compress; return the (empty) input unchanged.
        return str_list
    do_compress = lz4_compressHC if withHC else lz4_compress
    def can_parallelize_strlist(strlist):
        # Heuristic: threading pays off only for many, sufficiently large items.
        return len(strlist) > LZ4_N_PARALLEL and len(strlist[0]) > LZ4_MINSZ_PARALLEL
    use_parallel = (ENABLE_PARALLEL and withHC) or can_parallelize_strlist(str_list)
    if BENCHMARK_MODE or use_parallel:
        if _compress_thread_pool is None:
            # Lazily create the shared pool on first parallel use.
            _compress_thread_pool = ThreadPool(LZ4_WORKERS)
        return _compress_thread_pool.map(do_compress, str_list)
    return [do_compress(s) for s in str_list]
def to_records_(self) -> dict:
    """Returns a list of dictionary records from the main dataframe.

    :return: the dataframe rows as records
    :example: ``ds.to_records_()``
    """
    try:
        return self.df.to_dict(orient="records")
    except Exception as e:
        # Delegate error reporting; implicitly returns None on failure.
        self.err(e, "Can not convert data to records")
def set_center(self, lat, lon):
    """Set center of view."""
    center = SlipCenter((lat, lon))
    self.object_queue.put(center)
def expand_image(image, shape):
    """Expand image from original shape to requested shape.

    Output shape must be an integer multiple of input image shape for
    each axis.

    :param image: 2-D input array
    :param shape: requested (ny, nx) output shape
    :raises ValueError: if shape is not an integer multiple of image.shape
    """
    if (shape[0] % image.shape[0]) or (shape[1] % image.shape[1]):
        raise ValueError("Output shape must be an integer multiple of input "
                         "image shape.")
    sx = shape[1] // image.shape[1]
    sy = shape[0] // image.shape[0]
    # Half-pixel offsets center the interpolation within each output cell.
    ox = (sx - 1.0) / (2.0 * sx)
    oy = (sy - 1.0) / (2.0 * sy)
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    y, x = np.indices(shape, dtype=float)
    x = x / sx - ox
    y = y / sy - oy
    return bilinear_interp(image, x, y)
def quiver(
    x,
    y,
    z,
    u,
    v,
    w,
    size=default_size * 10,
    size_selected=default_size_selected * 10,
    color=default_color,
    color_selected=default_color_selected,
    marker="arrow",
    **kwargs
):
    """Create a quiver plot: a scatter plot whose glyphs are arrows pointing
    in the direction given by u, v and w.

    size/color and their *_selected variants control glyph appearance;
    extra keyword arguments are passed on to the Scatter constructor.
    Returns the created Scatter object.
    """
    fig = gcf()
    # Make sure the figure limits include the new data.
    _grow_limits(x, y, z)
    # Direction components are named u/v/w here; reject the old vx/vy/vz names.
    if 'vx' in kwargs or 'vy' in kwargs or 'vz' in kwargs:
        raise KeyError('Please use u, v, w instead of vx, vy, vz')
    s = ipv.Scatter(
        x=x,
        y=y,
        z=z,
        vx=u,
        vy=v,
        vz=w,
        color=color,
        size=size,
        color_selected=color_selected,
        size_selected=size_selected,
        geo=marker,
        **kwargs
    )
    # Rebind (rather than append) so widget traitlets observe the change.
    fig.scatters = fig.scatters + [s]
    return s
def add_special_file(self, mask, path, from_quick_server, ctype=None):
    """Adds a special file that might have a different actual path than
    its address.

    Parameters
    ----------
    mask : string
        The URL that must be matched to perform this request.
    path : string
        The actual file path.
    from_quick_server : bool
        If set the file path is relative to *this* script otherwise it is
        relative to the process.
    ctype : string
        Optional content type.
    """
    if from_quick_server:
        full_path = os.path.join(os.path.dirname(__file__), path)
    else:
        full_path = path

    def read_file(_req, _args):
        # Re-read the file on every request so updates are picked up.
        with open(full_path, 'rb') as fin:
            return Response(fin.read(), ctype=ctype)

    self.add_text_get_mask(mask, read_file)
    self.set_file_argc(mask, 0)
def cmd_playtune(self, args):
    """Send a PLAY_TUNE message."""
    if not args:
        print("Usage: playtune TUNE")
        return
    tune = args[0]
    # The MAVLink message splits the tune across two fixed-size fields.
    part1 = tune[0:30]
    part2 = tune[30:]
    if sys.version_info.major >= 3:
        if not isinstance(part1, bytes):
            part1 = bytes(part1, "ascii")
        if not isinstance(part2, bytes):
            part2 = bytes(part2, "ascii")
    self.master.mav.play_tune_send(self.settings.target_system,
                                   self.settings.target_component,
                                   part1, part2)
def partial(__fn, *a, **kw):
    """Wrap a note for injection of a partially applied function.

    Allows annotated functions to be injected for composition. Keyword
    arguments are treated as `maybe` so that only providable notes are
    applied; injections on the partial function are lazy and not applied
    until the injected partial function is called (see `eager_partial`).
    """
    frozen_kwargs = tuple(kw.items())
    return (PARTIAL, (__fn, a, frozen_kwargs))
def image_save_buffer_fix(maxblock=1048576):
    """Contextmanager that changes MAXBLOCK in ImageFile while active."""
    previous = ImageFile.MAXBLOCK
    ImageFile.MAXBLOCK = maxblock
    try:
        yield
    finally:
        # Always restore, even if the body raised.
        ImageFile.MAXBLOCK = previous
def fit_offset_and_rotation(coords0, coords1):
    """Fit a rotation and a translation between two matched point sets.

    Uses Kabsch's algorithm on two matched sets of M N-dimensional points.

    Parameters
    ----------
    coords0 : (M, N) array_like
    coords1 : (M, N) array_like

    Returns
    -------
    offset : (N,) array_like
    rotation : (N, N) array_like

    Notes
    -----
    .. [1] https://en.wikipedia.org/wiki/Kabsch_algorithm
    .. [2] http://nghiaho.com/?page_id=671
    """
    p = numpy.asarray(coords0)
    q = numpy.asarray(coords1)
    centroid_p = p.mean(axis=0)
    centroid_q = q.mean(axis=0)
    p_centered = p - centroid_p
    q_centered = q - centroid_q
    crossvar = numpy.dot(numpy.transpose(p_centered), q_centered)
    u, _, vt = linalg.svd(crossvar)
    # Guard against an improper rotation (reflection).
    if linalg.det(u) * linalg.det(vt) < 0:
        u[:, -1] = -u[:, -1]
    rot = numpy.transpose(numpy.dot(u, vt))
    off = centroid_q - numpy.dot(rot, centroid_p)
    return off, rot
def guard_handler(instance, transition_id):
    """Generic workflow guard handler: returns True if the transition can be
    performed on the given instance.

    Walks bika.lims.workflow.<obj_type>.guards looking for a function named
    'guard_<transition_id>'. If found, returns its result; otherwise True.

    :param instance: the object for which the transition is evaluated
    :param transition_id: the id of the transition
    :return: True if the transition can be performed
    :rtype: bool
    """
    if not instance:
        return True
    module_name = '{0}.guards'.format(instance.portal_type.lower())
    wf_module = _load_wf_module(module_name)
    if not wf_module:
        return True
    guard = getattr(wf_module, 'guard_{0}'.format(transition_id), False)
    if not guard:
        return True
    return guard(instance)
def intersectionlist_to_matrix(ilist, xterms, yterms):
    """WILL BE DEPRECATED — replace with a method returning a pandas dataframe.

    Builds a len(yterms) x len(xterms) matrix of 'j' values from the
    intersection list, returned together with the term lists.
    """
    z = [[0] * len(xterms) for _ in range(len(yterms))]
    xmap = {term: i for i, term in enumerate(xterms)}
    ymap = {term: i for i, term in enumerate(yterms)}
    for entry in ilist:
        z[ymap[entry['y']]][xmap[entry['x']]] = entry['j']
    logging.debug("Z={}".format(z))
    return (z, xterms, yterms)
def set_selection(self, taskfile):
    """Set the selection to the given taskfile.

    :param taskfile: the taskfile to set the selection to
    :type taskfile: :class:`djadapter.models.TaskFile`
    :returns: None
    :rtype: None
    :raises: None
    """
    self.set_project(taskfile.task.project)
    self.set_releasetype(taskfile.releasetype)
    # Asset tasks use the asset browsers/tab; shot tasks the shot ones.
    if taskfile.task.department.assetflag:
        browser = self.assetbrws
        verbrowser = self.assetverbrws
        tabi = 0
        rootobj = taskfile.task.element.atype
    else:
        browser = self.shotbrws
        verbrowser = self.shotverbrws
        tabi = 1
        rootobj = taskfile.task.element.sequence
    # Drill down the browser levels: root -> element -> task -> descriptor.
    self.set_level(browser, 0, rootobj)
    self.set_level(browser, 1, taskfile.task.element)
    self.set_level(browser, 2, taskfile.task)
    self.set_level(browser, 3, [taskfile.descriptor])
    self.set_level(verbrowser, 0, taskfile)
    self.selection_tabw.setCurrentIndex(tabi)
def subvolume_snapshot(source, dest=None, name=None, read_only=False):
    """Create a snapshot of a source subvolume.

    source
        Source subvolume from where to create the snapshot
    dest
        If only dest is given, the subvolume will be named as the
        basename of the source
    name
        Name of the snapshot
    read_only
        Create a read only snapshot

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_snapshot /var/volumes/tmp dest=/.snapshots
        salt '*' btrfs.subvolume_snapshot /var/volumes/tmp name=backup
    """
    if not dest and not name:
        raise CommandExecutionError('Provide parameter dest, name, or both')
    cmd = ['btrfs', 'subvolume', 'snapshot']
    if read_only:
        cmd.append('-r')
    # BUG FIX: the source subvolume was never added to the command, so the
    # parameter was unused and the btrfs invocation was malformed.
    cmd.append(source)
    if dest and not name:
        cmd.append(dest)
    if dest and name:
        name = os.path.join(dest, name)
    if name:
        cmd.append(name)
    res = __salt__['cmd.run_all'](cmd)
    salt.utils.fsutils._verify_run(res)
    return True
def extract_text_log_artifacts(job_log):
    """Generate a set of artifacts by parsing from the raw text log."""
    collection = ArtifactBuilderCollection(job_log.url)
    collection.parse()
    return [
        {
            "job_guid": job_log.job.guid,
            "name": name,
            "type": 'json',
            "blob": json.dumps(artifact),
        }
        for name, artifact in collection.artifacts.items()
    ]
async def populate(self, agent_cls, *args, **kwargs):
    """Populate all the slave grid environments with agents.

    Assumes that no agents have been spawned yet to the slave environment
    grids (slave environment managers excluded, as they are not in the grids).
    """
    # Each slave grid holds gs[0] * gs[1] agents.
    total = self.gs[0] * self.gs[1]
    tasks = [
        asyncio.ensure_future(
            self._populate_slave(addr, agent_cls, total, *args, **kwargs))
        for addr in self.addrs
    ]
    return await asyncio.gather(*tasks)
def import_package(name):
    """Given a package name like 'foo.bar.quux', imports the package
    and returns the desired (leaf) module.

    :param name: dotted module path
    :raises ImportError: if the import fails even after clearing zipimport caches
    """
    try:
        mod = __import__(name)
    except ImportError:
        # Stale zipimport caches can cause spurious failures; clear and retry.
        clear_zipimport_cache()
        mod = __import__(name)
    # __import__ returns the top-level package; walk down to the leaf module.
    # (Removed an unused local `import zipimport`.)
    for comp in name.split('.')[1:]:
        mod = getattr(mod, comp)
    return mod
def PorodGuinier(q, a, alpha, Rg):
    """Empirical Porod-Guinier scattering.

    Inputs:
        ``q``: independent variable
        ``a``: factor of the power-law branch
        ``alpha``: power-law exponent
        ``Rg``: radius of gyration

    Formula:
        ``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise;
        ``q_sep`` and ``G`` are determined from conditions of smoothness at
        the cross-over.

    Literature:
        B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010)
        43, 716-719.
    """
    # Delegates to the multi-region implementation with a single region.
    return PorodGuinierMulti(q, a, alpha, Rg)
def validate_config_file(cls, config_filepath):
    """Validates the filepath to the config: existence and YAML content.

    :param config_filepath: str, file path to the config file to query
    :return: None
    :raises: IOError
    """
    is_file = os.path.isfile(config_filepath)
    if not is_file and os.path.isabs(config_filepath):
        raise IOError('File path %s is not a valid yml, ini or cfg file or does not exist' % config_filepath)
    elif is_file:
        if os.path.getsize(config_filepath) == 0:
            raise IOError('File %s is empty' % config_filepath)
        with open(config_filepath, 'r') as f:
            # safe_load avoids arbitrary object construction from config
            # files; yaml.load without a Loader is unsafe and deprecated.
            if yaml.safe_load(f) is None:
                raise IOError('No YAML config was found in file %s' % config_filepath)
def in6_addrtovendor(addr):
    """Extract the MAC address from a modified EUI-64 constructed IPv6
    address and use the IANA oui.txt file to get the vendor.

    The database used is the one loaded by Scapy from a Wireshark
    installation if discovered in a well-known location. None is returned
    on error, "UNKNOWN" if the vendor is unknown.
    """
    mac = in6_addrtomac(addr)
    if mac is None or conf.manufdb is None:
        return None
    vendor = conf.manufdb._get_manuf(mac)
    # Presumably a 17-char non-MAC-formatted result marks a failed lookup;
    # upstream treats it as an unknown vendor.
    if len(vendor) == 17 and vendor.count(':') != 5:
        vendor = "UNKNOWN"
    return vendor
def raw_secret_generator(plugin, secret_line, filetype):
    """Generates raw secrets by re-scanning the line, with the specified plugin.

    :type plugin: BasePlugin
    :type secret_line: str
    :type filetype: FileType
    """
    for raw_secret in plugin.secret_generator(secret_line, filetype=filetype):
        yield raw_secret

    # isinstance is the idiomatic form of issubclass(x.__class__, ...).
    if isinstance(plugin, HighEntropyStringsPlugin):
        # Re-scan with a laxer regex to also catch non-quoted strings.
        with plugin.non_quoted_string_regex(strict=False):
            for raw_secret in plugin.secret_generator(secret_line):
                yield raw_secret
def iter_series(self, workbook, row, col):
    """Yield series dictionaries with values resolved to the final excel formulas."""
    for spec in self.__series:
        resolved = dict(spec)  # copy so the stored spec is not mutated
        resolved["values"] = resolved["values"].get_formula(workbook, row, col)
        if "categories" in resolved:
            resolved["categories"] = resolved["categories"].get_formula(workbook, row, col)
        yield resolved
def _stop_thread(self):
    """Stop the enqueueing thread. Keep the queue content and stream state."""
    self._stopping_event.set()
    queue_content = []
    # Drain whatever is already queued so nothing is lost across the restart.
    try:
        while True:
            queue_content.append(self._queue.get_nowait())
    except Empty:
        pass
    self._enqueueing_thread.join()
    # The thread may have pushed one final batch between the drain and join.
    try:
        queue_content.append(self._queue.get_nowait())
    except Empty:
        pass
    # Rebuild the queue large enough to hold everything preserved.
    self._queue = Queue(max(len(queue_content), self._buffer_size))
    for batch in queue_content:
        self._queue.put(batch)
def get_page(self, path, return_content=True, return_html=True):
    """Get a Telegraph page.

    :param path: Path to the Telegraph page (in the format Title-12-31,
                 i.e. everything after https://telegra.ph/)
    :param return_content: If true, content field will be returned
    :param return_html: If true, returns HTML instead of Nodes list
    """
    response = self._telegraph.method(
        'getPage', path=path, values={'return_content': return_content})
    if return_content and return_html:
        # Convert the Node list into an HTML string in place.
        response['content'] = nodes_to_html(response['content'])
    return response
def get_loss(self, logits: mx.sym.Symbol, labels: mx.sym.Symbol) -> mx.sym.Symbol:
    """Returns loss symbol given logits and integer-coded labels.

    :param logits: Shape: (batch_size * target_seq_len, target_vocab_size).
    :param labels: Shape: (batch_size * target_seq_len,).
    :return: Loss symbol.
    """
    # Map the config's normalization type to mxnet's SoftmaxOutput option.
    normalization_by_type = {
        C.LOSS_NORM_VALID: "valid",
        C.LOSS_NORM_BATCH: "null",
    }
    norm_type = self.loss_config.normalization_type
    if norm_type not in normalization_by_type:
        raise ValueError("Unknown loss normalization type: %s" % norm_type)
    return mx.sym.SoftmaxOutput(data=logits,
                                label=labels,
                                ignore_label=self.ignore_label,
                                use_ignore=True,
                                normalization=normalization_by_type[norm_type],
                                smooth_alpha=self.loss_config.label_smoothing,
                                name=self.name)
def get(self,
        resource_id=None,
        resource_action=None,
        resource_cls=None,
        single_resource=False):
    """Gets the details for one or more resources by ID.

    Args:
        resource_id - str - The endpoint (URL path) for the resource
        resource_action - str - An action to perform on the resource
        resource_cls - cls - A class to use for parsing, if different than
            the base resource
        single_resource - bool - Expect a single response object even when
            not requesting a single resource

    Returns:
        One or more instances of the resource class parsed from JSON
    """
    resource_cls = resource_cls or self._cls
    endpoint = self.endpoint
    if resource_id:
        endpoint = self._build_url(endpoint, resource_id)
    if resource_action:
        endpoint = self._build_url(endpoint, resource_action)
    response = self.api.execute("GET", endpoint)
    if not response.ok:
        raise Error.parse(response.json())
    payload = response.json()
    if resource_id or single_resource:
        return resource_cls.parse(payload)
    return [resource_cls.parse(item) for item in payload]
def from_string(address):
    """Parse string for IPv4 address.

    :param address: dotted-quad string to parse
    :return: WIPV4Address
    :raises ValueError: if the string does not have exactly four octets
    """
    octets = address.split('.')
    if len(octets) != WIPV4Address.octet_count:
        # Report the original string, not the split list it was shadowed by.
        raise ValueError('Invalid ip address: %s' % address)
    result = WIPV4Address()
    for i in range(WIPV4Address.octet_count):
        result.__address[i] = WBinArray(int(octets[i]), WFixedSizeByteArray.byte_size)
    return result
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True):
    """Reads files matching a wildcard pattern, yielding the contents.

    Args:
      filepattern: A wildcard pattern matching one or more files.
      max_lines: If set, stop reading after reading this many lines.
      split_on_newlines: A boolean. If true, then split files by lines and
        strip leading and trailing whitespace from each line. Otherwise,
        treat each file as a single string.

    Yields:
      The contents of the files as lines, if split_on_newlines is True, or
      the entire contents of each file if False.
    """
    filenames = sorted(tf.gfile.Glob(filepattern))
    lines_read = 0
    for filename in filenames:
        with tf.gfile.Open(filename) as f:
            if split_on_newlines:
                for line in f:
                    yield line.strip()
                    lines_read += 1
                    if max_lines and lines_read >= max_lines:
                        return
            else:
                if max_lines:
                    # Count lines so the cap also applies in whole-file mode;
                    # yield the truncated document when the cap is reached.
                    doc = []
                    for line in f:
                        doc.append(line)
                        lines_read += 1
                        if max_lines and lines_read >= max_lines:
                            yield "".join(doc)
                            return
                    yield "".join(doc)
                else:
                    yield f.read()
def _read_para_from(self, code, cbit, clen, *, desc, length, version):
    """Read HIP FROM parameter (RFC 8004).

    The parameter payload is a single 16-byte IP address.

    Octets  Bits  Name           Description
      0       0   from.type      Parameter Type
      1      15   from.critical  Critical Bit
      2      16   from.length    Length of Contents
      4      32   from.ip        Address
    """
    if clen != 16:
        raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')
    raw_addr = self._read_fileng(16)
    return dict(
        type=desc,
        critical=cbit,
        length=clen,
        ip=ipaddress.ip_address(raw_addr),
    )
def _create_style(name, family=None, **kwargs):
    """Helper function for creating a new style."""
    if family == 'paragraph':
        # Paragraph styles get a default bottom margin unless overridden.
        kwargs.setdefault('marginbottom', '.5cm')
    style = Style(name=name, family=family)
    # Margin-related keys belong on ParagraphProperties; the rest on
    # TextProperties.
    paragraph_props = {}
    for key in sorted(kwargs.keys()):
        if 'margin' in key:
            paragraph_props[key] = kwargs.pop(key)
    style.addElement(TextProperties(**kwargs))
    if paragraph_props:
        style.addElement(ParagraphProperties(**paragraph_props))
    return style
def parameter_from_numpy(self, name, array):
    """Create parameter with its value initialized according to a numpy tensor.

    Parameters
    ----------
    name : str
        parameter name
    array : np.ndarray
        initiation value

    Returns
    -------
    mxnet.gluon.parameter
        a parameter object
    """
    # mx.init.Constant pins the initial value to the given tensor.
    p = self.params.get(name, shape=array.shape, init=mx.init.Constant(array))
    return p
async def append_messages(self, name: str,
                          messages: Sequence[AppendMessage],
                          selected: SelectedMailbox = None) \
        -> Tuple[AppendUid, Optional[SelectedMailbox]]:
    """Appends a message to the end of the mailbox.

    See Also:
        `RFC 3502 6.3.11.
        <https://tools.ietf.org/html/rfc3502#section-6.3.11>`_

    Args:
        name: The name of the mailbox.
        messages: The messages to append.
        selected: If applicable, the currently selected mailbox name.

    Raises:
        :class:`~pymap.exceptions.MailboxNotFound`
        :class:`~pymap.exceptions.AppendFailure`
    """
    ...
def _assign_posterior(self):
    """Assign posterior to prior based on the Hungarian algorithm.

    Returns
    -------
    TFA
        Returns the instance itself.
    """
    prior_centers = self.get_centers(self.local_prior)
    posterior_centers = self.get_centers(self.local_posterior_)
    posterior_widths = self.get_widths(self.local_posterior_)
    # Match posterior factors to prior factors by minimal center distance.
    cost_matrix = distance.cdist(prior_centers, posterior_centers, 'euclidean')
    _, col_ind = linear_sum_assignment(cost_matrix)
    # Reorder both centers and widths with the same assignment.
    self.set_centers(self.local_posterior_, posterior_centers[col_ind])
    self.set_widths(self.local_posterior_, posterior_widths[col_ind])
    return self
def clear_host_port(self):
    """Stops the adb port forwarding of the host port used by this client."""
    if not self.host_port:
        return
    self._adb.forward(['--remove', 'tcp:%d' % self.host_port])
    self.host_port = None
def parse(self, request, source):
    """Parse the "scope" value out of the given request.

    The scope parameter is a single string of ``self.separator``-joined
    scope names read from the POST body or the query string.  Parsed
    scopes are stored on ``self.scopes``; whenever the default scope is
    substituted, ``self.send_back`` is set so the effective scope can be
    echoed back to the client.

    :param request: An instance of :class:`oauth2.web.Request`.
    :param source: "body" for a form-encoded body, "query" for a URL
        query parameter.
    :raises ValueError: if ``source`` is not "body" or "query".
    :raises OAuthInvalidError: if a scope is required but missing.
    """
    if source == "body":
        req_scope = request.post_param("scope")
    elif source == "query":
        req_scope = request.get_param("scope")
    else:
        raise ValueError("Unknown scope source '" + source + "'")
    if req_scope is None:
        if self.default is not None:
            # no scope supplied: fall back to the default and flag it
            # to be sent back to the client
            self.scopes = [self.default]
            self.send_back = True
            return
        elif len(self.available_scopes) != 0:
            # scopes exist but none was requested and no default applies
            raise OAuthInvalidError(
                error="invalid_scope",
                explanation="Missing scope parameter in request")
        else:
            return
    req_scopes = req_scope.split(self.separator)
    # keep only the scopes this provider actually offers
    self.scopes = [scope for scope in req_scopes
                   if scope in self.available_scopes]
    if len(self.scopes) == 0 and self.default is not None:
        self.scopes = [self.default]
        self.send_back = True | Parses scope value in given request.
Expects the value of the "scope" parameter in request to be a string
where each requested scope is separated by a white space::
# One scope requested
"profile_read"
# Multiple scopes
"profile_read profile_write"
:param request: An instance of :class:`oauth2.web.Request`.
:param source: Where to read the scope from. Pass "body" in case of a
application/x-www-form-urlencoded body and "query" in
case the scope is supplied as a query parameter in the
URL of a request. |
def matrix_rank(model):
    """Return the rank of the model's stoichiometric matrix.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.
    """
    stoich, _, _ = con_helpers.stoichiometry_matrix(
        model.metabolites, model.reactions)
    return con_helpers.rank(stoich)
Parameters
----------
model : cobra.Model
The metabolic model under investigation. |
def _predict(self, features):
    """Predict match/non-match classes for the given feature matrix.

    Parameters
    ----------
    features : numpy.ndarray
        The data to predict the class of.

    Returns
    -------
    numpy.ndarray
        The predicted classes.

    Raises
    ------
    sklearn.exceptions.NotFittedError
        If the underlying kernel has not been fitted yet.
    """
    from sklearn.exceptions import NotFittedError

    try:
        # keras-style predict_classes returns shape (n, 1); take column 0
        return self.kernel.predict_classes(features)[:, 0]
    except NotFittedError:
        raise NotFittedError(
            "{} is not fitted yet. Call 'fit' with appropriate "
            "arguments before using this method.".format(
                type(self).__name__
            )
        )
Parameters
----------
features : numpy.ndarray
The data to predict the class of.
Returns
-------
numpy.ndarray
The predicted classes. |
def restore_descriptor(self, converted_descriptor):
    """Restore a Table Schema descriptor from a BigQuery schema."""
    restored_fields = []
    for bq_field in converted_descriptor['fields']:
        restored = {
            'name': bq_field['name'],
            'type': self.restore_type(bq_field['type']),
        }
        # BigQuery REQUIRED/REPEATED modes map to a required constraint
        if bq_field.get('mode', 'NULLABLE') != 'NULLABLE':
            restored['constraints'] = {'required': True}
        restored_fields.append(restored)
    return {'fields': restored_fields}
def register_read_multiple(self, register_indices):
    """Read several CPU registers in a single J-Link DLL call.

    Args:
        self (JLink): the ``JLink`` instance
        register_indices (list): register indices to read

    Returns:
        list: the register values, one-to-one with ``register_indices``.

    Raises:
        JLinkException: if a register is invalid or the read fails.
    """
    num_regs = len(register_indices)
    # ctypes arrays: `buf` carries indices in, `data`/`statuses` come back out
    buf = (ctypes.c_uint32 * num_regs)(*register_indices)
    data = (ctypes.c_uint32 * num_regs)(0)
    statuses = (ctypes.c_uint8 * num_regs)(0)
    res = self._dll.JLINKARM_ReadRegs(buf, data, statuses, num_regs)
    if res < 0:
        raise errors.JLinkException(res)
    return list(data) | Retrieves the values from the registers specified.
Args:
self (JLink): the ``JLink`` instance
register_indices (list): list of registers to read
Returns:
A list of values corresponding one-to-one for each of the given
register indices. The returned list of values are the values in
order of which the indices were specified.
Raises:
JLinkException: if a given register is invalid or an error occurs. |
def duration_to_timedelta(obj):
    """Convert a duration string (e.g. ``"10m"``) into a ``timedelta``.

    >>> duration_to_timedelta("10m")
    datetime.timedelta(0, 600)
    """
    parts = DURATION_PATTERN.search(obj).groupdict(default="0")
    return timedelta(**{unit: int(value) for unit, value in parts.items()})
>>> duration_to_timedelta("10m")
datetime.timedelta(0, 600) |
def truncate_database(self, database=None):
    """Drop every table in a database.

    Parameters
    ----------
    database : str, optional
        Database to truncate.  When given, known, and different from the
        currently selected database, the connection switches to it first;
        otherwise the currently selected database is truncated.

    Returns
    -------
    list
        The names of the tables that were dropped.
    """
    # Switch only when a *different*, known database was requested.
    # Equality, not identity: two equal db-name strings need not be the
    # same object, so the previous `is not` comparison was wrong.
    if database in self.databases and database != self.database:
        self.change_db(database)

    tables = self.tables if isinstance(self.tables, list) else [self.tables]
    if len(tables) > 0:
        self.drop(tables)
        self._printer('\t' + str(len(tables)), 'tables truncated from', database)
    return tables
def naive_request(self, url, method, **kwargs):
    """Make a request through the plain (non-OAuth) session.

    :param str url: url to send the request to
    :param str method: type of request (get/put/post/patch/delete)
    :param kwargs: extra params forwarded to the requests api
    :return: the response of the request
    :rtype: requests.Response
    """
    return self._internal_request(self.naive_session, url, method, **kwargs) | Makes a request to url using an without oauth authorization
session, but through a normal session
:param str url: url to send request to
:param str method: type of request (get/put/post/patch/delete)
:param kwargs: extra params to send to the request api
:return: Response of the request
:rtype: requests.Response |
def camel_to_underscore(name):
    """Convert CamelCase naming to underscore_style naming.

    Existing underscores are kept (never doubled), and a run of
    consecutive capitals is treated as one word, except its final
    capital, which starts the next word.

    >>> camel_to_underscore('SpamEggsAndBacon')
    'spam_eggs_and_bacon'
    >>> camel_to_underscore('__SpamANDBacon__')
    '__spam_and_bacon__'
    """
    chars = []
    for idx, ch in enumerate(name):
        if idx > 0:
            prev = name[idx - 1]
            if ch.isupper() and not prev.isupper() and prev != '_':
                # plain word boundary: non-capital followed by a capital
                chars.append('_')
            elif idx > 3 and not ch.isupper():
                window = name[idx - 3:idx]
                if window.isalpha() and window.isupper():
                    # end of an all-caps run: split before its last capital
                    chars.insert(len(chars) - 1, '_')
        chars.append(ch.lower())
    return ''.join(chars)
If there are existing underscores they will be collapsed with the
to-be-added underscores. Multiple consecutive capital letters will not be
split except for the last one.
>>> camel_to_underscore('SpamEggsAndBacon')
'spam_eggs_and_bacon'
>>> camel_to_underscore('Spam_and_bacon')
'spam_and_bacon'
>>> camel_to_underscore('Spam_And_Bacon')
'spam_and_bacon'
>>> camel_to_underscore('__SpamAndBacon__')
'__spam_and_bacon__'
>>> camel_to_underscore('__SpamANDBacon__')
'__spam_and_bacon__' |
def unlock(path,
           zk_hosts=None,
           identifier=None,
           max_concurrency=1,
           ephemeral_lease=False,
           scheme=None,
           profile=None,
           username=None,
           password=None,
           default_acl=None
           ):
    """Release a semaphore lease held in zookeeper at ``path``.

    Reuses (or opens) the zookeeper connection, rebuilds the semaphore
    handle if this process no longer tracks one for ``path``, then
    releases the lease and forgets the handle.

    Returns True on release, False when no lease could be found.

    CLI example::

        salt minion zk_concurrency.unlock /lock/path host1:1234,host2:1234
    """
    zk = _get_zk_conn(profile=profile, hosts=zk_hosts, scheme=scheme,
                      username=username, password=password, default_acl=default_acl)
    if path not in __context__['semaphore_map']:
        # handle was lost (e.g. fresh process) -- rebuild it so the
        # lease can still be released
        __context__['semaphore_map'][path] = _Semaphore(zk, path, identifier,
                                                        max_leases=max_concurrency,
                                                        ephemeral_lease=ephemeral_lease)
    if path in __context__['semaphore_map']:
        __context__['semaphore_map'][path].release()
        del __context__['semaphore_map'][path]
        return True
    else:
        logging.error('Unable to find lease for path %s', path)
        return False | Remove lease from semaphore
path
The path in zookeeper where the lock is
zk_hosts
zookeeper connect string
identifier
Name to identify this minion, if unspecified defaults to hostname
max_concurrency
Maximum number of lock holders
timeout
timeout to wait for the lock. A None timeout will block forever
ephemeral_lease
Whether the locks in zookeper should be ephemeral
Example:
.. code-block:: bash
salt minion zk_concurrency.unlock /lock/path host1:1234,host2:1234 |
def run(self):
    """Drive every mounted generator round-robin until all are exhausted.

    Repeatedly advances each generator in ``self.mounts`` one step;
    generators that finish (raise ``StopIteration``) are removed.
    Blocks until ``self.mounts`` is empty.  Holds ``self._run_lock`` for
    the whole drive so only one caller runs the chain at a time.
    """
    with self._run_lock:
        while self.mounts:
            # iterate a snapshot: removing from a list while iterating
            # it skips the element right after the removed one
            for mount in list(self.mounts):
                try:
                    next(mount)
                except StopIteration:
                    self.mounts.remove(mount)
def draw(self, surf):
    """Draw all widgets and sub-containers onto ``surf``.

    Widget rects are translated through ``self.convert_rect`` before
    blitting.  Does nothing when the container is hidden.
    """
    if self.shown:
        for w in self.widgets:
            surf.blit(w.image, self.convert_rect(w.rect))
        for c in self.containers:
            c.draw(surf) | Draw all widgets and sub-containers to @surf.
def run_update_cat(_):
    """Rewrite each post-catalog record's parent id from its tag id.

    For every record whose ``tag_kind`` is not ``'z'``, sets ``par_id``
    to the first two characters of ``tag_id`` padded with ``"00"``
    (e.g. tag ``"0203"`` -> parent ``"0200"``).
    """
    recs = MPost2Catalog.query_all().objects()
    for rec in recs:
        if rec.tag_kind != 'z':
            # debug trace of the record being rewritten
            print('-' * 40)
            print(rec.uid)
            print(rec.tag_id)
            print(rec.par_id)
            MPost2Catalog.update_field(rec.uid, par_id=rec.tag_id[:2] + "00") | Update the catagery.
def terminate(self, force=False):
    """Force the child process to terminate.

    Sends SIGINT first and waits ``self.delayafterterminate``; when the
    child survives and ``force`` is true, escalates to SIGKILL and waits
    again.  NOTE(review): despite the name, the first signal sent is
    SIGINT, not SIGTERM.

    Returns
    -------
    bool
        True if the child is no longer alive, False otherwise.
    """
    if not self.isalive():
        return True
    self.kill(signal.SIGINT)
    time.sleep(self.delayafterterminate)
    if not self.isalive():
        return True
    if force:
        self.kill(signal.SIGKILL)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        else:
            return False | This forces a child process to terminate.
def clear(self):
    """Remove all key-value pairs.

    Re-points every forward link of the head node at the tail, zeroes
    the tail's back-link slot, and resets the list level to 1.
    NOTE(review): the node layout (links at offsets ``2+i``, back-link
    at ``-1``) is inferred from usage here — confirm against the node
    constructor.
    """
    for i in range(self.maxlevel):
        self._head[2+i] = self._tail
    self._tail[-1] = 0
    self._level = 1 | Remove all key-value pairs.
def __put_slice_in_slim(slim, dataim, sh, i):
    """Paste ``dataim`` as tile ``i`` of a ``sh``-shaped grid inside ``slim``.

    The flat tile index ``i`` is unraveled against the grid shape ``sh``;
    the tile's pixel offset is that grid position times the tile size.
    Returns ``slim`` with the tile written in place.
    """
    row, col = np.unravel_index(int(i), sh)
    tile_h = dataim.shape[0]
    tile_w = dataim.shape[1]
    top = int(tile_h * row)
    left = int(tile_w * col)
    slim[top:int(top + tile_h), left:int(left + tile_w)] = dataim
    return slim
def request(self):
    """Send the prepared request to the ThreatConnect v2 API.

    Builds the URL from the configured API path and request URI, applies
    any filters, sends the request (optionally streaming), and
    post-processes the response.  Basic error detection is handled in
    ``_request_process``; not all failure cases are caught.

    Return:
        (dictionary): keys ``data``, ``response`` (raw), and ``status``.
    """
    self._request.url = '{}/v2/{}'.format(self.tcex.default_args.tc_api_path, self._request_uri)
    self._apply_filters()
    self.tcex.log.debug(u'Resource URL: ({})'.format(self._request.url))
    response = self._request.send(stream=self._stream)
    data, status = self._request_process(response)
    return {'data': data, 'response': response, 'status': status} | Send the request to the API.
This method will send the request to the API. It will try to handle
all the types of responses and provide the relevant data when possible.
Some basic error detection and handling is implemented, but not all failure
cases will get caught.
Return:
(dictionary): Response/Results data. |
def get_facts_by_name(api_url=None, fact_name=None, verify=False, cert=list()):
    """Return facts matching ``fact_name`` from the PuppetDB API.

    :param api_url: Base PuppetDB API url
    :param fact_name: Name of fact
    :param verify: TLS verification flag forwarded to the request helper
    :param cert: client certificate material forwarded to the request helper
    """
    # NOTE(review): mutable default `list()` — harmless here (never
    # mutated), but a tuple default would be safer.
    return utils._make_api_request(api_url, '/facts/{0}'.format(fact_name), verify, cert) | Returns facts by name
:param api_url: Base PuppetDB API url
:param fact_name: Name of fact |
def _next_class(cls):
    """Return the first class in the MRO that is not a ``Multi`` subclass.

    Lets Multi-mixin subclasses locate the parent class they wrap.
    Raises StopIteration when every class in the MRO subclasses Multi.
    """
    for candidate in cls.__mro__:
        if not issubclass(candidate, Multi):
            return candidate
    raise StopIteration
def rpc(self, address, rpc_id):
    """Call an RPC and return its result as a 32-bit integer.

    Mocked responses (``self.mock_rpcs``) are returned verbatim.  A real
    response longer than 4 bytes is truncated to 4 (after a warning);
    shorter than 4 bytes raises, since it cannot be parsed as an integer.

    Args:
        address (int): The address of the tile we want to call the RPC on.
        rpc_id (int): The id of the RPC that we want to call.

    Returns:
        int: The result of the RPC call.

    Raises:
        HardwareError: if the response is shorter than 4 bytes.
    """
    if address in self.mock_rpcs and rpc_id in self.mock_rpcs[address]:
        value = self.mock_rpcs[address][rpc_id]
        return value
    result = self._call_rpc(address, rpc_id, bytes())
    if len(result) != 4:
        self.warn(u"RPC 0x%X on address %d: response had invalid length %d not equal to 4" % (rpc_id, address, len(result)))
    if len(result) < 4:
        raise HardwareError("Response from RPC was not long enough to parse as an integer", rpc_id=rpc_id, address=address, response_length=len(result))
    if len(result) > 4:
        result = result[:4]
    # little-endian unsigned 32-bit integer
    res, = struct.unpack("<L", result)
    return res | Call an RPC and receive the result as an integer.
If the RPC does not properly return a 32 bit integer, raise a warning
unless it cannot be converted into an integer at all, in which case
a HardwareError is thrown.
Args:
address (int): The address of the tile we want to call the RPC
on
rpc_id (int): The id of the RPC that we want to call
Returns:
int: The result of the RPC call. If the rpc did not succeed
an error is thrown instead. |
def extract_text(self, node):
    """Return the parsed text of ``node`` (or a list of nodes) as a string.

    Temporarily swaps ``self.pieces`` for a scratch buffer so the
    recursive ``parse`` writes there instead of the real output, then
    restores the original buffer.  This allows extracting text even for
    nodes on the ignore list.
    """
    if not isinstance(node, (list, tuple)):
        node = [node]
    # save the real output buffer and parse into a scratch one
    pieces, self.pieces = self.pieces, ['']
    for n in node:
        for sn in n.childNodes:
            self.parse(sn)
    ret = ''.join(self.pieces)
    self.pieces = pieces
    return ret | Return the string representation of the node or list of nodes by parsing the
subnodes, but returning the result as a string instead of adding it to `self.pieces`.
Note that this allows extracting text even if the node is in the ignore list. |
def cdot(L, out=None):
    r"""Product of a Cholesky matrix with itself transposed.

    (Fix: a stray bare ``r`` statement — the mangled prefix of this raw
    docstring — raised NameError at call time; the docstring is restored.)

    Args:
        L (array_like): Cholesky matrix (square, 2-d).
        out (:class:`numpy.ndarray`, optional): copy result to.

    Returns:
        :class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`.

    Raises:
        ValueError: if ``L`` is not a square 2-d matrix.
    """
    L = asarray(L, float)
    layout_error = "Wrong matrix layout."
    if L.ndim != 2:
        raise ValueError(layout_error)
    if L.shape[0] != L.shape[1]:
        raise ValueError(layout_error)
    if out is None:
        out = empty((L.shape[0], L.shape[1]), float)
    # einsum "ij,kj->ik" computes L @ L.T without materializing L.T
    return einsum("ij,kj->ik", L, L, out=out)
Args:
L (array_like): Cholesky matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`. |
def make_anchor_id(self):
    """Return a string usable as a URL anchor for this comment.

    Joins user and timestamp with an underscore and replaces every
    character outside ``[a-zA-Z0-9_]`` with an underscore.
    """
    raw = '{0}_{1}'.format(self.user, self.timestamp)
    return re.sub('[^a-zA-Z0-9_]', '_', raw)
def error(self, amplexception):
    """Handle notification of an error: print it indented, then re-raise.

    :param amplexception: the exception instance being reported.
    """
    # indent every line of the message for readability
    msg = '\t'+str(amplexception).replace('\n', '\n\t')
    print('Error:\n{:s}'.format(msg))
    raise amplexception | Receives notification of an error.
def flatten_once(iterable, check=is_iterable):
    """Flatten ``iterable`` by exactly one level.

    Elements for which ``check`` returns true are expanded in place;
    everything else is yielded unchanged.
    """
    for element in iterable:
        if not check(element):
            yield element
        else:
            for inner in element:
                yield inner
def _are_aligned_angles(self, b1, b2):
    """Are two boxes aligned according to their angle?

    True when the angles differ by at most ``self.angle_tol``, also
    treating angles pi apart as aligned (orientation-insensitive).
    """
    return abs(b1 - b2) <= self.angle_tol or abs(np.pi - abs(b1 - b2)) <= self.angle_tol | Are two boxes aligned according to their angle?
def decode_contents(self, contents, obj):
    """Decode the protobuf sibling list into ``obj``.

    :param contents: a list of RpbContent messages
    :type contents: list
    :param obj: a RiakObject to populate
    :type obj: RiakObject
    :rtype: RiakObject
    """
    obj.siblings = [self.decode_content(c, RiakContent(obj))
                    for c in contents]
    # with multiple siblings and a resolver configured, resolve now
    if len(obj.siblings) > 1 and obj.resolver is not None:
        obj.resolver(obj)
    return obj | Decodes the list of siblings from the protobuf representation
into the object.
:param contents: a list of RpbContent messages
:type contents: list
:param obj: a RiakObject
:type obj: RiakObject
:rtype RiakObject |
def install(self):
    """Install (or upgrade to) every package currently in the queue.

    For each queued package the built binary ``<name>-<version>`` is
    looked up in the output directory and installed through the package
    manager; missing binaries are reported.  Exits with status 1 when
    the queue is empty.
    """
    packages = self.packages()
    if packages:
        print("")
        for pkg in packages:
            ver = SBoGrep(pkg).version()
            prgnam = "{0}-{1}".format(pkg, ver)
            if find_package(prgnam, self.meta.output):
                binary = slack_package(prgnam)
                # --install-new upgrades, or installs if not yet present
                PackageManager(binary).upgrade(flag="--install-new")
            else:
                print("\nPackage {0} not found in the {1} for "
                      "installation\n".format(prgnam, self.meta.output))
    else:
        print("\nPackages not found in the queue for installation\n")
        raise SystemExit(1) | Install packages from queue
def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False, executable=None):
    """Execute a command string over SSH and return its output.

    Returns a dict with ``stdout``/``stderr`` (plus ``rc`` when the exit
    status is known).  NOTE(review): references the Python 2 ``unicode``
    builtin — this code is not Python 3 compatible as written.
    """
    if executable is None:
        executable = '/bin/sh'
    sudo_user = self.sudo_user
    rc, stdin, stdout, stderr = conn.exec_command(cmd, tmp, sudo_user, sudoable=sudoable, executable=executable)
    # exec_command may hand back file-like streams or plain strings
    if type(stdout) not in [ str, unicode ]:
        out = ''.join(stdout.readlines())
    else:
        out = stdout
    if type(stderr) not in [ str, unicode ]:
        err = ''.join(stderr.readlines())
    else:
        err = stderr
    if rc != None:
        return dict(rc=rc, stdout=out, stderr=err )
    else:
        return dict(stdout=out, stderr=err ) | execute a command string over SSH, return the output
def to_header(self, realm=''):
    """Serialize the OAuth parameters as an HTTP Authorization header.

    Only keys starting with ``oauth_`` are included; each value is
    escaped and rendered as a ``key="value"`` pair after the realm.

    Returns a one-entry dict suitable for updating request headers.
    """
    auth_header = 'OAuth realm="%s"' % realm
    pairs = ['%s="%s"' % (k, escape(v))
             for k, v in self.items() if k.startswith('oauth_')]
    if pairs:
        auth_header = "%s, %s" % (auth_header, ', '.join(pairs))
    return {'Authorization': auth_header}
def close(self):
    """Mark the scope as closed, i.e. all symbols have been declared,
    and no further declarations should be done.

    Any leaked referenced symbols are propagated to the parent scope as
    references before closing.  Raises ValueError when already closed.
    """
    if self._closed:
        raise ValueError('scope is already marked as closed')
    if self.parent:
        # bubble leaked references up so the parent accounts for them
        for symbol, c in self.leaked_referenced_symbols.items():
            self.parent.reference(symbol, c)
    self._closed = True | Mark the scope as closed, i.e. all symbols have been declared,
and no further declarations should be done. |
def installed():
    """Return True when Google Chrome appears to be installed.

    Attempts to open Chrome's cookie database path; any failure
    (missing path, unreadable file, ...) means "not installed".
    """
    try:
        chrome_path = ChromeCookies._getPath()
        with open(chrome_path):
            pass
        return True
    except Exception:
        return False
Determines the application data path for Google Chrome
and checks if the path exists. If so, returns True, otherwise
it will return False.
Returns
bool - True if Chrome is installed |
def _build_models_query(self, query):
    """AND ``query`` with a filter restricting results to registered models.

    Each registered model contributes a ``DJANGO_CT``-prefixed term; the
    terms are OR-ed together and AND-ed onto the original query.  The
    query is returned unchanged when no models are registered.
    """
    registered_models_ct = self.build_models_list()
    if registered_models_ct:
        restrictions = [xapian.Query('%s%s' % (TERM_PREFIXES[DJANGO_CT], model_ct))
                        for model_ct in registered_models_ct]
        limit_query = xapian.Query(xapian.Query.OP_OR, restrictions)
        query = xapian.Query(xapian.Query.OP_AND, query, limit_query)
    return query | Builds a query from `query` that filters to documents only from registered models.
def col_apply(self, col, func, *args, **kwargs):
    """Apply ``func`` to one column, wherever that column lives.

    Looks for ``col`` in ``self.data`` first and falls back to
    ``self.meta``; extra ``args``/``kwargs`` are forwarded to the
    column's ``apply``.

    Parameters
    ----------
    col: string
        column in either data or metadata
    func: callable
        function to apply
    """
    target = self.data if col in self.data else self.meta
    target[col] = target[col].apply(func, *args, **kwargs)
Parameters
----------
col: string
column in either data or metadata
func: functional
function to apply |
def _lib(self, name, only_if_have=False):
    """Render a ``-l<name>`` linker flag.

    With ``only_if_have`` set, the flag is emitted only when the build
    environment defines ``HAVE_LIB<NAME>`` truthy; otherwise it is
    emitted unconditionally.

    Example:
        LDFLAGS={{ lib("rt") }} {{ lib("pthread", True) }}
    """
    if only_if_have and not self.env.get('HAVE_LIB' + self.env_key(name)):
        return ''
    return '-l' + name
Example:
LDFLAGS={{ lib("rt") }} {{ lib("pthread", True) }}
Will unconditionally add `-lrt` and check the environment if the key
`HAVE_LIBPTHREAD` is set to be true, then add `-lpthread`. |
def __unroll(self, rolled):
    """Flatten a list of parameter matrices into one 1-D array.

    Each matrix is flattened, the pieces are concatenated along axis 1,
    and the result is reshaped to a flat vector.  NOTE(review): axis=1
    concatenation implies each ``flatten()`` yields a 2-D row (np.matrix
    semantics) — confirm the element type before changing this.
    """
    return np.array(np.concatenate([matrix.flatten() for matrix in rolled], axis=1)).reshape(-1) | Converts parameter matrices into an array.
def to_fits(self, filename, wavelengths=None, flux_unit=None, area=None,
            vegaspec=None, **kwargs):
    """Write the spectrum to a FITS file.

    Samples the spectrum (converting flux units when requested), attaches
    display-format keywords — plus the synphot expression, when present —
    to the extension header, and delegates the actual write to
    :func:`synphot.specio.write_fits_spec`.

    Parameters
    ----------
    filename : str
        Output filename.
    wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
        Wavelength values for sampling; ``self.waveset`` when `None`.
    flux_unit : str or `~astropy.units.core.Unit` or `None`
        Flux is converted to this unit before being written out.
    area, vegaspec
        See :func:`~synphot.units.convert_flux`.
    kwargs : dict
        Keywords accepted by :func:`~synphot.specio.write_fits_spec`.
    """
    w, y = self._get_arrays(wavelengths, flux_unit=flux_unit, area=area,
                            vegaspec=vegaspec)
    # standard numeric display formats for the two table columns
    bkeys = {'tdisp1': 'G15.7', 'tdisp2': 'G15.7'}
    if 'expr' in self.meta:
        bkeys['expr'] = (self.meta['expr'], 'synphot expression')
    if 'ext_header' in kwargs:
        kwargs['ext_header'].update(bkeys)
    else:
        kwargs['ext_header'] = bkeys
    specio.write_fits_spec(filename, w, y, **kwargs) | Write the spectrum to a FITS file.
Parameters
----------
filename : str
Output filename.
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for sampling.
If not a Quantity, assumed to be in Angstrom.
If `None`, ``self.waveset`` is used.
flux_unit : str or `~astropy.units.core.Unit` or `None`
Flux is converted to this unit before written out.
If not given, internal unit is used.
area, vegaspec
See :func:`~synphot.units.convert_flux`.
kwargs : dict
Keywords accepted by :func:`~synphot.specio.write_fits_spec`. |
def _capitalize_first_letter(word):
    """Return a regex pattern matching ``word`` with either case of its
    first letter.

    ``"spam"`` and ``"Spam"`` both yield ``"[sS]pam"``; a word that does
    not start with a letter is returned unchanged.
    """
    first = word[0]
    if not first.isalpha():
        return word
    # character class lists the lowercase form first, uppercase second
    if first.isupper():
        pair = first.swapcase() + first
    else:
        pair = first + first.swapcase()
    return "[" + pair + "]" + word[1:]
Accepts both lowercase and uppercase. |
def add_attributes(self, data, type):
    """Seed ``data`` with the default value of every required attribute.

    For each attribute of ``type``, the attribute's default is stored on
    ``data`` under the name ``_<attr name>``.
    """
    for attr, _ancestry in type.attributes():
        setattr(data, '_%s' % attr.name, attr.get_default())
def _timeout_thread(self, remain):
    """Watchdog: after ``remain`` seconds, release everything if the
    operation has not already ended normally.
    """
    time.sleep(remain)
    if not self._ended:
        self._ended = True
        self._release_all() | Timeout before releasing every thing, if nothing was returned
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.