code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def get_modules(folder):
    """Find all valid modules in the given folder, which must be in the
    same directory as this loader.py module.

    A valid module has a .py extension and is importable.

    @return: all loaded valid modules
    @rtype: iterator of module
    """
    if is_frozen():
        # Running from a zip archive: list entries under <parent>/<folder>/.
        archive_path = os.path.dirname(os.path.dirname(__file__))
        package_name = os.path.basename(os.path.dirname(__file__))
        wanted = "%s/%s/" % (package_name, folder)
        with zipfile.ZipFile(archive_path, 'r') as archive:
            names = []
            for entry in archive.namelist():
                if entry.startswith(wanted) and "__init__" not in entry:
                    names.append(os.path.splitext(entry[len(wanted):])[0])
    else:
        names = get_importable_modules(
            os.path.join(os.path.dirname(__file__), folder))
    for candidate in names:
        try:
            yield importlib.import_module("..%s.%s" % (folder, candidate), __name__)
        except ImportError as msg:
            out.error("could not load module %s: %s" % (candidate, msg))
def handle(data_type, data, data_id=None, caller=None):
    """Execute all data handlers on the specified data according to data type.

    Args:
        data_type (str): data type handle
        data (dict or list): data

    Kwargs:
        data_id (str): can be used to differentiate between different data
            sets of the same data type. If not specified will default to
            the data type
        caller (object): if specified, holds the object or function that
            is trying to handle data

    Returns:
        dict or list - data after handlers have been executed on it
    """
    data_id = data_id or data_type
    if data_id not in _handlers:
        # Lazily instantiate and cache the handlers for this data id.
        instantiated = handlers.instantiate_for_data_type(data_type, data_id=data_id)
        _handlers[data_id] = {h.handle: h for h in instantiated}
    for handler in list(_handlers[data_id].values()):
        try:
            data = handler(data, caller=caller)
        except Exception:
            # A failing handler is logged but does not stop the others.
            vodka.log.error("Data handler '%s' failed with error" % handler)
            vodka.log.error(traceback.format_exc())
    return data
def pages(self):
    """The aggregate pages of all the parser objects."""
    collected = []
    for har_dict in self.har_data:
        parser = HarParser(har_data=har_dict)
        if not self.page_id:
            collected.extend(parser.pages)
            continue
        # A page_id restricts the aggregate to matching pages only.
        for page in parser.pages:
            if page.page_id == self.page_id:
                collected.append(page)
    return collected
def option_group_exists(name, tags=None, region=None, key=None, keyid=None,
                        profile=None):
    """Check to see if an RDS option group exists.

    CLI example::

        salt myminion boto_rds.option_group_exists myoptiongr region=us-east-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        described = conn.describe_option_groups(OptionGroupName=name)
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    return {'exists': bool(described)}
def to_b58check(self, testnet=False):
    """Generate a Base58Check encoding of this key.

    Args:
        testnet (bool): True if the key is to be used with
            testnet, False otherwise.

    Returns:
        str: A Base58Check encoded string representing the key.
    """
    if testnet:
        raw = self.testnet_bytes
    else:
        raw = bytes(self)
    return base58.b58encode_check(raw)
def is_modified(self):
    """Return whether the list has been modified."""
    if self.__modified_data__ is not None:
        return True
    # Contained items may themselves track modification; plain values
    # without an is_modified attribute are ignored.
    for item in self.__original_data__:
        try:
            item_changed = item.is_modified()
        except AttributeError:
            continue
        if item_changed:
            return True
    return False
def to_api(in_dict, int_keys=None, date_keys=None, bool_keys=None):
    """Extend a given mapping for API production.

    Coerces ``int_keys`` values to int, normalizes ``date_keys`` values to
    ISO-8601 strings, and strips keys whose value is None.

    Args:
        in_dict (dict): mapping to normalize in place.
        int_keys: keys whose values should be coerced with int().
        date_keys: keys whose values are date strings or datetimes.
        bool_keys: accepted for interface compatibility; unused here.

    Returns:
        dict: the same mapping, normalized.
    """
    if int_keys:
        for in_key in int_keys:
            if in_dict.get(in_key) is not None:
                in_dict[in_key] = int(in_dict[in_key])
    if date_keys:
        for in_key in date_keys:
            if in_dict.get(in_key) is not None:
                _from = in_dict[in_key]
                if isinstance(_from, basestring):
                    dtime = parse_datetime(_from)
                elif isinstance(_from, datetime):
                    dtime = _from
                else:
                    # Previously an unexpected type reused a stale dtime from
                    # an earlier iteration (or raised NameError); skip it.
                    continue
                in_dict[in_key] = dtime.isoformat()
            elif in_key in in_dict:
                del in_dict[in_key]
    # Snapshot the items first: deleting while iterating the live view
    # raises RuntimeError on Python 3.
    for k, v in list(in_dict.items()):
        if v is None:
            del in_dict[k]
    return in_dict
def feature_list():
    """Check the library for compile-time features.

    The list of features is maintained in libinfo.h and libinfo.cc.

    Returns
    -------
    list
        List of :class:`.Feature` objects
    """
    feature_array = ctypes.POINTER(Feature)()
    feature_count = ctypes.c_size_t()
    check_call(_LIB.MXLibInfoFeatures(ctypes.byref(feature_array),
                                      ctypes.byref(feature_count)))
    return [feature_array[i] for i in range(feature_count.value)]
def wait_for_port(self, port, timeout=10, **probe_kwargs):
    """Block until the specified port starts accepting connections.

    Raises ProbeTimeout if the timeout is reached.

    :param port: int, port number
    :param timeout: int or float (seconds), time to wait for establishing
        the connection
    :param probe_kwargs: arguments passed to Probe constructor
    :return: None
    """
    port_open = functools.partial(self.is_port_open, port)
    probe = Probe(timeout=timeout, fnc=port_open, **probe_kwargs)
    probe.run()
def ls():
    """List all synchronized directories."""
    title, contents = cli_syncthing_adapter.ls()
    if title:
        click.echo(title)
    if contents:
        click.echo(contents.strip())
def load_fasta_file(filename):
    """Load a FASTA file and return the sequences as a list of SeqRecords.

    Args:
        filename (str): Path to the FASTA file to load

    Returns:
        list: all sequences in the FASTA file as Biopython SeqRecord objects
    """
    with open(filename, "r") as fasta_handle:
        return list(SeqIO.parse(fasta_handle, "fasta"))
def xor(s, pad):
    """XOR the given string ``s`` with the one-time-pad ``pad``.

    The pad is cycled, so it may be shorter than ``s``.
    """
    from itertools import cycle
    key_stream = cycle(bytearray(force_bytes(pad, encoding='latin-1')))
    plain = bytearray(force_bytes(s, encoding='latin-1'))
    return binary_type(bytearray(a ^ b for a, b in zip(plain, key_stream)))
def config_profile_list(self):
    """Return the config profile name list from DCNM."""
    profiles = self._config_profile_list() or []
    return [profile.get('profileName') for profile in profiles]
def init_selection(self):
    """Invoke the selection-changed handlers once at startup, so the
    shot_taskfile_sel_changed and asset_taskfile_sel_changed signals get
    emitted for the initial state.

    :returns: None
    :raises: None
    """
    shot_indexes = self.shotverbrws.selected_indexes(0)
    self.shot_ver_sel_changed(
        shot_indexes[0] if shot_indexes else QtCore.QModelIndex())
    asset_indexes = self.assetverbrws.selected_indexes(0)
    self.asset_ver_sel_changed(
        asset_indexes[0] if asset_indexes else QtCore.QModelIndex())
def get_agent(self, agent_id):
    """Get the ``Agent`` specified by its ``Id``.

    In plenary mode, the exact ``Id`` is found or a ``NotFound``
    results. Otherwise, the returned ``Agent`` may have a different
    ``Id`` than requested, such as the case where a duplicate ``Id``
    was assigned to an ``Agent`` and retained for compatibility.

    arg:    agent_id (osid.id.Id): the ``Id`` of an ``Agent``
    return: (osid.authentication.Agent) - the returned ``Agent``
    raise:  NotFound - no ``Agent`` found with the given ``Id``
    raise:  NullArgument - ``agent_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('authentication',
                                     collection='Agent',
                                     runtime=self._runtime)
    identifier = ObjectId(
        self._get_id(agent_id, 'authentication').get_identifier())
    # The view filter narrows the query according to the current view mode.
    query = dict({'_id': identifier}, **self._view_filter())
    result = collection.find_one(query)
    return objects.Agent(osid_object_map=result,
                         runtime=self._runtime,
                         proxy=self._proxy)
def glob1(self, dir_relpath, glob):
    """Return a list of paths in dir_relpath that match glob and are not
    ignored."""
    if self.isignored(dir_relpath, directory=True):
        return []
    candidates = self._glob1_raw(dir_relpath, glob)
    base = self._relpath_no_dot(dir_relpath)

    def qualify(path):
        # Ignore patterns are matched against paths relative to the root.
        return os.path.join(base, path)

    return self._filter_ignored(candidates, selector=qualify)
def calculate_shannon_entropy(self, data):
    """Return the Shannon entropy of a given string over ``self.charset``.

    Borrowed from:
    http://blog.dkbza.org/2007/05/scanning-data-for-entropy-anomalies.html

    :param data: string. The word to analyze.
    :returns: float, between 0.0 and 8.0
    """
    if not data:
        return 0
    total = len(data)
    entropy = 0
    for symbol in self.charset:
        probability = float(data.count(symbol)) / total
        if probability > 0:
            entropy -= probability * math.log(probability, 2)
    return entropy
def local_path(self, url, filename=None, decompress=False, download=False):
    """Return the full local path the given file will have once downloaded.

    If ``download`` is True, the file is actually fetched.
    """
    if download:
        return self.fetch(url=url, filename=filename, decompress=decompress)
    cached_name = self.local_filename(url, filename, decompress)
    return join(self.cache_directory_path, cached_name)
def register_run_plugins(self, plugin_name, plugin_class):
    """Attach the needed parts of a plugin to the correct Icetea run
    global parts.

    :param plugin_name: Name of the plugin
    :param plugin_class: PluginBase
    :return: Nothing
    :raises PluginException: if a plugin with this name is already registered
    """
    if plugin_name in self.registered_plugins:
        raise PluginException("Plugin {} already registered! "
                              "Duplicate plugins?".format(plugin_name))
    self.logger.debug("Registering plugin %s", plugin_name)
    if plugin_class.get_allocators():
        # Allocator-providing plugins get routed to the allocator registry.
        allocator_registrar = self.plugin_types[PluginTypes.ALLOCATOR]
        allocator_registrar(plugin_name, plugin_class)
    self.registered_plugins.append(plugin_name)
def _setup_http_session(self):
headers = {"Content-type": "application/json"}
if (self._id_token):
headers.update({"authorization": "Bearer {}".format(
self._id_token)})
self._session.headers.update(headers)
self._session.verify = False | Sets up the common HTTP session parameters used by requests. |
def _find_usage_networking_sgs(self):
    """Calculate usage for VPC-related security group limits."""
    logger.debug("Getting usage for EC2 VPC resources")
    vpc_sg_counts = defaultdict(int)
    sg_rule_counts = defaultdict(int)
    for sg in self.resource_conn.security_groups.all():
        # EC2-Classic groups have no vpc_id and are not counted here.
        if sg.vpc_id is not None:
            vpc_sg_counts[sg.vpc_id] += 1
            sg_rule_counts[sg.id] = len(sg.ip_permissions)
    for vpc_id, num in vpc_sg_counts.items():
        self.limits['Security groups per VPC']._add_current_usage(
            num,
            aws_type='AWS::EC2::VPC',
            resource_id=vpc_id,
        )
    for sg_id, num in sg_rule_counts.items():
        self.limits['Rules per VPC security group']._add_current_usage(
            num,
            aws_type='AWS::EC2::SecurityGroupRule',
            resource_id=sg_id,
        )
def ulocalized_gmt0_time(self, time, context, request):
    """Return the localized time in string format, converted to GMT+0."""
    date_value = get_date(context, time)
    if not date_value:
        return ""
    return self.ulocalized_time(date_value.toZone("GMT+0"), context, request)
def match_tagname(self, el, tag):
    """Match the tag name (case-insensitively for HTML documents)."""
    if tag.name is None or self.is_xml:
        name = tag.name
    else:
        name = util.lower(tag.name)
    # A None name, the universal selector '*', or an exact match all pass.
    return name is None or name in (self.get_tag(el), '*')
async def filter_new_posts(self, source_id, post_ids):
    """Filter a list of post ids down to the unknown (new) ones.

    :param source_id: id of the source
    :type string:
    :param post_ids: list of post ids
    :type list:
    :returns: list of unknown post ids.
    """
    unknown = []
    try:
        known = await self._db.get_known_posts(source_id, post_ids)
        unknown = [post_id for post_id in post_ids if post_id not in known]
    except Exception as exc:
        # A database failure yields an empty result instead of propagating.
        logger.error("Error when filtering for new posts {} {}".format(
            source_id, post_ids))
        logger.exception(exc)
    return unknown
def begin(self, request, data):
    """Try to get a Request Token from the OAuth provider and redirect the
    user to the provider's site for approval."""
    token_request = self.get_request(
        http_url=self.REQUEST_TOKEN_URL,
        parameters=dict(oauth_callback=self.get_callback(request)))
    content = self.load_request(token_request)
    if not content:
        # Token request failed: send the user back to the login page.
        return redirect('netauth-login')
    authorize_request = self.get_request(
        token=Token.from_string(content), http_url=self.AUTHORIZE_URL)
    return redirect(authorize_request.to_url())
def count_open_fds():
    """Return the number of open file descriptors for the current process.

    .. warning:: will only work on UNIX-like OSes (requires ``lsof``).

    http://stackoverflow.com/a/7142094
    """
    pid = os.getpid()
    # check_output returns bytes on Python 3; decode before splitting,
    # otherwise splitting on a str separator raises TypeError.
    output = subprocess.check_output(
        ['lsof', '-w', '-Ff', '-p', str(pid)]).decode('utf-8', 'replace')
    return len(
        [line for line in output.split('\n')
         if line and line[0] == 'f' and line[1:].isdigit()]
    )
def chown(dirs, user=None, group=None):
    """Use sudo to set user and/or group ownership of the given paths.

    Returns the result of the underlying command, or None when neither
    user nor group is given.
    """
    if isinstance(dirs, basestring):
        dirs = [dirs]
    paths = ' '.join(dirs)
    if user and group:
        return sudo('chown {}:{} {}'.format(user, group, paths))
    if user:
        return sudo('chown {} {}'.format(user, paths))
    if group:
        return sudo('chgrp {} {}'.format(group, paths))
    return None
def add_unique_runid(testcase, run_id=None):
    """Add a run id to the test description.

    The ``run_id`` makes descriptions unique between imports and forces
    Polarion to update every testcase every time.
    """
    marker = run_id or id(add_unique_runid)
    existing = testcase.get("description") or ""
    testcase["description"] = '{}<br id="{}"/>'.format(existing, marker)
def __get_values(self):
    """Retrieve value information, either via describe (remote) or by
    paging through the point's value listing. MUST be called within lock.
    """
    if self.__remote:
        description = self.__client.describe(self.__point)
        if description is None:
            values = []
        elif description['type'] != 'Point':
            raise IOTUnknown('%s is not a Point' % self.__point)
        else:
            values = description['meta']['values']
    else:
        # Page through the local listing until a short page is returned.
        page_size = 100
        offset = 0
        values = []
        while True:
            page = self.__point.list(limit=page_size, offset=offset)
            values += page
            if len(page) < page_size:
                break
            offset += page_size
    lang = self.__client.default_lang
    for value in values:
        # Reduce the per-language comment mapping to the default language.
        value['comment'] = value['comment'].get(lang, None) if value['comment'] else None
    return values
def emit(self, record):
    """Print a record out to some streams.

    If FLAGS.logtostderr is set, it will print to sys.stderr ONLY.
    If FLAGS.alsologtostderr is set, it will print to sys.stderr.
    If FLAGS.logtostderr is not set, it will log to the stream
    associated with the current thread.

    Args:
        record: logging.LogRecord, the record to emit.
    """
    level = record.levelno
    if not FLAGS.is_parsed():
        # Flags not parsed yet: fall back to stderr, warning only once.
        global _warn_preinit_stderr
        if _warn_preinit_stderr:
            sys.stderr.write(
                'WARNING: Logging before flag parsing goes to stderr.\n')
            _warn_preinit_stderr = False
        self._log_to_stderr(record)
    elif FLAGS['logtostderr'].value:
        self._log_to_stderr(record)
    else:
        super(PythonHandler, self).emit(record)
        stderr_threshold = converter.string_to_standard(
            FLAGS['stderrthreshold'].value)
        should_mirror = (FLAGS['alsologtostderr'].value
                         or level >= stderr_threshold)
        if should_mirror and self.stream != sys.stderr:
            self._log_to_stderr(record)
    if _is_absl_fatal_record(record):
        # Fatal records abort the process after flushing buffered output.
        self.flush()
        os.abort()
def apply_to(self, x, columns=False):
    """Apply this translation to the given object.

    The argument can be several sorts of objects:

    * ``np.array`` with shape (3, )
    * ``np.array`` with shape (N, 3)
    * ``np.array`` with shape (3, N), use ``columns=True``
    * ``Translation``
    * ``Rotation``
    * ``Complete``
    * ``UnitCell``

    In case of arrays, the 3D vectors are translated. In case of
    transformations, a new transformation is returned that consists of
    this translation applied AFTER the given translation. In case of a
    unit cell, the original object is returned.

    This method is equivalent to ``self*x``.
    """
    is_array = isinstance(x, np.ndarray)
    if is_array and columns and len(x.shape) == 2 and x.shape[0] == 3:
        # (3, N): add the translation to every column.
        return x + self.t.reshape((3, 1))
    if is_array and not columns and (
            x.shape == (3,) or (len(x.shape) == 2 and x.shape[1] == 3)):
        # (3,) or (N, 3): broadcasting translates every row.
        return x + self.t
    elif isinstance(x, Complete):
        return Complete(x.r, x.t + self.t)
    elif isinstance(x, Translation):
        return Translation(x.t + self.t)
    elif isinstance(x, Rotation):
        return Complete(x.r, self.t)
    elif isinstance(x, UnitCell):
        return x
    else:
        raise ValueError("Can not apply this translation to %s" % x)
def configure_lease(self, lease, lease_max, mount_point=DEFAULT_MOUNT_POINT):
    """Configure lease settings for the AWS secrets engine.

    It is optional, as there are default values for lease and lease_max.

    Supported methods:
        POST: /{mount_point}/config/lease. Produces: 204 (empty body)

    :param lease: Specifies the lease value provided as a string duration
        with time suffix. "h" (hour) is the largest suffix.
    :type lease: str | unicode
    :param lease_max: Specifies the maximum lease value provided as a string
        duration with time suffix. "h" (hour) is the largest suffix.
    :type lease_max: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/{mount_point}/config/lease'.format(mount_point=mount_point)
    payload = {
        'lease': lease,
        'lease_max': lease_max,
    }
    return self._adapter.post(url=api_path, json=payload)
def format(logger,
           show_successful=True,
           show_errors=True,
           show_traceback=True):
    """Build a report of the actions that were logged by the given Logger.

    The report contains a list of successful actions, as well as the full
    error message on failed actions.

    :type logger: Logger
    :param logger: The logger that recorded what happened in the queue.
    :rtype: string
    :return: A string summarizing the status of every performed task.
    """
    lines = []
    if show_errors and logger.get_aborted_actions():
        lines += _underline('Failed actions:')
        for log in logger.get_aborted_logs():
            if show_traceback:
                lines.append(log.get_name() + ':')
                lines.append(log.get_error())
            else:
                lines.append(log.get_name() + ': ' + log.get_error(False))
        lines.append('')
    if show_successful:
        lines += _underline('Successful actions:')
        for log in logger.get_succeeded_logs():
            lines.append(log.get_name())
        lines.append('')
    return '\n'.join(lines).strip()
def _iterate_prefix(self, callsign, timestamp=timestamp_now):
    """Truncate the call until it corresponds to a prefix in the database.

    Raises KeyError when no prefix of the callsign matches.
    """
    prefix = callsign
    # Special case: VK9/AX9/VI9 calls with a three-letter suffix changed
    # their prefix mapping in 2006.
    if re.search('(VK|AX|VI)9[A-Z]{3}', callsign):
        if timestamp > datetime(2006, 1, 1, tzinfo=UTC):
            prefix = callsign[0:3] + callsign[4:5]
    while prefix:
        try:
            return self._lookuplib.lookup_prefix(prefix, timestamp)
        except KeyError:
            # Drop spaces and the last character, then retry.
            prefix = prefix.replace(' ', '')[:-1]
    raise KeyError
def update_from_json(self, path=join('config', 'hdx_dataset_static.json')):
    """Update dataset metadata with static metadata from a JSON file.

    Args:
        path (str): Path to JSON dataset metadata.
            Defaults to config/hdx_dataset_static.json.

    Returns:
        None
    """
    super(Dataset, self).update_from_json(path)
    self.separate_resources()
def do_history(self, line):
    """history: Display a list of commands that have been entered."""
    self._split_args(line, 0, 0)
    for position, command in enumerate(self._history):
        d1_cli.impl.util.print_info("{0: 3d} {1}".format(position, command))
def compose_all(stream, Loader=Loader):
    """Parse all YAML documents in a stream and produce the corresponding
    representation trees."""
    composer = Loader(stream)
    try:
        while composer.check_node():
            yield composer.get_node()
    finally:
        # Always release the loader's resources, even on early exit.
        composer.dispose()
def toPIL(self, **attribs):
    """Convert the canvas to a PIL image."""
    import PIL.Image
    png_bytes = self.convert("png")
    buffer = io.BytesIO(png_bytes)
    return PIL.Image.open(buffer)
def panzoom(marks):
    """Helper for panning and zooming over a set of marks.

    Creates and returns a PanZoom interaction with the 'x' and 'y'
    dimension scales of the specified marks.
    """
    x_scales = []
    y_scales = []
    for mark in marks:
        x_scales.extend(mark._get_dimension_scales('x', preserve_domain=True))
        y_scales.extend(mark._get_dimension_scales('y', preserve_domain=True))
    return PanZoom(scales={'x': x_scales, 'y': y_scales})
def _execute_command(self, command, workunit_name=None, workunit_labels=None):
    """Execute a node or npm command via self._run_node_distribution_command.

    :param NodeDistribution.Command command: The command to run.
    :param string workunit_name: A name for the execution's work unit;
        defaults to command.executable.
    :param list workunit_labels: Extra WorkUnitLabels to apply.
    :returns: A tuple of (returncode, command).
    """
    name = workunit_name or command.executable
    labels = {WorkUnitLabel.TOOL} | set(workunit_labels or ())
    with self.context.new_workunit(name=name, labels=labels,
                                   cmd=str(command)) as workunit:
        returncode = self._run_node_distribution_command(command, workunit)
        outcome = WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE
        workunit.set_outcome(outcome)
        return returncode, command
def build_docs(location="doc-source", target=None, library="icetea_lib"):
    """Build documentation for Icetea: autogenerate module docs with
    sphinx-apidoc, then build html with sphinx-build.

    :param location: Documentation source directory.
    :param target: Documentation target path. Defaults to doc/html.
    :param library: Library location for autodoc.
    :return: 0 if successful, 3 if either sphinx step fails.
    """
    def _run(description, cmd_ar):
        # Run one sphinx step, reporting failures uniformly.
        try:
            print(description)
            check_call(cmd_ar)
            return 0
        except CalledProcessError as error:
            print("Documentation build failed. Return code: {}".format(
                error.returncode))
            return 3
        except OSError as error:
            print(error)
            print("Documentation build failed. Are you missing Sphinx? "
                  "Please install sphinx using 'pip install sphinx'.")
            return 3

    if _run("Generating api docs.",
            ["sphinx-apidoc", "-o", location, library]):
        return 3
    if target is None:
        target = "doc{}html".format(os.sep)
    if _run("Building html documentation.",
            ["sphinx-build", "-b", "html", location, target]):
        return 3
    print("Documentation built.")
    return 0
def pickle_dumps(self, protocol=None):
    """Return a string with the pickle representation.

    ``protocol`` selects the pickle protocol; ``self.pickle_protocol``
    is used if ``protocol`` is None.
    """
    if protocol is None:
        protocol = self.pickle_protocol
    buffer = StringIO()
    pmg_pickle_dump(self, buffer, protocol=protocol)
    return buffer.getvalue()
def get(self, key, get_cas=False):
    """Get a key from the first server that has it.

    :param key: Key's name
    :type key: six.string_types
    :param get_cas: If true, return (value, cas), where cas is the new
        CAS value.
    :type get_cas: boolean
    :return: The key's data from a server, or None / (None, None) when
        no server has it.
    :rtype: object
    """
    for server in self.servers:
        value, cas = server.get(key)
        if value is None:
            continue
        return (value, cas) if get_cas else value
    if get_cas:
        return None, None
def assert_page_source_contains(self, expected_value, failure_message='Expected page source to contain: "{}"'):
    """Assert that the page source contains ``expected_value``."""
    def source_has_value():
        return expected_value in self.driver_wrapper.page_source()
    message = unicode(failure_message).format(expected_value)
    self.webdriver_assert(source_has_value, message)
def update(self, job_id, name=NotUpdated, description=NotUpdated,
           is_public=NotUpdated, is_protected=NotUpdated):
    """Update a Job via PATCH, sending only fields that were changed."""
    payload = {}
    self._copy_if_updated(payload, name=name, description=description,
                          is_public=is_public, is_protected=is_protected)
    return self._patch('/jobs/%s' % job_id, payload)
def square_root(n, epsilon=0.001):
    """Return the square root of ``n`` via Newton's method, with maximum
    absolute error ``epsilon`` on ``guess * guess - n``.

    :raises ValueError: if ``n`` is negative (the original implementation
        looped forever in that case).
    """
    if n < 0:
        raise ValueError("cannot take the square root of a negative number: %r" % (n,))
    guess = n / 2
    while abs(guess * guess - n) > epsilon:
        guess = (guess + (n / guess)) / 2
    return guess
def safe_call(request: Request, methods: Methods, *, debug: bool) -> Response:
    """Call a Request, catching exceptions to ensure we always return a
    Response.

    Args:
        request: The Request object.
        methods: The list of methods that can be called.
        debug: Include more information in error responses.

    Returns:
        A Response object.
    """
    with handle_exceptions(request, debug) as handler:
        method = methods.items[request.method]
        result = call(method, *request.args, **request.kwargs)
        handler.response = SuccessResponse(result=result, id=request.id)
    # On exception, the context manager sets an error response instead.
    return handler.response
def valarray(shape, value=np.nan, typecode=None):
    """Return an array of the given shape filled with ``value``.

    The array starts as dtype ``typecode`` (bool when None) and is
    promoted by the multiplication with ``value``, matching numpy's
    usual type promotion. Uses ``np.nan`` — the ``np.NaN`` alias was
    removed in NumPy 2.0.
    """
    if typecode is None:
        typecode = bool
    out = np.ones(shape, dtype=typecode) * value
    # np.ones always yields an ndarray; keep the guard for exotic returns.
    if not isinstance(out, np.ndarray):
        out = np.asarray(out)
    return out
def capitalize_unicode_name(s):
    """Turn a string such as 'capital delta' into the shortened,
    capitalized version, in this case simply 'Delta'. Used as a
    transform in sanitize_identifier."""
    index = s.find('capital')
    if index == -1:
        return s
    rest = s[index:].replace('capital', '').strip()
    return s[:index] + rest[0].upper() + rest[1:]
def get_first_lang():
    """Get the first language of the Accept-Language header.

    Returns False when the header is missing or empty.
    """
    # headers.get() returns None when the header is absent; the original
    # crashed with AttributeError on .split(',') in that case.
    header = request.headers.get('Accept-Language') or ''
    request_lang = header.split(',')
    if request_lang and request_lang[0]:
        lang = locale.normalize(request_lang[0]).split('.')[0]
    else:
        lang = False
    return lang
def getTypeStr(_type):
    r"""Gets the string representation of the given type."""
    if isinstance(_type, CustomType):
        return str(_type)
    # Classes and most callables carry __name__; everything else is ''.
    return getattr(_type, '__name__', '')
def npm(usr_pwd=None, clean=False):
    """Update outdated global npm packages for Node.js.

    Silently does nothing when npm is not installed.
    """
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # still propagate.
    try:
        cmd('which npm')
    except Exception:
        return
    print('-[npm]----------')
    outdated = cmd("npm outdated -g | awk 'NR>1 {print $1}'")
    if not outdated:
        return
    for pkg in getPackages(outdated):
        cmd('{} {}'.format('npm update -g ', pkg), usr_pwd=usr_pwd, run=global_run)
def _remote_file_size(url=None, file_name=None, pb_dir=None):
    """Get the remote file size in bytes.

    Parameters
    ----------
    url : str, optional
        The full url of the file. Use this option to explicitly state
        the full url.
    file_name : str, optional
        The base file name. Use this argument along with pb_dir if you
        want the full url to be constructed.
    pb_dir : str, optional
        The database directory. Use this argument along with file_name
        if you want the full url to be constructed.

    Returns
    -------
    remote_file_size : int
        Size of the file in bytes
    """
    if file_name and pb_dir:
        url = posixpath.join(config.db_index_url, pb_dir, file_name)
    # Request the identity encoding so content-length is the true size.
    response = requests.head(url, headers={'Accept-Encoding': 'identity'})
    response.raise_for_status()
    return int(response.headers['content-length'])
def _add_membership_multicast_socket(self):
self._membership_request = socket.inet_aton(self._multicast_group) \
+ socket.inet_aton(self._multicast_ip)
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
self._membership_request
) | Make membership request to multicast
:rtype: None |
def to_commit(obj):
    """Convert the given object to a commit if possible and return it.

    Tags are dereferenced first; anything that does not resolve to a
    commit raises ValueError.
    """
    if obj.type == 'tag':
        obj = deref_tag(obj)
    if obj.type != "commit":
        raise ValueError("Cannot convert object %r to type commit" % obj)
    return obj
def tokenize(self):
    """Tokenize the input text.

    Scans for instances of perl tags and `include directives.
    Tokenization skips line and block comments.

    Returns
    -------
    list
        List of tuples: (typ, start, end)
        Where:
        - typ is "perl" or "incl"
        - start/end mark the first/last char offset of the token
    """
    token_spec = [
        ('mlc', r'/\*.*?\*/'),
        ('slc', r'//[^\r\n]*?\r?\n'),
        ('perl', r'<%.*?%>'),
        ('incl', r'`include'),
    ]
    pattern = '|'.join('(?P<%s>%s)' % pair for pair in token_spec)
    found = []
    for match in re.finditer(pattern, self.text, re.DOTALL):
        # Comment matches ('mlc'/'slc') consume their span but emit nothing,
        # which is what skips directives inside comments.
        if match.lastgroup in ("incl", "perl"):
            found.append((match.lastgroup, match.start(0), match.end(0) - 1))
    return found
def export_wif(self) -> str:
    """Export the ECDSA private key in WIF (Wallet Import Format), an
    encoding that makes the key easier to copy.

    :return: a WIF encoded private key.
    """
    payload = b''.join([b'\x80', self.__private_key, b'\01'])
    checksum = Digest.hash256(payload[0:34])[0:4]
    encoded = base58.b58encode(payload + checksum)
    return encoded.decode('ascii')
def effective_nsamples(self):
    """The effective number of samples post burn-in that the sampler has
    acquired so far."""
    try:
        act = numpy.array(list(self.acts.values())).max()
    except (AttributeError, TypeError):
        # No autocorrelation estimates yet: treat the act as infinite.
        act = numpy.inf
    if self.burn_in is None:
        per_walker = max(int(self.niterations // act), 1)
    elif self.burn_in.is_burned_in:
        post_burn = self.niterations - self.burn_in.burn_in_iteration
        per_walker = max(int(post_burn // act), 1)
    else:
        # Not burned in yet: no effective samples.
        per_walker = 0
    return self.nwalkers * per_walker
def get(self, url, data=None):
    """Execute an HTTP GET request for the given URL.

    ``data`` should be a dictionary of url parameters.
    """
    raw_response = self.http.get(url,
                                 headers=self.headers,
                                 params=data,
                                 **self.requests_params)
    return self.process(raw_response)
def plot_sed(sed, showlnl=False, **kwargs):
    """Render a plot of a spectral energy distribution.

    Parameters
    ----------
    showlnl : bool
        Overlay a map of the delta-loglikelihood values vs. flux
        in each energy bin.
    cmap : str
        Colormap that will be used for the delta-loglikelihood map.
    llhcut : float
        Minimum delta-loglikelihood value.
    ul_ts_threshold : float
        TS threshold that determines whether the MLE or UL
        is plotted in each energy bin.
    """
    ax = kwargs.pop('ax', plt.gca())
    cmap = kwargs.get('cmap', 'BuGn')
    annotate_name(sed, ax=ax)
    SEDPlotter.plot_flux_points(sed, **kwargs)
    # Only overlay the model when at least one bin is significant (TS > 9).
    if np.any(sed['ts'] > 9.) and 'model_flux' in sed:
        SEDPlotter.plot_model(sed['model_flux'], noband=showlnl, **kwargs)
    if showlnl:
        SEDPlotter.plot_lnlscan(sed, **kwargs)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlabel('Energy [MeV]')
    ax.set_ylabel('E$^{2}$dN/dE [MeV cm$^{-2}$ s$^{-1}$]')
def filter(self, chromosome, **kwargs):
    """Default SNP filter: mixes all applied SNPs, ignoring insertions
    and deletions.

    NOTE(review): the reference allele is looked up at the position of
    the last SNP processed; confirm callers only pass SNPs at a single
    position.
    """
    def appendAllele(alleles, sources, snpSet, snp):
        # Collect the SNP's alternate allele plus the reference allele.
        pos = snp.start
        if snp.alt[0] == '-' or snp.ref[0] == '-':
            # Insertion/deletion: ignored by this default filter.
            pass
        else:
            sources[snpSet] = snp
            alleles.append(snp.alt)
        refAllele = chromosome.refSequence[pos]
        alleles.append(refAllele)
        sources['ref'] = refAllele
        return alleles, sources

    sources = {}
    alleles = []
    for snpSet, data in kwargs.iteritems():
        if type(data) is list:
            for snp in data:
                alleles, sources = appendAllele(alleles, sources, snpSet, snp)
        else:
            # Fixed: a typo ('allels') previously discarded this result.
            alleles, sources = appendAllele(alleles, sources, snpSet, data)
    return SequenceSNP(alleles, sources=sources)
def threshold(image, block_size=DEFAULT_BLOCKSIZE, mask=None):
    """Apply adaptive thresholding to the given image.

    Args:
        image: BGRA image.
        block_size: optional int block_size to use for adaptive thresholding.
        mask: optional mask.

    Returns:
        Thresholded image.
    """
    if mask is None:
        # Default to a full-coverage (all-255) mask.
        mask = np.full(image.shape[:2], 255, dtype=np.uint8)
    if len(image.shape) > 2 and image.shape[2] == 4:
        image = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)
    background = _calc_block_mean_variance(image, mask, block_size)
    normalized = image.astype(np.float32) - background.astype(np.float32) + 255
    _, result = cv2.threshold(normalized, 215, 255, cv2.THRESH_BINARY)
    return result
def revoker(self, revoker, **prefs):
    """Generate a signature that specifies another key as being valid for
    revoking this key.

    :param revoker: The :py:obj:`PGPKey` to specify as a valid revocation key.
    :type revoker: :py:obj:`PGPKey`
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is
        passphrase-protected and has not been unlocked
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
    :returns: :py:obj:`PGPSignature`

    In addition to the optional keyword arguments accepted by
    :py:meth:`PGPKey.sign`, the following optional keyword arguments can
    be used with :py:meth:`PGPKey.revoker`.

    :keyword sensitive: If ``True``, this sets the sensitive flag on the
        RevocationKey subpacket. Currently, this has no other effect.
    :type sensitive: ``bool``
    """
    hash_algo = prefs.pop('hash', None)
    sig = PGPSignature.new(SignatureType.DirectlyOnKey, self.key_algorithm,
                           hash_algo, self.fingerprint.keyid)
    if prefs.pop('sensitive', False):
        keyclass = RevocationKeyClass.Normal | RevocationKeyClass.Sensitive
    else:
        keyclass = RevocationKeyClass.Normal | 0x00
    sig._signature.subpackets.addnew('RevocationKey',
                                     hashed=True,
                                     algorithm=revoker.key_algorithm,
                                     fingerprint=revoker.fingerprint,
                                     keyclass=keyclass)
    # A revoker designation must itself be irrevocable.
    prefs['revocable'] = False
    return self._sign(self, sig, **prefs)
def _set_relationship_type(self, type_identifier, display_name=None, display_label=None, description=None, domain='Relationship'):
    """Set the relationship type, deriving unspecified display fields
    from the identifier."""
    display_name = type_identifier if display_name is None else display_name
    display_label = display_name if display_label is None else display_label
    if description is None:
        description = 'Relationship Type for ' + display_name
    self._relationship_type = Type(authority='DLKIT',
                                   namespace='relationship.Relationship',
                                   identifier=type_identifier,
                                   display_name=display_name,
                                   display_label=display_label,
                                   description=description,
                                   domain=domain)
def state(self, *args, **kwargs):
    # Thin wrapper: dispatch to the generated API call described by funcinfo["state"].
    return self._makeApiCall(self.funcinfo["state"], *args, **kwargs) | Get AWS State for a worker type
Return the state of a given workertype as stored by the provisioner.
This state is stored as three lists: 1 for running instances, 1 for
pending requests. The `summary` property contains an updated summary
similar to that returned from `listWorkerTypeSummaries`.
This method is ``stable`` |
def ClaimRecords(self,
                 limit=10000,
                 timeout="30m",
                 start_time=None,
                 record_filter=lambda x: False,
                 max_filtered=1000):
    """Claim up to `limit` unclaimed queue records for `timeout`; requires the queue lock."""
    # Claiming is only meaningful while we hold the queue lock.
    if not self.locked:
        raise aff4.LockError("Queue must be locked to claim records.")
    # Delegate the actual claim to the data store inside one mutation pool.
    with data_store.DB.GetMutationPool() as mutation_pool:
        return mutation_pool.QueueClaimRecords(
            self.urn,
            self.rdf_type,
            limit=limit,
            timeout=timeout,
            start_time=start_time,
            record_filter=record_filter,
            max_filtered=max_filtered) | Returns and claims up to limit unclaimed records for timeout seconds.
Returns a list of records which are now "claimed", a claimed record will
generally be unavailable to be claimed until the claim times out. Note
however that in case of an unexpected timeout or other error a record might
be claimed twice at the same time. For this reason it should be considered
weaker than a true lock.
Args:
limit: The number of records to claim.
timeout: The duration of the claim.
start_time: The time to start claiming records at. Only records with a
timestamp after this point will be claimed.
record_filter: A filter method to determine if the record should be
returned. It will be called serially on each record and the record will
be filtered (not returned or locked) if it returns True.
max_filtered: If non-zero, limits the number of results read when
filtered. Specifically, if max_filtered filtered results are read
sequentially without any unfiltered results, we stop looking for
results.
Returns:
A list (id, record) where record is a self.rdf_type and id is a record
identifier which can be used to delete or release the record.
Raises:
LockError: If the queue is not locked. |
def custom_resolve(self):
if not callable(self.custom_resolver):
return
new_addresses = []
for address in self.addresses:
for new_address in self.custom_resolver(address):
new_addresses.append(new_address)
self.addresses = new_addresses | If a custom resolver is defined, perform custom resolution on
the contained addresses.
:return: |
def get_resource(self, path, params=None):
url = '%s%s' % (path, self._param_list(params))
headers = {
'Accept': 'application/json;odata=minimalmetadata'
}
response = O365_DAO().getURL(self._url(url), headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return json_loads(response.data) | O365 GET method. Return representation of the requested resource. |
def tailor(pattern_or_root, dimensions=None, distributed_dim='time',
           read_only=False):
    # Convenience constructor: wrap the descriptor (or filename pattern) in a
    # TileManager that restricts its dimensions to the requested window.
    return TileManager(pattern_or_root, dimensions=dimensions,
                       distributed_dim=distributed_dim, read_only=read_only) | Return a TileManager to wrap the root descriptor and tailor all the
dimensions to a specified window.
Keyword arguments:
root -- a NCObject descriptor.
pattern -- a filename string to open a NCObject descriptor.
dimensions -- a dictionary to configure the dimension limits. |
def unpatch(self):
    """Restore the original execute_sql implementations and un-patch the cache backend."""
    # No-op if the patch was never applied (or was already removed).
    if not self._patched:
        return
    # Put back the execute_sql captured at patch time for every compiler.
    for func in self._read_compilers + self._write_compilers:
        func.execute_sql = self._original[func]
    self.cache_backend.unpatch()
    self._patched = False | un-applies this patch. |
def close(self):
    """Close the internal signalfd file descriptor, if it is still open."""
    # Serialize with other closers so the fd is closed exactly once.
    with self._close_lock:
        sfd = self._sfd
        if sfd >= 0:
            # Mark as closed *before* calling close(2) so a failing close
            # cannot lead to a second attempt on the same fd.
            self._sfd = -1
            self._signals = frozenset()
            close(sfd) | Close the internal signalfd file descriptor if it isn't closed
:raises OSError:
If the underlying ``close(2)`` fails. The error message matches
those found in the manual page. |
def value_text(self):
search = self._selected.get()
for item in self._rbuttons:
if item.value == search:
return item.text
return "" | Sets or returns the option selected in a ButtonGroup by its text value. |
def save_current_figure_as(self):
    """Save the currently selected thumbnail's figure, if one is selected."""
    if self.current_thumbnail is not None:
        # Delegate to save_figure_as with the thumbnail's figure and format.
        self.save_figure_as(self.current_thumbnail.canvas.fig,
                            self.current_thumbnail.canvas.fmt) | Save the currently selected figure. |
def add_summary(self, summary, global_step=None):
    # Accept a serialized Summary: deserialize it into a protobuf first.
    if isinstance(summary, bytes):
        summ = summary_pb2.Summary()
        summ.ParseFromString(summary)
        summary = summ
    # Strip SummaryMetadata from tags already written once: the metadata
    # only needs to be stored with a tag's first occurrence in the file.
    for value in summary.value:
        if not value.metadata:
            continue
        if value.tag in self._seen_summary_tags:
            value.ClearField("metadata")
            continue
        self._seen_summary_tags.add(value.tag)
    # Wrap in an Event and append it to the event file.
    event = event_pb2.Event(summary=summary)
    self._add_event(event, global_step) | Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer and adds it
to the event file.
Parameters
----------
summary : A `Summary` protocol buffer
Optionally serialized as a string.
global_step: Number
Optional global step value to record with the summary. |
def _use_memcache(self, key, options=None):
flag = ContextOptions.use_memcache(options)
if flag is None:
flag = self._memcache_policy(key)
if flag is None:
flag = ContextOptions.use_memcache(self._conn.config)
if flag is None:
flag = True
return flag | Return whether to use memcache for this key.
Args:
key: Key instance.
options: ContextOptions instance, or None.
Returns:
True if the key should be cached in memcache, False otherwise. |
def translate(self, vector, inc_alt_states=True):
vector = numpy.array(vector)
for atom in self.get_atoms(inc_alt_states=inc_alt_states):
atom._vector += vector
return | Translates every atom in the AMPAL object.
Parameters
----------
vector : 3D Vector (tuple, list, numpy.array)
Vector used for translation.
inc_alt_states : bool, optional
If true, will rotate atoms in all states i.e. includes
alternate conformations for sidechains. |
def _create_interval_filter(interval):
def filter_fn(value):
if (not isinstance(value, six.integer_types) and
not isinstance(value, float)):
raise error.HParamsError(
'Cannot use an interval filter for a value of type: %s, Value: %s' %
(type(value), value))
return interval.min_value <= value and value <= interval.max_value
return filter_fn | Returns a function that checkes whether a number belongs to an interval.
Args:
interval: A tensorboard.hparams.Interval protobuf describing the interval.
Returns:
A function taking a number (a float or an object of a type in
six.integer_types) that returns True if the number belongs to (the closed)
'interval'. |
def _polling_iteration(self):
if self.__task is None:
self.ready_event().set()
elif self.__task.check_events() is True:
self.ready_event().set()
self.registry().task_finished(self) | Poll for scheduled task stop events
:return: None |
def merge_entities(doc):
    """Collapse each entity span in `doc` into a single token; return `doc`."""
    # retokenize() batches all merges into one atomic retokenization pass.
    with doc.retokenize() as retokenizer:
        for ent in doc.ents:
            # Carry the syntactic root's tag/dep and the entity label onto
            # the merged token.
            attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
            retokenizer.merge(ent, attrs=attrs)
    return doc | Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities |
def job_runner(self):
outputs = luigi.task.flatten(self.output())
for output in outputs:
if not isinstance(output, luigi.contrib.hdfs.HdfsTarget):
warnings.warn("Job is using one or more non-HdfsTarget outputs" +
" so it will be run in local mode")
return LocalJobRunner()
else:
return DefaultHadoopJobRunner() | Get the MapReduce runner for this job.
If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used.
Otherwise, the LocalJobRunner which streams all data through the local machine
will be used (great for testing). |
def _print_drift_report(self):
try:
response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)
rows = []
for resource in response.get('StackResources', []):
row = []
row.append(resource.get('LogicalResourceId', 'unknown'))
row.append(resource.get('PhysicalResourceId', 'unknown'))
row.append(resource.get('ResourceStatus', 'unknown'))
row.append(resource.get('DriftInformation', {}).get('StackResourceDriftStatus', 'unknown'))
rows.append(row)
print('Drift Report:')
print(tabulate(rows, headers=[
'Logical ID',
'Physical ID',
'Resource Status',
'Drift Info'
]))
except Exception as wtf:
logging.error(wtf, exc_info=True)
return False
return True | Report the drift of the stack.
Args:
None
Returns:
Good or Bad; True or False
Note: not yet implemented |
def _cli_check_format(fmt):
if fmt is None:
return None
fmt = fmt.lower()
if not fmt in api.get_formats():
errstr = "Format '" + fmt + "' does not exist.\n"
errstr += "For a complete list of formats, use the 'bse list-formats' command"
raise RuntimeError(errstr)
return fmt | Checks that a basis set format exists and if not, raises a helpful exception |
async def generate_waifu_insult(self, avatar):
    """Coroutine: POST the avatar URL to weeb.sh and return the generated image bytes."""
    if not isinstance(avatar, str):
        raise TypeError("type of 'avatar' must be str.")
    # One-shot session; the API expects the avatar URL as form data.
    async with aiohttp.ClientSession() as session:
        async with session.post("https://api.weeb.sh/auto-image/waifu-insult", headers=self.__headers, data={"avatar": avatar}) as resp:
            if resp.status == 200:
                return await resp.read()
            else:
                # Surface the API's own error message on any non-200 response.
                raise Exception((await resp.json())['message']) | Generate a waifu insult image.
This function is a coroutine.
Parameters:
avatar: str - http/s url pointing to an image, has to have proper headers and be a direct link to an image
Return Type: image data |
def users_get(self, domain):
    """GET the user list for `domain` from the management endpoint."""
    path = self._get_management_path(domain)
    return self.http_request(path=path,
                             method='GET') | Retrieve a list of users from the server.
:param AuthDomain domain: The authentication domain to retrieve users from.
:return: :class:`~.HttpResult`. The list of users can be obtained from
the returned object's `value` property. |
def to_struct(cls, name=None):
if name is None:
name = cls.__name__
basic_attrs = dict([(attr_name, value)
for attr_name, value in cls.get_attrs()
if isinstance(value, Column)])
if not basic_attrs:
return None
src = 'struct {0} {{'.format(name)
for attr_name, value in basic_attrs.items():
src += '{0} {1};'.format(value.type.typename, attr_name)
src += '};'
if ROOT.gROOT.ProcessLine(src) != 0:
return None
return getattr(ROOT, name, None) | Convert the TreeModel into a compiled C struct |
def GetBatchJobDownloadUrlWhenReady(client, batch_job_id,
max_poll_attempts=MAX_POLL_ATTEMPTS):
batch_job = GetBatchJob(client, batch_job_id)
if batch_job['status'] == 'CANCELED':
raise Exception('Batch Job with ID "%s" was canceled before completing.'
% batch_job_id)
poll_attempt = 0
while (poll_attempt in range(max_poll_attempts) and
batch_job['status'] in PENDING_STATUSES):
sleep_interval = (30 * (2 ** poll_attempt) +
(random.randint(0, 10000) / 1000))
print 'Batch Job not ready, sleeping for %s seconds.' % sleep_interval
time.sleep(sleep_interval)
batch_job = GetBatchJob(client, batch_job_id)
poll_attempt += 1
if 'downloadUrl' in batch_job:
url = batch_job['downloadUrl']['url']
print ('Batch Job with Id "%s", Status "%s", and DownloadUrl "%s" ready.'
% (batch_job['id'], batch_job['status'], url))
return url
print ('BatchJob with ID "%s" is being canceled because it was in a pending '
'state after polling %d times.' % (batch_job_id, max_poll_attempts))
CancelBatchJob(client, batch_job) | Retrieves the downloadUrl when the BatchJob is complete.
Args:
client: an instantiated AdWordsClient used to poll the BatchJob.
batch_job_id: a long identifying the BatchJob to be polled.
max_poll_attempts: an int defining the number of times the BatchJob will be
checked to determine whether it has completed.
Returns:
A str containing the downloadUrl of the completed BatchJob.
Raises:
Exception: If the BatchJob hasn't finished after the maximum poll attempts
have been made. |
def _endmsg(self, rd):
    """Format a relativedelta-like `rd` as an elapsed-time string with bold numbers."""
    msg = ""
    s = ""
    if rd.hours > 0:
        # Pluralize "hour" when more than one.
        if rd.hours > 1:
            s = "s"
        msg += colors.bold(str(rd.hours)) + " hour" + s + " "
    s = ""
    if rd.minutes > 0:
        if rd.minutes > 1:
            s = "s"
        msg += colors.bold(str(rd.minutes)) + " minute" + s + " "
    # Milliseconds derived from the microsecond component.
    milliseconds = int(rd.microseconds / 1000)
    # NOTE(review): seconds are only rendered when there is a non-zero
    # millisecond component -- an exact whole-second duration shows no
    # seconds at all; confirm this is intended.
    if milliseconds > 0:
        msg += colors.bold(str(rd.seconds) + "." + str(milliseconds))
        msg += " seconds"
    return msg | Returns an end message with elapsed time |
def iterator(plugins, context):
test = pyblish.logic.registered_test()
state = {
"nextOrder": None,
"ordersWithError": set()
}
for plugin in plugins:
state["nextOrder"] = plugin.order
message = test(**state)
if message:
raise StopIteration("Stopped due to %s" % message)
instances = pyblish.api.instances_by_plugin(context, plugin)
if plugin.__instanceEnabled__:
for instance in instances:
yield plugin, instance
else:
yield plugin, None | An iterator for plug-in and instance pairs |
def expand_variable_dicts(
    list_of_variable_dicts: 'List[Union[Dataset, OrderedDict]]',
) -> 'List[Mapping[Any, Variable]]':
    """Expand Dataset/dict inputs into a list of name->Variable mappings."""
    from .dataarray import DataArray
    from .dataset import Dataset
    var_dicts = []
    for variables in list_of_variable_dicts:
        # A Dataset already holds a mapping of Variables -- use it directly.
        if isinstance(variables, Dataset):
            var_dicts.append(variables.variables)
            continue
        # Otherwise convert each entry into a Variable.
        sanitized_vars = OrderedDict()
        for name, var in variables.items():
            if isinstance(var, DataArray):
                # Append the DataArray's coordinates (minus the variable's
                # own name) as an extra mapping in the result.
                coords = var._coords.copy()
                coords.pop(name, None)
                var_dicts.append(coords)
            var = as_variable(var, name=name)
            sanitized_vars[name] = var
        var_dicts.append(sanitized_vars)
    return var_dicts | Given a list of dicts with xarray object values, expand the values.
return var_dicts | Given a list of dicts with xarray object values, expand the values.
Parameters
----------
list_of_variable_dicts : list of dict or Dataset objects
Each value for the mappings must be of the following types:
- an xarray.Variable
- a tuple `(dims, data[, attrs[, encoding]])` that can be converted in
an xarray.Variable
- or an xarray.DataArray
Returns
-------
A list of ordered dictionaries corresponding to inputs, or coordinates from
an input's values. The values of each ordered dictionary are all
xarray.Variable objects. |
def getFeedContent(self, feed, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
    # Delegate to the generic fetcher using this feed's fetch URL.
    return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, loadLimit, since, until) | Return items for a particular feed |
def _write_iodir(self, iodir=None):
    """Write `iodir` (or the buffered value) to the IODIR register over I2C."""
    if iodir is not None:
        # Update the buffered copy before writing it out.
        self.iodir = iodir
    self.i2c.write_list(self.IODIR, self.iodir) | Write the specified byte value to the IODIR register. If no value
specified the current buffered value will be written. |
def logger_init(level):
levellist = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
handler = logging.StreamHandler()
fmt = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
handler.setFormatter(logging.Formatter(fmt))
logger = logging.root
logger.addHandler(handler)
logger.setLevel(levellist[level]) | Initialize the logger for this thread.
Sets the log level to ERROR (0), WARNING (1), INFO (2), or DEBUG (3),
depending on the argument `level`. |
def agent_service_deregister(consul_url=None, token=None, serviceid=None):
    """Remove a service from the Consul agent; returns a dict with 'res' and 'message'."""
    ret = {}
    data = {}
    # Fall back to the configured Consul URL when none is supplied.
    if not consul_url:
        consul_url = _get_config()
        if not consul_url:
            log.error('No Consul URL found.')
            ret['message'] = 'No Consul URL found.'
            ret['res'] = False
            return ret
    if not serviceid:
        raise SaltInvocationError('Required argument "serviceid" is missing.')
    # Consul expects a PUT on the per-service deregister endpoint.
    function = 'agent/service/deregister/{0}'.format(serviceid)
    res = _query(consul_url=consul_url,
                 function=function,
                 token=token,
                 method='PUT',
                 data=data)
    if res['res']:
        ret['res'] = True
        ret['message'] = 'Service {0} removed from agent.'.format(serviceid)
    else:
        ret['res'] = False
        ret['message'] = 'Unable to remove service {0}.'.format(serviceid)
    return ret | Used to remove a service.
:param consul_url: The Consul server URL.
:param serviceid: A serviceid describing the service.
:return: Boolean and message indicating success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.agent_service_deregister serviceid='redis' |
def read_url(url):
logging.debug('reading {url} ...'.format(url=url))
token = os.environ.get("BOKEH_GITHUB_API_TOKEN")
headers = {}
if token:
headers['Authorization'] = 'token %s' % token
request = Request(url, headers=headers)
response = urlopen(request).read()
return json.loads(response.decode("UTF-8")) | Reads given URL as JSON and returns data as loaded python object. |
def apply_t0(self, hits):
    """Add per-channel t0 calibration offsets to `hits.time` (in place); return hits."""
    if HAVE_NUMBA:
        # Fast path: numba-compiled kernel works directly on the lookup tables.
        apply_t0_nb(
            hits.time, hits.dom_id, hits.channel_id, self._lookup_tables
        )
    else:
        # Pure-Python fallback: look up each hit's calibration by DOM and
        # channel and take the t0 component (index 6).
        n = len(hits)
        cal = np.empty(n)
        lookup = self._calib_by_dom_and_channel
        for i in range(n):
            calib = lookup[hits['dom_id'][i]][hits['channel_id'][i]]
            cal[i] = calib[6]
        hits.time += cal
    return hits | Apply only t0s |
def ping_directories_handler(sender, **kwargs):
entry = kwargs['instance']
if entry.is_visible and settings.SAVE_PING_DIRECTORIES:
for directory in settings.PING_DIRECTORIES:
DirectoryPinger(directory, [entry]) | Ping directories when an entry is saved. |
def viable_source_types_for_generator (generator):
assert isinstance(generator, Generator)
if generator not in __viable_source_types_cache:
__vstg_cached_generators.append(generator)
__viable_source_types_cache[generator] = viable_source_types_for_generator_real (generator)
return __viable_source_types_cache[generator] | Caches the result of 'viable_source_types_for_generator'. |
def __PrintMessageCommentLines(self, message_type):
    """Emit a //-comment block describing `message_type` and its members."""
    # Fall back to a generated description when none was provided.
    description = message_type.description or '%s message type.' % (
        message_type.name)
    # Reserve 3 columns for the '// ' prefix when wrapping.
    width = self.__printer.CalculateWidth() - 3
    for line in textwrap.wrap(description, width):
        self.__printer('// %s', line)
    PrintIndentedDescriptions(self.__printer, message_type.enum_types,
                              'Enums', prefix='// ')
    PrintIndentedDescriptions(self.__printer, message_type.message_types,
                              'Messages', prefix='// ')
    PrintIndentedDescriptions(self.__printer, message_type.fields,
                              'Fields', prefix='// ') | Print the description of this message. |
def script_post_save(model, os_path, contents_manager, **kwargs):
    """Post-save hook: export a saved notebook to a script via nbconvert."""
    from nbconvert.exporters.script import ScriptExporter
    # Only notebooks are converted; ignore other file types.
    if model['type'] != 'notebook':
        return
    global _script_exporter
    # Lazily create and reuse a single module-level exporter instance.
    if _script_exporter is None:
        _script_exporter = ScriptExporter(parent=contents_manager)
    log = contents_manager.log
    base, ext = os.path.splitext(os_path)
    script, resources = _script_exporter.from_filename(os_path)
    # Use the exporter-provided extension, defaulting to .txt.
    script_fname = base + resources.get('output_extension', '.txt')
    log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
    with io.open(script_fname, 'w', encoding='utf-8') as f:
        f.write(script) | convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script` |
def get_option(self, option):
value = getattr(self, option, None)
if value is not None:
return value
return getattr(settings, "COUNTRIES_{0}".format(option.upper())) | Get a configuration option, trying the options attribute first and
falling back to a Django project setting. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.