| code (string, length 51–2.38k) | docstring (string, length 4–15.2k) |
|---|---|
def _key_name(self):
if self._key is not None:
return self._key
return self.__class__.__name__.lower() | Return the key referring to this object
The default value is the lower case version of the class name
:rtype: str |
def aloha_to_html(html_source):
    """Convert HTML5 coming from the Aloha editor to a more structured HTML5.

    :param html_source: raw Aloha HTML5 markup
    :return: pretty-printed serialization of the converted tree
    """
    xml = aloha_to_etree(html_source)
    # NOTE(review): lxml's tostring returns bytes when no encoding is
    # given -- confirm callers expect bytes here.
    return etree.tostring(xml, pretty_print=True)
def unzip_file(filename):
    """Decompress *filename* if it is bzipped (name ending with 'bz2').

    Returns the path of a temporary file holding the decompressed data,
    or None when the file is not bzipped or reading it fails.
    """
    if not filename.endswith('bz2'):
        return None
    compressed = bz2.BZ2File(filename)
    fdn, tmpfilepath = tempfile.mkstemp()
    with closing(os.fdopen(fdn, 'wb')) as out_file:
        try:
            out_file.write(compressed.read())
        except IOError:
            import traceback
            traceback.print_exc()
            LOGGER.info("Failed to read bzipped file %s", str(filename))
            os.remove(tmpfilepath)
            return None
    return tmpfilepath
def list_vm_images_sub(access_token, subscription_id):
    """List VM images in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON body of a list of VM images.
    """
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.Compute/images',
                        '?api-version=', COMP_API])
    # do_get_next presumably follows paginated result pages -- confirm.
    return do_get_next(endpoint, access_token)
def get_attribute_at(config, target_path, key, default_value=None):
    """Return the value of *key* after descending *target_path* in *config*.

    :param config: nested mapping to walk
    :param target_path: iterable of intermediate keys
    :param key: final key to look up
    :param default_value: returned when *key* is absent at the target
    :return: the attribute value, or *default_value*
    """
    node = config
    for step in target_path:
        node = node[step]
    if key in node:
        return node[key]
    return default_value
def update_dcnm_partition_static_route(self, tenant_id, arg_dict):
    """Add static route in DCNM's partition.

    This gets pushed to the relevant leaf switches.

    :param tenant_id: tenant owning the firewall service
    :param arg_dict: dict read for 'excl_list', 'tenant_name' and
        'router_id'
    :return: True on success, False when the DCNM update failed
    """
    ip_list = self.os_helper.get_subnet_nwk_excl(tenant_id,
                                                 arg_dict.get('excl_list'))
    srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id)
    ret = self.dcnm_obj.update_partition_static_route(
        arg_dict.get('tenant_name'), fw_const.SERV_PART_NAME, ip_list,
        vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile,
        service_node_ip=srvc_node_ip)
    if not ret:
        LOG.error("Unable to update DCNM ext profile with static "
                  "route %s", arg_dict.get('router_id'))
        # Roll back: remove the router interface on failure.
        self.delete_intf_router(tenant_id, arg_dict.get('tenant_name'),
                                arg_dict.get('router_id'))
        return False
    return True
def get_frame(self):
    """Get a dataframe of metrics from this storage.

    Reads this run's metric documents from MongoDB sorted by epoch; an
    empty frame (with only a 'run_name' column) is returned when no
    metrics exist yet.
    """
    metric_items = list(self.db.metrics.find({'run_name': self.model_config.run_name}).sort('epoch_idx'))
    if len(metric_items) == 0:
        return pd.DataFrame(columns=['run_name'])
    else:
        # Drop Mongo's internal id and the model name; index rows by epoch.
        return pd.DataFrame(metric_items).drop(['_id', 'model_name'], axis=1).set_index('epoch_idx')
def remove_interval_helper(self, interval, done, should_raise_error):
    """Return self after removing *interval* and balancing.

    If the interval doesn't exist and should_raise_error is set, raise
    ValueError.  This method may set done to [1] to tell all callers
    that rebalancing has completed.

    See Eternally Confuzzled's jsw_remove_r function (lines 1-32)
    in his AVL tree article for reference.
    """
    if self.center_hit(interval):
        if not should_raise_error and interval not in self.s_center:
            done.append(1)
            return self
        try:
            self.s_center.remove(interval)
        except:
            # NOTE(review): bare except -- presumably meant to catch the
            # container's removal error; narrow it when confirmed.
            self.print_structure()
            raise KeyError(interval)
        if self.s_center:
            done.append(1)
            return self
        # s_center emptied: this node no longer holds any interval.
        return self.prune()
    else:
        direction = self.hit_branch(interval)
        if not self[direction]:
            if should_raise_error:
                raise ValueError
            done.append(1)
            return self
        self[direction] = self[direction].remove_interval_helper(interval, done, should_raise_error)
        if not done:
            return self.rotate()
        return self
def get(self, document=None, plugin=None):
    """Get one or more documents.

    :param document: name of the document, or None for all documents
    :type document: str
    :param plugin: plugin object under which the document was
        registered, or None to ignore the registering plugin
    :type plugin: GwBasePattern
    :return: a single document, a dict of documents, or None
    """
    if plugin is None:
        # No plugin filter: plain name lookup (or everything).
        if document is None:
            return self.documents
        if document in self.documents.keys():
            return self.documents[document]
        return None
    if document is None:
        # All documents registered under the given plugin.
        return {key: doc for key, doc in self.documents.items()
                if doc.plugin == plugin}
    if document in self.documents.keys() and \
            self.documents[document].plugin == plugin:
        return self.documents[document]
    return None
def get_task(self, task_id):
    """Get task meta for task by ``task_id``.

    Returns a fresh (unsaved) model instance when no stored meta
    exists.  When the same missing id is requested twice in a row, a
    repeatable-read isolation warning may be emitted.
    """
    try:
        return self.get(task_id=task_id)
    except self.model.DoesNotExist:
        if self._last_id == task_id:
            self.warn_if_repeatable_read()
        self._last_id = task_id
        return self.model(task_id=task_id)
def create(self):
    """Create a new database and store its attributes on ``self.db_attrs``."""
    self.db_attrs = self.consul.create_db(
        self.instance_name,
        self.instance_type,
        self.admin_username,
        self.admin_password,
        db_name=self.db_name,
        storage_size_gb=self.storage_size,
        timeout_s=self.launch_timeout_s,
    )
def show_notification(cls, channel_id, *args, **kwargs):
    """Create and show a Notification.

    See `Notification.Builder.update` for a list of accepted parameters.

    :param channel_id: Android notification channel to post on
    :return: result of ``builder.show()``
    """
    app = AndroidApplication.instance()
    builder = Notification.Builder(app, channel_id)
    builder.update(*args, **kwargs)
    return builder.show()
def get(dic, path, seps=PATH_SEPS, idx_reg=_JSNP_GET_ARRAY_IDX_REG):
    """Getter for nested dicts.

    :param dic: a dict[-like] object
    :param path: path expression pointing at the object wanted
    :param seps: separator char candidates
    :param idx_reg: compiled regex recognizing array-index components
    :return: a tuple of (result_object, error_message)

    >>> d = {'a': {'b': {'c': 0, 'd': [1, 2]}}, '': 3}
    >>> assert get(d, '/') == (3, '')  # key becomes '' (empty string).
    >>> assert get(d, "/a/b/c") == (0, '')
    >>> get(d, "/a/b/d/2")
    (None, 'list index out of range')
    """
    items = [_jsnp_unescape(p) for p in _split_path(path, seps)]
    if not items:
        return (dic, '')
    try:
        if len(items) == 1:
            return (dic[items[0]], '')
        # Walk all but the last component, then decide whether the final
        # component addresses a list index or a mapping key.
        prnt = functools.reduce(operator.getitem, items[:-1], dic)
        arr = anyconfig.utils.is_list_like(prnt) and idx_reg.match(items[-1])
        return (prnt[int(items[-1])], '') if arr else (prnt[items[-1]], '')
    except (TypeError, KeyError, IndexError) as exc:
        # Errors are reported via the second tuple slot, not raised.
        return (None, str(exc))
def section_multiplane(self,
                       plane_origin,
                       plane_normal,
                       heights):
    """Return multiple parallel cross sections of the current mesh in 2D.

    Parameters
    ----------
    plane_origin : (3,) float
        Point on the cross section plane.
    plane_normal : (3,) float
        Normal vector of the section plane.
    heights : (n,) float
        Each section is offset by height along the plane normal.

    Returns
    -------
    paths : (n,) Path2D or None
        2D cross sections at the specified heights; entries with no
        intersection stay None.  path.metadata['to_3D'] contains the
        transform returning a 2D section back into 3D space.
    """
    # Imported lazily -- presumably to avoid a circular import; confirm.
    from .exchange.load import load_path
    lines, transforms, faces = intersections.mesh_multiplane(
        mesh=self,
        plane_normal=plane_normal,
        plane_origin=plane_origin,
        heights=heights)
    paths = [None] * len(lines)
    for index, L, T in zip(range(len(lines)),
                           lines,
                           transforms):
        if len(L) > 0:
            paths[index] = load_path(
                L, metadata={'to_3D': T})
    return paths
async def crypto_sign(wallet_handle: int,
                      signer_vk: str,
                      msg: bytes) -> bytes:
    """Sign a message with a key.

    Note: to use DID keys with this function you can call
    indy_key_for_did to get key id (verkey) for a specific DID.

    :param wallet_handle: wallet handler (created by open_wallet).
    :param signer_vk: id (verkey) of my key. The key must be created by
        calling create_key or create_and_store_my_did
    :param msg: a message to be signed
    :return: a signature string
    """
    logger = logging.getLogger(__name__)
    logger.debug("crypto_sign: >>> wallet_handle: %r, signer_vk: %r, msg: %r",
                 wallet_handle,
                 signer_vk,
                 msg)

    def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32):
        # Copy the ctypes buffer into owned bytes before native memory
        # may be released.
        return bytes(arr_ptr[:arr_len]),

    # The ctypes callback is created once and cached on the function.
    if not hasattr(crypto_sign, "cb"):
        logger.debug("crypto_sign: Creating callback")
        crypto_sign.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32), transform_cb)

    c_wallet_handle = c_int32(wallet_handle)
    c_signer_vk = c_char_p(signer_vk.encode('utf-8'))
    c_msg_len = c_uint32(len(msg))
    signature = await do_call('indy_crypto_sign',
                              c_wallet_handle,
                              c_signer_vk,
                              msg,
                              c_msg_len,
                              crypto_sign.cb)
    logger.debug("crypto_sign: <<< res: %r", signature)
    return signature
def reboot(search, one=True, force=False):
    """Reboot one or more vms.

    search : string
        Filter vms, see the execution module.
    one : boolean
        Reboot only one vm.
    force : boolean
        Force reboot, faster but no graceful shutdown.

    .. note::
        If the search parameter does not contain an equal (=) symbol it
        will be assumed it will be tried as uuid, hostname, and alias.

    CLI Example:

    .. code-block:: bash

        salt-run vmadm.reboot 91244bba-1146-e4ec-c07e-e825e0223aa9
        salt-run vmadm.reboot search='alias=marije'
        salt-run vmadm.reboot search='type=KVM' one=False
    """
    return _action('reboot', search, one, force)
def determine_deaths(self, event: Event):
    """Determine who dies each time step.

    Parameters
    ----------
    event :
        An event object emitted by the simulation containing an index
        representing the simulants affected by the event and timing
        information.
    """
    effective_rate = self.mortality_rate(event.index)
    # Convert the per-step rate into a probability of death.
    effective_probability = 1 - np.exp(-effective_rate)
    draw = self.randomness.get_draw(event.index)
    affected_simulants = draw < effective_probability
    self.population_view.update(pd.Series('dead', index=event.index[affected_simulants]))
def get_mzmlfile_map(self):
    """Return a dict mapping mzML file names to their database ids."""
    cursor = self.get_cursor()
    cursor.execute('SELECT mzmlfile_id, mzmlfilename FROM mzmlfiles')
    return dict((name, file_id) for file_id, name in cursor.fetchall())
def porosity(im):
    r"""Calculate the porosity of an image assuming 1's are void space
    and 0's are solid phase.

    All other values are ignored, so this can also return the relative
    fraction of a phase of interest.

    Parameters
    ----------
    im : ND-array
        Image of the void space with 1's indicating void space (or
        True) and 0's indicating the solid phase (or False).

    Returns
    -------
    porosity : float
        Calculated as the sum of all 1's divided by the sum of all 1's
        and 0's.

    See Also
    --------
    phase_fraction
    """
    # The original body began with a stray bare ``r`` expression (the
    # raw-docstring prefix split off the docstring), which raised
    # NameError at call time; it is folded back into the docstring.
    im = sp.array(im, dtype=int)
    void_voxels = sp.sum(im == 1)
    solid_voxels = sp.sum(im == 0)
    return void_voxels / (solid_voxels + void_voxels)
def resize(self, lines=None, columns=None):
    """Resize the screen to the given size.

    If the requested screen size has more lines than the existing
    screen, lines will be added at the bottom; fewer lines are clipped
    at the top.  Columns are added or clipped at the right.  If the
    requested size is identical to the current one, nothing happens.

    :param int lines: number of lines in the new screen.
    :param int columns: number of columns in the new screen.
    """
    lines = lines or self.lines
    columns = columns or self.columns
    if lines == self.lines and columns == self.columns:
        return
    self.dirty.update(range(lines))
    if lines < self.lines:
        # Clip at the top: delete from the visual top with the cursor
        # parked at (0, 0), then restore the caller's cursor.
        self.save_cursor()
        self.cursor_position(0, 0)
        self.delete_lines(self.lines - lines)
        self.restore_cursor()
    if columns < self.columns:
        # Drop cells beyond the new right edge on every buffered line.
        for line in self.buffer.values():
            for x in range(columns, self.columns):
                line.pop(x, None)
    self.lines, self.columns = lines, columns
    self.set_margins()
def get_touch_dict(self, ind=None, out=bool):
    """Get a dictionary of Cls_Name struct with indices of Rays touching.

    Only includes Struct objects with compute = True, along with their
    associated colors.  The indices for each Struct are split between:
        - indok : rays touching Struct and in ind
        - indout: rays touching Struct but not in ind
    """
    if self.config is None:
        msg = "Config must be set in order to get touch dict !"
        raise Exception(msg)
    dElt = {}
    ind = self._check_indch(ind, out=bool)
    for ss in self.lStruct_computeInOut:
        kn = "%s_%s"%(ss.__class__.__name__, ss.Id.Name)
        indtouch = self.select(touch=kn, out=bool)
        if np.any(indtouch):
            indok = indtouch & ind
            indout = indtouch & ~ind
            if np.any(indok) or np.any(indout):
                if out == int:
                    # Convert boolean masks to integer index arrays.
                    indok = indok.nonzero()[0]
                    indout = indout.nonzero()[0]
                dElt[kn] = {'indok':indok, 'indout':indout,
                            'col':ss.get_color()}
    return dElt
def send_mail(subject, message, from_email, recipient_emails, files=None,
              html=False, reply_to=None, bcc=None, cc=None, files_manually=None):
    """Send email with advanced optional parameters.

    To attach non-file content (e.g. content not saved on disk), use
    the files_manually parameter and provide a list of 3-element
    tuples, e.g. [('design.png', img_data, 'image/png')], which will be
    passed to email.attach().

    Failures are logged, not raised -- delivery is best-effort.
    """
    import django.core.mail
    try:
        logging.debug('Sending mail to: {0}'.format(', '.join(r for r in recipient_emails)))
        logging.debug('Message: {0}'.format(message))
        email = django.core.mail.EmailMessage(subject, message, from_email, recipient_emails,
                                              bcc, cc=cc)
        if html:
            email.content_subtype = "html"
        if files:
            for file in files:
                email.attach_file(file)
        if files_manually:
            for filename, content, mimetype in files_manually:
                email.attach(filename, content, mimetype)
        if reply_to:
            email.extra_headers = {'Reply-To': reply_to}
        email.send()
    except Exception as e:
        # Deliberate broad catch: mail failure must not break the caller.
        logging.error('Error sending message [{0}] from {1} to {2} {3}'.format(
            subject, from_email, recipient_emails, e))
def run_plugins(self):
    """Create a thread for each plugin and let the thread_manager handle it."""
    for obj in self.loader.objects:
        # Reserve an output slot per plugin before its thread starts.
        self.output_dict[obj.output_options['name']] = None
        self.thread_manager.add_thread(obj.main, obj.options['interval'])
def from_record(cls, record):
    """Factory method to create a Record from a pymarc.Record object.

    :raises TypeError: when *record* is not a pymarc.Record
    """
    if not isinstance(record, pymarc.Record):
        raise TypeError('record must be of type pymarc.Record')
    # Re-brand the existing instance in place rather than copying it.
    record.__class__ = Record
    return record
def create_chapter_from_file(self, file_name, url=None, title=None):
    """Create a Chapter object from an html or xhtml file.

    Args:
        file_name (string): file containing the html or xhtml content
            of the created Chapter
        url (Option[string]): a url to infer the chapter title from
        title (Option[string]): the title of the created Chapter; when
            None, the title will try to be inferred from the webpage at
            the url

    Returns:
        Chapter: a chapter object whose content is the given file
    """
    with codecs.open(file_name, 'r', encoding='utf-8') as f:
        content_string = f.read()
    return self.create_chapter_from_string(content_string, url, title)
def run(self, ket: State) -> State:
    """Apply the action of this gate upon a state.

    :param ket: input state containing the qubits this gate acts on
    :return: a new State with this gate's tensor applied
    """
    qubits = self.qubits
    # Map this gate's qubits onto their positions in the state's order.
    indices = [ket.qubits.index(q) for q in qubits]
    tensor = bk.tensormul(self.tensor, ket.tensor, indices)
    return State(tensor, ket.qubits, ket.memory)
def script_input(module_name):
    """Render a module's input page.

    Forms are created based on objects in the module's WebAPI class;
    unknown modules get the not-found page.
    """
    if module_name not in registered_modules:
        return page_not_found(module_name)
    form = registered_modules[module_name].WebAPI()
    return render_template('script_index.html',
                           form=form,
                           scripts=registered_modules,
                           module_name=module_name)
def get_resources_of_type(network_id, type_id, **kwargs):
    """Return the Nodes, Links and ResourceGroups which have the type
    specified.

    :return: tuple (nodes, links, groups)
    """
    nodes_with_type = db.DBSession.query(Node).join(ResourceType).filter(Node.network_id==network_id, ResourceType.type_id==type_id).all()
    links_with_type = db.DBSession.query(Link).join(ResourceType).filter(Link.network_id==network_id, ResourceType.type_id==type_id).all()
    groups_with_type = db.DBSession.query(ResourceGroup).join(ResourceType).filter(ResourceGroup.network_id==network_id, ResourceType.type_id==type_id).all()
    return nodes_with_type, links_with_type, groups_with_type
def set_json(self, obj, status=HttpStatusCodes.HTTP_200):
    """Helper method to set a JSON response.

    Args:
        obj (:obj:`object`): JSON serializable object; non-serializable
            values are stringified via the ``default`` hook
        status (:obj:`str`, optional): status code of the response
    """
    obj = json.dumps(obj, sort_keys=True, default=lambda x: str(x))
    self.set_status(status)
    self.set_header(HttpResponseHeaders.CONTENT_TYPE, 'application/json')
    self.set_content(obj)
def add_janitor(self, janitor):
    """Add janitor to the room.

    Requires owner or admin rights; empty names are rejected, and
    already-present janitors are silently ignored.
    """
    if not self.owner and not self.admin:
        raise RuntimeError("Not enough street creed to do this")
    janitor = janitor.strip().lower()
    if not janitor:
        raise ValueError("Empty strings cannot be janitors")
    if janitor in self.config.janitors:
        return
    self.config.janitors.append(janitor)
    self.__set_config_value("janitors", self.config.janitors)
def calc_resp(password_hash, server_challenge):
    """Generate the LM response given a 16-byte password hash and the
    challenge from the Type-2 message.

    The hash is null-padded to 21 bytes, split into three 7-byte DES
    keys, and each key encrypts the 8-byte challenge.

    @param password_hash
        16-byte password hash
    @param server_challenge
        8-byte challenge from Type-2 message
    returns
        24-byte buffer to contain the LM response upon return
    """
    padded = password_hash + b'\0' * (21 - len(password_hash))
    challenge = server_challenge[0:8]
    response = b''
    for start in (0, 7, 14):
        cipher = des.DES(padded[start:start + 7])
        response += cipher.encrypt(challenge)
    return response
def dIbr_dV(Yf, Yt, V):
    """Compute partial derivatives of branch currents w.r.t. voltage.

    Ray Zimmerman, "dIbr_dV.m", MATPOWER, version 4.0b1,
    PSERC (Cornell), http://www.pserc.cornell.edu/matpower/
    """
    # Unit-magnitude voltages (angle information only).
    Vnorm = div(V, abs(V))
    diagV = spdiag(V)
    diagVnorm = spdiag(Vnorm)
    dIf_dVa = Yf * 1j * diagV
    dIf_dVm = Yf * diagVnorm
    dIt_dVa = Yt * 1j * diagV
    dIt_dVm = Yt * diagVnorm
    If = Yf * V
    It = Yt * V
    return dIf_dVa, dIf_dVm, dIt_dVa, dIt_dVm, If, It
def namedlist(objname, fieldnames):
    """Like namedtuple but editable: build a list subclass whose fields
    are accessible both by index and by name.

    :param objname: class name for the generated type
    :param fieldnames: ordered field names
    """
    class NamedListTemplate(list):
        __name__ = objname
        _fields = fieldnames
        def __init__(self, L=None, **kwargs):
            if L is None:
                L = [None]*len(fieldnames)
            super().__init__(L)
            for k, v in kwargs.items():
                setattr(self, k, v)
        @classmethod
        def length(cls):
            return len(cls._fields)
    for i, attrname in enumerate(fieldnames):
        # NOTE(review): ``itemsetter`` is a project helper (the stdlib
        # operator module has no itemsetter) -- confirm it is in scope.
        setattr(NamedListTemplate, attrname, property(operator.itemgetter(i), itemsetter(i)))
    return NamedListTemplate
def echo_verbose_results(data, no_color):
    """Print the info block and the result of each test, color-coded:
    green for passed, yellow for skipped, red otherwise."""
    click.echo()
    click.echo(
        '\n'.join(
            '{}: {}'.format(key, val) for key, val in data['info'].items()
        )
    )
    click.echo()
    for test in data['tests']:
        if test['outcome'] == 'passed':
            fg = 'green'
        elif test['outcome'] == 'skipped':
            fg = 'yellow'
        else:
            fg = 'red'
        name = parse_test_name(test['name'])
        echo_style(
            '{} {}'.format(name, test['outcome'].upper()),
            no_color,
            fg=fg
        )
def log(self, level, prefix = ''):
    """Write the contents of the Rule to the logging system.

    :param level: logging level to emit at
    :param prefix: indentation prefix used for nested output
    """
    logging.log(level, "%sin interface: %s", prefix, self.in_interface)
    logging.log(level, "%sout interface: %s", prefix, self.out_interface)
    logging.log(level, "%ssource: %s", prefix, self.source)
    logging.log(level, "%sdestination: %s", prefix, self.destination)
    logging.log(level, "%smatches:", prefix)
    for match in self.matches:
        match.log(level, prefix + '  ')
    if self.jump:
        logging.log(level, "%sjump:", prefix)
        self.jump.log(level, prefix + '  ')
def _write_standard(self, message, extra):
level = extra['level']
if self.include_extra:
del extra['timestamp']
del extra['level']
del extra['logger']
if len(extra) > 0:
message += " " + str(extra)
if level == 'INFO':
self.logger.info(message)
elif level == 'DEBUG':
self.logger.debug(message)
elif level == 'WARNING':
self.logger.warning(message)
elif level == 'ERROR':
self.logger.error(message)
elif level == 'CRITICAL':
self.logger.critical(message)
else:
self.logger.debug(message) | Writes a standard log statement
@param message: The message to write
@param extra: The object to pull defaults from |
def get_permissions(self, namespace, explicit=False):
    """Return the permissions level for the specified namespace.

    Arguments:
        namespace -- permissioning namespace (str or Namespace)
        explicit -- require explicitly set permissions to the provided
            namespace

    Returns:
        int -- permissioning flags
    """
    if not isinstance(namespace, Namespace):
        namespace = Namespace(namespace)
    keys = namespace.keys
    p, _ = self._check(keys, self.index, explicit=explicit)
    return p
def linear_rref(A, b, Matrix=None, S=None):
    """Transform a linear system to reduced row-echelon form.

    Transforms both the matrix and right-hand side of a linear system
    of equations to reduced row echelon form.

    Parameters
    ----------
    A : Matrix-like
        Iterable of rows.
    b : iterable
    Matrix, S : optional substitutes, imported from sympy when None

    Returns
    -------
    A', b' - transformed versions
    """
    if Matrix is None:
        from sympy import Matrix
    if S is None:
        from sympy import S
    # Augment each row with its right-hand-side value.
    mat_rows = [_map2l(S, list(row) + [v]) for row, v in zip(A, b)]
    aug = Matrix(mat_rows)
    raug, pivot = aug.rref()
    # Keep only the independent (pivot) rows.
    nindep = len(pivot)
    return raug[:nindep, :-1], raug[:nindep, -1]
def array(self) -> numpy.ndarray:
    """The aggregated data of all logged |IOSequence| objects contained
    in one single |numpy.ndarray| object.

    See |NetCDFVariableAgg.shape| for the structure: with `timeaxis=1`
    the first axis corresponds to the location and the second to time;
    with `timeaxis=0` the result is the transposed.
    """
    # NOTE(review): ``fillvalue`` is not defined in this block --
    # presumably a module-level constant; confirm it exists.
    array = numpy.full(self.shape, fillvalue, dtype=float)
    for idx, subarray in enumerate(self.arrays.values()):
        array[self.get_timeplaceslice(idx)] = subarray
    return array
def plotter_cls(self):
    """The plotter class, imported lazily from :attr:`module` and cached."""
    ret = self._plotter_cls
    if ret is None:
        self._logger.debug('importing %s', self.module)
        mod = import_module(self.module)
        plotter = self.plotter_name
        if plotter not in vars(mod):
            raise ImportError("Module %r does not have a %r plotter!" % (
                mod, plotter))
        # Cache the class and record the plugin's version information.
        ret = self._plotter_cls = getattr(mod, plotter)
        _versions.update(get_versions(key=lambda s: s == self._plugin))
    return ret
def format_hexadecimal_field(spec, prec, number, locale):
    """Format a hexadecimal field (``locale`` is accepted but unused)."""
    if number < 0:
        # Two's-complement view: mask with the smallest whole number of
        # bytes able to represent ``-number``.
        n_bytes = int(math.log(-number, 1 << 8) + 1)
        number &= (1 << (8 * n_bytes)) - 1
    return format(number, u'0%d%s' % (int(prec or 0), spec))
def view_global_hcurves(token, dstore):
    """Display the global hazard curves for the calculation.

    They are used for debugging purposes when comparing the results of
    two calculations.  They are the mean over the sites of the mean
    hazard curves.
    """
    oq = dstore['oqparam']
    nsites = len(dstore['sitecol'])
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    mean = getters.PmapGetter(dstore, rlzs_assoc).get_mean()
    array = calc.convert_to_array(mean, nsites, oq.imtls)
    # Average each named field over the sites into a single-row record.
    res = numpy.zeros(1, array.dtype)
    for name in array.dtype.names:
        res[name] = array[name].mean()
    return rst_table(res)
def list_port_fwd(zone, permanent=True):
    """List port forwarding.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' firewalld.list_port_fwd public
    """
    ret = []
    cmd = '--zone={0} --list-forward-ports'.format(zone)
    if permanent:
        cmd += ' --permanent'
    for i in __firewall_cmd(cmd).splitlines():
        # Each output line is four key=value fields joined by ':'.
        (src, proto, dest, addr) = i.split(':')
        ret.append(
            {'Source port': src.split('=')[1],
             'Protocol': proto.split('=')[1],
             'Destination port': dest.split('=')[1],
             'Destination address': addr.split('=')[1]}
        )
    return ret
def remove_qc_reports(portal):
    """Remove the action Quality Control from Reports."""
    logger.info("Removing Reports > Quality Control ...")
    ti = portal.reports.getTypeInfo()
    actions = map(lambda action: action.id, ti._actions)
    for index, action in enumerate(actions, start=0):
        if action == 'qualitycontrol':
            # deleteActions takes a list of positional indices.
            ti.deleteActions([index])
            break
    logger.info("Removing Reports > Quality Control [DONE]")
def iter_history(
        self,
        chat_id: Union[int, str],
        limit: int = 0,
        offset: int = 0,
        offset_id: int = 0,
        offset_date: int = 0,
        reverse: bool = False
) -> Generator["pyrogram.Message", None, None]:
    """Iterate through a chat history sequentially.

    This convenience method does the same as repeatedly calling
    :meth:`get_history` in a loop (in pages of up to 100 messages),
    yielding each message until *limit* messages have been produced or
    the history is exhausted.

    Args:
        chat_id: unique identifier (int) or username (str) of the
            target chat; "me"/"self" for Saved Messages.
        limit: limits the number of messages retrieved; 0 = no limit.
        offset: sequential number of the first message to be returned.
        offset_id: identifier of the first message to be returned.
        offset_date: Unix time; retrieve only older messages from it.
        reverse: pass True to go from older to most recent.

    Returns:
        A generator yielding :obj:`Message <pyrogram.Message>` objects.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` on a Telegram RPC error.
    """
    offset_id = offset_id or (1 if reverse else 0)
    current = 0
    # 2**31 - 1 acts as "unbounded" when no limit was requested.
    total = limit or (1 << 31) - 1
    limit = min(100, total)
    while True:
        messages = self.get_history(
            chat_id=chat_id,
            limit=limit,
            offset=offset,
            offset_id=offset_id,
            offset_date=offset_date,
            reverse=reverse
        ).messages
        if not messages:
            return
        # Continue the next page just past the last message seen.
        offset_id = messages[-1].message_id + (1 if reverse else 0)
        for message in messages:
            yield message
            current += 1
            if current >= total:
                return
def close(self):
    """Clear the connection pool and stop the monitor.

    Reconnect with open().
    """
    if self._publish:
        self._events.put((self._listener.publish_server_closed,
                          (self._description.address, self._topology_id)))
    self._monitor.close()
    self._pool.reset()
def invert_delete_row2(self, key, value):
    """Invert of delete type two: keep only rows where the *key* column
    equals the *value* column.

    The result is materialized into a list so ``self.rows`` stays
    re-iterable (the original left a one-shot ``filter`` object on
    Python 3).
    """
    self.rows = [row for row in self.rows if row.get(key) == row.get(value)]
def remove(self, nodes):
    """Remove one node (or a list of nodes) and all edges touching them."""
    if not isinstance(nodes, list):
        nodes = [nodes]
    for node in nodes:
        node_id = self.id(node)
        self.edges = [edge for edge in self.edges
                      if node_id not in (edge[0], edge[1])]
        del self.nodes[node_id]
def handle_error(self, exp):
    """Called if a Mapper returns MappingInvalid.

    Handles the error and returns it in the appropriate format; can be
    overridden in order to change the error format.

    :param exp: MappingInvalid exception raised
    """
    payload = {
        "message": "Invalid or incomplete data provided.",
        "errors": exp.errors
    }
    self.endpoint.return_error(self.error_status, payload=payload)
def update_project(self, org_name, part_name, dci_id=UNKNOWN_DCI_ID,
                   service_node_ip=UNKNOWN_SRVN_NODE_IP,
                   vrf_prof=None, desc=None):
    """Update project on the DCNM.

    :param org_name: name of organization.
    :param part_name: name of partition.
    :param dci_id: Data Center interconnect id.
    :param desc: description of project; defaults to the org name.
    :raises DfaClientRequestFailed: when the DCNM rejects the update
    """
    desc = desc or org_name
    res = self._create_or_update_partition(org_name, part_name, desc,
                                           dci_id=dci_id,
                                           service_node_ip=service_node_ip,
                                           vrf_prof=vrf_prof,
                                           operation='PUT')
    if res and res.status_code in self._resp_ok:
        LOG.debug("Update %s partition in DCNM.", part_name)
    else:
        LOG.error("Failed to update %(part)s partition in DCNM."
                  "Response: %(res)s", {'part': part_name, 'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)
def is_cozy_registered():
    """Check whether a Cozy user is registered.

    Queries the ``user/all`` CouchDB view and reports whether it
    contains at least one row.
    """
    req = curl_couchdb('/cozy/_design/user/_view/all')
    users = req.json()['rows']
    # Direct boolean result replaces the if/else returning True/False.
    return len(users) > 0
def from_sas_token(cls, address, sas_token, eventhub=None, **kwargs):
    """Create an EventHubClient from an existing auth token or token generator.

    :param address: The Event Hub address URL
    :type address: str
    :param sas_token: A SAS token or function that returns a SAS token.
        If a function is supplied, it will be used to retrieve
        subsequent tokens on expiry; it should take no arguments.
    :type sas_token: str or callable
    :param eventhub: The EventHub name, if not already in the address URL.
    :type eventhub: str
    :param kwargs: forwarded to the constructor (debug, http_proxy,
        auth_timeout, ...)
    """
    address = _build_uri(address, eventhub)
    return cls(address, sas_token=sas_token, **kwargs)
def escape_string(value):
    """Convert a string to its S-expression representation, adding
    quotes and escaping funny characters."""
    pieces = ['"']
    for ch in value:
        if ch in CHAR_TO_ESCAPE:
            pieces.append(f'\\{CHAR_TO_ESCAPE[ch]}')
        elif ch.isprintable():
            pieces.append(ch)
        else:
            # Pick the shortest escape wide enough for the code point.
            code = ord(ch)
            if code < 0x100:
                pieces.append(f'\\x{code:02x}')
            elif code < 0x10000:
                pieces.append(f'\\u{code:04x}')
            else:
                pieces.append(f'\\U{code:06x}')
    pieces.append('"')
    return ''.join(pieces)
def to_logodds_scoring_matrix( self, background=None, correction=DEFAULT_CORRECTION ):
    """Create a standard logodds scoring matrix.

    :param background: per-symbol background frequencies; defaults to a
        uniform distribution over the alphabet
    :param correction: floor applied before log2 to avoid log(0)
    """
    alphabet_size = len( self.alphabet )
    if background is None:
        background = ones( alphabet_size, float32 ) / alphabet_size
    # Row totals normalize counts to frequencies before the log-ratio.
    totals = numpy.sum( self.values, 1 )[:,newaxis]
    values = log2( maximum( self.values, correction ) ) \
        - log2( totals ) \
        - log2( maximum( background, correction ) )
    return ScoringMatrix.create_from_other( self, values.astype( float32 ) )
def get_page_full(self, page_id):
    """Get full page info and full html code.

    :return: a TildaPage on success
    """
    try:
        result = self._request('/getpagefull/',
                               {'pageid': page_id})
        return TildaPage(**result)
    except NetworkError:
        # NOTE(review): returns an empty list on network failure while
        # the success path returns a TildaPage -- inconsistent types;
        # callers must handle both.
        return []
def CheckHash(self, responses):
    """Adds the block hash to the file tracker responsible for this vfs URN.

    Flow-state callback: `responses` carries the hash reply for the file at
    index ``responses.request_data["index"]``. Failures abandon the fetch of
    that file; successes queue the hash and, once enough hashes are pending,
    trigger a batched fetch of file content.
    """
    index = responses.request_data["index"]
    if index not in self.state.pending_files:
        # File already finished or abandoned; ignore the stale reply.
        return
    file_tracker = self.state.pending_files[index]
    hash_response = responses.First()
    if not responses.success or not hash_response:
        urn = file_tracker["stat_entry"].pathspec.AFF4Path(self.client_urn)
        self.Log("Failed to read %s: %s", urn, responses.status)
        self._FileFetchFailed(index, responses.request.request.name)
        return
    file_tracker.setdefault("hash_list", []).append(hash_response)
    self.state.blob_hashes_pending += 1
    # Batch the expensive filestore call: only flush once enough hashes queued.
    if self.state.blob_hashes_pending > self.MIN_CALL_TO_FILE_STORE:
        self.FetchFileContent() | Adds the block hash to the file tracker responsible for this vfs URN.
def surface2image(surface):
    """Convert a cairo surface into a PIL image.

    The surface is rendered to an in-memory PNG and re-opened with PIL.
    Fully opaque images are returned as-is; images carrying an alpha band
    are composited onto a white RGB background.
    """
    global g_lock
    with g_lock:
        png_buffer = io.BytesIO()
        surface.write_to_png(png_buffer)
        png_buffer.seek(0)
        image = PIL.Image.open(png_buffer)
        image.load()
        if "A" not in image.getbands():
            return image
        # Flatten the alpha channel onto a white background.
        background = PIL.Image.new("RGB", image.size, (255, 255, 255))
        background.paste(image, mask=image.split()[3])
        return background
def _gevent_patch():
    """Monkey-patch the process with gevent if it is available.

    :return: GEVENT when the optional gevent/grequests imports succeeded,
        otherwise MULTITHREAD as the fallback mode.
    :rtype: int
    """
    try:
        # If the optional module-level imports failed these names are
        # undefined and the assert raises NameError.
        assert gevent
        assert grequests
    except NameError:
        # BUG FIX: Logger.warn is a deprecated alias of Logger.warning.
        logger.warning('gevent not exist, fallback to multiprocess...')
        return MULTITHREAD
    else:
        monkey.patch_all()
        return GEVENT
:return: GEVENT by default; if gevent is not supported, returns MULTITHREAD
:rtype: int |
def get_shape_view(self, shape_obj, avoid_oob=True):
    """Calculate a bounding box in the data enclosing `shape_obj` and
    return a view that accesses it plus a mask that is True only for
    pixels enclosed in the region.

    If `avoid_oob` is True (default) the bounding box is clipped to avoid
    coordinates outside of the actual data.
    """
    # Round the shape's lower-left / upper-right corners to integer pixels.
    x1, y1, x2, y2 = [int(np.round(n)) for n in shape_obj.get_llur()]
    if avoid_oob:
        # Clip the box to the data extents.
        wd, ht = self.get_size()
        x1, x2 = max(0, x1), min(x2, wd - 1)
        y1, y2 = max(0, y1), min(y2, ht - 1)
    # Grid of candidate (x, y) points covering the inclusive box.
    yi = np.mgrid[y1:y2 + 1].reshape(-1, 1)
    xi = np.mgrid[x1:x2 + 1].reshape(1, -1)
    pts = np.asarray((xi, yi)).T
    contains = shape_obj.contains_pts(pts)
    # Slice object selecting the same (row, col) box from the data array.
    view = np.s_[y1:y2 + 1, x1:x2 + 1]
    return (view, contains) | Calculate a bounding box in the data enclosing `shape_obj` and
return a view that accesses it and a mask that is True only for
pixels enclosed in the region.
If `avoid_oob` is True (default) then the bounding box is clipped
to avoid coordinates outside of the actual data. |
def active_devices(self, active_devices):
    """Set the active_devices of this ReportBillingData.

    :param active_devices: number of active devices; must be a non-negative
        integer and must not be None.
    :type: int
    :raises ValueError: when the value is None or negative.
    """
    if active_devices is None:
        raise ValueError("Invalid value for `active_devices`, must not be `None`")
    if active_devices < 0:
        raise ValueError("Invalid value for `active_devices`, must be a value greater than or equal to `0`")
    self._active_devices = active_devices
:param active_devices: The active_devices of this ReportBillingData.
:type: int |
def pop_momentum_by_name(self, name):
    """Remove and return the momentum registered under *name*.

    :param name: the momentum name.
    :returns: the momentum that was removed.
    :raises TypeError: `name` is ``None``.
    :raises KeyError: no momentum named `name` exists.
    """
    found = self.get_momentum_by_name(name)
    self.remove_momentum(found)
    return found
:param name: the momentum name.
:returns: a momentum removed.
:raises TypeError: `name` is ``None``.
:raises KeyError: failed to find a momentum named `name`. |
def get_annotation(cls, fn):
    """Find the ``_schema_annotation`` attribute for the given function.

    Descends through decorator layers (unbound-method ``im_func`` and the
    first closure cell of each wrapper) until something carries the
    attribute; returns None if nothing does.

    :param func fn: function to inspect.
    :returns: a ResourceSchemaAnnotation instance or None.
    """
    current = fn
    while current is not None:
        if hasattr(current, '_schema_annotation'):
            return current._schema_annotation
        # Python 2 unbound methods expose the raw function as im_func.
        current = getattr(current, 'im_func', current)
        cells = getattr(current, '__closure__', None)
        # Follow the wrapped callable captured by the decorator, if any.
        current = cells[0].cell_contents if cells is not None else None
    return None
This will descend through decorators until it finds something that has
the attribute. If it doesn't find it anywhere, it will return None.
:param func fn: Find the attribute on this function.
:returns: an instance of
:class:`~doctor.resource.ResourceSchemaAnnotation` or
None. |
def dirsize_get(l_filesWithoutPath, **kwargs):
    """Determine the cumulative disk usage of a list of files.

    :param l_filesWithoutPath: file names relative to ``kwargs['path']``.
    :param kwargs: ``path`` (str) -- directory prepended to each file name.
    :return: dict with 'status', 'diskUsage_raw' (bytes) and
        'diskUsage_human' (pretty-printed size string).
    """
    # Cleanups: dead locals (d_ret, l_size) removed, kwargs scan replaced
    # by a direct lookup, bare except narrowed to OSError.
    str_path = kwargs.get('path', '')
    size = 0
    for f in l_filesWithoutPath:
        str_f = '%s/%s' % (str_path, f)
        # Skip symlinks so linked content is not double counted.
        if not os.path.islink(str_f):
            try:
                size += os.path.getsize(str_f)
            except OSError:
                # File vanished or is unreadable: best effort, skip it.
                pass
    return {
        'status': True,
        'diskUsage_raw': size,
        'diskUsage_human': pftree.sizeof_fmt(size)
    }
def get_hits_in_events(hits_array, events, assume_sorted=True, condition=None):
    """Select the hits that occurred in `events`, with an optional numexpr
    selection criterion.

    Parameters
    ----------
    hits_array : numpy.array
        Structured hit array with an 'event_number' field.
    events : array
        Event numbers to keep.
    assume_sorted : bool
        True if `events` is sorted ascending (about 35% faster).
    condition : string
        numexpr expression over the hit columns; only hits for which it
        evaluates True are taken.

    Returns
    -------
    numpy.array
        Hit array restricted to the given events.
    """
    logging.debug("Calculate hits that exists in the given %d events." % len(events))
    if assume_sorted:
        # Shrink `events` to the range actually present in the hit data.
        events, _ = reduce_sorted_to_intersect(events, hits_array['event_number'])
        if events.shape[0] == 0:
            # No overlap: return an empty slice with the original dtype.
            return hits_array[0:0]
    try:
        if assume_sorted:
            selection = analysis_utils.in1d_events(hits_array['event_number'], events)
        else:
            logging.warning('Events are usually sorted. Are you sure you want this?')
            selection = np.in1d(hits_array['event_number'], events)
        if condition is None:
            hits_in_events = hits_array[selection]
        else:
            # Bind every column name used in `condition` as a local variable
            # so numexpr can resolve it during evaluation.
            for variable in set(re.findall(r'[a-zA-Z_]+', condition)):
                exec(variable + ' = hits_array[\'' + variable + '\']')
            hits_in_events = hits_array[ne.evaluate(condition + ' & selection')]
    except MemoryError:
        logging.error('There are too many hits to do in RAM operations. Consider decreasing chunk size and use the write_hits_in_events function instead.')
        raise MemoryError
    return hits_in_events | Selects the hits that occurred in events and optional selection criterion.
If a event range can be defined use the get_data_in_event_range function. It is much faster.
Parameters
----------
hits_array : numpy.array
events : array
assume_sorted : bool
Is true if the events to select are sorted from low to high value. Increases speed by 35%.
condition : string
A condition that is applied to the hits in numexpr. Only if the expression evaluates to True the hit is taken.
Returns
-------
numpy.array
hit array with the hits in events. |
def _create_body(self, name, label=None, cidr=None):
    """Build the request body dict used to create a network.

    Accepts either 'label' or 'name' for the label attribute; the name is
    used when no explicit label is supplied.
    """
    return {"network": {
        "label": label or name,
        "cidr": cidr,
    }}
'label' or 'name' as the keyword parameter for the label attribute. |
def validate_regexp(ctx, param, value):
    """click option callback: validate and compile a regular expression.

    :param ctx: click context (unused).
    :param param: click parameter being processed (unused).
    :param value: raw option value; falsy values are returned unchanged.
    :returns: the compiled pattern, or the original falsy value.
    :raises click.BadParameter: if `value` is not a valid regexp.
    """
    if value:
        try:
            value = re.compile(value)
        except re.error:
            # BUG FIX: re.compile raises re.error, not ValueError, so the
            # original except clause could never trigger.
            raise click.BadParameter('invalid regular expression.')
    return value
def _setup_output(self, path, force):
    """Clear the way for an output to be placed at *path*.

    :param path: target file or directory location.
    :param force: when True, delete anything already at *path*; when
        False, refuse to touch an existing path.
    :raises Exception: if *path* exists and *force* is False.
    """
    if os.path.isfile(path) or os.path.isdir(path):
        if not force:
            raise Exception("Cowardly refusing to overwrite already existing path at %s" % path)
        # BUG FIX: logging.warn is a deprecated alias of logging.warning.
        logging.warning("Deleting previous file/directory '%s'" % path)
        if os.path.isfile(path):
            os.remove(path)
        else:
            shutil.rmtree(path)
def _walk(path, follow_links=False, maximum_depth=None):
    """A modified os.walk with support for a maximum traversal depth.

    Depth is measured in path-separator counts relative to *path*; None
    means unlimited.
    """
    base_depth = path.rstrip(os.path.sep).count(os.path.sep)
    for current_root, dir_names, file_names in os.walk(path, followlinks=follow_links):
        yield current_root, dir_names, file_names
        if maximum_depth is None:
            continue
        if base_depth + maximum_depth <= current_root.count(os.path.sep):
            # Prune in place so os.walk does not descend any further.
            dir_names[:] = []
def wrap_topgames(cls, response):
    """Wrap the response from querying the top games into instances.

    :param response: the response for querying the top games
    :type response: :class:`requests.Response`
    :returns: the new game instances
    :rtype: :class:`list` of :class:`Game`
    :raises: None
    """
    payload = response.json()
    return [
        cls.wrap_json(json=entry['game'],
                      viewers=entry['viewers'],
                      channels=entry['channels'])
        for entry in payload['top']
    ]
and return them
:param response: The response for quering the top games
:type response: :class:`requests.Response`
:returns: the new game instances
:rtype: :class:`list` of :class:`Game`
:raises: None |
def group_by_month_per_hour(self):
    """Return this collection's values grouped by each month per hour.

    Keys are (month, hour) tuples with month in 1-12 and hour in 0-23,
    e.g. (12, 23) for December at 11 PM. Every combination is present,
    possibly mapping to an empty list.
    """
    data_by_month_per_hour = OrderedDict()
    # Pre-create all 288 buckets so empty (month, hour) slots still appear.
    # BUG FIX: xrange is Python-2-only; range works on both 2 and 3.
    for month in range(1, 13):
        for hour in range(0, 24):
            data_by_month_per_hour[(month, hour)] = []
    for value, dt in zip(self.values, self.datetimes):
        data_by_month_per_hour[(dt.month, dt.hour)].append(value)
    return data_by_month_per_hour
Key values are tuples of 2 integers:
The first represents the month of the year between 1-12.
The first represents the hour of the day between 0-24.
(eg. (12, 23) for December at 11 PM) |
def __safe_handler_callback(self, handler, method_name, *args, **kwargs):
    """Call ``handler.<method_name>(*args, **kwargs)`` without letting any
    exception escape; exceptions are logged and turned into None.

    Two control keywords are stripped from ``kwargs`` before the call:

    * ``none_as_true``: if True, a None result (missing method or method
      returned None) is converted to True. Default False.
    * ``only_boolean``: if True, the final result is coerced to bool.
      Default False.

    :param handler: the handler object (may be None)
    :param method_name: name of the method to call (may be None)
    :param args: positional arguments for the method
    :param kwargs: keyword arguments for the method plus the control flags
    :return: the method result (possibly coerced), or None on error
    """
    if handler is None or method_name is None:
        return None
    # Pop the control flags so they are not forwarded to the handler.
    only_boolean = kwargs.pop("only_boolean", False)
    none_as_true = kwargs.pop("none_as_true", False)
    try:
        method = getattr(handler, method_name)
    except AttributeError:
        # Handler does not implement this callback.
        result = None
    else:
        try:
            result = method(*args, **kwargs)
        except Exception as ex:
            result = None
            self._logger.exception(
                "Error calling handler '%s': %s", handler, ex
            )
    if result is None and none_as_true:
        result = True
    if only_boolean:
        return bool(result)
    return result | Calls the given method with the given arguments in the given handler.
Logs exceptions, but doesn't propagate them.
Special arguments can be given in kwargs:
* 'none_as_true': If set to True and the method returned None or
doesn't exist, the result is considered as True.
If set to False, None result is kept as is.
Default is False.
* 'only_boolean': If True, the result can only be True or False, else
the result is the value returned by the method.
Default is False.
:param handler: The handler to call
:param method_name: The name of the method to call
:param args: List of arguments for the method to call
:param kwargs: Dictionary of arguments for the method to call and to
control the call
:return: The method result, or None on error |
def load_rabit_checkpoint(self):
    """Initialize the model by loading it from the rabit checkpoint.

    Returns
    -------
    version: integer
        The version number of the model.
    """
    # Out-parameter filled in by the native XGBoost library call.
    version = ctypes.c_int()
    _check_call(_LIB.XGBoosterLoadRabitCheckpoint(
        self.handle, ctypes.byref(version)))
    return version.value | Initialize the model by load from rabit checkpoint.
Returns
-------
version: integer
The version number of the model. |
def _group_by(data, criteria):
    """Group the objects in *data* into lists keyed by *criteria*.

    *criteria* may be a callable mapping element -> key, or a string naming
    the element key/field to group on. Returns a defaultdict(list).
    """
    if isinstance(criteria, str):
        field = criteria
        # Turn the field name into a key-extraction function.
        def criteria(element):
            return element[field]
    grouped = defaultdict(list)
    for element in data:
        grouped[criteria(element)].append(element)
    return grouped
def enabled(name, root=None, **kwargs):
    """Return True if the named service is enabled to start on boot.

    root
        Look up unit files in the specified root directory.

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    """
    # `systemctl is-enabled` exits 0 when the unit is enabled.
    if __salt__['cmd.retcode'](_systemctl_cmd('is-enabled', name, root=root),
                               python_shell=False,
                               ignore_retcode=True) == 0:
        return True
    elif '@' in name:
        # Template instance units can't be queried via is-enabled; look for
        # the symlink systemd creates in the local config dir when enabled.
        local_config_path = _root(LOCAL_CONFIG_PATH, '/')
        cmd = ['find', local_config_path, '-name', name,
               '-type', 'l', '-print', '-quit']
        if bool(__salt__['cmd.run'](cmd, python_shell=False)):
            return True
    elif name in _get_sysv_services(root):
        # Fall back to SysV init-script handling.
        return _sysv_enabled(name, root)
    return False | Return if the named service is enabled to start on boot
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name> |
def is_list(node):
    """Does the node represent a list literal?

    True when *node* is an inner Node whose first and last children are
    the Leaf tokens ``[`` and ``]``.
    """
    if not isinstance(node, Node):
        return False
    kids = node.children
    if len(kids) <= 1:
        return False
    first, last = kids[0], kids[-1]
    return (isinstance(first, Leaf) and isinstance(last, Leaf)
            and first.value == u"[" and last.value == u"]")
def list2html(lst):
    """Convert a list to HTML using table formatting.

    Each element becomes a table row; string elements and other scalars go
    into a single cell, while nested lists are rendered as one cell with
    comma-separated entries.
    """
    parts = ['<TABLE width=100% border=0>']
    for row in lst:
        parts.append('<TR>\n')
        if type(row) is str:
            parts.append('<TD>' + row + '</TD>\n')
        elif type(row) is list:
            parts.append('<TD>')
            for cell in row:
                parts.append(cell + ', ')
            parts.append('</TD>')
        else:
            parts.append('<TD>' + str(row) + '</TD>\n')
        parts.append('</TR>\n')
    parts.append('</TABLE><BR>\n')
    return ''.join(parts)
def _permute_aux_specs(self):
    """Generate all permutations of the non-core specifications.

    Suite-level spec names are re-keyed to their calc-level equivalents
    before permuting.
    """
    # Map suite-level names to the corresponding calc-level names.
    calc_aux_mapping = self._NAMES_SUITE_TO_CALC.copy()
    # NOTE(review): the object-library entry maps to None -- presumably it
    # has no calc-level name; confirm downstream handling of the None key.
    calc_aux_mapping[_OBJ_LIB_STR] = None
    # Core specs are handled elsewhere and must not be permuted here.
    [calc_aux_mapping.pop(core) for core in self._CORE_SPEC_NAMES]
    specs = self._get_aux_specs()
    # Re-key the specs dict from suite names to calc names.
    for suite_name, calc_name in calc_aux_mapping.items():
        specs[calc_name] = specs.pop(suite_name)
    return _permuted_dicts_of_specs(specs) | Generate all permutations of the non-core specifications.
def npar(self):
    """Get the number of parameters.

    Note: as a side effect this also synchronizes ``control_data.npar``
    with the current row count of ``parameter_data``.

    Returns
    -------
    npar : int
        the number of parameters
    """
    self.control_data.npar = self.parameter_data.shape[0]
    return self.control_data.npar | get number of parameters
Returns
-------
npar : int
the number of parameters |
def _NTU_from_P_solver(P1, R1, NTU_min, NTU_max, function, **kwargs):
    """Solve the P-NTU method backwards: find the NTU1 that reproduces the
    desired P1 and R1 with the given method `function`, searching within
    [NTU_min, NTU_max].

    :raises ValueError: if the requested P1 lies outside the attainable
        [P1_min, P1_max] range on the given NTU interval.
    """
    # Attainable P1 bounds over the allowed NTU interval.
    P1_max = _NTU_from_P_objective(NTU_max, R1, 0, function, **kwargs)
    P1_min = _NTU_from_P_objective(NTU_min, R1, 0, function, **kwargs)
    if P1 > P1_max:
        raise ValueError('No solution possible gives such a high P1; maximum P1=%f at NTU1=%f' %(P1_max, NTU_max))
    if P1 < P1_min:
        raise ValueError('No solution possible gives such a low P1; minimum P1=%f at NTU1=%f' %(P1_min, NTU_min))
    # The root of this objective is the NTU1 matching the requested P1.
    to_solve = lambda NTU1: _NTU_from_P_objective(NTU1, R1, P1, function, **kwargs)
    return ridder(to_solve, NTU_min, NTU_max) | Private function to solve the P-NTU method backwards, given the
function to use, the upper and lower NTU bounds for consideration,
and the desired P1 and R1 values. |
def ensure_str(data: Union[str, bytes]) -> str:
    """Return *data* as str, decoding from UTF-8 when it is bytes.

    :param data: text as str, or UTF-8 encoded bytes.
    :rtype: str
    """
    return data.decode('utf-8') if isinstance(data, bytes) else data
:param data: Data
:rtype str: |
def _build_prior(self, unconstrained_tensor, constrained_tensor):
    """Build a tensorflow representation of the prior log-density,
    including the log-Jacobian of the transform.

    :param unconstrained_tensor: free-space (unconstrained) tensor.
    :param constrained_tensor: transformed (constrained) tensor.
    :raises GPflowError: if either input is not a tensor.
    """
    if not misc.is_tensor(unconstrained_tensor):
        raise GPflowError("Unconstrained input must be a tensor.")
    if not misc.is_tensor(constrained_tensor):
        raise GPflowError("Constrained input must be a tensor.")
    prior_name = 'prior'
    if self.prior is None:
        # No prior configured: contribute zero to the log-density.
        return tf.constant(0.0, settings.float_type, name=prior_name)
    # Change-of-variables correction for optimizing in free space.
    log_jacobian = self.transform.log_jacobian_tensor(unconstrained_tensor)
    logp_var = self.prior.logp(constrained_tensor)
    return tf.squeeze(tf.add(logp_var, log_jacobian, name=prior_name)) | Build a tensorflow representation of the prior density.
The log Jacobian is included. |
def create(self, enable_turn=values.unset, type=values.unset,
           unique_name=values.unset, status_callback=values.unset,
           status_callback_method=values.unset, max_participants=values.unset,
           record_participants_on_connect=values.unset,
           video_codecs=values.unset, media_region=values.unset):
    """Create a new RoomInstance.

    :param bool enable_turn: Use Twilio Network Traversal for TURN service.
    :param RoomInstance.RoomType type: Type of room: peer-to-peer, group-small or group.
    :param unicode unique_name: Name of the Room.
    :param unicode status_callback: URL Twilio sends asynchronous room-event webhooks to.
    :param unicode status_callback_method: HTTP method for the callback URL.
    :param unicode max_participants: Maximum number of Participants in the Room.
    :param bool record_participants_on_connect: Start Participant recording when connected.
    :param RoomInstance.VideoCodec video_codecs: Video codecs supported when publishing a Track.
    :param unicode media_region: Region for the media server in Group Rooms.
    :returns: Newly created RoomInstance
    :rtype: twilio.rest.video.v1.room.RoomInstance
    """
    # values.of() filters out every entry still set to values.unset, so
    # only explicitly supplied options are serialized.
    data = values.of({
        'EnableTurn': enable_turn,
        'Type': type,
        'UniqueName': unique_name,
        'StatusCallback': status_callback,
        'StatusCallbackMethod': status_callback_method,
        'MaxParticipants': max_participants,
        'RecordParticipantsOnConnect': record_participants_on_connect,
        'VideoCodecs': serialize.map(video_codecs, lambda e: e),
        'MediaRegion': media_region,
    })
    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )
    return RoomInstance(self._version, payload, ) | Create a new RoomInstance
:param bool enable_turn: Use Twilio Network Traversal for TURN service.
:param RoomInstance.RoomType type: Type of room, either peer-to-peer, group-small or group.
:param unicode unique_name: Name of the Room.
:param unicode status_callback: A URL that Twilio sends asynchronous webhook requests to on every room event.
:param unicode status_callback_method: HTTP method Twilio should use when requesting the above URL.
:param unicode max_participants: Maximum number of Participants in the Room.
:param bool record_participants_on_connect: Start Participant recording when connected.
:param RoomInstance.VideoCodec video_codecs: An array of video codecs supported when publishing a Track in the Room.
:param unicode media_region: Region for the media server in Group Rooms.
:returns: Newly created RoomInstance
:rtype: twilio.rest.video.v1.room.RoomInstance |
def add(self, name='', type='', agent='', scanner='', location='', language='en', *args, **kwargs):
    """Simplified add for the most common library-creation options.

    Parameters:
        name (str): Name of the library.
        type (str): Library type, e.g. movie or show.
        agent (str): Metadata agent, e.g. com.plexapp.agents.imdb.
        scanner (str): Scanner name, e.g. "Plex Movie Scanner".
        location (str): /path/to/files.
        language (str): Two-letter language code, e.g. "en".
        kwargs (dict): Advanced options, passed as id/value pairs and
            appended to the query string.
    """
    part = '/library/sections?name=%s&type=%s&agent=%s&scanner=%s&language=%s&location=%s' % (
        quote_plus(name), type, agent, quote_plus(scanner), language, quote_plus(location))
    if kwargs:
        # BUG FIX: the advanced options must be joined to the query string
        # with '&'; previously they were concatenated straight onto the
        # location value, producing a malformed URL.
        part += '&' + urlencode(kwargs)
    return self._server.query(part, method=self._server._session.post)
Parameters:
name (str): Name of the library
agent (str): Example com.plexapp.agents.imdb
type (str): movie, show, # check me
location (str): /path/to/files
language (str): Two letter language fx en
kwargs (dict): Advanced options should be passed as a dict. where the id is the key.
**Photo Preferences**
* **agent** (str): com.plexapp.agents.none
* **enableAutoPhotoTags** (bool): Tag photos. Default value false.
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Photo Scanner
**Movie Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner
**IMDB Movie Options** (com.plexapp.agents.imdb)
* **title** (bool): Localized titles. Default value false.
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
* **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
* **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **ratings** (int): Ratings Source, Default value 0 Possible options:
0:Rotten Tomatoes, 1:IMDb, 2:The Movie Database.
* **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **country** (int): Default value 46 Possible options 0:Argentina, 1:Australia, 2:Austria,
3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador,
16:France, 17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland,
22:Italy, 23:Jamaica, 24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands,
29:New Zealand, 30:Nicaragua, 31:Panama, 32:Paraguay, 33:Peru, 34:Portugal,
35:Peoples Republic of China, 36:Puerto Rico, 37:Russia, 38:Singapore, 39:South Africa,
40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad, 45:United Kingdom,
46:United States, 47:Uruguay, 48:Venezuela.
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **usage** (bool): Send anonymous usage data to Plex. Default value true.
**TheMovieDB Movie Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default value 47 Possible
options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada,
9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain,
42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay,
49:Venezuela.
**Show Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.thetvdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **episodeSort** (int): Episode order. Default -1 Possible options: 0:Oldest first, 1:Newest first.
* **flattenSeasons** (int): Seasons. Default value 0 Possible options: 0:Show,1:Hide.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Series Scanner
**TheTVDB Show Options** (com.plexapp.agents.thetvdb)
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
**TheMovieDB Show Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default value 47 options
0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile,
10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa,
41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States,
48:Uruguay, 49:Venezuela.
**Other Video Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner
**IMDB Other Video Options** (com.plexapp.agents.imdb)
* **title** (bool): Localized titles. Default value false.
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
* **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
* **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **ratings** (int): Ratings Source Default value 0 Possible options:
0:Rotten Tomatoes,1:IMDb,2:The Movie Database.
* **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **country** (int): Country: Default value 46 Possible options: 0:Argentina, 1:Australia, 2:Austria,
3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador, 16:France,
17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland, 22:Italy, 23:Jamaica,
24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands, 29:New Zealand, 30:Nicaragua,
31:Panama, 32:Paraguay, 33:Peru, 34:Portugal, 35:Peoples Republic of China, 36:Puerto Rico,
37:Russia, 38:Singapore, 39:South Africa, 40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad,
45:United Kingdom, 46:United States, 47:Uruguay, 48:Venezuela.
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **usage** (bool): Send anonymous usage data to Plex. Default value true.
**TheMovieDB Other Video Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default
value 47 Possible options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize,
6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic,
13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany,
19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica,
25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand,
31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore,
40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad,
46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela. |
def update(self, response_headers):
    """Update the state of the rate limiter based on the response headers.

    This method should only be called following a HTTP request to reddit.
    Response headers that do not contain x-ratelimit fields will be treated
    as a single request. This behavior is to error on the safe-side as such
    responses should trigger exceptions that indicate invalid behavior.
    """
    if "x-ratelimit-remaining" not in response_headers:
        # No rate-limit info: conservatively count this as one request.
        if self.remaining is not None:
            self.remaining -= 1
            self.used += 1
        return
    now = time.time()
    prev_remaining = self.remaining
    seconds_to_reset = int(response_headers["x-ratelimit-reset"])
    self.remaining = float(response_headers["x-ratelimit-remaining"])
    self.used = int(response_headers["x-ratelimit-used"])
    self.reset_timestamp = now + seconds_to_reset
    if self.remaining <= 0:
        # Budget exhausted: next request must wait for the window reset.
        self.next_request_timestamp = self.reset_timestamp
        return
    if prev_remaining is not None and prev_remaining > self.remaining:
        # Estimate how many clients share this budget from how fast it
        # dropped since the previous response.
        estimated_clients = prev_remaining - self.remaining
    else:
        estimated_clients = 1.0
    # Pace requests so the shared budget lasts until the reset time.
    self.next_request_timestamp = min(
        self.reset_timestamp,
        now + (estimated_clients * seconds_to_reset / self.remaining),
    ) | Update the state of the rate limiter based on the response headers.
This method should only be called following a HTTP request to reddit.
Response headers that do not contain x-ratelimit fields will be treated
as a single request. This behavior is to error on the safe-side as such
responses should trigger exceptions that indicate invalid behavior. |
def _remove_none_values(dictionary):
    """Remove, in place, every key of *dictionary* whose value is None.

    Returns the list of popped values (all None), matching the output of
    the map-based implementation.
    """
    none_keys = [key for key in dictionary if dictionary[key] is None]
    return [dictionary.pop(key) for key in none_keys]
def get_video_info_for_course_and_profiles(course_id, profiles):
    """Returns a dict of edx_video_ids with duration and the requested
    encoding profiles for every video in a course.

    Args:
        course_id (str): id of the course
        profiles (list): list of profile_names

    Returns:
        dict: ``{edx_video_id: {"duration": seconds,
        "profiles": {profile_name: {"url": ..., "file_size": ...}}}}``

    Raises:
        ValInternalError: if the database query fails.
    """
    course_id = six.text_type(course_id)
    try:
        encoded_videos = EncodedVideo.objects.filter(
            profile__profile_name__in=profiles,
            video__courses__course_id=course_id
        ).select_related()
    except Exception:
        error_message = u"Could not get encoded videos for course: {0}".format(course_id)
        logger.exception(error_message)
        raise ValInternalError(error_message)
    return_dict = {}
    for enc_vid in encoded_videos:
        # One entry per video; duration lives at the top level...
        return_dict.setdefault(enc_vid.video.edx_video_id, {}).update(
            {
                "duration": enc_vid.video.duration,
            }
        )
        # ...and each encoding is nested under "profiles" by profile name.
        return_dict[enc_vid.video.edx_video_id].setdefault("profiles", {}).update(
            {enc_vid.profile.profile_name: {
                "url": enc_vid.url,
                "file_size": enc_vid.file_size,
            }}
        )
    return return_dict | Returns a dict of edx_video_ids with a dict of requested profiles.
Args:
course_id (str): id of the course
profiles (list): list of profile_names
Returns:
(dict): Returns all the profiles attached to a specific
edx_video_id
{
edx_video_id: {
'duration': length of the video in seconds,
'profiles': {
profile_name: {
'url': url of the encoding
'file_size': size of the file in bytes
},
}
},
}
Example:
Given two videos with two profiles each in course_id 'test_course':
{
u'edx_video_id_1': {
u'duration: 1111,
u'profiles': {
u'mobile': {
'url': u'http: //www.example.com/meow',
'file_size': 2222
},
u'desktop': {
'url': u'http: //www.example.com/woof',
'file_size': 4444
}
}
},
u'edx_video_id_2': {
u'duration: 2222,
u'profiles': {
u'mobile': {
'url': u'http: //www.example.com/roar',
'file_size': 6666
},
u'desktop': {
'url': u'http: //www.example.com/bzzz',
'file_size': 8888
}
}
}
} |
def safe_filename(self, part):
    """Return a safe filename or file part.

    Delegates to the module-level ``safe_filename`` function, filling in
    this instance's configured sanitization options.
    """
    return safe_filename(
        part,
        os_type=self._os_type, no_control=self._no_control,
        ascii_only=self._ascii_only, case=self._case,
        max_length=self._max_filename_length,
    ) | Return a safe filename or file part.
def count_publishingcountries(country, **kwargs):
    """Lists occurrence counts for all countries that publish data about
    the given country.

    :param country: [str] A country, two letter code
    :return: dict

    Usage::

        from pygbif import occurrences
        occurrences.count_publishingcountries(country = "DE")
    """
    endpoint = gbif_baseurl + 'occurrence/counts/publishingCountries'
    return gbif_GET(endpoint, {"country": country}, **kwargs)
:param country: [str] A country, two letter code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_publishingcountries(country = "DE") |
def from_date_time_string(cls, datetime_string, leap_year=False):
    """Create a Ladybug DateTime from a '%d %b %H:%M' string.

    Usage:
        dt = DateTime.from_date_time_string("31 Dec 12:00")
    """
    parsed = datetime.strptime(datetime_string, '%d %b %H:%M')
    return cls(parsed.month, parsed.day, parsed.hour, parsed.minute, leap_year)
Usage:
dt = DateTime.from_date_time_string("31 Dec 12:00") |
def SPI_config(self,config):
    """Configure SPI interface parameters.

    Writes *config* to register 0xF0 over the I2C bus and reads the
    register back so the caller can verify what the device accepted.

    :param config: SPI configuration byte to write.
    :returns: the configuration byte read back from the device.
    """
    self.bus.write_byte_data(self.address, 0xF0, config)
    return self.bus.read_byte_data(self.address, 0xF0) | Configure SPI interface parameters.
def _get_filename(request, item):
    """Creates the download filename for a Geopedia item.

    When ``request.keep_image_names`` is set, the item's "nice name" is
    used (spaces replaced by underscores); otherwise the name is derived
    from the parsed layer id and the last component of the object path.
    """
    if request.keep_image_names:
        # NOTE(review): this branch omits request.image_format, unlike the
        # branch below -- confirm that is intentional.
        filename = OgcImageService.finalize_filename(item['niceName'].replace(' ', '_'))
    else:
        filename = OgcImageService.finalize_filename(
            '_'.join([str(GeopediaService._parse_layer(request.layer)), item['objectPath'].rsplit('/', 1)[-1]]),
            request.image_format
        )
    LOGGER.debug("filename=%s", filename)
    return filename | Creates a filename
def data_from_repaircafe_org():
    """Scrape repair-cafe names and contact URLs from repaircafe.org.

    Opens the search results in Chrome, repeatedly clicks the "view more"
    link until it disappears, then parses every <h4><a> entry from the
    fully expanded page.

    :return: list of {"name": ..., "url": ...} dicts.
    """
    browser = webdriver.Chrome()
    try:
        browser.get("https://repaircafe.org/en/?s=Contact+the+local+organisers")
        browser.maximize_window()
        while True:
            try:
                viewmore = browser.find_element_by_id("viewmore_link")
                browser.execute_script("arguments[0].scrollIntoView();", viewmore)
                viewmore.click()
            # FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            except Exception:
                # Link gone (or stale): all results are loaded.
                break
            # Give the page time to append the next batch of results.
            sleep(2)
        page_source = BeautifulSoup(browser.page_source, "lxml")
    finally:
        # FIX: always release the browser, even when scraping fails.
        browser.quit()
    data = []
    for h4 in page_source.find_all("h4"):
        for a in h4.find_all('a', href=True):
            data.append({"name": a.contents[0], "url": a['href']})
    return data
def map_to_resource(self, data_element, resource=None):
    """Maps the given data element to a new resource or updates the given
    resource in place.

    When no resource is supplied, the element is staged in a fresh
    collection: a member element yields a member resource built from the
    staged entity, while a collection element yields the staging
    collection itself.

    :raises ValueError: If :param:`data_element` does not provide
        :class:`everest.representers.interfaces.IDataElement`.
    """
    if not IDataElement.providedBy(data_element):
        raise ValueError('Expected data element, got %s.' % data_element)
    if resource is None:
        # Stage the element so its entity/entities get materialized.
        coll = \
            create_staging_collection(data_element.mapping.mapped_class)
        agg = coll.get_aggregate()
        agg.add(data_element)
        if IMemberDataElement.providedBy(data_element):
            # Single member: wrap the one staged entity in a member resource.
            ent = next(iter(agg))
            resource = \
                data_element.mapping.mapped_class.create_from_entity(ent)
        else:
            resource = coll
    else:
        resource.update(data_element)
    return resource | Maps the given data element to a new resource or updates the given
resource.
:raises ValueError: If :param:`data_element` does not provide
:class:`everest.representers.interfaces.IDataElement`. |
def cmd_startstop(options):
    """Start or stop the specified instance.

    Finds instances matching the args and the instance-state expected by
    the command, determines the target instance, performs the action, and
    prints the returned state-change information.

    Args:
        options (object): contains args and data from parser.
    """
    # The command implies the state the instance must currently be in.
    statelu = {"start": "stopped", "stop": "running"}
    options.inst_state = statelu[options.command]
    debg.dprint("toggle set state: ", options.inst_state)
    (i_info, param_str) = gather_data(options)
    (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)
    response = awsc.startstop(tar_inst, options.command)
    # AWS nests the result under a command-specific top-level key.
    responselu = {"start": "StartingInstances", "stop": "StoppingInstances"}
    filt = responselu[options.command]
    resp = {}
    state_term = ('CurrentState', 'PreviousState')
    for i, j in enumerate(state_term):
        resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name']
    print("Current State: {}{}{} - Previous State: {}{}{}\n".
          format(C_STAT[resp[0]], resp[0], C_NORM,
                 C_STAT[resp[1]], resp[1], C_NORM)) | Start or Stop the specified instance.
Finds instances that match args and instance-state expected by the
command. Then, the target instance is determined, the action is
performed on the instance, and the return information is displayed.
Args:
options (object): contains args and data from parser. |
def from_millis(cls, timeout_ms):
    """Coerce *timeout_ms* into a PolledTimeout, creating one if needed.

    Objects already behaving like a PolledTimeout (duck-typed on
    ``has_expired``) are passed through; None yields an unbounded timeout;
    any other value is interpreted as milliseconds.

    Args:
        timeout_ms: PolledTimeout object, or number of milliseconds.

    Returns:
        A PolledTimeout expiring in timeout_ms milliseconds — either
        timeout_ms itself or a newly allocated instance.
    """
    if hasattr(timeout_ms, 'has_expired'):
        return timeout_ms
    return cls(None) if timeout_ms is None else cls(timeout_ms / 1000.0)
If timeout_ms is already a PolledTimeout, just return it, otherwise create a
new PolledTimeout with the given timeout in milliseconds.
Args:
timeout_ms: PolledTimeout object, or number of milliseconds to use for
creating a new one.
Returns:
A PolledTimeout object that will expire in timeout_ms milliseconds, which
may be timeout_ms itself, or a newly allocated PolledTimeout. |
def nonempty_set(C, mincount_connectivity=0):
    """Return the set of states that have at least one incoming or
    outgoing count.

    Counts below ``mincount_connectivity`` are treated as zero (the input
    matrix itself is never modified).
    """
    if mincount_connectivity > 0:
        # Build a thresholded copy rather than mutating the caller's matrix.
        C = np.where(C < mincount_connectivity, 0, C)
    totals = C.sum(axis=0) + C.sum(axis=1)
    return np.flatnonzero(totals > 0)
def register_as_default(language: Language):
    """Registers a coverage extractor class as the default coverage
    extractor for a given language. Requires that the coverage extractor
    class has already been registered with a given name.

    .. code: python

        from bugzoo.core import Language
        from bugzoo.mgr.coverage import CoverageExtractor, register, \\
            register_as_default

        @register_as_default(Language.CPP)
        @register('mycov')
        class MyCoverageExtractor(CoverageExtractor):
            ...
    """
    def wrap(cls: Type['CoverageExtractor']) -> Type['CoverageExtractor']:
        # Delegate to the class's own registration hook, then return the
        # class unchanged so the decorator is transparent.
        cls.register_as_default(language)
        return cls
    return wrap
def load_child_sections_for_section(context, section, count=None):
    """Returns all child sections.

    If the `locale_code` in the context is not the main language, it will
    return the translations of the live articles.
    """
    main_page = section.get_main_language_page()
    children = SectionPage.objects.child_of(main_page).filter(
        language__is_main_language=True)
    locale = context.get('locale_code')
    if locale:
        return get_pages(context, children, locale)
    # No locale in context: just truncate the main-language queryset.
    return children[:count]
def subword(w):
    """Function used in the Key Expansion routine that takes a four-byte
    input word and applies an S-box to each of the four bytes to produce
    an output word.

    NOTE(review): assumes ``w`` is a flat 32-element array that reshapes
    into 4 rows of 8 (one row per byte) — confirm against callers.
    """
    rows = w.reshape(4, 8)
    result = SBOX[rows[0]]
    for row in rows[1:]:
        result = result + SBOX[row]
    return result
def get_cursor_pos(window):
    """Retrieves the last reported cursor position, relative to the client
    area of the window.

    Wrapper for:
        void glfwGetCursorPos(GLFWwindow* window, double* xpos, double* ypos);
    """
    # Out-parameters for the C call; glfw writes the coordinates into them.
    xpos = ctypes.c_double(0.0)
    ypos = ctypes.c_double(0.0)
    _glfw.glfwGetCursorPos(window, ctypes.pointer(xpos), ctypes.pointer(ypos))
    return xpos.value, ypos.value
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.