language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
|---|---|
java
|
/**
 * Creates a {@code DbColumn.Builder} pre-populated with the given column
 * name, type and auto-increment flag.
 *
 * @param name    column name
 * @param type    column type
 * @param autoInc whether the column is auto-incrementing
 * @return a builder carrying the three supplied settings
 */
public static DbColumn.Builder dbColumn(String name, DbColumnType type, boolean autoInc) {
    DbColumn.Builder builder = new DbColumn.Builder();
    // Reassign after each step so this works whether the builder is
    // mutable-and-returns-this or immutable-and-returns-a-copy.
    builder = builder.name(name);
    builder = builder.type(type);
    return builder.autoInc(autoInc);
}
|
java
|
/**
 * Marshalls the given {@code ConfigurationAggregator} instance into the
 * supplied {@code ProtocolMarshaller}, emitting each of its fields under its
 * corresponding marshalling-info binding.
 *
 * @param configurationAggregator the model object to marshall; must not be null
 * @param protocolMarshaller      the protocol marshaller receiving the fields
 * @throws SdkClientException if the argument is null or marshalling fails
 */
public void marshall(ConfigurationAggregator configurationAggregator, ProtocolMarshaller protocolMarshaller) {
    // SDK contract: reject a null model object up front with a client-side error.
    if (configurationAggregator == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        // Emit each field with its pre-built binding descriptor.
        protocolMarshaller.marshall(configurationAggregator.getConfigurationAggregatorName(), CONFIGURATIONAGGREGATORNAME_BINDING);
        protocolMarshaller.marshall(configurationAggregator.getConfigurationAggregatorArn(), CONFIGURATIONAGGREGATORARN_BINDING);
        protocolMarshaller.marshall(configurationAggregator.getAccountAggregationSources(), ACCOUNTAGGREGATIONSOURCES_BINDING);
        protocolMarshaller.marshall(configurationAggregator.getOrganizationAggregationSource(), ORGANIZATIONAGGREGATIONSOURCE_BINDING);
        protocolMarshaller.marshall(configurationAggregator.getCreationTime(), CREATIONTIME_BINDING);
        protocolMarshaller.marshall(configurationAggregator.getLastUpdatedTime(), LASTUPDATEDTIME_BINDING);
    } catch (Exception e) {
        // Wrap any marshalling failure in the SDK's client exception, keeping the cause.
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
|
python
|
def _encoder(self, obj):
""" Encode a toc element leaf-node """
return {'__class__': obj.__class__.__name__,
'ident': obj.ident,
'group': obj.group,
'name': obj.name,
'ctype': obj.ctype,
'pytype': obj.pytype,
'access': obj.access}
raise TypeError(repr(obj) + ' is not JSON serializable')
|
java
|
/**
 * Returns whether this session is transacted, tracing entry/exit when
 * diagnostic tracing is enabled.
 *
 * @return the value of the {@code transacted} flag
 */
@Override
public boolean isTransacted()
{
    final boolean traceable = TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled();
    if (traceable)
    {
        SibTr.entry(this, tc, "isTransacted");
        SibTr.exit(this, tc, "isTransacted", Boolean.valueOf(transacted));
    }
    return transacted;
}
|
java
|
/**
 * Parses a serialized QPath entry of the form
 * {@code <qname><PREFIX_DELIMITER><index>} into a {@code QPathEntry}.
 *
 * @param qEntry the serialized entry string
 * @return the parsed entry
 * @throws IllegalNameException  if the name part is not a valid internal qname
 * @throws NumberFormatException if the delimiter is missing or the index part
 *                               is not a valid integer
 */
public static QPathEntry parse(String qEntry) throws IllegalNameException, NumberFormatException
{
    int delimIndex = qEntry.lastIndexOf(QPath.PREFIX_DELIMITER);
    if (delimIndex < 0)
    {
        // Without this guard, substring(0, -1) below fails with an unhelpful
        // StringIndexOutOfBoundsException instead of a declared exception.
        throw new NumberFormatException("Invalid QPath entry, index delimiter not found: " + qEntry);
    }
    String qnameString = qEntry.substring(0, delimIndex);
    String indexString = qEntry.substring(delimIndex + 1);
    InternalQName qname = InternalQName.parse(qnameString);
    return new QPathEntry(qname, Integer.valueOf(indexString));
}
|
java
|
/**
 * Averages the metrics of the most recent samples covering the given span.
 *
 * @param span time span (same unit as {@code period}) whose samples to average
 * @return a list of metrics whose values are the rounded per-sample averages
 */
public List<Metric> getAverages(int span) {
    // Number of trailing samples to consider, clamped to what is available.
    int samples = Math.min(span / period, dataList.size());
    Map<String, Metric> totals = new LinkedHashMap<>();
    for (int idx = dataList.size() - samples; idx < dataList.size(); idx++) {
        for (Metric sample : dataList.get(idx).getMetrics()) {
            Metric running = totals.get(sample.getName());
            if (running == null) {
                // First occurrence: seed the accumulator with a copy.
                totals.put(sample.getName(),
                        new Metric(sample.getId(), sample.getName(), sample.getValue()));
            } else {
                running.setValue(running.getValue() + sample.getValue());
            }
        }
    }
    // Convert accumulated sums to rounded averages in place.
    for (Metric running : totals.values()) {
        running.setValue(Math.round((double) running.getValue() / samples));
    }
    return new ArrayList<>(totals.values());
}
|
java
|
/**
 * Visits a method invocation. For an unqualified {@code super(...)} call whose
 * target class has an outer instance, records a reference to the innermost
 * outer-this symbol (it will be passed implicitly to the super constructor),
 * then continues with the default traversal.
 */
public void visitApply(JCMethodInvocation tree) {
    // Only super(...) constructor calls are of interest here.
    if (TreeInfo.name(tree.meth) == names._super) {
        Symbol constructor = TreeInfo.symbol(tree.meth);
        ClassSymbol c = (ClassSymbol)constructor.owner;
        // An unqualified super call (not "expr.super(...)") to a class with an
        // enclosing instance consumes the current outer-this, if one is in scope.
        if (c.hasOuterInstance() &&
            !tree.meth.hasTag(SELECT) &&
            outerThisStack.head != null)
            visitSymbol(outerThisStack.head);
    }
    super.visitApply(tree);
}
|
python
|
def DESCRIBE(self):
    """Build a DESCRIBE request asking the RTSP server what services it
    makes available (SDP payload requested)."""
    request_line = "DESCRIBE " + self.session.url + " RTSP/1.0\r\n"
    headers = (
        self.sequence
        + self.authentication
        + self.user_agent
        + "Accept: application/sdp\r\n"
    )
    # Blank line terminates the RTSP header section.
    return request_line + headers + "\r\n"
|
java
|
/**
 * Converts the contents of the given stream to a CLOB-compatible value by
 * materializing the stream and delegating to the {@code byte[]} overload.
 *
 * @param conn  connection used by the delegate conversion
 * @param input stream whose bytes form the CLOB content
 * @return the converted value produced by the delegate
 * @throws SQLException if the delegate conversion fails
 */
public static Object convertClob(Connection conn, InputStream input) throws SQLException {
    byte[] bytes = toByteArray(input);
    return convertClob(conn, bytes);
}
|
java
|
/**
 * Parses a {@code host[:port]} string into a {@code HostAddress}. Accepts a
 * bracketed IPv6 literal ({@code [::1]:8080}), a plain {@code host:port}
 * pair, a bare host name, or an unbracketed IPv6 literal (no port).
 *
 * @param hostPortString the string to parse; must not be null
 * @return the parsed address; the port is {@code NO_PORT} when absent
 * @throws IllegalArgumentException if the bracketed form is malformed, the
 *         port is unparseable (including a leading {@code +}), or the port
 *         is out of range
 */
@JsonCreator
public static HostAddress fromString(String hostPortString)
{
    requireNonNull(hostPortString, "hostPortString is null");

    String host;
    String portString = null;

    if (hostPortString.startsWith("[")) {
        // Bracketed host, typically an IPv6 literal such as "[::1]:8080".
        Matcher matcher = BRACKET_PATTERN.matcher(hostPortString);
        if (!matcher.matches()) {
            throw new IllegalArgumentException("Invalid bracketed host/port: " + hostPortString);
        }
        host = matcher.group(1);
        portString = matcher.group(2); // may be null when no port was given
    }
    else {
        int firstColon = hostPortString.indexOf(':');
        boolean exactlyOneColon =
                firstColon >= 0 && hostPortString.indexOf(':', firstColon + 1) == -1;
        if (exactlyOneColon) {
            // host:port form
            host = hostPortString.substring(0, firstColon);
            portString = hostPortString.substring(firstColon + 1);
        }
        else {
            // Zero colons (bare host name) or 2+ colons (IPv6 literal, no port).
            host = hostPortString;
        }
    }

    int port = NO_PORT;
    if (portString != null && !portString.isEmpty()) {
        // Integer.parseInt on JDK7+ accepts a leading '+'; reject it explicitly.
        if (portString.startsWith("+")) {
            throw new IllegalArgumentException("Unparseable port number: " + hostPortString);
        }
        try {
            port = Integer.parseInt(portString);
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("Unparseable port number: " + hostPortString);
        }
        if (!isValidPort(port)) {
            throw new IllegalArgumentException("Port number out of range: " + hostPortString);
        }
    }
    return new HostAddress(host, port);
}
|
java
|
/**
 * Removes every element of {@code coll} from this set.
 *
 * @param coll the set whose elements should be removed
 * @return {@code true} if at least one element was removed
 */
public boolean removeAll(final IntHashSet coll)
{
    boolean removedAny = false;
    for (final int candidate : coll.values)
    {
        // Slots holding MISSING_VALUE are empty placeholders, not elements.
        if (candidate != MISSING_VALUE)
        {
            removedAny |= remove(candidate);
        }
    }
    // The missing-value sentinel itself is tracked by a separate flag.
    if (coll.containsMissingValue)
    {
        removedAny |= remove(MISSING_VALUE);
    }
    return removedAny;
}
|
python
|
def parse_payment_result(self, xml):
    """Parse a WeChat Pay payment-result notification.

    Verifies the embedded signature against the (sandbox or production)
    API key, converts the monetary fields to integers, and returns the
    notification payload as a dict.

    Raises:
        InvalidSignatureException: if the XML cannot be parsed, has no
            ``xml`` root, or its signature does not match.
    """
    try:
        parsed = xmltodict.parse(xml)
    except (xmltodict.ParsingInterrupted, ExpatError):
        raise InvalidSignatureException()

    if not parsed or 'xml' not in parsed:
        raise InvalidSignatureException()

    payload = parsed['xml']
    provided_sign = payload.pop('sign', None)
    key = self.sandbox_api_key if self.sandbox else self.api_key
    if provided_sign != calculate_signature(payload, key):
        raise InvalidSignatureException()

    # Monetary amounts arrive as strings; normalize them to ints.
    for amount_key in ('total_fee', 'settlement_total_fee', 'cash_fee',
                       'coupon_fee', 'coupon_count'):
        if amount_key in payload:
            payload[amount_key] = int(payload[amount_key])

    payload['sign'] = provided_sign
    return payload
|
python
|
def interrupt(self, threadId=None):
    """
    Interrupts the thread with the given id by delegating to the backend.
    Does nothing when no backend is available.

    :param threadId: | <int> || None
    """
    backend = self.backend()
    if not backend:
        return
    backend.interrupt(threadId)
|
python
|
def make_mon_removed_dir(path, file_name):
    """Move old monitor data under /var/lib/ceph/mon-removed.

    Creates the destination directory if needed, then relocates ``path``
    there as ``file_name``.
    """
    removed_dir = '/var/lib/ceph/mon-removed'
    try:
        os.makedirs(removed_dir)
    except OSError as err:
        # An already-existing directory is fine; anything else is fatal.
        if err.errno != errno.EEXIST:
            raise
    shutil.move(path, os.path.join(removed_dir, file_name))
|
java
|
/**
 * Opens the given Sparkey file in append mode, writes its hash index using
 * the given hash type, and closes the writer.
 *
 * @param file     the Sparkey log file to index
 * @param hashType the hash type to use for the index
 * @throws IOException if opening, hashing, or closing fails
 */
public static void writeHash(File file, HashType hashType) throws IOException {
    SparkeyWriter writer = append(file);
    try {
        writer.writeHash(hashType);
    } finally {
        // Previously the writer leaked when writeHash threw; always close it.
        writer.close();
    }
}
|
java
|
/**
 * Produces an array of {@code len} pseudorandom doubles drawn from {@code r}.
 *
 * @param len number of values to generate
 * @param r   source of randomness
 * @return a new array of {@code len} values, each from {@code r.nextDouble()}
 */
public static double[] randomDoubleArray(int len, Random r) {
    // Random.doubles(n) is specified to yield values as if by nextDouble(),
    // so the generated sequence is identical to an explicit loop.
    return r.doubles(len).toArray();
}
|
python
|
def create(image,
           name=None,
           start=False,
           skip_translate=None,
           ignore_collisions=False,
           validate_ip_addrs=True,
           client_timeout=salt.utils.docker.CLIENT_TIMEOUT,
           **kwargs):
    '''
    Create a new container

    image
        Image from which to create the container

    name
        Name for the new container. If not provided, Docker will randomly
        generate one for you (it will be included in the return data).

    start : False
        If ``True``, start container after creating it

        .. versionadded:: 2018.3.0

    skip_translate
        This function translates Salt CLI or SLS input into the format which
        docker-py expects. However, in the event that Salt's translation logic
        fails (due to potential changes in the Docker Remote API, or to bugs in
        the translation code), this argument can be used to exert granular
        control over which arguments are translated and which are not.

        Pass this argument as a comma-separated list (or Python list) of
        arguments, and translation for each passed argument name will be
        skipped. Alternatively, pass ``True`` and *all* translation will be
        skipped.

        Skipping translation allows for arguments to be formatted directly in
        the format which docker-py expects. This allows for API changes and
        other issues to be more easily worked around. An example of using this
        option to skip translation would be:

        .. code-block:: bash

            salt myminion docker.create image=centos:7.3.1611 skip_translate=environment environment="{'FOO': 'bar'}"

        See the following links for more information:

        - `docker-py Low-level API`_
        - `Docker Engine API`_

    ignore_collisions : False
        Since many of docker-py's arguments differ in name from their CLI
        counterparts (with which most Docker users are more familiar), Salt
        detects usage of these and aliases them to the docker-py version of
        that argument. However, if both the alias and the docker-py version of
        the same argument (e.g. ``env`` and ``environment``) are used, an error
        will be raised. Set this argument to ``True`` to suppress these errors
        and keep the docker-py version of the argument.

    validate_ip_addrs : True
        For parameters which accept IP addresses as input, IP address
        validation will be performed. To disable, set this to ``False``

    client_timeout : 60
        Timeout in seconds for the Docker client. This is not a timeout for
        this function, but for receiving a response from the API.

        .. note::
            This is only used if Salt needs to pull the requested image.

    **CONTAINER CONFIGURATION ARGUMENTS**

    auto_remove (or *rm*) : False
        Enable auto-removal of the container on daemon side when the
        container’s process exits (analogous to running a docker container with
        ``--rm`` on the CLI).

        Examples:

        - ``auto_remove=True``
        - ``rm=True``

    binds
        Files/directories to bind mount. Each bind mount should be passed in
        one of the following formats:

        - ``<host_path>:<container_path>`` - ``host_path`` is mounted within
          the container as ``container_path`` with read-write access.
        - ``<host_path>:<container_path>:<selinux_context>`` - ``host_path`` is
          mounted within the container as ``container_path`` with read-write
          access. Additionally, the specified selinux context will be set
          within the container.
        - ``<host_path>:<container_path>:<read_only>`` - ``host_path`` is
          mounted within the container as ``container_path``, with the
          read-only or read-write setting explicitly defined.
        - ``<host_path>:<container_path>:<read_only>,<selinux_context>`` -
          ``host_path`` is mounted within the container as ``container_path``,
          with the read-only or read-write setting explicitly defined.
          Additionally, the specified selinux context will be set within the
          container.

        ``<read_only>`` can be either ``rw`` for read-write access, or ``ro``
        for read-only access. When omitted, it is assumed to be read-write.

        ``<selinux_context>`` can be ``z`` if the volume is shared between
        multiple containers, or ``Z`` if the volume should be private.

        .. note::
            When both ``<read_only>`` and ``<selinux_context>`` are specified,
            there must be a comma before ``<selinux_context>``.

        Binds can be expressed as a comma-separated list or a Python list,
        however in cases where both ro/rw and an selinux context are specified,
        the binds *must* be specified as a Python list.

        Examples:

        - ``binds=/srv/www:/var/www:ro``
        - ``binds=/srv/www:/var/www:rw``
        - ``binds=/srv/www:/var/www``
        - ``binds="['/srv/www:/var/www:ro,Z']"``
        - ``binds="['/srv/www:/var/www:rw,Z']"``
        - ``binds=/srv/www:/var/www:Z``

        .. note::
            The second and third examples above are equivalent to each other,
            as are the last two examples.

    blkio_weight
        Block IO weight (relative weight), accepts a weight value between 10
        and 1000.

        Example: ``blkio_weight=100``

    blkio_weight_device
        Block IO weight (relative device weight), specified as a list of
        expressions in the format ``PATH:WEIGHT``

        Example: ``blkio_weight_device=/dev/sda:100``

    cap_add
        List of capabilities to add within the container. Can be passed as a
        comma-separated list or a Python list. Requires Docker 1.2.0 or
        newer.

        Examples:

        - ``cap_add=SYS_ADMIN,MKNOD``
        - ``cap_add="[SYS_ADMIN, MKNOD]"``

    cap_drop
        List of capabilities to drop within the container. Can be passed as a
        comma-separated string or a Python list. Requires Docker 1.2.0 or
        newer.

        Examples:

        - ``cap_drop=SYS_ADMIN,MKNOD``,
        - ``cap_drop="[SYS_ADMIN, MKNOD]"``

    command (or *cmd*)
        Command to run in the container

        Example: ``command=bash`` or ``cmd=bash``

        .. versionchanged:: 2015.8.1
            ``cmd`` is now also accepted

    cpuset_cpus (or *cpuset*)
        CPUs on which to allow execution, specified as a string
        containing a range (e.g. ``0-3``) or a comma-separated list of CPUs
        (e.g. ``0,1``).

        Examples:

        - ``cpuset_cpus="0-3"``
        - ``cpuset="0,1"``

    cpuset_mems
        Memory nodes on which to allow execution, specified as a string
        containing a range (e.g. ``0-3``) or a comma-separated list of MEMs
        (e.g. ``0,1``). Only effective on NUMA systems.

        Examples:

        - ``cpuset_mems="0-3"``
        - ``cpuset_mems="0,1"``

    cpu_group
        The length of a CPU period in microseconds

        Example: ``cpu_group=100000``

    cpu_period
        Microseconds of CPU time that the container can get in a CPU period

        Example: ``cpu_period=50000``

    cpu_shares
        CPU shares (relative weight), specified as an integer between 2 and 1024.

        Example: ``cpu_shares=512``

    detach : False
        If ``True``, run the container's command in the background (daemon
        mode)

        Example: ``detach=True``

    devices
        List of host devices to expose within the container

        Examples:

        - ``devices="/dev/net/tun,/dev/xvda1:/dev/xvda1,/dev/xvdb1:/dev/xvdb1:r"``
        - ``devices="['/dev/net/tun', '/dev/xvda1:/dev/xvda1', '/dev/xvdb1:/dev/xvdb1:r']"``

    device_read_bps
        Limit read rate (bytes per second) from a device, specified as a list
        of expressions in the format ``PATH:RATE``, where ``RATE`` is either an
        integer number of bytes, or a string ending in ``kb``, ``mb``, or
        ``gb``.

        Examples:

        - ``device_read_bps="/dev/sda:1mb,/dev/sdb:5mb"``
        - ``device_read_bps="['/dev/sda:100mb', '/dev/sdb:5mb']"``

    device_read_iops
        Limit read rate (I/O per second) from a device, specified as a list
        of expressions in the format ``PATH:RATE``, where ``RATE`` is a number
        of I/O operations.

        Examples:

        - ``device_read_iops="/dev/sda:1000,/dev/sdb:500"``
        - ``device_read_iops="['/dev/sda:1000', '/dev/sdb:500']"``

    device_write_bps
        Limit write rate (bytes per second) from a device, specified as a list
        of expressions in the format ``PATH:RATE``, where ``RATE`` is either an
        integer number of bytes, or a string ending in ``kb``, ``mb`` or
        ``gb``.

        Examples:

        - ``device_write_bps="/dev/sda:100mb,/dev/sdb:50mb"``
        - ``device_write_bps="['/dev/sda:100mb', '/dev/sdb:50mb']"``

    device_write_iops
        Limit write rate (I/O per second) from a device, specified as a list
        of expressions in the format ``PATH:RATE``, where ``RATE`` is a number
        of I/O operations.

        Examples:

        - ``device_write_iops="/dev/sda:1000,/dev/sdb:500"``
        - ``device_write_iops="['/dev/sda:1000', '/dev/sdb:500']"``

    dns
        List of DNS nameservers. Can be passed as a comma-separated list or a
        Python list.

        Examples:

        - ``dns=8.8.8.8,8.8.4.4``
        - ``dns="['8.8.8.8', '8.8.4.4']"``

        .. note::
            To skip IP address validation, use ``validate_ip_addrs=False``

    dns_opt
        Additional options to be added to the container’s ``resolv.conf`` file

        Example: ``dns_opt=ndots:9``

    dns_search
        List of DNS search domains. Can be passed as a comma-separated list
        or a Python list.

        Examples:

        - ``dns_search=foo1.domain.tld,foo2.domain.tld``
        - ``dns_search="[foo1.domain.tld, foo2.domain.tld]"``

    domainname
        The domain name to use for the container

        Example: ``domainname=domain.tld``

    entrypoint
        Entrypoint for the container. Either a string (e.g. ``"mycmd --arg1
        --arg2"``) or a Python list (e.g. ``"['mycmd', '--arg1', '--arg2']"``)

        Examples:

        - ``entrypoint="cat access.log"``
        - ``entrypoint="['cat', 'access.log']"``

    environment (or *env*)
        Either a dictionary of environment variable names and their values, or
        a Python list of strings in the format ``VARNAME=value``.

        Examples:

        - ``environment='VAR1=value,VAR2=value'``
        - ``environment="['VAR1=value', 'VAR2=value']"``
        - ``environment="{'VAR1': 'value', 'VAR2': 'value'}"``

    extra_hosts
        Additional hosts to add to the container's /etc/hosts file. Can be
        passed as a comma-separated list or a Python list. Requires Docker
        1.3.0 or newer.

        Examples:

        - ``extra_hosts=web1:10.9.8.7,web2:10.9.8.8``
        - ``extra_hosts="['web1:10.9.8.7', 'web2:10.9.8.8']"``
        - ``extra_hosts="{'web1': '10.9.8.7', 'web2': '10.9.8.8'}"``

        .. note::
            To skip IP address validation, use ``validate_ip_addrs=False``

    group_add
        List of additional group names and/or IDs that the container process
        will run as

        Examples:

        - ``group_add=web,network``
        - ``group_add="['web', 'network']"``

    hostname
        Hostname of the container. If not provided, and if a ``name`` has been
        provided, the ``hostname`` will default to the ``name`` that was
        passed.

        Example: ``hostname=web1``

        .. warning::
            If the container is started with ``network_mode=host``, the
            hostname will be overridden by the hostname of the Minion.

    interactive (or *stdin_open*): False
        Leave stdin open, even if not attached

        Examples:

        - ``interactive=True``
        - ``stdin_open=True``

    ipc_mode (or *ipc*)
        Set the IPC mode for the container. The default behavior is to create a
        private IPC namespace for the container, but this option can be
        used to change that behavior:

        - ``container:<container_name_or_id>`` reuses another container shared
          memory, semaphores and message queues
        - ``host``: use the host's shared memory, semaphores and message queues

        Examples:

        - ``ipc_mode=container:foo``
        - ``ipc=host``

        .. warning::
            Using ``host`` gives the container full access to local shared
            memory and is therefore considered insecure.

    isolation
        Specifies the type of isolation technology used by containers

        Example: ``isolation=hyperv``

        .. note::
            The default value on Windows server is ``process``, while the
            default value on Windows client is ``hyperv``. On Linux, only
            ``default`` is supported.

    labels (or *label*)
        Add metadata to the container. Labels can be set both with and without
        values:

        Examples:

        - ``labels=foo,bar=baz``
        - ``labels="['foo', 'bar=baz']"``

        .. versionchanged:: 2018.3.0
            Labels both with and without values can now be mixed. Earlier
            releases only permitted one method or the other.

    links
        Link this container to another. Links should be specified in the format
        ``<container_name_or_id>:<link_alias>``. Multiple links can be passed,
        either as a comma separated list or a Python list.

        Examples:

        - ``links=web1:link1,web2:link2``,
        - ``links="['web1:link1', 'web2:link2']"``
        - ``links="{'web1': 'link1', 'web2': 'link2'}"``

    log_driver
        Set container's logging driver. Requires Docker 1.6 or newer.

        Example:

        - ``log_driver=syslog``

        .. note::
            The logging driver feature was improved in Docker 1.13 introducing
            option name changes. Please see Docker's `Configure logging
            drivers`_ documentation for more information.

        .. _`Configure logging drivers`: https://docs.docker.com/engine/admin/logging/overview/

    log_opt
        Config options for the ``log_driver`` config option. Requires Docker
        1.6 or newer.

        Example:

        - ``log_opt="syslog-address=tcp://192.168.0.42,syslog-facility=daemon"``
        - ``log_opt="['syslog-address=tcp://192.168.0.42', 'syslog-facility=daemon']"``
        - ``log_opt="{'syslog-address': 'tcp://192.168.0.42', 'syslog-facility': 'daemon'}"``

    lxc_conf
        Additional LXC configuration parameters to set before starting the
        container.

        Examples:

        - ``lxc_conf="lxc.utsname=docker,lxc.arch=x86_64"``
        - ``lxc_conf="['lxc.utsname=docker', 'lxc.arch=x86_64']"``
        - ``lxc_conf="{'lxc.utsname': 'docker', 'lxc.arch': 'x86_64'}"``

        .. note::
            These LXC configuration parameters will only have the desired
            effect if the container is using the LXC execution driver, which
            has been deprecated for some time.

    mac_address
        MAC address to use for the container. If not specified, a random MAC
        address will be used.

        Example: ``mac_address=01:23:45:67:89:0a``

    mem_limit (or *memory*) : 0
        Memory limit. Can be specified in bytes or using single-letter units
        (i.e. ``512M``, ``2G``, etc.). A value of ``0`` (the default) means no
        memory limit.

        Examples:

        - ``mem_limit=512M``
        - ``memory=1073741824``

    mem_swappiness
        Tune a container's memory swappiness behavior. Accepts an integer
        between 0 and 100.

        Example: ``mem_swappiness=60``

    memswap_limit (or *memory_swap*) : -1
        Total memory limit (memory plus swap). Set to ``-1`` to disable swap. A
        value of ``0`` means no swap limit.

        Examples:

        - ``memswap_limit=1G``
        - ``memory_swap=2147483648``

    network_disabled : False
        If ``True``, networking will be disabled within the container

        Example: ``network_disabled=True``

    network_mode : bridge
        One of the following:

        - ``bridge`` - Creates a new network stack for the container on the
          docker bridge
        - ``none`` - No networking (equivalent of the Docker CLI argument
          ``--net=none``). Not to be confused with Python's ``None``.
        - ``container:<name_or_id>`` - Reuses another container's network stack
        - ``host`` - Use the host's network stack inside the container

          .. warning::
              Using ``host`` mode gives the container full access to the hosts
              system's services (such as D-Bus), and is therefore considered
              insecure.

        Examples:

        - ``network_mode=none``
        - ``network_mode=container:web1``

    oom_kill_disable
        Whether to disable OOM killer

        Example: ``oom_kill_disable=False``

    oom_score_adj
        An integer value containing the score given to the container in order
        to tune OOM killer preferences

        Example: ``oom_score_adj=500``

    pid_mode
        Set to ``host`` to use the host container's PID namespace within the
        container. Requires Docker 1.5.0 or newer.

        Example: ``pid_mode=host``

    pids_limit
        Set the container's PID limit. Set to ``-1`` for unlimited.

        Example: ``pids_limit=2000``

    port_bindings (or *publish*)
        Bind exposed ports which were exposed using the ``ports`` argument to
        :py:func:`docker.create <salt.modules.dockermod.create>`. These
        should be passed in the same way as the ``--publish`` argument to the
        ``docker run`` CLI command:

        - ``ip:hostPort:containerPort`` - Bind a specific IP and port on the
          host to a specific port within the container.
        - ``ip::containerPort`` - Bind a specific IP and an ephemeral port to a
          specific port within the container.
        - ``hostPort:containerPort`` - Bind a specific port on all of the
          host's interfaces to a specific port within the container.
        - ``containerPort`` - Bind an ephemeral port on all of the host's
          interfaces to a specific port within the container.

        Multiple bindings can be separated by commas, or passed as a Python
        list. The below two examples are equivalent:

        - ``port_bindings="5000:5000,2123:2123/udp,8080"``
        - ``port_bindings="['5000:5000', '2123:2123/udp', 8080]"``

        Port bindings can also include ranges:

        - ``port_bindings="14505-14506:4505-4506"``

        .. note::
            When specifying a protocol, it must be passed in the
            ``containerPort`` value, as seen in the examples above.

    ports
        A list of ports to expose on the container. Can be passed as
        comma-separated list or a Python list. If the protocol is omitted, the
        port will be assumed to be a TCP port.

        Examples:

        - ``ports=1111,2222/udp``
        - ``ports="[1111, '2222/udp']"``

    privileged : False
        If ``True``, runs the exec process with extended privileges

        Example: ``privileged=True``

    publish_all_ports (or *publish_all*): False
        Publish all ports to the host

        Example: ``publish_all_ports=True``

    read_only : False
        If ``True``, mount the container’s root filesystem as read only

        Example: ``read_only=True``

    restart_policy (or *restart*)
        Set a restart policy for the container. Must be passed as a string in
        the format ``policy[:retry_count]`` where ``policy`` is one of
        ``always``, ``unless-stopped``, or ``on-failure``, and ``retry_count``
        is an optional limit to the number of retries. The retry count is ignored
        when using the ``always`` or ``unless-stopped`` restart policy.

        Examples:

        - ``restart_policy=on-failure:5``
        - ``restart_policy=always``

    security_opt
        Security configuration for MLS systems such as SELinux and AppArmor.
        Can be passed as a comma-separated list or a Python list.

        Examples:

        - ``security_opt=apparmor:unconfined,param2:value2``
        - ``security_opt='["apparmor:unconfined", "param2:value2"]'``

        .. important::
            Some security options can contain commas. In these cases, this
            argument *must* be passed as a Python list, as splitting by comma
            will result in an invalid configuration.

        .. note::
            See the documentation for security_opt at
            https://docs.docker.com/engine/reference/run/#security-configuration

    shm_size
        Size of /dev/shm

        Example: ``shm_size=128M``

    stop_signal
        The signal used to stop the container. The default is ``SIGTERM``.

        Example: ``stop_signal=SIGRTMIN+3``

    stop_timeout
        Timeout to stop the container, in seconds

        Example: ``stop_timeout=5``

    storage_opt
        Storage driver options for the container

        Examples:

        - ``storage_opt='dm.basesize=40G'``
        - ``storage_opt="['dm.basesize=40G']"``
        - ``storage_opt="{'dm.basesize': '40G'}"``

    sysctls (or *sysctl*)
        Set sysctl options for the container

        Examples:

        - ``sysctl='fs.nr_open=1048576,kernel.pid_max=32768'``
        - ``sysctls="['fs.nr_open=1048576', 'kernel.pid_max=32768']"``
        - ``sysctls="{'fs.nr_open': '1048576', 'kernel.pid_max': '32768'}"``

    tmpfs
        A map of container directories which should be replaced by tmpfs
        mounts, and their corresponding mount options. Can be passed as Python
        list of PATH:VALUE mappings, or a Python dictionary. However, since
        commas usually appear in the values, this option *cannot* be passed as
        a comma-separated list.

        Examples:

        - ``tmpfs="['/run:rw,noexec,nosuid,size=65536k', '/var/lib/mysql:rw,noexec,nosuid,size=600m']"``
        - ``tmpfs="{'/run': 'rw,noexec,nosuid,size=65536k', '/var/lib/mysql': 'rw,noexec,nosuid,size=600m'}"``

    tty : False
        Attach TTYs

        Example: ``tty=True``

    ulimits (or *ulimit*)
        List of ulimits. These limits should be passed in the format
        ``<ulimit_name>:<soft_limit>:<hard_limit>``, with the hard limit being
        optional. Can be passed as a comma-separated list or a Python list.

        Examples:

        - ``ulimits="nofile=1024:1024,nproc=60"``
        - ``ulimits="['nofile=1024:1024', 'nproc=60']"``

    user
        User under which to run exec process

        Example: ``user=foo``

    userns_mode (or *user_ns_mode*)
        Sets the user namespace mode, when the user namespace remapping option
        is enabled.

        Example: ``userns_mode=host``

    volumes (or *volume*)
        List of directories to expose as volumes. Can be passed as a
        comma-separated list or a Python list.

        Examples:

        - ``volumes=/mnt/vol1,/mnt/vol2``
        - ``volume="['/mnt/vol1', '/mnt/vol2']"``

    volumes_from
        Container names or IDs from which the container will get volumes. Can
        be passed as a comma-separated list or a Python list.

        Example: ``volumes_from=foo``, ``volumes_from=foo,bar``,
        ``volumes_from="[foo, bar]"``

    volume_driver
        Sets the container's volume driver

        Example: ``volume_driver=foobar``

    working_dir (or *workdir*)
        Working directory inside the container

        Examples:

        - ``working_dir=/var/log/nginx``
        - ``workdir=/var/www/myapp``

    **RETURN DATA**

    A dictionary containing the following keys:

    - ``Id`` - ID of the newly-created container
    - ``Name`` - Name of the newly-created container

    CLI Example:

    .. code-block:: bash

        # Create a data-only container
        salt myminion docker.create myuser/mycontainer volumes="/mnt/vol1,/mnt/vol2"
        # Create a CentOS 7 container that will stay running once started
        salt myminion docker.create centos:7 name=mycent7 interactive=True tty=True command=bash
    '''
    # Pull the image first if it is not already present (unless the caller
    # passed inspect=False to skip the local-image check).
    if kwargs.pop('inspect', True) and not resolve_image_id(image):
        pull(image, client_timeout=client_timeout)

    # Translate Salt-style kwargs into docker-py kwargs; anything docker-py
    # does not recognize comes back in unused_kwargs.
    kwargs, unused_kwargs = _get_create_kwargs(
        skip_translate=skip_translate,
        ignore_collisions=ignore_collisions,
        validate_ip_addrs=validate_ip_addrs,
        **kwargs)

    if unused_kwargs:
        log.warning(
            'The following arguments were ignored because they are not '
            'recognized by docker-py: %s', sorted(unused_kwargs)
        )

    log.debug(
        'docker.create: creating container %susing the following '
        'arguments: %s',
        'with name \'{0}\' '.format(name) if name is not None else '',
        kwargs
    )
    time_started = time.time()
    response = _client_wrapper('create_container',
                               image,
                               name=name,
                               **kwargs)
    response['Time_Elapsed'] = time.time() - time_started
    _clear_context()

    # When Docker generated the name, look it up so it can be returned.
    if name is None:
        name = inspect_container(response['Id'])['Name'].lstrip('/')
    response['Name'] = name

    if start:
        try:
            start_(name)
        except CommandExecutionError as exc:
            raise CommandExecutionError(
                'Failed to start container after creation',
                info={'response': response, 'error': exc.__str__()}
            )
        else:
            response['Started'] = True

    return response
|
python
|
def parse_keytab(keytab):
    """Read the contents of a KRB5 keytab file, returning a list of
    credentials listed within

    Parameters
    ----------
    keytab : `str`
        path to keytab file

    Returns
    -------
    creds : `list` of `tuple`
        the (unique) list of `(username, realm, kvno)` as read from the
        keytab file

    Raises
    ------
    KerberosError
        if the ``klist`` executable cannot be found, or it fails to read
        the given keytab

    Examples
    --------
    >>> from gwpy.io.kerberos import parse_keytab
    >>> print(parse_keytab("creds.keytab"))
    [('albert.einstein', 'LIGO.ORG', 1)]
    """
    try:
        out = subprocess.check_output(['klist', '-k', keytab],
                                      stderr=subprocess.PIPE)
    except OSError:
        raise KerberosError("Failed to locate klist, cannot read keytab")
    except subprocess.CalledProcessError:
        raise KerberosError("Cannot read keytab {!r}".format(keytab))
    principals = []
    for line in out.splitlines():
        if isinstance(line, bytes):
            line = line.decode('utf-8')
        try:
            # Split into (kvno, principal). maxsplit is passed by keyword:
            # the positional form is deprecated since Python 3.13.
            kvno, principal = re.split(r'\s+', line.strip(' '), maxsplit=1)
        except ValueError:
            # Not a two-field line (e.g. klist header rows); skip it.
            continue
        if not kvno.isdigit():
            # Skip the column-header line ("KVNO Principal").
            continue
        principals.append(tuple(principal.split('@')) + (int(kvno),))
    # return unique, ordered list
    return list(OrderedDict.fromkeys(principals))
|
java
|
/**
 * Configures TLS on the default virtual host using the given certificate
 * chain and private key files.
 *
 * @param keyCertChainFile the certificate chain file
 * @param keyFile          the private key file
 * @param keyPassword      password protecting {@code keyFile}, or {@code null}
 *                         if the key is not password-protected
 * @return {@code this} builder, for method chaining
 * @throws SSLException if the TLS configuration is invalid
 */
public ServerBuilder tls(
        File keyCertChainFile, File keyFile, @Nullable String keyPassword) throws SSLException {
    defaultVirtualHostBuilderUpdated();
    defaultVirtualHostBuilder.tls(keyCertChainFile, keyFile, keyPassword);
    return this;
}
|
java
|
/**
 * Clears all cached {@code CommerceCountryImpl} entities as well as the
 * entity finder cache and both list finder caches (paginated and
 * unpaginated).
 */
@Override
public void clearCache() {
    entityCache.clearCache(CommerceCountryImpl.class);
    finderCache.clearCache(FINDER_CLASS_NAME_ENTITY);
    finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITH_PAGINATION);
    finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITHOUT_PAGINATION);
}
|
java
|
/**
 * Builds an index of the keys currently present in {@code rowActionMap},
 * taken under the write lock so the snapshot is consistent.
 *
 * @return a {@code DoubleIntIndex} containing one unique entry per map key,
 *         each paired with 0
 */
public DoubleIntIndex getTransactionIDList() {
    writeLock.lock();
    try {
        // NOTE(review): constructor args are (initial capacity, second flag);
        // the meaning of the boolean is not visible here -- confirm against
        // DoubleIntIndex before relying on it.
        DoubleIntIndex lookup = new DoubleIntIndex(10, false);
        lookup.setKeysSearchTarget();
        // NOTE(review): nextInt() implies this is a project primitive-int
        // iterator type, not java.util.Iterator -- confirm; keys are assumed
        // to be int transaction ids.
        Iterator it = this.rowActionMap.keySet().iterator();
        for (; it.hasNext(); ) {
            lookup.addUnique(it.nextInt(), 0);
        }
        return lookup;
    } finally {
        // Always release the lock, even if index construction fails.
        writeLock.unlock();
    }
}
|
python
|
def _load_data(self, band):
    """In-flight effective areas for the Swift UVOT, as obtained from the CALDB.
    See Breeveld+ 2011. XXX: confirm that these are equal-energy, not
    quantum-efficiency.
    """
    arf_name = 'sw' + self._band_map[band] + '_20041120v106.arf'
    arf = bandpass_data_fits(arf_name)[1].data
    # The CALDB table stores the wavelength columns in a surprising order:
    #   arf.WAVE_MIN[i] < arf.WAVE_MIN[i+1], but
    #   arf.WAVE_MIN[i] > arf.WAVE_MAX[i]  (!)
    #   arf.WAVE_MIN[i] = arf.WAVE_MAX[i+1] (!)
    # i.e. WAVE_MAX holds the lower bin edge and WAVE_MIN the upper one.
    midpoints = 0.5 * (arf.WAVE_MIN + arf.WAVE_MAX)  # in Angstrom
    return pd.DataFrame({
        'wlen': midpoints,
        'resp': arf.SPECRESP,
        'wlo': arf.WAVE_MAX,  # deliberately "swapped"; see note above
        'whi': arf.WAVE_MIN,
    })
|
python
|
def skips(self, user):
    """
    Skips for user. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__.
    """
    url = self._build_url(self.endpoint.skips(id=user))
    return self._get(url)
|
python
|
def var_replace(self, text):
    """Replace every ``@VAR`` occurrence in *text* with the value stored for
    ``VAR`` in this instance's variable dictionary.
    """
    result = text
    for var_name, value in self._vardict.items():
        result = result.replace("@" + var_name, value)
    return result
|
java
|
/**
 * Two-argument convenience overload of {@code pkHash}; delegates to the
 * three-argument version with a {@code null} third argument.
 *
 * @param dbMapping the mapping the hash is computed against
 * @param d         the column-name-to-value map to hash
 * @return the hash value produced by the delegate
 */
public int pkHash(DbMapping dbMapping, Map<String, Object> d) {
    return pkHash(dbMapping, d, null);
}
|
java
|
/**
 * Serializes an admin-object definition as an {@code admin-object} XML element,
 * writing only the attributes that are set (class-name, jndi-name, enabled, id)
 * followed by one {@code config-property} child per configured property.
 * Values are resolved through {@code ao.getValue(...)} so expressions survive.
 */
protected void storeAdminObject(AdminObject ao, XMLStreamWriter writer) throws Exception
{
   writer.writeStartElement(CommonXML.ELEMENT_ADMIN_OBJECT);

   if (ao.getClassName() != null)
      writer.writeAttribute(CommonXML.ATTRIBUTE_CLASS_NAME,
                            ao.getValue(CommonXML.ATTRIBUTE_CLASS_NAME, ao.getClassName()));

   if (ao.getJndiName() != null)
      writer.writeAttribute(CommonXML.ATTRIBUTE_JNDI_NAME,
                            ao.getValue(CommonXML.ATTRIBUTE_JNDI_NAME, ao.getJndiName()));

   // Only emit 'enabled' when it is an expression or differs from the default,
   // keeping the output minimal.
   if (ao.isEnabled() != null && (ao.hasExpression(CommonXML.ATTRIBUTE_ENABLED) ||
                                  !Defaults.ENABLED.equals(ao.isEnabled())))
      writer.writeAttribute(CommonXML.ATTRIBUTE_ENABLED,
                            ao.getValue(CommonXML.ATTRIBUTE_ENABLED, ao.isEnabled().toString()));

   if (ao.getId() != null)
      writer.writeAttribute(CommonXML.ATTRIBUTE_ID, ao.getValue(CommonXML.ATTRIBUTE_ID, ao.getId()));

   if (ao.getConfigProperties() != null && !ao.getConfigProperties().isEmpty())
   {
      for (Map.Entry<String, String> property : ao.getConfigProperties().entrySet())
      {
         writer.writeStartElement(CommonXML.ELEMENT_CONFIG_PROPERTY);
         writer.writeAttribute(CommonXML.ATTRIBUTE_NAME, property.getKey());
         writer.writeCharacters(ao.getValue(CommonXML.ELEMENT_CONFIG_PROPERTY, property.getKey(),
                                            property.getValue()));
         writer.writeEndElement();
      }
   }

   writer.writeEndElement();
}
|
python
|
def remover(self, id_logicalenvironment):
    """Remove Logical Environment from by the identifier.
    :param id_logicalenvironment: Identifier of the Logical Environment. Integer value and greater than zero.
    :return: None
    :raise InvalidParameterError: The identifier of Logical Environment is null and invalid.
    :raise AmbienteLogicoNaoExisteError: Logical Environment not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Reject missing/non-positive identifiers before touching the API.
    if not is_valid_int_param(id_logicalenvironment):
        raise InvalidParameterError(
            u'The identifier of Logical Environment is invalid or was not informed.')

    url = 'logicalenvironment/{0}/'.format(id_logicalenvironment)
    code, xml = self.submit(None, 'DELETE', url)
    return self.response(code, xml)
|
java
|
/**
 * Returns the {@code path} field, lazily decoding it from its wire form.
 * Protobuf-generated accessor: the field is stored either as a cached
 * {@code String} or as the original {@code ByteString}; on first access the
 * bytes are decoded and, when valid UTF-8, cached back into the field.
 */
public java.lang.String getPath() {
    java.lang.Object ref = path_;
    if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
    } else {
        com.google.protobuf.ByteString bs =
                (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        // Only cache the decoded string when the bytes are valid UTF-8, so a
        // malformed payload keeps round-tripping its original bytes.
        if (bs.isValidUtf8()) {
            path_ = s;
        }
        return s;
    }
}
|
python
|
def get_all_build_config_set_records(self, id, **kwargs):
    """
    Get all build config set execution records associated with this build config set, returns empty list if none are found
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_all_build_config_set_records(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int id: Build config set id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str sort: Sorting RSQL
    :param str q: RSQL Query
    :return: BuildConfigurationSetRecordPage
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized payload is wanted, not the raw HTTP response.
    kwargs['_return_http_data_only'] = True
    fetch = self.get_all_build_config_set_records_with_http_info
    # Both paths delegate to the *_with_http_info variant: with a callback it
    # returns the request thread immediately, otherwise the deserialized data.
    return fetch(id, **kwargs)
|
python
|
def main():
    """
    NAME
    quick_hyst.py
    DESCRIPTION
    makes plots of hysteresis data
    SYNTAX
    quick_hyst.py [command line options]
    OPTIONS
    -h prints help message and quits
    -usr USER: identify user, default is ""
    -f: specify input file, default is magic_measurements.txt
    -spc SPEC: specify specimen name to plot and quit
    -sav save all plots and quit
    -fmt [png,svg,eps,jpg]
    """
    # --- command-line parsing -------------------------------------------------
    args = sys.argv
    PLT = 1
    plots = 0
    user, meas_file = "", "magic_measurements.txt"
    pltspec = ""
    dir_path = '.'
    fmt = 'png'
    verbose = pmagplotlib.verbose
    version_num = pmag.get_version()
    if '-WD' in args:
        ind = args.index('-WD')
        dir_path = args[ind+1]
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    if "-usr" in args:
        ind = args.index("-usr")
        user = args[ind+1]
    if '-f' in args:
        ind = args.index("-f")
        meas_file = args[ind+1]
    if '-sav' in args:
        verbose = 0
        plots = 1
    if '-spc' in args:
        ind = args.index("-spc")
        pltspec = args[ind+1]
        verbose = 0
        plots = 1
    if '-fmt' in args:
        ind = args.index("-fmt")
        fmt = args[ind+1]
    meas_file = dir_path+'/'+meas_file
    #
    #
    meas_data, file_type = pmag.magic_read(meas_file)
    if file_type != 'magic_measurements':
        print(main.__doc__)
        print('bad file')
        sys.exit()
    #
    # initialize some variables
    # define figure numbers for hyst,deltaM,DdeltaM curves
    HystRecs, RemRecs = [], []
    HDD = {}
    HDD['hyst'] = 1
    pmagplotlib.plot_init(HDD['hyst'], 5, 5)
    #
    # get list of unique experiment names and specimen names
    #
    experiment_names, sids = [], []
    hyst_data = pmag.get_dictitem(
        meas_data, 'magic_method_codes', 'LP-HYS', 'has')  # get all hysteresis data
    for rec in hyst_data:
        if 'er_synthetic_name' in rec.keys() and rec['er_synthetic_name'] != "":
            rec['er_specimen_name'] = rec['er_synthetic_name']
        if rec['magic_experiment_name'] not in experiment_names:
            experiment_names.append(rec['magic_experiment_name'])
        if rec['er_specimen_name'] not in sids:
            sids.append(rec['er_specimen_name'])
        if 'measurement_temp' not in rec.keys():
            # assume room T measurement unless otherwise specified
            rec['measurement_temp'] = '300'
    #
    k = 0
    if pltspec != "":
        k = sids.index(pltspec)
    intlist = ['measurement_magnitude', 'measurement_magn_moment',
               'measurement_magn_volume', 'measurement_magn_mass']
    # --- one pass per specimen ------------------------------------------------
    while k < len(sids):
        locname, site, sample, synth = '', '', '', ''
        s = sids[k]
        hmeths = []
        if verbose:
            print(s, k+1, 'out of ', len(sids))
        #
        #
        B, M = [], []  # B,M for hysteresis, Bdcd,Mdcd for irm-dcd data
        # get all measurements for this specimen
        spec = pmag.get_dictitem(hyst_data, 'er_specimen_name', s, 'T')
        if 'er_location_name' in spec[0].keys():
            locname = spec[0]['er_location_name']
        if 'er_site_name' in spec[0].keys():
            site = spec[0]['er_site_name']
        if 'er_sample_name' in spec[0].keys():
            sample = spec[0]['er_sample_name']
        if 'er_synthetic_name' in spec[0].keys():
            synth = spec[0]['er_synthetic_name']
        # pick the first intensity column that actually has data
        for m in intlist:
            # get all non-blank data for this specimen
            meas_data = pmag.get_dictitem(spec, m, '', 'F')
            if len(meas_data) > 0:
                break
        c = ['k-', 'b-', 'c-', 'g-', 'm-', 'r-', 'y-']
        cnum = 0
        if len(meas_data) > 0:
            Temps = []
            xlab, ylab, title = '', '', ''
            for rec in meas_data:
                if rec['measurement_temp'] not in Temps:
                    Temps.append(rec['measurement_temp'])
            for t in Temps:
                print('working on t: ', t)
                t_data = pmag.get_dictitem(
                    meas_data, 'measurement_temp', t, 'T')
                B, M = [], []
                for rec in t_data:
                    B.append(float(rec['measurement_lab_field_dc']))
                    M.append(float(rec[m]))
                # now plot the hysteresis curve(s)
                #
                if len(B) > 0:
                    B = numpy.array(B)
                    M = numpy.array(M)
                    if t == Temps[-1]:
                        xlab = 'Field (T)'
                        ylab = m
                        title = 'Hysteresis: '+s
                    if t == Temps[0]:
                        pmagplotlib.clearFIG(HDD['hyst'])
                    pmagplotlib.plot_xy(
                        HDD['hyst'], B, M, sym=c[cnum], xlab=xlab, ylab=ylab, title=title)
                    # draw the B=0 and M=0 axes through the loop
                    pmagplotlib.plot_xy(HDD['hyst'], [
                                        1.1*B.min(), 1.1*B.max()], [0, 0], sym='k-', xlab=xlab, ylab=ylab, title=title)
                    pmagplotlib.plot_xy(HDD['hyst'], [0, 0], [
                                        1.1*M.min(), 1.1*M.max()], sym='k-', xlab=xlab, ylab=ylab, title=title)
                    if verbose:
                        pmagplotlib.draw_figs(HDD)
                    cnum += 1
                    if cnum == len(c):  # recycle the colour list
                        cnum = 0
        #
        files = {}
        if plots:
            if pltspec != "":
                s = pltspec
            files = {}
            for key in HDD.keys():
                if pmagplotlib.isServer:  # use server plot naming convention
                    if synth == '':
                        filename = "LO:_"+locname+'_SI:_'+site + \
                            '_SA:_'+sample+'_SP:_'+s+'_TY:_'+key+'_.'+fmt
                    else:
                        filename = 'SY:_'+synth+'_TY:_'+key+'_.'+fmt
                    files[key] = filename
                else:  # use more readable plot naming convention
                    if synth == '':
                        filename = ''
                        for item in [locname, site, sample, s, key]:
                            if item:
                                item = item.replace(' ', '_')
                                filename += item + '_'
                        if filename.endswith('_'):
                            filename = filename[:-1]
                        filename += ".{}".format(fmt)
                    else:
                        # FIX: extension was the literal string '.fmt';
                        # use the user-selected format instead
                        filename = synth+'_'+key+'.'+fmt
                    files[key] = filename
            pmagplotlib.save_plots(HDD, files)
            if pltspec != "":
                sys.exit()
        if verbose:
            pmagplotlib.draw_figs(HDD)
            # NOTE(review): raw_input is Python 2 only; under Python 3 this
            # needs input() -- confirm against the package's py2/3 support.
            ans = raw_input(
                "S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
            if ans == "a":
                files = {}
                for key in HDD.keys():
                    if pmagplotlib.isServer:
                        print('server')
                        files[key] = "LO:_"+locname+'_SI:_'+site + \
                            '_SA:_'+sample+'_SP:_'+s+'_TY:_'+key+'_.'+fmt
                    else:
                        print('not server')
                        filename = ''
                        for item in [locname, site, sample, s, key]:
                            if item:
                                item = item.replace(' ', '_')
                                filename += item + '_'
                        if filename.endswith('_'):
                            filename = filename[:-1]
                        filename += ".{}".format(fmt)
                        files[key] = filename
                print('files', files)
                pmagplotlib.save_plots(HDD, files)
            if ans == '':
                k += 1
            if ans == "p":
                del HystRecs[-1]
                k -= 1
            if ans == 'q':
                print("Good bye")
                sys.exit()
            if ans == 's':
                keepon = 1
                specimen = raw_input(
                    'Enter desired specimen name (or first part there of): ')
                while keepon == 1:
                    try:
                        k = sids.index(specimen)
                        keepon = 0
                    except:
                        tmplist = []
                        for qq in range(len(sids)):
                            if specimen in sids[qq]:
                                tmplist.append(sids[qq])
                        print(specimen, " not found, but this was: ")
                        print(tmplist)
                        specimen = raw_input('Select one or try again\n ')
                        k = sids.index(specimen)
        else:
            k += 1
        if len(B) == 0:
            if verbose:
                print('skipping this one - no hysteresis data')
            k += 1
|
python
|
def key_value_contents(use_dict=None, as_class=dict, key_values=()):
    """Return the contents of an object as a dict."""
    if _debug: key_value_contents._debug("key_value_contents use_dict=%r as_class=%r key_values=%r", use_dict, as_class, key_values)

    # start from a fresh mapping unless the caller supplied one to extend
    if use_dict is None:
        use_dict = as_class()

    for key, value in key_values:
        if value is None:
            continue
        # recurse into values that know how to describe themselves
        if hasattr(value, 'dict_contents'):
            value = value.dict_contents(as_class=as_class)
        use_dict.__setitem__(key, value)

    return use_dict
|
python
|
def use_comparative_objective_view(self):
    """Pass through to provider ObjectiveLookupSession.use_comparative_objective_view"""
    self._object_views['objective'] = COMPARATIVE
    # Propagate the view change to every active provider session that
    # supports it; sessions without the method are skipped silently.
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_objective_view()
        except AttributeError:
            pass
|
java
|
/**
 * Finds the header entry whose name and value both match, or {@code null} when the
 * headers are empty, either argument is {@code null}, or no entry matches. Lookup
 * hashes the name into a bucket and walks that bucket's chain.
 */
private HeaderEntry getEntry(CharSequence name, CharSequence value) {
    if (length() == 0 || name == null || value == null) {
        return null;
    }
    int h = AsciiString.hashCode(name);
    int i = index(h);
    for (HeaderEntry e = headerFields[i]; e != null; e = e.next) {
        // To avoid short circuit behavior a bitwise operator is used instead of a boolean operator.
        // Both comparisons always run, keeping the check constant-time (timing-attack hardening).
        if (e.hash == h && (equalsConstantTime(name, e.name) & equalsConstantTime(value, e.value)) != 0) {
            return e;
        }
    }
    return null;
}
|
python
|
def alt_triangle_coordinates(i, j, k):
    """
    Computes coordinates of the constituent triangles of a triangulation for the
    simplex. These triangules are parallel to the lower axis on the upper side.
    Parameters
    ----------
    i,j,k: enumeration of the desired triangle
    Returns
    -------
    A numpy array of coordinates of the hexagon (unprojected)
    """
    # Three vertices of the downward-pointing triangle at cell (i, j, k).
    first = (i, j + 1, k - 1)
    second = (i + 1, j, k - 1)
    third = (i + 1, j + 1, k - 2)
    return [first, second, third]
|
python
|
def sinh(x):
    """
    Hyperbolic sine
    """
    # Plain numbers/arrays go straight through numpy; uncertain quantities
    # have sinh applied to their Monte-Carlo sample points instead.
    if not isinstance(x, UncertainFunction):
        return np.sinh(x)
    return UncertainFunction(np.sinh(x._mcpts))
|
python
|
def login(self, broker_name, account_cookie, account=None):
    """Log an account in to the trading front-end.

    In live trading the account's asset state must be synchronized right
    after registering with the front-end (2018-07-02).

    Arguments:
        broker_name -- name of the broker to attach the account to
        account_cookie -- unique account identifier

    Keyword Arguments:
        account -- pre-built account object; when None a fresh QA_Account
            is created for the cookie (default: {None})

    Returns:
        True when the account is registered and synchronized successfully,
        False otherwise (a partially-registered session is rolled back).
    """
    res = False
    if account is None:
        # Lazily create an account bound to this broker for the cookie.
        if account_cookie not in self.session.keys():
            self.session[account_cookie] = QA_Account(
                account_cookie=account_cookie,
                broker=broker_name
            )
        if self.sync_account(broker_name, account_cookie):
            res = True
        if self.if_start_orderthreading and res:
            # Route this session's orders through the order handler.
            self.order_handler.subscribe(
                self.session[account_cookie],
                self.broker[broker_name]
            )
    else:
        # NOTE(review): unlike the branch above, sync/subscribe only run when
        # the cookie was not already registered -- confirm this is intended.
        if account_cookie not in self.session.keys():
            account.broker = broker_name
            self.session[account_cookie] = account
            if self.sync_account(broker_name, account_cookie):
                res = True
            if self.if_start_orderthreading and res:
                self.order_handler.subscribe(
                    account,
                    self.broker[broker_name]
                )
    if res:
        return res
    # Roll back: drop any session entry left behind by a failed login.
    try:
        self.session.pop(account_cookie)
    except KeyError:
        pass
    return False
|
java
|
/**
 * Tests whether the code point is an XML "Extender" character as defined by the
 * XML 1.0 specification (production [89]): a fixed set of single code points plus
 * three small CJK ranges.
 *
 * @param ch the code point to test
 * @return true when ch is an extender character
 */
public static boolean isExtender(int ch)
{
    switch (ch)
    {
        case 0x00B7:
        case 0x02D0:
        case 0x02D1:
        case 0x0387:
        case 0x0640:
        case 0x0E46:
        case 0x0EC6:
        case 0x3005:
            return true;
        default:
            // Remaining extenders fall inside three contiguous CJK ranges.
            return (0x3031 <= ch && ch <= 0x3035)
                || (0x309D <= ch && ch <= 0x309E)
                || (0x30FC <= ch && ch <= 0x30FE);
    }
}
|
python
|
def fillstats(args):
    """
    %prog fillstats genome.fill
    Build stats on .fill file from GapCloser.
    """
    from jcvi.utils.cbook import SummaryStats, percentage, thousands

    p = OptionParser(fillstats.__doc__)
    opts, args = p.parse_args(args)

    # Exactly one positional argument (the .fill file) is required.
    if len(args) != 1:
        sys.exit(not p.print_help())

    fillfile, = args
    fp = open(fillfile)
    scaffolds = 0
    gaps = []
    for row in fp:
        # FASTA-style header lines mark scaffold boundaries; the rest are gaps.
        if row[0] == ">":
            scaffolds += 1
            continue
        fl = FillLine(row)
        gaps.append(fl)

    print("{0} scaffolds in total".format(scaffolds), file=sys.stderr)
    # Split the gaps by whether GapCloser managed to close them.
    closed = [x for x in gaps if x.closed]
    closedbp = sum(x.before for x in closed)
    notClosed = [x for x in gaps if not x.closed]
    notClosedbp = sum(x.before for x in notClosed)

    totalgaps = len(closed) + len(notClosed)

    print("Closed gaps: {0} size: {1} bp".\
        format(percentage(len(closed), totalgaps), thousands(closedbp)), file=sys.stderr)
    ss = SummaryStats([x.after for x in closed])
    print(ss, file=sys.stderr)
    # Delta = change in gap size introduced by closing.
    ss = SummaryStats([x.delta for x in closed])
    print("Delta:", ss, file=sys.stderr)

    print("Remaining gaps: {0} size: {1} bp".\
        format(percentage(len(notClosed), totalgaps), thousands(notClosedbp)), file=sys.stderr)
    ss = SummaryStats([x.after for x in notClosed])
    print(ss, file=sys.stderr)
|
java
|
/**
 * Extracts the p parameter when the preprocessor's distance function is an
 * Lp-norm; otherwise reports {@code Double.NaN} to signal "not an Lp norm".
 *
 * @param kNN preprocessor whose distance function is inspected
 * @return the norm's p value, or NaN when not applicable
 */
public static double getLPNormP(AbstractMaterializeKNNPreprocessor<?> kNN) {
  DistanceFunction<?> df = kNN.getDistanceQuery().getDistanceFunction();
  return (df instanceof LPNormDistanceFunction) //
      ? ((LPNormDistanceFunction) df).getP() //
      : Double.NaN;
}
|
java
|
/**
 * Scaling factor for one dimension: the value range when it is positive,
 * otherwise the maximum itself when positive, otherwise 1 (never zero, so it
 * is safe to divide by).
 */
private double factor(int dimension) {
  if (maxima[dimension] > minima[dimension]) {
    return maxima[dimension] - minima[dimension];
  }
  // Degenerate range: fall back to the maximum, or 1 when that is not positive.
  if (maxima[dimension] > 0) {
    return maxima[dimension];
  }
  return 1;
}
|
python
|
def _linux_brdel(br):
    '''
    Internal, deletes the bridge
    '''
    # Resolve the brctl binary, then shell out without invoking a shell.
    cmd = '{0} delbr {1}'.format(_tool_path('brctl'), br)
    return __salt__['cmd.run'](cmd, python_shell=False)
|
python
|
async def wait_for_all_empty(self, *queues):
    """
    Wait for multiple queues to be empty at the same time.
    Require delegate when calling from coroutines running in other containers
    """
    while True:
        # A queue that is already empty yields no matcher; re-check them all
        # after every wait so the queues are empty *simultaneously*.
        pending = [m for m in (q.waitForEmpty() for q in queues) if m is not None]
        if not pending:
            break
        await self.wait_for_all(*pending)
|
java
|
/**
 * Converts this measurement from its current unit to millimetres of mercury.
 *
 * @return the value expressed in mmHg as a new {@code MDecimal}
 */
public MDecimal getMMHg() {
    MDecimal result = new MDecimal(currentUnit.getConverterTo(
            MILLIMETER_OF_MERCURY).convert(doubleValue()));
    // Trace the unit conversion for diagnostics.
    logger.trace(MMarker.GETTER,
            "Converting from {} to millimetres of Mecrcury : {}",
            currentUnit, result);
    return result;
}
|
java
|
/**
 * Configures per-request replacement of a single variable against a single uniform
 * target host: one simulated host name is generated per replacement value, and each
 * carries the variable binding plus the uniform target host. Any previously set
 * target hosts and replacement maps are cleared; do not set target hosts again
 * after calling this.
 *
 * @param variable the variable name to replace in each request
 * @param replaceList one replacement value per generated request (nulls skipped)
 * @param uniformTargetHost the real host every request is sent to
 * @return this builder for chaining
 */
public ParallelTaskBuilder setReplaceVarMapToSingleTargetSingleVar(
        String variable, List<String> replaceList, String uniformTargetHost) {
    if (Strings.isNullOrEmpty(uniformTargetHost)) {
        // Fixed typo in log message ("skil" -> "skip").
        logger.error("uniform target host is empty or null. skip setting.");
        return this;
    }
    this.replacementVarMapNodeSpecific.clear();
    this.targetHosts.clear();

    int i = 0;
    for (String replace : replaceList) {
        if (replace == null) {
            logger.error("null replacement.. skip");
            continue;
        }
        // Each replacement value gets a synthetic per-request host name.
        String hostName = PcConstants.API_PREFIX + i;
        replacementVarMapNodeSpecific.put(
                hostName,
                new StrStrMap().addPair(variable, replace).addPair(
                        PcConstants.UNIFORM_TARGET_HOST_VAR,
                        uniformTargetHost));
        targetHosts.add(hostName);
        ++i;
    }
    this.requestReplacementType = RequestReplacementType.TARGET_HOST_SPECIFIC_VAR_REPLACEMENT;
    logger.info(
            "Set requestReplacementType as {} for single target. Will disable the set target hosts. "
            + "Also simulated and already set targetHost list with size {}. "
            + "\nPLEASE DO NOT SET TARGET HOSTS AGAIN WITH THIS API.",
            requestReplacementType.toString(), targetHosts.size());

    return this;
}
|
java
|
/**
 * Unsupported DOM operation on this read-only/stub element implementation:
 * always reports ER_FUNCTION_NOT_SUPPORTED instead of setting the attribute.
 *
 * @throws DOMException always, via {@code error(...)}
 */
public void setAttributeNS(
        String namespaceURI, String qualifiedName, String value)
    throws DOMException
{
    error(XMLErrorResources.ER_FUNCTION_NOT_SUPPORTED); //"setAttributeNS not supported!");
}
|
java
|
/**
 * Writes the controller's current lifecycle state (e.g. UP, DOWN) into the
 * management response node as a string.
 */
static void populateResponse(final ModelNode response, final ServiceController<?> serviceController) {
    response.set(serviceController.getState().toString());
}
|
python
|
async def add(self, useriden, query: str, reqs, incunit=None, incvals=None):
    '''
    Persistently adds an appointment
    Args:
        query (str):
            storm query to run
        reqs (Union[None, Dict[TimeUnit, Union[int, Tuple[int]], List[...]):
            one or more dicts of the fixed aspects of the appointment. dict value may be a single or multiple.
            May be an empty dict or None.
        incunit (Union[None, TimeUnit]):
            the unit that changes for recurring, or None for non-recurring. It is an error for this value to match
            a key in reqdict.
        incvals (Union[None, int, Iterable[int]): count of units of incunit or explicit day of week or day of month.
            Not allowed for incunit == None, required for others (1 would be a typical
            value)
    Notes:
        For values in reqs that are lists and incvals if a list, all combinations of all values (the product) are
        used
    Returns:
        iden of new appointment
    '''
    iden = s_common.guid()
    recur = incunit is not None
    # Monotonic index: orders appointments by creation.
    indx = self._next_indx
    self._next_indx += 1

    # ---- argument validation -------------------------------------------
    if reqs is None:
        reqs = {}

    if not query:
        raise ValueError('empty query')

    if not reqs and incunit is None:
        raise ValueError('at least one of reqs and incunit must be non-empty')

    if incunit is not None and incvals is None:
        raise ValueError('incvals must be non-None if incunit is non-None')

    # Normalize a single dict to a one-element list of dicts.
    if isinstance(reqs, Mapping):
        reqs = [reqs]

    # Find all combinations of values in reqdict values and incvals values
    recs = []
    for req in reqs:
        # _dictproduct expands list-valued entries into all combinations.
        reqdicts = self._dictproduct(req)
        if not isinstance(incvals, Iterable):
            incvals = (incvals, )
        recs.extend(ApptRec(rd, incunit, v) for (rd, v) in itertools.product(reqdicts, incvals))

    appt = _Appt(iden, recur, indx, query, useriden, recs)
    self._addappt(iden, appt)

    # Persist before returning so the appointment survives a restart.
    await self._storeAppt(appt)

    return iden
|
python
|
def _load_records(self, record_type_idstrs):
    """Loads query records"""
    # Initialize each requested record type; types whose implementation is
    # missing (ImportError) or unknown (KeyError) are skipped silently.
    for idstr in record_type_idstrs:
        try:
            self._init_record(idstr)
        except (ImportError, KeyError):
            pass
|
python
|
def _pool_put(pool_semaphore, tasks, put_to_pool_in, pool_size, id_self, \
              is_stopping):
    """
    (internal) Intended to be run in a seperate thread. Feeds tasks into
    to the pool whenever semaphore permits. Finishes if self._stopping is
    set.

    Flow: pull tasks from the weaved task iterator, gate each submission on
    pool_semaphore, and on per-task StopIteration send one sentinel per task;
    once every task has stopped, send one None sentinel per worker and exit.
    """
    log.debug('NuMap(%s) started pool_putter.' % id_self)
    # last valid result index seen per task, used in the task's stop sentinel
    last_tasks = {}
    # NOTE(review): 'lenght' matches the attribute name as spelled on the
    # tasks object elsewhere in this library -- do not "correct" it here.
    for task in xrange(tasks.lenght):
        last_tasks[task] = -1
    stop_tasks = []
    while True:
        # are we stopping the Weaver?
        if is_stopping():
            log.debug('NuMap(%s) pool_putter has been told to stop.' % \
                      id_self)
            tasks.stop()
        # try to get a task
        try:
            log.debug('NuMap(%s) pool_putter waits for next task.' % \
                      id_self)
            task = tasks.next()
            log.debug('NuMap(%s) pool_putter received next task.' % id_self)
        except StopIteration:
            # Weaver raised a StopIteration
            stop_task = tasks.i # current task
            log.debug('NuMap(%s) pool_putter caught StopIteration from task %s.' % \
                      (id_self, stop_task))
            if stop_task not in stop_tasks:
                # task raised stop for the first time.
                log.debug('NuMap(%s) pool_putter task %s first-time finished.' % \
                          (id_self, stop_task))
                stop_tasks.append(stop_task)
                pool_semaphore.acquire()
                log.debug('NuMap(%s) pool_putter sends a sentinel for task %s.' % \
                          (id_self, stop_task))
                # (task_id, None, last_result_index) marks this task finished
                put_to_pool_in((stop_task, None, last_tasks[stop_task]))
            if len(stop_tasks) == tasks.lenght:
                log.debug('NuMap(%s) pool_putter sent sentinels for all tasks.' % \
                          id_self)
                # all tasks have been stopped
                for _worker in xrange(pool_size):
                    # a bare None tells each worker to shut down
                    put_to_pool_in(None)
                log.debug('NuMap(%s) pool_putter sent sentinel for %s workers' % \
                          (id_self, pool_size))
                # this kills the pool_putter
                break
            # multiple StopIterations for a tasks are ignored.
            # This is for stride.
            continue
        # got task
        last_tasks[tasks.i] = task[-1][0] # last valid result
        log.debug('NuMap(%s) pool_putter waits for semaphore for task %s' % \
                  (id_self, task))
        # semaphore bounds how many submitted-but-unconsumed tasks exist
        pool_semaphore.acquire()
        log.debug('NuMap(%s) pool_putter gets semaphore for task %s' % \
                  (id_self, task))
        #gc.disable()
        put_to_pool_in(task)
        #gc.enable()
        log.debug('NuMap(%s) pool_putter submits task %s to worker.' % \
                  (id_self, task))
    log.debug('NuMap(%s) pool_putter returns' % id_self)
|
python
|
def _read_info_as_dict(fid, values):
"""Convenience function to read info in axon data to a nicely organized
dict.
"""
output = {}
for key, fmt in values:
val = unpack(fmt, fid.read(calcsize(fmt)))
if len(val) == 1:
output[key] = val[0]
else:
output[key] = val
return output
|
java
|
/**
 * Creates a new version of the specified managed policy, applying the
 * standard pre-execution hooks before delegating to the service call.
 */
@Override
public CreatePolicyVersionResult createPolicyVersion(CreatePolicyVersionRequest request) {
    request = beforeClientExecution(request);
    return executeCreatePolicyVersion(request);
}
|
python
|
def convert_rst_to_basic_text(contents):
    """Convert restructured text to basic text output.
    This function removes most of the decorations added
    in restructured text.
    This function is used to generate documentation we
    can show to users in a cross platform manner.
    Basic indentation and list formatting are kept,
    but many RST features are removed (such as
    section underlines).

    :param contents: reStructuredText source (str or bytes).
    :return: rendered plain text as a unicode string.
    """
    # The report_level override is so that we don't print anything
    # to stdout/stderr on rendering issues.
    converted = publish_string(
        contents, writer=BasicTextWriter(),
        settings_overrides={'report_level': 5})
    # publish_string returns bytes; callers expect text.
    return converted.decode('utf-8')
|
java
|
/**
 * Lists addresses for the given project aggregated across all regions, by
 * building the HTTP request from the project name and delegating to the
 * request-based overload.
 *
 * @param project the project to list addresses for (may be null)
 */
@BetaApi
public final AggregatedListAddressesPagedResponse aggregatedListAddresses(ProjectName project) {
    AggregatedListAddressesHttpRequest request =
            AggregatedListAddressesHttpRequest.newBuilder()
                    .setProject(project == null ? null : project.toString())
                    .build();
    return aggregatedListAddresses(request);
}
|
java
|
/**
 * Asynchronously gets the cluster monitoring status, delivering the result to
 * the supplied callback and returning a future for the pending call.
 */
public ServiceFuture<ClusterMonitoringResponseInner> getMonitoringStatusAsync(String resourceGroupName, String clusterName, final ServiceCallback<ClusterMonitoringResponseInner> serviceCallback) {
    return ServiceFuture.fromResponse(getMonitoringStatusWithServiceResponseAsync(resourceGroupName, clusterName), serviceCallback);
}
|
java
|
/**
 * Invokes the callable, retrying (with a wait) on {@code CRetriableException}
 * up to {@code nbTriesMax} attempts. Other failures are rethrown immediately:
 * {@code CStorageException} as-is, anything else wrapped.
 *
 * @param invoker the operation to run
 * @return the callable's result
 * @throws CStorageException when the operation ultimately fails
 */
public <T> T invokeRetry( Callable<T> invoker )
    throws CStorageException
{
    for ( int attempt = 1; ; attempt++ ) {
        try {
            if ( attempt > 1 ) {
                LOGGER.debug( "Invocation #{}/{}", attempt, nbTriesMax );
            }
            return invoker.call();
        } catch ( CRetriableException ex ) {
            if ( attempt >= nbTriesMax ) {
                // Retry budget exhausted: surface the underlying cause.
                LOGGER.debug( "Aborting invocation after {} attempts", nbTriesMax );
                Throwable cause = ex.getCause();
                if ( cause instanceof CStorageException ) {
                    throw ( CStorageException ) cause;
                }
                throw new CStorageException( "Invocation failure", cause );
            }
            LOGGER.debug( "Catching a CRetriableException, {} out of {} max attempts", attempt, nbTriesMax );
            // Back off before the next attempt; the exception may carry a delay hint.
            doWait( attempt, ex.getDelay() );
        } catch ( CStorageException ex ) {
            throw ex;
        } catch ( Exception ex ) {
            throw new CStorageException( "Invocation failure", ex );
        }
    }
}
|
python
|
def sortByIndex(self, index):
    """Implement a Index sort."""
    header = self.table_level.horizontalHeader()
    header.setSortIndicatorShown(True)
    # Sort the index model by the clicked column, honoring the current
    # ascending/descending indicator, then refresh dependent views.
    self.table_index.model().sort(index, header.sortIndicatorOrder())
    self._sort_update()
|
python
|
def stop_capture(self, slot_number, port_number):
    """
    Stops a packet capture.
    :param slot_number: slot number
    :param port_number: port number
    :raises DynamipsError: if the slot, port, or connection does not exist
    """
    # Validate the slot first; _slots is index-addressable, hence IndexError.
    try:
        adapter = self._slots[slot_number]
    except IndexError:
        raise DynamipsError('Slot {slot_number} does not exist on router "{name}"'.format(name=self._name,
                                                                                          slot_number=slot_number))
    if not adapter.port_exists(port_number):
        raise DynamipsError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter,
                                                                                            port_number=port_number))
    nio = adapter.get_nio(port_number)
    if not nio:
        raise DynamipsError("Port {slot_number}/{port_number} is not connected".format(slot_number=slot_number,
                                                                                       port_number=port_number))
    # Remove the capture filter in both directions (coroutine delegation).
    yield from nio.unbind_filter("both")
    log.info('Router "{name}" [{id}]: stopping packet capture on port {slot_number}/{port_number}'.format(name=self._name,
                                                                                                          id=self._id,
                                                                                                          nio_name=nio.name,
                                                                                                          slot_number=slot_number,
                                                                                                          port_number=port_number))
|
python
|
def get_theming_attribute(self, mode, name, part=None):
    """
    looks up theming attribute
    :param mode: ui-mode (e.g. `search`,`thread`...)
    :type mode: str
    :param name: identifier of the atttribute
    :type name: str
    :rtype: urwid.AttrSpec
    """
    # The configured colour mode (e.g. 1, 16, 256) selects which palette
    # variant of the attribute the theme returns.
    colourmode = int(self._config.get('colourmode'))
    return self._theme.get_attribute(colourmode, mode, name, part)
|
java
|
/**
 * Estimates the average fanout (link values per object) of every link field in the
 * table by sampling up to LINK_FANOUT_SAMPLE_SIZE objects, and records the result
 * per field in {@code tableLinkFanoutMap}. Does nothing when the table has no link
 * fields or the sample query returns no objects.
 */
private void computeLinkFanouts(TableDefinition tableDef,
                                Map<String, MutableFloat> tableLinkFanoutMap) {
    m_logger.info("Computing link field fanouts for table: {}", tableDef.getTableName());

    // Build a comma-separated list of the table's link field names for the query.
    StringBuilder buffer = new StringBuilder();
    for (FieldDefinition fieldDef : tableDef.getFieldDefinitions()) {
        if (fieldDef.isLinkField()) {
            if (buffer.length() > 0) {
                buffer.append(",");
            }
            buffer.append(fieldDef.getName());
        }
    }
    if (buffer.length() == 0) {
        // No link fields: nothing to measure.
        return;
    }

    // Sample query: all objects ("*"), only link fields, capped sample size.
    Map<String, String> queryParams = new HashMap<>();
    queryParams.put("q", "*");
    queryParams.put("f", buffer.toString());
    queryParams.put("s", Integer.toString(LINK_FANOUT_SAMPLE_SIZE));
    if (m_session instanceof OLAPSession) {
        // OLAP sessions additionally need the shard to query.
        queryParams.put("shards", m_config.shard);
    }
    QueryResult qResult = m_session.objectQuery(tableDef.getTableName(), queryParams);
    Collection<DBObject> objectSet = qResult.getResultObjects();
    if (objectSet.size() == 0) {
        return;
    }

    // Tally total link values per field across the sampled objects.
    Map<String, AtomicInteger> linkValueCounts = new HashMap<String, AtomicInteger>();
    int totalObjs = 0;
    for (DBObject dbObj : objectSet) {
        totalObjs++;
        for (String fieldName : dbObj.getFieldNames()) {
            if (tableDef.isLinkField(fieldName)) {
                Collection<String> linkValues = dbObj.getFieldValues(fieldName);
                AtomicInteger totalLinkValues = linkValueCounts.get(fieldName);
                if (totalLinkValues == null) {
                    linkValueCounts.put(fieldName, new AtomicInteger(linkValues.size()));
                } else {
                    totalLinkValues.addAndGet(linkValues.size());
                }
            }
        }
    }

    // Average per object; note a sparse link can round toward zero.
    for (String fieldName : linkValueCounts.keySet()) {
        AtomicInteger totalLinkValues = linkValueCounts.get(fieldName);
        float linkFanout = totalLinkValues.get() / (float)totalObjs;   // may round to 0
        m_logger.info("Average fanout for link {}: {}", fieldName, linkFanout);
        tableLinkFanoutMap.put(fieldName, new MutableFloat(linkFanout));
    }
}
|
python
|
def apply_time_offset(time, years=0, months=0, days=0, hours=0):
    """Apply a specified offset to the given time array.

    This is useful for GFDL model output of instantaneous values. For example,
    3 hourly data postprocessed to netCDF files spanning 1 year each will
    actually have time values that are offset by 3 hours, such that the first
    value is for 1 Jan 03:00 and the last value is 1 Jan 00:00 of the
    subsequent year. This causes problems in xarray, e.g. when trying to group
    by month. It is resolved by manually subtracting off those three hours,
    such that the dates span from 1 Jan 00:00 to 31 Dec 21:00 as desired.

    Parameters
    ----------
    time : xarray.DataArray representing a timeseries
    years, months, days, hours : int, optional
        The number of years, months, days, and hours, respectively, to offset
        the time array by. Positive values move the times later.

    Returns
    -------
    pandas.DatetimeIndex
    """
    # Build the calendar-aware shift once, then apply it to the whole array.
    shift = pd.DateOffset(years=years, months=months, days=days, hours=hours)
    return pd.to_datetime(time.values) + shift
|
python
|
async def eval(self, text, opts=None, user=None):
    '''
    Run a storm query and yield Node() objects.

    :param text: the storm query text to execute
    :param opts: optional query options passed to the runtime
    :param user: user to run as; defaults to this object's user
    '''
    if user is None:
        user = self.user

    # maintained for backward compatibility
    query = self.core.getStormQuery(text)
    with self.getStormRuntime(opts=opts, user=user) as runt:
        # Yield only the node of each (node, path) result pair.
        async for node, path in runt.iterStormQuery(query):
            yield node
python
|
def last_updated(self):
    '''
    This is used only when SEND_IF_MODIFIED_LAST_MODIFIED_HEADERS is set.
    :return: A DateTime object, or None when no timestamp is stored
    :rettype: datetime.datetime
    '''
    derived_path = self.context.request.url
    timestamp_key = self.timestamp_key_for(derived_path)
    # NOTE(review): exists() is given the bound method `self.timestamp_key_for`
    # as its first argument rather than `timestamp_key` -- confirm this matches
    # the storage backend's exists() signature.
    if self.exists(self.timestamp_key_for, derived_path):
        return self.storage.get(timestamp_key)
    return None
python
|
def _remove_non_propagating_yields(self):
    """Remove yield with no variables e.g. `yield 123` and plain `yield` from vulnerability."""
    # Filter in place (slice assignment) so existing references to the
    # reassignment_nodes list keep seeing the updated contents.
    self.reassignment_nodes[:] = [
        node for node in self.reassignment_nodes
        if not (isinstance(node, YieldNode) and len(node.right_hand_side_variables) == 1)
    ]
|
python
|
def get_network(self, name, batch_size=None, callback=None):
    '''Create a variable graph given network by name
    Returns: NnpNetwork
    '''
    # Work on a copy of the stored protobuf so the registry stays pristine.
    proto = nnabla_pb2.Network()
    proto.CopyFrom(self.network_dict[name])
    return NnpNetwork(proto, self._params, batch_size, callback=callback)
|
python
|
def validateInterfaceName(n):
    """
    Verifies that the supplied name is a valid DBus Interface name. Throws
    an L{error.MarshallingError} if the format is invalid

    @type n: C{string}
    @param n: A DBus interface name
    """
    # Checks run in order, so the first violated rule names the error.
    try:
        if n.count('.') == 0:
            raise Exception('At least two components required')
        if n.find('..') != -1:
            raise Exception('".." not allowed in interface names')
        if len(n) > 255:
            raise Exception('Name exceeds maximum length of 255')
        if n.startswith('.'):
            raise Exception('Names may not begin with a "."')
        if n[0].isdigit():
            raise Exception('Names may not begin with a digit')
        if if_re.search(n):
            raise Exception(
                'Names contains a character outside the set [A-Za-z0-9_.]')
        if dot_digit_re.search(n):
            raise Exception(
                'No components of an interface name may begin with a digit')
    except Exception as failure:
        raise MarshallingError('Invalid interface name "%s": %s' % (n, str(failure)))
|
python
|
def add(self, port, pkt):
    '''
    Add new input port + packet to buffer.
    '''
    # Slot ids are 1-based and assigned sequentially.
    slot = len(self._buffer) + 1
    if slot > self._buffsize:
        raise FullBuffer()
    # Deep-copy the packet so later caller mutations don't alter the buffer.
    self._buffer[slot] = (port, deepcopy(pkt))
    return slot
|
java
|
/**
 * Switches the progress drawable between its two supported geometries: the
 * LARGE preset, or the default-size preset for any other value.
 *
 * @param size one of the {@code @ProgressDrawableSize} constants
 */
public void updateSizes(@ProgressDrawableSize int size) {
    if (size == LARGE) {
        setSizeParameters(CIRCLE_DIAMETER_LARGE, CIRCLE_DIAMETER_LARGE, CENTER_RADIUS_LARGE,
                STROKE_WIDTH_LARGE, ARROW_WIDTH_LARGE, ARROW_HEIGHT_LARGE);
    } else {
        // Any non-LARGE value falls back to the default dimensions.
        setSizeParameters(CIRCLE_DIAMETER, CIRCLE_DIAMETER, CENTER_RADIUS, STROKE_WIDTH,
                ARROW_WIDTH, ARROW_HEIGHT);
    }
}
|
python
|
def add_photo_to_observation(observation_id: int, file_object: BinaryIO, access_token: str):
    """Upload a picture and assign it to an existing observation.
    :param observation_id: the ID of the observation
    :param file_object: a file-like object for the picture. Example: open('/Users/nicolasnoe/vespa.jpg', 'rb')
    :param access_token: the access token, as returned by :func:`get_access_token()`
    """
    url = "{base_url}/observation_photos".format(base_url=INAT_BASE_URL)
    payload = {'observation_photo[observation_id]': observation_id}
    # The photo itself goes as a multipart file upload under the 'file' key.
    response = requests.post(url=url,
                             headers=_build_auth_header(access_token),
                             data=payload,
                             files={'file': file_object})
    return response.json()
|
python
|
def formatter(self, key, value):
    """ Format messages for collectd to consume. """
    # collectd PUTVAL line: identifier, polling interval, then timestamp:value.
    return (
        "PUTVAL {host}/fedmsg/fedmsg_wallboard-{key} "
        "interval={interval} {timestamp}:{value}"
    ).format(
        host=self.host,
        key=key,
        interval=self.hub.config['collectd_interval'],
        timestamp=int(time.time()),
        value=value,
    )
|
python
|
def get_xname(self, var, coords=None):
    """Get the name of the x-dimension

    This method gives the name of the x-dimension (which is not necessarily
    the name of the coordinate if the variable has a coordinate attribute)

    Parameters
    ----------
    var: xarray.Variables
        The variable to get the dimension for
    coords: dict
        The coordinates to use for checking the axis attribute. If None,
        they are not used

    Returns
    -------
    str
        The coordinate name

    See Also
    --------
    get_x"""
    # Prefer a coordinate explicitly tagged with axis 'x' when coords are given.
    if coords is not None:
        coord = self.get_variable_by_axis(var, 'x', coords)
        if coord is not None and coord.name in var.dims:
            return coord.name
    # Fall back to dimension names registered in self.x.
    dimlist = list(self.x.intersection(var.dims))
    if dimlist:
        if len(dimlist) > 1:
            warn("Found multiple matches for x coordinate in the variable:"
                 "%s. I use %s" % (', '.join(dimlist), dimlist[0]),
                 PsyPlotRuntimeWarning)
        return dimlist[0]
    # otherwise we return the coordinate in the last position
    return var.dims[-1]
|
python
|
def filter(filter_creator):
    """
    Creates a decorator that can be used as a filter.

    .. warning::
        This is currently not compatible with most other decorators, if
        you are using a decorator that isn't part of `hurler` you should
        take caution.
    """
    # Single-element list used as a mutable cell so the inner closures can
    # share the filter function created by filter_decorator.
    filter_func = [None]

    def function_getter(function):
        # Attach the created filter to an existing Filter, or wrap a plain
        # callback in a new one.
        if isinstance(function, Filter):
            # BUG FIX: previously this passed the enclosing decorator
            # function (`filter`) instead of the created filter function.
            function.add_filter(filter_func[0])
            return function
        else:
            return Filter(
                filter=filter_func[0],
                callback=function,
            )

    def filter_decorator(*args, **kwargs):
        # Create the concrete filter function and remember it for
        # function_getter to consume.
        filter_function = filter_creator(*args, **kwargs)
        filter_func[0] = filter_function
        return function_getter

    return filter_decorator
|
java
|
/**
 * Callback invoked when the monitored object terminates. A non-null
 * exception indicates an abnormal termination: it is logged and a server
 * restart is scheduled; a null exception (clean shutdown) is ignored.
 *
 * @param object the terminated object (unused here)
 * @param e      the terminating exception, or null on clean shutdown
 */
public void objectTerminated(Object object, Exception e)
{
    if (e != null)
    {
        VdmDebugPlugin.log(e);
        final Job job = new Job("ServerRestart")
        {
            protected IStatus run(IProgressMonitor monitor)
            {
                restartServer(serverPort);
                return Status.OK_STATUS;
            }
        };
        // NOTE(review): 2-second delay before restarting -- presumably to
        // let the failed server wind down first; confirm.
        job.schedule(2000);
    }
}
|
python
|
def create_language_model(list_words,
                          grammar_filename,
                          wdnet_filename):
    """Create a language model (wdnet) for HTK using a very simple single-word grammar"""
    # Assemble the temporary grammar text in memory, then write it out.
    grammar_parts = [
        u'$possible_words = ',
        u' | '.join(list_words),
        u';\n',
        u'( [SENT-START] ( $possible_words ) [SENT-END] )\n',
    ]
    with codecs.open(grammar_filename, 'w', 'utf-8') as f:
        f.write(u''.join(grammar_parts))
    # Use HParse to create the wdnet file
    config.htk_command('HParse -A {} {}'.format(grammar_filename, wdnet_filename))
    return
|
java
|
/**
 * Parses a package-info class resource and extracts its trace
 * configuration annotations via ASM.
 *
 * @param packageInfoResource stream over the package-info class bytes; may be null
 * @return the extracted PackageInfo, or null if the resource is null or unreadable
 */
protected PackageInfo processPackageInfo(InputStream packageInfoResource) {
    if (packageInfoResource == null)
        return null;
    PackageInfo packageInfo = null;
    try {
        ClassReader cr = new ClassReader(packageInfoResource);
        TraceConfigPackageVisitor packageVisitor = new TraceConfigPackageVisitor();
        // Only annotations are needed: skip code, debug info and stack frames.
        cr.accept(packageVisitor, ClassReader.SKIP_CODE | ClassReader.SKIP_DEBUG | ClassReader.SKIP_FRAMES);
        packageInfo = packageVisitor.getPackageInfo();
    } catch (IOException ioe) {
        // NOTE(review): prints to stderr and falls through to return null;
        // consider routing through the project's logging facility instead.
        ioe.printStackTrace();
    }
    return packageInfo;
}
|
python
|
def get_next(self):
    """Return next task from the stack that has all dependencies resolved.

    Return None if there are no tasks with resolved dependencies or there
    are no more tasks on the stack.
    Use `count` to check whether there are still tasks left on the stack.

    raise ValueError if total ordering is not possible."""
    self.update_tasks_status()
    # Re-run the topological sort only when the task set changed.
    if self.dirty:
        self.tsort()
        self.dirty = False
    # NOTE(review): .iteritems() is Python-2 only; a Python 3 port would
    # need .items(). The `key` variable is unused.
    for key, task in self.tasks.iteritems():
        if task.is_new() and task.has_resolved_dependencies():
            return task
    return None
|
python
|
def delete(self, redis):
    ''' Deletes this field's value from the database. Should be implemented
    in special cases '''
    # Only indexed fields keep a redis hash entry that needs removal.
    # NOTE(review): the hash field removed is keyed by the field's current
    # value on the object -- confirm this matches the index layout used
    # when the value was written.
    if self.index:
        redis.hdel(self.key(), getattr(self.obj, self.name))
|
python
|
def start(self):
    """
    Start worker processes and a control loop

    Spawns `num_workers` worker processes, each reading tasks from a PULL
    socket, then blocks serving control commands (stop/kill/getpid/
    get_num_workers) on a REP socket until stop or kill is received.
    """
    setproctitle('oq-zworkerpool %s' % self.ctrl_url[6:])  # strip tcp://
    # start workers
    self.workers = []
    for _ in range(self.num_workers):
        sock = z.Socket(self.task_out_port, z.zmq.PULL, 'connect')
        proc = multiprocessing.Process(target=self.worker, args=(sock,))
        proc.start()
        # Remember each worker's pid on its socket so stop/kill can use it.
        sock.pid = proc.pid
        self.workers.append(sock)
    # start control loop accepting the commands stop and kill
    with z.Socket(self.ctrl_url, z.zmq.REP, 'bind') as ctrlsock:
        for cmd in ctrlsock:
            if cmd in ('stop', 'kill'):
                # Dispatch to self.stop() / self.kill() and reply with its result.
                msg = getattr(self, cmd)()
                ctrlsock.send(msg)
                break
            elif cmd == 'getpid':
                ctrlsock.send(self.pid)
            elif cmd == 'get_num_workers':
                ctrlsock.send(self.num_workers)
|
java
|
/**
 * Reads a double whose bits were transformed on write so the encoded form
 * sorts correctly: when the sign bit is set, the remaining 63 bits are
 * flipped back before conversion. (See the matching writeDouble for the
 * forward transform.)
 *
 * @return the decoded double value
 * @throws IOException if reading the underlying long fails
 */
@Override
public double readDouble() throws IOException {
    long bits = readLong();
    if (bits < 0) {
        // Sign bit set: undo the bit flip applied to negative values on write.
        bits ^= 0x7FFFFFFFFFFFFFFFL;
    }
    return Double.longBitsToDouble(bits);
}
|
java
|
/**
 * Returns the keys of the map addressed by {@code pathString} (dot
 * notation), rendered as strings. Yields an empty set when the path does
 * not resolve to a map.
 */
public static Set<String> dotGetKeysIn(final Map map, final String pathString) {
    final Set<String> keys = new HashSet<>();
    dotGetMap(map, pathString).ifPresent(resolved -> {
        for (final Object key : resolved.keySet()) {
            keys.add(key.toString());
        }
    });
    return keys;
}
|
java
|
/**
 * Stores a label and its score at position {@code i} of the parallel
 * {@code labels}/{@code scores} arrays.
 *
 * @param i      index to write; must be within the arrays' bounds
 * @param label2 the label to store
 * @param d      the score associated with the label
 */
public void set(int i, T label2, float d) {
    labels[i] = label2;
    scores[i] = d;
}
|
java
|
/**
 * Returns the character set declared by the entity's content, falling back
 * to {@code HTTP.DEFAULT_CONTENT_CHARSET} when none is declared.
 *
 * NOTE(review): EntityUtils.getContentCharSet is deprecated in later
 * HttpClient versions in favour of ContentType#getCharset -- confirm the
 * pinned dependency version before migrating.
 *
 * @param entity the HTTP entity to inspect
 * @return the declared charset name, or the HTTP default if absent
 */
public static String getCharset(HttpEntity entity) {
    final String guess = EntityUtils.getContentCharSet(entity);
    return guess == null ? HTTP.DEFAULT_CONTENT_CHARSET : guess;
}
|
java
|
/**
 * Returns the explicitly assigned stack-trace string when present,
 * otherwise renders this object's stack frames joined by single spaces.
 * Returns an empty string when there are no frames.
 */
@JsonProperty("stack_trace")
public String getStackTraceString() {
    if (_stackTrace != null) {
        return _stackTrace;
    }
    StackTraceElement[] frames = getStackTrace();
    StringBuilder rendered = new StringBuilder();
    if (frames != null) {
        for (StackTraceElement frame : frames) {
            if (rendered.length() > 0) {
                rendered.append(' ');
            }
            rendered.append(frame.toString());
        }
    }
    return rendered.toString();
}
|
java
|
/**
 * Evaluates a negation specifier by delegating to {@code check} with a
 * single-element list containing the negation's inner selector.
 *
 * @param specifier the negation specifier whose inner selector is checked
 * @return the nodes matched by the inner selector
 * @throws NodeSelectorException if evaluation of the inner selector fails
 */
private Set<Node> checkNegationSpecifier(NegationSpecifier specifier) throws NodeSelectorException {
    List<Selector> parts = new ArrayList<Selector>(1);
    parts.add(specifier.getSelector());
    return check(parts);
}
|
java
|
/**
 * Writes a formatted message to {@link System#err}.
 *
 * @param format the format string
 * @param args   arguments referenced by the format specifiers
 */
public void errFormat(String format, Object... args){
    format(System.err, format, args);
}
|
python
|
def LDR(self, params):
    """
    LDR Ra, [PC, #imm10_4]
    LDR Ra, label
    LDR Ra, =equate
    LDR Ra, [Rb, Rc]
    LDR Ra, [Rb, #imm7_4]
    LDR Ra, [SP, #imm10_4]

    Load a word from memory into Ra

    Ra, Rb, and Rc must be low registers
    """
    # TODO definition for PC is Ra <- M[PC + Imm10_4], Imm10_4 = PC - label, need to figure this one out
    try:
        # Three-parameter form: LDR Ra, [Rb, Rc-or-#imm]
        Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    except iarm.exceptions.ParsingError:
        # Two-parameter forms: label, =equate, or [Rb]
        Ra, label_name = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
        if label_name.startswith('='):
            # This is a pseudoinstructions
            label_name = label_name[1:]
            # TODO add check that label is a 32 bit number
            # TODO This does not work on instruction loading. This interpreter follows a harvard like architecture,
            # TODO while ARMv6-M (Cortex-M0+) is a Von Neumann architeture. Instructions will not be decompiled
            self.check_arguments(low_registers=(Ra,))
            if label_name in self.labels:
                label_value = self.labels[label_name]
            elif label_name in self.equates:
                label_value = self.equates[label_name]
            else:
                try:
                    label_value = int(self.convert_to_integer(label_name))
                except ValueError:
                    warnings.warn(iarm.exceptions.LabelDoesNotExist("Label `{}` does not exist or is not a parsable number. If it is a label, make sure it exists before running".format(label_name)))
                    label_value = None
            if label_value is not None and int(label_value) % 4 != 0:
                # Make sure we are word aligned
                raise iarm.exceptions.IarmError("Memory access not word aligned; Immediate: {}".format(int(label_value)))
        elif label_name.startswith('[') and label_name.endswith(']'):
            # TODO improve this
            Rb = label_name[1:-1]
            if Rb == 'SP' or Rb == 'R13':
                self.check_arguments(low_registers=(Ra,))
            else:
                self.check_arguments(low_registers=(Ra, label_name))
            def LDR_func():
                if self.memory[Rb] % 4 != 0:
                    raise iarm.exceptions.HardFault(
                        "Memory access not word aligned; Register: {} Immediate: {}".format(self.register[Rb],
                                                                                            self.convert_to_integer(
                                                                                                Rc[1:])))
                self.register[Ra] = 0
                # Assemble the 32-bit word from four consecutive bytes.
                for i in range(4):
                    self.register[Ra] |= (self.memory[self.register[Rb] + i] << (8 * i))
            return LDR_func
        else:
            self.check_arguments(low_registers=(Ra,), label_exists=(label_name,))
            try:
                label_value = self.labels[label_name]
                if label_value >= 1024:
                    raise iarm.exceptions.IarmError("Label {} has value {} and is greater than 1020".format(label_name, label_value))
                if label_value % 4 != 0:
                    raise iarm.exceptions.IarmError("Label {} has value {} and is not word aligned".format(label_name, label_value))
            except KeyError:
                # Label doesn't exist, nothing we can do about that except maybe raise an exception now,
                # But we're avoiding that elsewhere, might as well avoid it here too
                pass
        def LDR_func():
            nonlocal label_value
            # Since we can get a label that didn't exist in the creation step, We need to check it here
            # TODO is there a way for label_value to not exist?
            if label_value is None:
                # Try to get it again
                if label_name in self.labels:
                    label_value = self.labels[label_name]
                elif label_name in self.equates:
                    label_value = self.equates[label_name]
                # If it is still None, then it never got allocated
                if label_value is None:
                    raise iarm.exceptions.IarmError("label `{}` does not exist. Was space allocated?".format(label_name))
                # It does exist, make sure its word aligned
                if int(label_value) % 4 != 0:
                    raise iarm.exceptions.IarmError("Memory access not word aligned; Immediate: {}".format(int(label_value)))
            try:
                self.register[Ra] = int(label_value)
            except ValueError:
                # TODO Can we even get to this path now?
                self.register[Ra] = self.labels[label_name]
        return LDR_func
    # Three-parameter path: immediate offset or register offset.
    if self.is_immediate(Rc):
        if Rb == 'SP' or Rb == 'R15':
            self.check_arguments(low_registers=(Ra,), imm10_4=(Rc,))
        else:
            self.check_arguments(low_registers=(Ra, Rb), imm7_4=(Rc,))
        def LDR_func():
            # TODO does memory read up?
            if (self.register[Rb] + self.convert_to_integer(Rc[1:])) % 4 != 0:
                raise iarm.exceptions.HardFault("Memory access not word aligned; Register: {} Immediate: {}".format(self.register[Rb], self.convert_to_integer(Rc[1:])))
            self.register[Ra] = 0
            for i in range(4):
                self.register[Ra] |= (self.memory[self.register[Rb] + self.convert_to_integer(Rc[1:]) + i] << (8 * i))
    else:
        self.check_arguments(low_registers=(Ra, Rb, Rc))
        def LDR_func():
            # TODO does memory read up?
            if (self.register[Rb] + self.register[Rc]) % 4 != 0:
                raise iarm.exceptions.HardFault(
                    "Memory access not word aligned; Register: {} Immediate: {}".format(self.register[Rb],
                                                                                        self.convert_to_integer(
                                                                                            Rc[1:])))
            self.register[Ra] = 0
            for i in range(4):
                self.register[Ra] |= (self.memory[self.register[Rb] + self.register[Rc] + i] << (8 * i))
    return LDR_func
|
java
|
/**
 * Enqueues {@code task} for delayed execution according to
 * {@code options}, lazily starting the worker threads on first use.
 *
 * NOTE(review): the unsynchronized read of {@code started} is racy if
 * schedule() may be called concurrently -- confirm the intended threading
 * model (startThreads() may need to be idempotent or guarded).
 *
 * @param options scheduling options consumed by DelayedTask
 * @param task    the work to run
 * @return a future representing the queued task
 */
public ScheduledFuture<?> schedule(Options options, Runnable task) {
    if (!started) {
        startThreads();
    }
    DelayedTask t = new DelayedTask(clock, options, task);
    queue.put(t);
    return t;
}
|
java
|
/**
 * Computes the log-likelihood contribution of the anomalous objects:
 * {@code -n * log(n)} for {@code n} anomalous objects, or 0 when the set
 * is empty.
 */
private double loglikelihoodAnomalous(DBIDs anomalousObjs) {
    if (anomalousObjs.isEmpty()) {
        return 0;
    }
    final int count = anomalousObjs.size();
    return count * -FastMath.log(count);
}
|
java
|
/**
 * Populates this entity from a bean, deriving the table name from the
 * bean's class when no table name has been set yet (underscore-cased or
 * lower-first-cased depending on {@code isToUnderlineCase}).
 */
@Override
public <T> Entity parseBean(T bean, boolean isToUnderlineCase, boolean ignoreNullValue) {
    if (StrUtil.isBlank(this.tableName)) {
        final String simpleName = bean.getClass().getSimpleName();
        final String derivedName = isToUnderlineCase
                ? StrUtil.toUnderlineCase(simpleName)
                : StrUtil.lowerFirst(simpleName);
        this.setTableName(derivedName);
    }
    return (Entity) super.parseBean(bean, isToUnderlineCase, ignoreNullValue);
}
|
java
|
/**
 * Runs the configured fallback action (if any) when the command fails,
 * normalizing its result to an Observable; otherwise defers to the
 * default Hystrix fallback behaviour.
 */
@Override
protected Observable resumeWithFallback() {
    if (commandActions.hasFallbackAction()) {
        MetaHolder metaHolder = commandActions.getFallbackAction().getMetaHolder();
        Throwable cause = getExecutionException();
        // Unwrap the wrapper exception so the fallback sees the real cause.
        if (cause instanceof CommandActionExecutionException) {
            cause = cause.getCause();
        }
        Object[] args = createArgsForFallback(metaHolder, cause);
        try {
            Object res = commandActions.getFallbackAction().executeWithArgs(executionType, args);
            // Normalize the supported rx return types to an Observable.
            if (res instanceof Observable) {
                return (Observable) res;
            } else if (res instanceof Single) {
                return ((Single) res).toObservable();
            } else if (res instanceof Completable) {
                return ((Completable) res).toObservable();
            } else {
                return Observable.just(res);
            }
        } catch (Exception e) {
            LOGGER.error(AbstractHystrixCommand.FallbackErrorMessageBuilder.create()
                    .append(commandActions.getFallbackAction(), e).build());
            // NOTE(review): e.getCause() can be null for directly thrown
            // exceptions, which would lose the original failure -- confirm.
            throw new FallbackInvocationException(e.getCause());
        }
    }
    return super.resumeWithFallback();
}
|
python
|
def filename(self):
    """
    Return a human-readable filename for this transcript.

    Built as "<video id without extension>-<language>.<format>", with any
    newline characters replaced by spaces.
    """
    base_name = os.path.splitext(self.video.client_video_id)[0]
    raw_name = u'{name}-{language}.{format}'.format(
        name=base_name,
        language=self.language_code,
        format=self.file_format,
    )
    return raw_name.replace('\n', ' ')
|
python
|
def indented_tree_line_generator(el, max_lines=None):
    """
    Like tree_line_generator, but yields tuples (start_ref, end_ref, line),
    where the line already takes the indentation into account by having "> "
    prepended. If a line already starts with ">", it is escaped ("\\>"). This
    makes it possible to reliably use methods that analyze plain text to detect
    quoting.
    """
    for start_ref, end_ref, depth, raw_line in tree_line_generator(el, max_lines):
        # Escape pre-existing quote markers so genuine quoting stays detectable.
        text = ('\\' + raw_line) if raw_line.startswith('>') else raw_line
        yield start_ref, end_ref, '> ' * depth + text
|
python
|
def run(self):
    """Render and display Python package documentation.
    """
    # NOTE(review): presumably signals to subprocesses that this tool is
    # driving them -- confirm against the callees.
    os.environ['JARN_RUN'] = '1'
    self.python.check_valid_python()
    args = self.parse_options(self.args)
    # Default to the current directory when no argument is given.
    if args:
        arg = args[0]
    else:
        arg = os.curdir
    if arg:
        arg = expanduser(arg)
    # A file is rendered directly; a directory is treated as a package whose
    # long_description is rendered; anything else is a fatal error.
    if isfile(arg):
        outfile = self.render_file(arg)
    elif isdir(arg):
        outfile = self.render_long_description(arg)
    else:
        err_exit('No such file or directory: %s' % arg)
    self.open_in_browser(outfile)
|
python
|
def initialize(self, session=None, force=False):
    """
    Initializes TensorFlow variables, which are returned by `initializables` property and
    uses feed dictionary returned by `initializable_feeds` property defined at ICompilable
    interface and implemented by descendants.

    :param session: TensorFlow session used for initializing. In case when session is None,
        default TensorFlow session will be checked first, if session is still None, then
        default GPflow session will be used, but there is *no guarantee* that GPflow
        session's graph is compliant with node's tensors graph.
    :param force: indicates whether the initialized TensorFlow variables must be
        re-initialized or not.

    :raises: GPflowError exception if session's graph is different from the graph
        used by node tensors.
    """
    session = self.enquire_session(session)
    initializables = self.initializables
    # Nothing to do when the node exposes no initializable variables.
    if initializables:
        misc.initialize_variables(
            variables=initializables,
            session=session,
            force=force,
            feed_dict=self.initializable_feeds)
|
python
|
def run(self, args=(), env={}):
    """
    Calls the package manager with the arguments.

    Returns decoded output of stdout and stderr; decoding determined
    by locale.
    """
    # NOTE(review): mutable default `env={}` is shared across calls -- safe
    # only as long as no callee mutates it.
    # the following will call self._get_exec_binary
    return self._exec(self.binary, args=args, env=env)
|
python
|
def clear_to_enc_filename(fname):
    """
    Converts the filename of a cleartext file and convert it to an encrypted filename

    :param fname:
    :return: filename of encrypted secret file if found, else None
    """
    lowered = fname.lower()
    if not lowered.endswith('.json'):
        raise CredkeepException('Invalid filetype')
    if lowered.endswith('.enc.json'):
        raise CredkeepException('File already encrypted')
    # "x.json" -> "x.enc.json" (the trailing "json" is replaced).
    candidate = fname[:-4] + 'enc.json'
    if exists(candidate):
        return candidate
    return None
|
java
|
/**
 * Presents a weighted item to the sketch, routing it to the exact-mode
 * warmup path or one of the estimation-mode update paths.
 *
 * @param item   the item to add; null items are silently ignored
 * @param weight the item's weight; must be strictly positive
 * @param mark   auxiliary flag stored with the item
 * @throws SketchesArgumentException if weight is not strictly positive
 */
void update(final T item, final double weight, final boolean mark) {
  if (item == null) {
    return;
  }
  if (weight <= 0.0) {
    throw new SketchesArgumentException("Item weights must be strictly positive: "
        + weight + ", for item " + item.toString());
  }
  ++n_;
  if (r_ == 0) {
    // exact mode
    updateWarmupPhase(item, weight, mark);
  } else {
    // sketch is in estimation mode, so we can make the following check
    assert (h_ == 0) || (peekMin() >= getTau());
    // what tau would be if deletion candidates turn out to be R plus the new item
    // note: (r_ + 1) - 1 is intentional
    final double hypotheticalTau = (weight + totalWtR_) / ((r_ + 1) - 1);
    // is new item's turn to be considered for reservoir?
    final boolean condition1 = (h_ == 0) || (weight <= peekMin());
    // is new item light enough for reservoir?
    final boolean condition2 = weight < hypotheticalTau;
    if (condition1 && condition2) {
      updateLight(item, weight, mark);
    } else if (r_ == 1) {
      updateHeavyREq1(item, weight, mark);
    } else {
      updateHeavyGeneral(item, weight, mark);
    }
  }
}
|
java
|
/**
 * Renders the bit vector as a string of '0'/'1' digits, least-significant
 * bit first, producing magnitude(v) digits.
 *
 * @param v the bit vector as an array of 64-bit words; may be null
 * @return "null" for a null array, "0" when magnitude is zero, otherwise
 *         the low-order-first digit string
 */
public static String toStringLow(long[] v) {
    if(v == null) {
        return "null";
    }
    final int mag = magnitude(v);
    if(mag == 0) {
        return "0";
    }
    char[] digits = new char[mag];
    int pos = 0;
    // Walk words from the low end, emitting one digit per bit until
    // `mag` digits have been produced.
    outer: for(int w = 0; w < v.length; w++) {
        long f = 1L;
        for(int i = 0; i < Long.SIZE; i++) {
            digits[pos] = ((v[w] & f) == 0) ? '0' : '1';
            f <<= 1;
            ++pos;
            if(pos >= mag) {
                break outer;
            }
        }
    }
    // Defensive zero-fill for the case where the words run out before
    // `mag` digits are emitted; unreachable if magnitude() never exceeds
    // the array's bit capacity -- confirm.
    for(; pos < mag; ++pos) {
        digits[pos] = '0';
    }
    return new String(digits);
}
|
python
|
def what_time_is_it_in(self, message, place):
    """what time is it in ___: Say the time in almost any city on earth."""
    location = get_location(place)
    if location is not None:
        tz = get_timezone(location.lat, location.long)
        if tz is not None:
            ct = datetime.datetime.now(tz=pytz.timezone(tz))
            self.say("It's %(time)s in %(place)s." % {'time': self.to_natural_day_and_time(ct),
                                                      'place': location.name}, message=message)
        else:
            self.say("I couldn't find timezone for %(place)s." % {'place': location.name}, message=message)
    else:
        # BUG FIX: location is None on this branch, so the old code's
        # `location.name` raised AttributeError. Echo the user's query instead.
        self.say("I couldn't find anywhere named %(place)s." % {'place': place}, message=message)
|
python
|
def name(self) -> Optional[str]:
    """Returns name specified in Content-Disposition header or None
    if missed or header is malformed.
    """
    disposition = self.headers.get(CONTENT_DISPOSITION)
    _, params = parse_content_disposition(disposition)
    return content_disposition_filename(params, 'name')
|
java
|
/**
 * Binary-searches a list of temporal propositions ordered by maximum
 * finish time. On an exact match returns the index just past the matching
 * element; otherwise returns the insertion point (high + 1). A null
 * maximum finish compares as greater than any timestamp.
 *
 * @param list   the list to search; assumed sorted by maximum finish
 * @param tstamp the timestamp to compare maximum finishes against
 * @return an index as described above
 */
private static int maxFinishIndexedBinarySearch(
        List<? extends TemporalProposition> list, long tstamp) {
    int low = 0;
    int high = list.size() - 1;
    while (low <= high) {
        /*
         * We use >>> instead of >> or /2 to avoid overflow. Sun's
         * implementation of binary search actually doesn't do this (bug
         * #5045582).
         */
        int mid = (low + high) >>> 1;
        TemporalProposition midVal = list.get(mid);
        Long maxFinish = midVal.getInterval().getMaximumFinish();
        // Null maximum finish is treated as "after" tstamp.
        int cmp = maxFinish != null ? maxFinish.compareTo(tstamp) : 1;
        if (cmp < 0) {
            low = mid + 1;
        } else if (cmp > 0) {
            high = mid - 1;
        } else {
            return mid + 1;
        }
    }
    return high + 1;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.