language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
|---|---|
java
|
/**
 * Splits an encoded query string into an alternating list of names and values.
 * Each name/value pair contributes two entries; a parameter without '=' gets a
 * null value. No percent-decoding is performed.
 */
static List<String> queryStringToNamesAndValues(String encodedQuery) {
    List<String> namesAndValues = new ArrayList<>();
    int pos = 0;
    while (pos <= encodedQuery.length()) {
        int nextAmpersand = encodedQuery.indexOf('&', pos);
        if (nextAmpersand == -1) {
            nextAmpersand = encodedQuery.length();
        }
        int nextEquals = encodedQuery.indexOf('=', pos);
        if (nextEquals != -1 && nextEquals <= nextAmpersand) {
            namesAndValues.add(encodedQuery.substring(pos, nextEquals));
            namesAndValues.add(encodedQuery.substring(nextEquals + 1, nextAmpersand));
        } else {
            // A bare parameter name (no '=' before the next '&') has a null value.
            namesAndValues.add(encodedQuery.substring(pos, nextAmpersand));
            namesAndValues.add(null);
        }
        pos = nextAmpersand + 1;
    }
    return namesAndValues;
}
|
java
|
/**
 * Deploys a Flink cluster on YARN: validates configuration and resources,
 * creates the YARN application, starts the application master and returns a
 * client for the new cluster.
 *
 * @param clusterSpecification  requested resources for the cluster
 * @param applicationName       display name for the YARN application
 * @param yarnClusterEntrypoint fully-qualified entrypoint class to launch
 * @param jobGraph              job to bundle with the app master, or null for a session cluster
 * @param detached              whether the client detaches after submission
 * @throws Exception if any deployment stage fails; a half-created YARN
 *         application is released before the failure is propagated
 */
protected ClusterClient<ApplicationId> deployInternal(
        ClusterSpecification clusterSpecification,
        String applicationName,
        String yarnClusterEntrypoint,
        @Nullable JobGraph jobGraph,
        boolean detached) throws Exception {

    // ------------------ Check if configuration is valid --------------------
    validateClusterSpecification(clusterSpecification);

    if (UserGroupInformation.isSecurityEnabled()) {
        // note: UGI::hasKerberosCredentials inaccurately reports false
        // for logins based on a keytab (fixed in Hadoop 2.6.1, see HADOOP-10786),
        // so we check only in ticket cache scenario.
        boolean useTicketCache = flinkConfiguration.getBoolean(SecurityOptions.KERBEROS_LOGIN_USETICKETCACHE);

        UserGroupInformation loginUser = UserGroupInformation.getCurrentUser();
        if (loginUser.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.KERBEROS
                && useTicketCache && !loginUser.hasKerberosCredentials()) {
            LOG.error("Hadoop security with Kerberos is enabled but the login user does not have Kerberos credentials");
            throw new RuntimeException("Hadoop security with Kerberos is enabled but the login user " +
                "does not have Kerberos credentials");
        }
    }

    isReadyForDeployment(clusterSpecification);

    // ------------------ Check if the specified queue exists --------------------
    checkYarnQueues(yarnClient);

    // ------------------ Add dynamic properties to local flinkConfiguraton ------
    Map<String, String> dynProperties = getDynamicProperties(dynamicPropertiesEncoded);
    for (Map.Entry<String, String> dynProperty : dynProperties.entrySet()) {
        flinkConfiguration.setString(dynProperty.getKey(), dynProperty.getValue());
    }

    // ------------------ Check if the YARN ClusterClient has the requested resources --------------

    // Create application via yarnClient
    final YarnClientApplication yarnApplication = yarnClient.createApplication();
    final GetNewApplicationResponse appResponse = yarnApplication.getNewApplicationResponse();

    Resource maxRes = appResponse.getMaximumResourceCapability();

    final ClusterResourceDescription freeClusterMem;
    try {
        freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    } catch (YarnException | IOException e) {
        // release the application we just created before failing
        failSessionDuringDeployment(yarnClient, yarnApplication);
        throw new YarnDeploymentException("Could not retrieve information about free cluster resources.", e);
    }

    final int yarnMinAllocationMB = yarnConfiguration.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);

    final ClusterSpecification validClusterSpecification;
    try {
        // normalize the requested resources against the cluster's limits
        validClusterSpecification = validateClusterResources(
            clusterSpecification,
            yarnMinAllocationMB,
            maxRes,
            freeClusterMem);
    } catch (YarnDeploymentException yde) {
        failSessionDuringDeployment(yarnClient, yarnApplication);
        throw yde;
    }

    LOG.info("Cluster specification: {}", validClusterSpecification);

    final ClusterEntrypoint.ExecutionMode executionMode = detached ?
        ClusterEntrypoint.ExecutionMode.DETACHED
        : ClusterEntrypoint.ExecutionMode.NORMAL;

    flinkConfiguration.setString(ClusterEntrypoint.EXECUTION_MODE, executionMode.toString());

    ApplicationReport report = startAppMaster(
        flinkConfiguration,
        applicationName,
        yarnClusterEntrypoint,
        jobGraph,
        yarnClient,
        yarnApplication,
        validClusterSpecification);

    String host = report.getHost();
    int port = report.getRpcPort();

    // Correctly initialize the Flink config
    flinkConfiguration.setString(JobManagerOptions.ADDRESS, host);
    flinkConfiguration.setInteger(JobManagerOptions.PORT, port);

    flinkConfiguration.setString(RestOptions.ADDRESS, host);
    flinkConfiguration.setInteger(RestOptions.PORT, port);

    // the Flink cluster is deployed in YARN. Represent cluster
    return createYarnClusterClient(
        this,
        validClusterSpecification.getNumberTaskManagers(),
        validClusterSpecification.getSlotsPerTaskManager(),
        report,
        flinkConfiguration,
        true);
}
|
java
|
/**
 * Unsafely treats a possibly-null reference as non-null for the nullness
 * checker. The reference itself is returned unchanged; callers are responsible
 * for guaranteeing it is actually non-null at this point.
 */
@SuppressWarnings("nullness")
private static <T> T castNonNull(@javax.annotation.Nullable T ref) {
    return ref;
}
|
java
|
/**
 * Builds a custom soft-delete SQL statement: an UPDATE that applies the
 * caller-supplied SET clause, stamps delete-time columns, and restricts the
 * affected row via the entity's key columns plus the soft-delete condition.
 *
 * @param t      entity instance providing table metadata and key values
 * @param values output list; the WHERE-clause key values are appended to it
 * @param setSql caller-supplied SET clause, with or without a leading "set "
 * @return the assembled UPDATE statement
 * @throws NullKeyValueException if any key column value on {@code t} is null
 */
public static <T> String getCustomDeleteSQL(T t, List<Object> values, String setSql) {
    StringBuilder sql = new StringBuilder();
    sql.append("UPDATE ");

    Table table = DOInfoReader.getTable(t.getClass());
    List<Field> fields = DOInfoReader.getColumns(t.getClass());
    List<Field> keyFields = DOInfoReader.getKeyColumns(t.getClass());

    sql.append(getTableName(table)).append(" ");
    // accept a SET clause whether or not the caller already wrote the keyword
    if(setSql.trim().toLowerCase().startsWith("set ")) {
        sql.append(setSql);
    } else {
        sql.append("SET ").append(setSql);
    }
    // also stamp the delete time on Date-typed columns flagged setTimeWhenDelete()
    for(Field field : fields) {
        Column column = field.getAnnotation(Column.class);
        if(column.setTimeWhenDelete() && Date.class.isAssignableFrom(field.getType())) {
            sql.append(",").append(getColumnName(column))
                .append("=").append(getDateString(new Date()));
        }
    }

    List<Object> whereValues = new ArrayList<Object>();
    String where = "WHERE " + joinWhereAndGetValue(keyFields, "AND", whereValues, t);
    // a null key value would match unintended rows, so fail fast
    for(Object value : whereValues) {
        if(value == null) {
            throw new NullKeyValueException();
        }
    }
    values.addAll(whereValues);

    sql.append(autoSetSoftDeleted(where, t.getClass()));

    return sql.toString();
}
|
python
|
def zscan(self, key, cursor=0, match=None, count=None):
    """Incrementally iterate sorted set members and their scores.

    Issues a ZSCAN and converts the reply into a
    ``(next_cursor, [(member, score), ...])`` tuple.
    """
    extra_args = []
    if match is not None:
        extra_args.extend([b'MATCH', match])
    if count is not None:
        extra_args.extend([b'COUNT', count])
    fut = self.execute(b'ZSCAN', key, cursor, *extra_args)

    def _convert(reply):
        # reply is [cursor, flat member/score list]
        return (int(reply[0]), pairs_int_or_float(reply[1]))
    return wait_convert(fut, _convert)
|
python
|
def parents(self):
    # type: () -> List[CommitDetails]
    """Parents of this commit, resolved lazily and cached on the instance."""
    if self._parents is None:
        self._parents = [
            CommitDetails.get(sha1) for sha1 in self.parents_sha1
        ]
    return self._parents
|
java
|
@Override
public void injectServices(ServiceRegistryImplementor serviceRegistry) {
    // Forward the registry only to dialects that declare an interest in it.
    if ( !( gridDialect instanceof ServiceRegistryAwareService ) ) {
        return;
    }
    ServiceRegistryAwareService awareDialect = (ServiceRegistryAwareService) gridDialect;
    awareDialect.injectServices( serviceRegistry );
}
|
python
|
def set_link_status(link_id, status, **kwargs):
    """
    Set the status of a link.

    :param link_id: id of the link to update
    :param status: new status value to store on the link
    :keyword user_id: id of the acting user; must have write permission on the
        link's network
    :raises ResourceNotFoundError: if no link with ``link_id`` exists
    """
    user_id = kwargs.get('user_id')
    #check_perm(user_id, 'edit_topology')
    try:
        link_i = db.DBSession.query(Link).filter(Link.id == link_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Link %s not found"%(link_id))
    # permission is enforced at the network level, not per-link
    link_i.network.check_write_permission(user_id)
    link_i.status = status
    # flush (not commit) — the caller/session owner controls the transaction
    db.DBSession.flush()
|
python
|
def get_power_status() -> SystemPowerStatus:
    """Retrieves the power status of the system via Win32 GetSystemPowerStatus.

    The status indicates whether the system is running on AC or DC power,
    whether the battery is currently charging, how much battery life remains,
    and if battery saver is on or off.

    :raises OSError: if the call to GetSystemPowerStatus fails
    :return: the power status
    :rtype: SystemPowerStatus
    """
    win32_get_power_status = ctypes.windll.kernel32.GetSystemPowerStatus
    win32_get_power_status.argtypes = [ctypes.POINTER(SystemPowerStatus)]
    win32_get_power_status.restype = wintypes.BOOL
    status = SystemPowerStatus()
    # GetSystemPowerStatus returns a zero BOOL on failure.
    if win32_get_power_status(ctypes.pointer(status)):
        return status
    raise ctypes.WinError()
|
java
|
/**
 * Recomputes the clip rectangle of the decoration painter so the decoration is
 * only painted where it is actually visible inside its clipping ancestor.
 * Clears the clip (no painting) when there is no layered pane, no clipping
 * ancestor, the ancestor is not showing, or the visible intersection is empty.
 *
 * @param relativeLocationToOwner decoration offset relative to the decorated component
 */
private void updateDecorationPainterClippedBounds(JLayeredPane layeredPane, Point relativeLocationToOwner) {
    if (layeredPane == null) {
        decorationPainter.setClipBounds(null);
    } else {
        JComponent clippingComponent = getEffectiveClippingAncestor();
        if (clippingComponent == null) {
            LOGGER.error("No decoration clipping component can be found for decorated component: " +
                decoratedComponent);
            decorationPainter.setClipBounds(null);
        } else if (clippingComponent.isShowing()) {
            // Decoration bounds expressed in the decorated component's parent
            Rectangle ownerBoundsInParent = decoratedComponent.getBounds();
            Rectangle decorationBoundsInParent = new Rectangle(ownerBoundsInParent.x + relativeLocationToOwner.x,
                ownerBoundsInParent.y + relativeLocationToOwner.y, getWidth(), getHeight());
            // Convert into the clipping ancestor's coordinate space
            Rectangle decorationBoundsInAncestor = SwingUtilities.convertRectangle(decoratedComponent.getParent()
                , decorationBoundsInParent, clippingComponent);
            // Keep only the part visible within the ancestor's viewport
            Rectangle decorationVisibleBoundsInAncestor;
            Rectangle ancestorVisibleRect = clippingComponent.getVisibleRect();
            decorationVisibleBoundsInAncestor = ancestorVisibleRect.intersection(decorationBoundsInAncestor);
            if ((decorationVisibleBoundsInAncestor.width == 0) || (decorationVisibleBoundsInAncestor.height == 0)) {
                // No bounds, no painting
                decorationPainter.setClipBounds(null);
            } else {
                // Convert ancestor -> layered pane -> painter coordinates
                Rectangle decorationVisibleBoundsInLayeredPane = SwingUtilities.convertRectangle
                    (clippingComponent, decorationVisibleBoundsInAncestor, layeredPane);
                // Clip graphics context
                Rectangle clipBounds = SwingUtilities.convertRectangle(decorationPainter.getParent(),
                    decorationVisibleBoundsInLayeredPane, decorationPainter);
                decorationPainter.setClipBounds(clipBounds);
            }
        } else {
            // This could happen for example when a dialog is closed, so no need to log anything
            decorationPainter.setClipBounds(null);
        }
    }
}
|
java
|
/**
 * Sets this parameter's value from a raw query token, unescaping FHIR
 * search-parameter escape sequences first. The qualifier and parameter name
 * are not used by this parameter type.
 */
@Override
void doSetValueAsQueryToken(FhirContext theContext, String theParamName, String theQualifier, String theParameter) {
    setValue(ParameterUtil.unescape(theParameter));
}
|
python
|
def _do_batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Batched write commands entry point.

    Dispatches to the compressed variant when the connection negotiated a
    compression context, otherwise to the plain implementation.
    """
    if ctx.sock_info.compression_context:
        handler = _batched_write_command_compressed
    else:
        handler = _batched_write_command
    return handler(namespace, operation, command, docs, check_keys, opts, ctx)
|
python
|
def bootstrap_methods(self) -> BootstrapMethod:
    """
    Returns the bootstrap methods table from the BootstrapMethods attribute,
    creating an empty attribute first if none exists.

    :returns: Table of `BootstrapMethod` objects.
    """
    attribute = self.attributes.find_one(name='BootstrapMethods')
    if attribute is None:
        # Lazily create the attribute so callers always get a usable table.
        attribute = self.attributes.create(
            ATTRIBUTE_CLASSES['BootstrapMethods']
        )
    return attribute.table
|
java
|
/**
 * Returns true when no bit is set in the first {@code numWords} words.
 */
boolean scanIsEmpty() {
    // This 'slow' implementation is still faster than any external one
    // could be
    // (e.g.: (bitSet.length() == 0 || bitSet.nextSetBit(0) == -1))
    // especially for small BitSets
    // Depends on the ghost bits being clear!
    final int count = numWords;
    for (int i = 0; i < count; i++) {
        if (bits[i] != 0)
            return false;
    }
    return true;
}
|
java
|
/**
 * Emits code that binds each WHERE-clause parameter of the generated method:
 * for every aliased parameter in {@code where}, appends a
 * {@code _contentValues.addWhereArgs(...)} call, wrapping nullable parameters
 * (without a type adapter) so that null becomes the empty string.
 *
 * @param methodBuilder JavaPoet builder receiving the generated statements
 * @param method        the DAO method being generated
 * @param where         pair of the WHERE SQL and its (alias, type) parameters
 */
public static void generateWhereCondition(MethodSpec.Builder methodBuilder, SQLiteModelMethod method, Pair<String, List<Pair<String, TypeName>>> where) {
    boolean nullable;
    // methodBuilder.addStatement("$T<String>
    // _sqlWhereParams=getWhereParamsArray()", ArrayList.class);
    for (Pair<String, TypeName> item : where.value1) {
        String resolvedParamName = method.findParameterNameByAlias(item.value0);
        // methodBuilder.addCode("_sqlWhereParams.add(");
        methodBuilder.addCode("_contentValues.addWhereArgs(");
        nullable = isNullable(item.value1);

        if (nullable && !method.hasAdapterForParam(item.value0)) {
            // transform null in ""
            methodBuilder.addCode("($L==null?\"\":", resolvedParamName);
        }
        // check for string conversion
        TypeUtility.beginStringConversion(methodBuilder, item.value1);
        SQLTransformer.javaMethodParam2WhereConditions(methodBuilder, method, resolvedParamName, item.value1);
        // check for string conversion
        TypeUtility.endStringConversion(methodBuilder, item.value1);

        if (nullable && !method.hasAdapterForParam(item.value0)) {
            methodBuilder.addCode(")");
        }
        methodBuilder.addCode(");\n");
    }
}
|
java
|
private Path find(String namespace, String name) {
Path expectedPath = pathForMetadata(namespace, name);
if (DEFAULT_NAMESPACE.equals(namespace)) {
// when using the default namespace, the namespace may not be in the path
try {
checkExists(rootFileSystem, expectedPath);
return expectedPath;
} catch (DatasetNotFoundException e) {
try {
Path backwardCompatiblePath = new Path(rootDirectory, new Path(
name.replace('.', Path.SEPARATOR_CHAR), METADATA_DIRECTORY));
checkExists(rootFileSystem, backwardCompatiblePath);
return backwardCompatiblePath;
} catch (DatasetNotFoundException _) {
throw e; // throw the original
}
}
} else {
// no need to check other locations
checkExists(rootFileSystem, expectedPath);
return expectedPath;
}
}
|
java
|
/**
 * Blocks until common knowledge is reached, then locks the cloud.
 *
 * Fix: InterruptedException was silently swallowed. Re-asserting the
 * interrupt inside the wait loop would make {@code wait()} spin, so the
 * interrupt is remembered and the thread's interrupt status is restored
 * after the lock is taken.
 */
static void lockCloud() {
    if( _cloudLocked ) return; // Fast-path cutout
    boolean interrupted = false;
    synchronized(Paxos.class) {
        while( !_commonKnowledge ) {
            try {
                Paxos.class.wait();
            } catch( InterruptedException ie ) {
                interrupted = true; // remember, restore after the loop
            }
        }
        _cloudLocked = true;
    }
    if (interrupted) {
        Thread.currentThread().interrupt();
    }
}
|
python
|
def _AddForwardedIps(self, forwarded_ips, interface):
"""Configure the forwarded IP address on the network interface.
Args:
forwarded_ips: list, the forwarded IP address strings to configure.
interface: string, the output device to use.
"""
for address in forwarded_ips:
self.ip_forwarding_utils.AddForwardedIp(address, interface)
|
java
|
@SuppressWarnings("unchecked")
public <T> List<T> getContextualInstances(final Class<T> type) {
List<T> result = new ArrayList<T>();
for (Bean<?> bean : manager.getBeans(type)) {
CreationalContext<T> context = (CreationalContext<T>) manager.createCreationalContext(bean);
if (context != null) {
result.add((T) manager.getReference(bean, type, context));
}
}
return result;
}
|
python
|
def to_capitalized_camel_case(snake_case_string):
    """
    Convert a snake-case string to camel case with a capitalized first letter.
    For example, "some_var" becomes "SomeVar".

    :param snake_case_string: Snake-cased string to convert to camel case.
    :returns: Camel-cased version of snake_case_string.
    """
    return ''.join(part.title() for part in snake_case_string.split('_'))
|
java
|
/**
 * Returns the EClass for IfcSurfaceStyleRendering, lazily resolving it from
 * the registered Ifc4 package on first use and caching the result.
 * The classifier index (681) is generated — do not edit by hand.
 */
@Override
public EClass getIfcSurfaceStyleRendering() {
    if (ifcSurfaceStyleRenderingEClass == null) {
        ifcSurfaceStyleRenderingEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI)
            .getEClassifiers().get(681);
    }
    return ifcSurfaceStyleRenderingEClass;
}
|
python
|
def write_to(fpath, to_write, aslines=False, verbose=None,
             onlyifdiff=False, mode='w', n=None):
    """ Writes text to a file. Automatically encodes text as utf8.

    Args:
        fpath (str): file path (may also be a file object with a ``name``)
        to_write (str): text to write (must be unicode text)
        aslines (bool): if True to_write is assumed to be a list of lines
        verbose (bool): verbosity flag
        onlyifdiff (bool): only writes if needed!
            checks hash of to_write vs the hash of the contents of fpath
        mode (unicode): (default = u'w')
        n (int): (default = 2)

    CommandLine:
        python -m utool.util_io --exec-write_to --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_io import *  # NOQA
        >>> import utool as ut
        >>> fpath = ut.unixjoin(ut.get_app_resource_dir('utool'), 'testwrite.txt')
        >>> ut.delete(fpath)
        >>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.'
        >>> aslines = False
        >>> verbose = True
        >>> onlyifdiff = False
        >>> mode = u'w'
        >>> n = 2
        >>> write_to(fpath, to_write, aslines, verbose, onlyifdiff, mode, n)
        >>> read_ = ut.read_from(fpath)
        >>> print('read_ = ' + read_)
        >>> print('to_write = ' + to_write)
        >>> assert read_ == to_write
    """
    if onlyifdiff:
        import utool as ut
        # skip the write entirely when content is unchanged
        if ut.hashstr(read_from(fpath)) == ut.hashstr(to_write):
            print('[util_io] * no difference')
            return
    verbose = _rectify_verb_write(verbose)
    if verbose:
        # n = None if verbose > 1 else 2
        # print('[util_io] * Writing to text file: %r ' % util_path.tail(fpath, n=n))
        print('[util_io] * Writing to text file: {}'.format(fpath))
    # backup path deliberately disabled: 'False and ...' always short-circuits
    backup = False and exists(fpath)
    if backup:
        util_path.copy(fpath, fpath + '.backup')
    if not isinstance(fpath, six.string_types):
        # Assuming a file object with a name attribute
        # Should just read from the file
        fpath = fpath.name
    with open(fpath, mode) as file_:
        if aslines:
            file_.writelines(to_write)
        else:
            # Ensure python2 writes in bytes
            if six.PY2:
                if isinstance(to_write, unicode):  # NOQA
                    to_write = to_write.encode('utf8')
            try:
                file_.write(to_write)
            except UnicodeEncodeError as ex:
                # show a small window of the offending text for debugging
                start = max(ex.args[2] - 10, 0)
                end = ex.args[3] + 10
                context = to_write[start:end]
                print(repr(context))
                print(context)
                from utool import util_dbg
                util_dbg.printex(ex, keys=[(type, 'to_write')])
                file_.close()
                if backup:
                    # restore
                    util_path.copy(fpath + '.backup', fpath)
                # import utool
                # utool.embed()
                raise
|
java
|
/**
 * Returns a copy of this server list with all observer-type servers removed.
 */
public ServerList filterOutObservers()
{
    Predicate<ServerSpec> isNotObserver = new Predicate<ServerSpec>()
    {
        @Override
        public boolean apply(ServerSpec spec)
        {
            return spec.getServerType() != ServerType.OBSERVER;
        }
    };
    return new ServerList(Lists.newArrayList(Iterables.filter(specs, isNotObserver)));
}
|
python
|
def stddev_samples(data, xcol, ycollist, delta=1.0):
    """Create a sample list with the mean and (delta-scaled) standard deviation
    of each row. Each output element is the tuple
    ``(x, MEAN, STDDEV*delta, MEAN - STDDEV*delta, MEAN + STDDEV*delta)``
    where x is taken from column ``xcol`` and the statistics are computed over
    the columns listed in ``ycollist``.
    """
    samples = []
    ncols = len(ycollist)
    try:
        for row in data:
            mean = float(sum(row[col] for col in ycollist)) / ncols
            variance = sum((mean - row[col]) ** 2 for col in ycollist)
            spread = math.sqrt(variance / ncols) * delta
            samples.append((row[xcol], mean, spread, mean - spread, mean + spread))
    except IndexError:
        raise IndexError("bad data: %s,xcol=%d,ycollist=%s" % (data, xcol, ycollist))
    return samples
|
java
|
/**
 * Returns the JSP configuration for this web application, or {@code null}
 * when no JSP property groups and no taglibs are defined (as required by the
 * servlet spec).
 *
 * @throws UnsupportedOperationException if invoked from within
 *         contextInitialized of a programmatically added listener
 */
@Override
public JspConfigDescriptor getJspConfigDescriptor() {
    if (withinContextInitOfProgAddListener) {
        throw new UnsupportedOperationException(MessageFormat.format(
            nls.getString("Unsupported.op.from.servlet.context.listener"),
            new Object[] {"getJspConfigDescriptor", lastProgAddListenerInitialized, getApplicationName()})); // PI41941
    }
    JspConfigDescriptorImpl jspConfigDescriptor = new JspConfigDescriptorImpl(this);
    // spec: return null when there is no JSP configuration at all
    if (jspConfigDescriptor.getJspPropertyGroups().isEmpty() && jspConfigDescriptor.getTaglibs().isEmpty()) {
        return null;
    }
    return jspConfigDescriptor;
}
|
java
|
/**
 * Resolves a process definition to its cached entry. On a cache miss the
 * owning deployment is (re-)deployed, which is expected to populate the
 * cache as a side effect.
 *
 * @return the cached entry for the definition
 * @throws ActivitiException if deploying still did not put the definition in the cache
 */
public ProcessDefinitionCacheEntry resolveProcessDefinition(ProcessDefinition processDefinition) {
    String processDefinitionId = processDefinition.getId();
    String deploymentId = processDefinition.getDeploymentId();

    ProcessDefinitionCacheEntry cachedProcessDefinition = processDefinitionCache.get(processDefinitionId);

    if (cachedProcessDefinition == null) {
        CommandContext commandContext = Context.getCommandContext();
        DeploymentEntity deployment = deploymentEntityManager.findById(deploymentId);
        // mark as existing so deploy() treats this as a re-deploy, not a new deployment
        deployment.setNew(false);
        deploy(deployment, null);
        cachedProcessDefinition = processDefinitionCache.get(processDefinitionId);

        if (cachedProcessDefinition == null) {
            throw new ActivitiException("deployment '" + deploymentId + "' didn't put process definition '" + processDefinitionId + "' in the cache");
        }
    }
    return cachedProcessDefinition;
}
|
python
|
def organization(self):
    """
    | Comment: The ID of the organization associated with this user, in this membership
    """
    # Only resolvable when both an API client and an organization id are present;
    # otherwise implicitly returns None, matching the original behavior.
    if not (self.api and self.organization_id):
        return None
    return self.api._get_organization(self.organization_id)
|
java
|
/**
 * Creates an SSRC for the given URL with the supplied key and trust material,
 * using a fixed initial connection timeout of 120 seconds.
 * NOTE(review): marked deprecated upstream — confirm and document the intended
 * replacement factory in the {@code @deprecated} tag.
 *
 * @throws InitializationException if the SSRC cannot be initialized
 */
@Deprecated
public static SSRC createSsrc(String url, KeyManager[] kms, TrustManager[] tms)
        throws InitializationException {
    return new SsrcImpl(url, kms, tms, 120 * 1000);
}
|
java
|
/**
 * Removes the session-scoped shared flow attribute for the given shared flow
 * class from storage.
 *
 * @param sharedFlowClassName class name used to build the scoped attribute name
 * @param request             current request (unwrapped for multipart before use)
 * @param servletContext      context used to look up the storage handler
 */
public static void removeSharedFlow( String sharedFlowClassName, HttpServletRequest request,
                                     ServletContext servletContext )
{
    StorageHandler sh = Handlers.get( servletContext ).getStorageHandler();
    // use the unwrapped request for storage access, but the original request
    // for computing the scoped attribute name
    HttpServletRequest unwrappedRequest = unwrapMultipart( request );
    RequestContext rc = new RequestContext( unwrappedRequest, null );
    String attrName = ScopedServletUtils.getScopedSessionAttrName(InternalConstants.SHARED_FLOW_ATTR_PREFIX
        + sharedFlowClassName, request);
    sh.removeAttribute(rc, attrName);
}
|
java
|
/**
 * Appends a two-byte (big-endian) short to the packet being built.
 * An IOException from the underlying stream is logged and swallowed so the
 * fluent chain is never broken; callers get no direct failure signal.
 *
 * @return this builder, for chaining
 */
public synchronized PacketBuilder withShort(final short s)
{
    // fail fast if build() was already called
    checkBuilt();
    try
    {
        dataOutputStream.writeShort(s);
    }
    catch (final IOException e)
    {
        logger.error("Unable to add short: {} : {}", e.getClass(), e.getMessage());
    }
    return this;
}
|
java
|
/**
 * Opens a connection to the given URL with output enabled, attaching a Basic
 * Authorization header when credentials are available. The encoded header is
 * computed once from the user/password pair and cached for later calls.
 */
protected URLConnection openConnection(URL url)
    throws IOException
{
    URLConnection conn = url.openConnection();
    conn.setDoOutput(true);

    if (_basicAuth == null && _user != null && _password != null) {
        _basicAuth = "Basic " + base64(_user + ":" + _password);
    }
    if (_basicAuth != null) {
        conn.setRequestProperty("Authorization", _basicAuth);
    }
    return conn;
}
|
python
|
def dump(self, path):
    """Write the pushdb to ``path`` in properties-file format."""
    with open(path, 'w') as fileobj:
        Properties.dump(self._props, fileobj)
|
java
|
/**
 * Escapes the given text for safe inclusion in JavaScript, using the
 * specified escape type and level.
 *
 * @param text  text to escape (passed through to the escape utility unchecked)
 * @param type  escape type; must not be null
 * @param level escape level; must not be null
 * @return the escaped text
 * @throws IllegalArgumentException if {@code type} or {@code level} is null
 */
public static String escapeJavaScript(final String text,
                                      final JavaScriptEscapeType type, final JavaScriptEscapeLevel level) {

    if (type == null) {
        throw new IllegalArgumentException("The 'type' argument cannot be null");
    }

    if (level == null) {
        throw new IllegalArgumentException("The 'level' argument cannot be null");
    }

    return JavaScriptEscapeUtil.escape(text, type, level);
}
|
python
|
def gen_typedefs(self) -> str:
    """Generate python class declarations for every type in the schema.

    Each type becomes ``class <Name>(<Parent>):\\n\\tpass``; declarations are
    separated by two blank lines and the result ends with a newline (or is
    empty when there are no types).
    """
    declarations = []
    for typ in self.schema.types.values():
        typname = self.python_name_for(typ.name)
        parent = self.python_name_for(typ.typeof)
        declarations.append('class {}({}):\n\tpass'.format(typname, parent))
    if not declarations:
        return ''
    return '\n\n\n'.join(declarations) + '\n'
|
java
|
/**
 * Returns the live list of periods, lazily creating an empty list on first
 * access (standard JAXB accessor pattern: mutate the returned list to add
 * entries; there is no setter).
 */
public List<ActivitySpreadType.Period> getPeriod()
{
    if (period == null)
    {
        period = new ArrayList<ActivitySpreadType.Period>();
    }
    return this.period;
}
|
python
|
def get_files_to_check(self):
    """Generate files and error codes to check on each one.

    Walk dir trees under `self._arguments` and yield file names
    that `match` under each directory that `match_dir`.
    The method locates the configuration for each file name and yields a
    tuple of (filename, [error_codes], ignore_decorators).
    With every discovery of a new configuration file `IllegalConfiguration`
    might be raised.
    """
    def _get_matches(conf):
        """Return the `match` and `match_dir` functions for `config`."""
        # NOTE(review): `re` is presumably `re.compile` aliased at module
        # level — confirm against the module's imports.
        match_func = re(conf.match + '$').match
        match_dir_func = re(conf.match_dir + '$').match
        return match_func, match_dir_func

    def _get_ignore_decorators(conf):
        """Return the `ignore_decorators` as None or regex."""
        return (re(conf.ignore_decorators) if conf.ignore_decorators
                else None)

    for name in self._arguments:
        if os.path.isdir(name):
            for root, dirs, filenames in os.walk(name):
                # each directory may carry its own configuration
                config = self._get_config(os.path.abspath(root))
                match, match_dir = _get_matches(config)
                ignore_decorators = _get_ignore_decorators(config)

                # Skip any dirs that do not match match_dir
                dirs[:] = [d for d in dirs if match_dir(d)]

                for filename in filenames:
                    if match(filename):
                        full_path = os.path.join(root, filename)
                        yield (full_path, list(config.checked_codes),
                               ignore_decorators)
        else:
            # a plain file argument is checked directly against its config
            config = self._get_config(os.path.abspath(name))
            match, _ = _get_matches(config)
            ignore_decorators = _get_ignore_decorators(config)
            if match(name):
                yield (name, list(config.checked_codes), ignore_decorators)
|
java
|
/**
 * Deletes a resource policy. Applies the standard pre-execution request
 * handler chain before delegating to the generated executor.
 */
@Override
public DeleteResourcePolicyResult deleteResourcePolicy(DeleteResourcePolicyRequest request) {
    request = beforeClientExecution(request);
    return executeDeleteResourcePolicy(request);
}
|
python
|
def exception(self, timeout=None):
    """Return the exception raised by the call, if any.

    This blocks until the message has successfully been published, and
    returns the exception. If the call succeeded, return None.

    Args:
        timeout (Union[int, float]): The number of seconds before this call
            times out and raises TimeoutError.

    Raises:
        TimeoutError: If the request times out.

    Returns:
        Exception: The exception raised by the call, if any.
    """
    # Block until the publish finishes or the timeout elapses.
    completed = self._completed.wait(timeout=timeout)
    if not completed:
        raise exceptions.TimeoutError("Timed out waiting for result.")
    # A result other than the sentinel means the publish succeeded.
    if self._result != self._SENTINEL:
        return None
    return self._exception
|
python
|
def _recv_timeout_loop(self):
    """
    A loop to check timeout of receiving remote BFD packet.

    Waits up to the detection time on an event that is set when a packet
    arrives; if the event never fires, the session is declared down
    (RFC 5880 section 6.8.4) and authentication state is reset
    (RFC 5880 section 6.8.1).
    """
    while self._detect_time:
        last_wait = time.time()
        self._lock = hub.Event()
        self._lock.wait(timeout=self._detect_time)

        if self._lock.is_set():
            # Authentication variable check (RFC5880 Section 6.8.1.)
            if getattr(self, "_auth_seq_known", 0):
                # NOTE(review): this comparison looks inverted — `last_wait`
                # was sampled before the wait, so it can never exceed
                # `time.time() + 2 * self._detect_time`; the RFC intends to
                # reset auth state when no packet arrived within twice the
                # detect time. Confirm against RFC 5880 section 6.8.1.
                if last_wait > time.time() + 2 * self._detect_time:
                    self._auth_seq_known = 0
        else:
            # Check Detection Time expiration (RFC5880 section 6.8.4.)
            LOG.info("[BFD][%s][RECV] BFD Session timed out.",
                     hex(self._local_discr))
            if self._session_state not in [bfd.BFD_STATE_DOWN,
                                           bfd.BFD_STATE_ADMIN_DOWN]:
                self._set_state(bfd.BFD_STATE_DOWN,
                                bfd.BFD_DIAG_CTRL_DETECT_TIME_EXPIRED)

            # Authentication variable check (RFC5880 Section 6.8.1.)
            if getattr(self, "_auth_seq_known", 0):
                self._auth_seq_known = 0
|
java
|
/**
 * Returns true when the dropdown located by {@code by} contains an option
 * whose normalized text equals {@code text}.
 */
public boolean isTextPresentInDropDown(final By by, final String text) {
    final String optionXpath =
            ".//option[normalize-space(.) = " + escapeQuotes(text) + "]";
    final List<WebElement> matches =
            driver.findElement(by).findElements(By.xpath(optionXpath));
    return matches != null && !matches.isEmpty();
}
|
java
|
/**
 * Blocks until every pending write task has completed.
 *
 * Fix: InterruptedException was caught together with ExecutionException and
 * effectively swallowed; the interrupt status is now restored and waiting
 * stops, so callers can observe the interruption. Execution failures are
 * still logged per task without aborting the remaining waits.
 */
private void waitForCompletion() {
    for (ListenableFuture<?> future : pendingTasks.values()) {
        try {
            future.get();
        } catch (InterruptedException e) {
            // Restore the interrupt flag and stop waiting for the rest.
            Thread.currentThread().interrupt();
            LOG.error("[" + this + "] Interrupted while waiting for writes to complete: " + e.getMessage());
            return;
        } catch (ExecutionException e) {
            LOG.error("[" + this + "] Error waiting for writes to complete: " + e.getMessage());
        }
    }
}
|
java
|
/**
 * Checks whether applying the given move to the current solution yields a
 * solution held in the tabu memory. The move is applied temporarily and
 * always undone before returning, leaving the solution unchanged.
 */
@Override
public boolean isTabu(Move<? super SolutionType> move, SolutionType currentSolution) {
    // Temporarily apply the move so the resulting solution can be inspected.
    move.apply(currentSolution);
    boolean found = memory.contains(currentSolution);
    // Roll the solution back to its original state.
    move.undo(currentSolution);
    return found;
}
|
java
|
/**
 * Extracts the full service name from a gRPC full method name
 * ("service/method"), or returns null when the name contains no '/'.
 */
@Nullable
public static String extractFullServiceName(String fullMethodName) {
    checkNotNull(fullMethodName, "fullMethodName");
    final int slash = fullMethodName.lastIndexOf('/');
    return (slash == -1) ? null : fullMethodName.substring(0, slash);
}
|
java
|
/**
 * Starts (or resumes) the download task: validates the target directory,
 * pre-creates the target file, builds the chunk list and opens the per-chunk
 * connections.
 *
 * @param isResume true when continuing a task without a progress record
 * @throws BootstrapException when the path is empty/unwritable, disk space is
 *         insufficient, a same-named file cannot be handled, or file creation
 *         fails; netty and progress resources are torn down first
 */
private void start(boolean isResume) {
    long startTime = 0;
    long lastPauseTime = 0;
    // carry timing over when restarting a task that was left waiting
    if (taskInfo != null && taskInfo.getStatus() == HttpDownStatus.WAIT) {
        startTime = taskInfo.getStartTime();
        lastPauseTime = System.currentTimeMillis();
    }
    taskInfo = new TaskInfo();
    taskInfo.setStartTime(startTime);
    taskInfo.setLastPauseTime(lastPauseTime);
    if (downConfig.getFilePath() == null || "".equals(downConfig.getFilePath().trim())) {
        throw new BootstrapPathEmptyException("下载路径不能为空");
    }
    String filePath = HttpDownUtil.getTaskFilePath(this);
    try {
        if (!FileUtil.exists(downConfig.getFilePath())) {
            try {
                Files.createDirectories(Paths.get(downConfig.getFilePath()));
            } catch (IOException e) {
                throw new BootstrapCreateDirException("创建目录失败,请重试", e);
            }
        }
        if (!FileUtil.canWrite(downConfig.getFilePath())) {
            throw new BootstrapNoPermissionException("无权访问下载路径,请修改路径或开放目录写入权限");
        }
        // insufficient disk space for the full download
        if (response.getTotalSize() > FileUtil.getDiskFreeSize(downConfig.getFilePath())) {
            throw new BootstrapNoSpaceException("磁盘空间不足,请修改路径");
        }
        // a file with the same name already exists
        File downFile = new File(filePath);
        if (downFile.exists()) {
            // when continuing without a progress record, delete the existing
            // same-named file and download it again
            if (isResume) {
                downFile.delete();
            } else if (downConfig.isAutoRename()) {
                response.setFileName(FileUtil.renameIfExists(filePath));
                filePath = HttpDownUtil.getTaskFilePath(this);
            } else {
                throw new BootstrapFileAlreadyExistsException("文件名已存在,请修改文件名");
            }
        }
    } catch (BootstrapException e) {
        // tear down netty / progress resources before propagating
        if (loopGroup != null) {
            loopGroup.shutdownGracefully();
            loopGroup = null;
        }
        if (progressThread != null) {
            progressThread.close();
            progressThread = null;
        }
        throw e;
    }
    try {
        // pre-create the target file
        if (response.isSupportRange()) {
            String fileSystemType = FileUtil.getSystemFileType(filePath);
            // sparse allocation only on file systems known to support it
            if (OsUtil.isUnix()
                || "NTFS".equalsIgnoreCase(fileSystemType)
                || "UFS".equalsIgnoreCase(fileSystemType)
                || "APFS".equalsIgnoreCase(fileSystemType)) {
                FileUtil.createFileWithSparse(filePath, response.getTotalSize());
            } else {
                FileUtil.createFileWithDefault(filePath, response.getTotalSize());
            }
        } else {
            FileUtil.createFile(filePath);
        }
    } catch (IOException e) {
        throw new BootstrapException("创建文件失败,请重试", e);
    }
    buildChunkInfoList();
    commonStart();
    if (taskInfo.getStartTime() <= 0) {
        taskInfo.setStartTime(System.currentTimeMillis());
    }
    // download-started callback
    if (callback != null) {
        request.headers().remove(HttpHeaderNames.RANGE);
        callback.onStart(this);
    }
    for (int i = 0; i < taskInfo.getConnectInfoList().size(); i++) {
        ConnectInfo connectInfo = taskInfo.getConnectInfoList().get(i);
        connect(connectInfo);
    }
}
|
python
|
def rand(self, count=1):
    """Gets @count random members from the set.

    @count: #int number of members to return

    -> set of @count deserialized members
    """
    raw_members = self._client.srandmember(self.key_prefix, count)
    return {self._loads(member) for member in raw_members}
|
java
|
/**
 * Returns a new array holding the elements of {@code array} in the
 * half-open range [from, to).
 */
public static Object[] copyArray(Object[] array, int from, int to) {
    final int length = to - from;
    final Object[] copy = new Object[length];
    System.arraycopy(array, from, copy, 0, length);
    return copy;
}
|
python
|
def vhost_exists(name, runas=None):
    '''
    Return whether the vhost exists based on rabbitmqctl list_vhosts.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.vhost_exists rabbit_host
    '''
    # default to the current user except on Windows, where rabbitmqctl is not
    # run under a substitute user
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    return name in list_vhosts(runas=runas)
|
java
|
/**
 * Creates or updates a certificate, blocking until the service responds.
 *
 * @param resourceGroupName   name of the resource group
 * @param name                name of the certificate
 * @param certificateEnvelope certificate details
 * @return the created or updated certificate
 */
public CertificateInner createOrUpdate(String resourceGroupName, String name, CertificateInner certificateEnvelope) {
    return createOrUpdateWithServiceResponseAsync(resourceGroupName, name, certificateEnvelope).toBlocking().single().body();
}
|
java
|
/**
 * Creates an active MEDIUM_RISK_ONE finding under the given source, stamped
 * with the current time, and returns the service's response.
 * Sample code: the resource name is hard-coded for demonstration.
 *
 * @param sourceName parent source for the new finding
 * @param findingId  id to assign to the finding
 * @throws RuntimeException if the Security Center client cannot be created
 */
static Finding createFinding(SourceName sourceName, String findingId) {
    try (SecurityCenterClient client = SecurityCenterClient.create()) {
        // SourceName sourceName = SourceName.of(/*organization=*/"123234324",/*source=*/
        // "423432321");
        // String findingId = "samplefindingid";

        // Use the current time as the finding "event time".
        Instant eventTime = Instant.now();

        // The resource this finding applies to. The CSCC UI can link
        // the findings for a resource to the corresponding Asset of a resource
        // if there are matches.
        String resourceName = "//cloudresourcemanager.googleapis.com/organizations/11232";

        // Start setting up a request to create a finding in a source.
        Finding finding =
            Finding.newBuilder()
                .setParent(sourceName.toString())
                .setState(State.ACTIVE)
                .setResourceName(resourceName)
                .setEventTime(
                    Timestamp.newBuilder()
                        .setSeconds(eventTime.getEpochSecond())
                        .setNanos(eventTime.getNano()))
                .setCategory("MEDIUM_RISK_ONE")
                .build();

        // Call the API.
        Finding response = client.createFinding(sourceName, findingId, finding);

        System.out.println("Created Finding: " + response);
        return response;
    } catch (IOException e) {
        throw new RuntimeException("Couldn't create client.", e);
    }
}
|
python
|
def later(timeout, f, *args, **kwargs):
    '''
    Sets a timer that will call the *f* function past *timeout* seconds.

    See example in :ref:`sample_inter`

    :return: :class:`Timer`
    '''
    timer = Timer(timeout, f, args, kwargs)
    timer.start()
    return timer
|
java
|
/**
 * Sets the table to the given width in centimeters and applies the default
 * border style.
 * NOTE(review): the {@code rows} parameter is unused here (only
 * {@code widthCM} and {@code cols} are forwarded) — confirm whether it is
 * kept purely for signature compatibility.
 */
public void widthTable(XWPFTable table, float widthCM, int rows, int cols) {
    TableTools.widthTable(table, widthCM, cols);
    TableTools.borderTable(table, 4);
}
|
python
|
def date_convert(string, match, ymd=None, mdy=None, dmy=None,
        d_m_y=None, hms=None, am=None, tz=None, mm=None, dd=None):
    '''Convert the incoming string containing some date / time info into a
    datetime instance.

    The keyword arguments are regex group indices into ``match`` telling this
    function where each component was captured:

    :param ymd/mdy/dmy: group holding a combined date in that field order,
        separated by ``-``, ``/`` or whitespace
    :param d_m_y: a (day, month, year) triple of separate group indices
    :param mm, dd: separate month/day groups; the year defaults to today's
    :param hms: group holding ``HH:MM`` or ``HH:MM:SS[.fff]``
    :param am: group holding an AM/PM marker
    :param tz: group holding a timezone ('Z', named, or '+HH:MM'-style offset)
    :return: a ``datetime`` (or ``time`` when only time components matched)
    '''
    groups = match.groups()
    time_only = False
    if mm and dd:
        # Separate month/day groups; no year captured, assume current year.
        y=datetime.today().year
        m=groups[mm]
        d=groups[dd]
    elif ymd is not None:
        y, m, d = re.split(r'[-/\s]', groups[ymd])
    elif mdy is not None:
        m, d, y = re.split(r'[-/\s]', groups[mdy])
    elif dmy is not None:
        d, m, y = re.split(r'[-/\s]', groups[dmy])
    elif d_m_y is not None:
        # d_m_y is a triple of group indices, one per component.
        d, m, y = d_m_y
        d = groups[d]
        m = groups[m]
        y = groups[y]
    else:
        # No date components at all: produce a time object instead.
        time_only = True

    H = M = S = u = 0
    if hms is not None and groups[hms]:
        t = groups[hms].split(':')
        if len(t) == 2:
            H, M = t
        else:
            H, M, S = t
            if '.' in S:
                # Fractional seconds -> microseconds.
                S, u = S.split('.')
                u = int(float('.' + u) * 1000000)
            S = int(S)
        H = int(H)
        M = int(M)

    if am is not None:
        am = groups[am]
        if am:
            am = am.strip()
            if am == 'AM' and H == 12:
                # correction for "12" hour functioning as "0" hour: 12:15 AM = 00:15 by 24 hr clock
                H -= 12
            elif am == 'PM' and H == 12:
                # no correction needed: 12PM is midday, 12:00 by 24 hour clock
                pass
            elif am == 'PM':
                H += 12

    if tz is not None:
        tz = groups[tz]
        if tz == 'Z':
            tz = FixedTzOffset(0, 'UTC')
        elif tz:
            tz = tz.strip()
            if tz.isupper():
                # Named zone (e.g. "EST") — currently left as the raw string.
                # TODO use the awesome python TZ module?
                pass
            else:
                # Numeric offset: 's' is the sign character.
                sign = tz[0]
                if ':' in tz:
                    tzh, tzm = tz[1:].split(':')
                elif len(tz) == 4:  # 'snnn'
                    tzh, tzm = tz[1], tz[2:4]
                else:
                    tzh, tzm = tz[1:3], tz[3:5]
                offset = int(tzm) + int(tzh) * 60
                if sign == '-':
                    offset = -offset
                tz = FixedTzOffset(offset, tz)

    if time_only:
        d = time(H, M, S, u, tzinfo=tz)
    else:
        y = int(y)
        if m.isdigit():
            m = int(m)
        else:
            # Month given by name; map via module-level MONTHS_MAP.
            m = MONTHS_MAP[m]
        d = int(d)
        d = datetime(y, m, d, H, M, S, u, tzinfo=tz)

    return d
|
java
|
/**
 * Returns this class's wrapper around the default protobuf
 * {@code DynamicMessage} instance for the given descriptor.
 */
public static DynamicMessage getDefaultInstance(final Descriptor type) {
    return wrap(com.google.protobuf.DynamicMessage.getDefaultInstance(type));
}
|
python
|
def get_acgt_geno_marker(self, marker):
    """Gets the genotypes for a given marker (ACGT format).

    Args:
        marker (str): The name of the marker.

    Returns:
        numpy.ndarray: The genotypes of the marker (ACGT format).
    """
    # Fetch the additive genotypes together with the marker's index, then
    # translate them through that marker's allele-encoding table.
    genotypes, marker_index = self.get_geno_marker(marker, return_index=True)
    return self._allele_encoding[marker_index][genotypes]
|
java
|
/**
 * Returns the commerce country with the given primary key.
 *
 * @param primaryKey the primary key of the commerce country
 * @return the matching commerce country
 * @throws NoSuchCountryException if no commerce country with the key exists
 */
@Override
public CommerceCountry findByPrimaryKey(Serializable primaryKey)
    throws NoSuchCountryException {
    CommerceCountry commerceCountry = fetchByPrimaryKey(primaryKey);

    // Found: return immediately; otherwise log (debug only) and fail.
    if (commerceCountry != null) {
        return commerceCountry;
    }

    if (_log.isDebugEnabled()) {
        _log.debug(_NO_SUCH_ENTITY_WITH_PRIMARY_KEY + primaryKey);
    }

    throw new NoSuchCountryException(_NO_SUCH_ENTITY_WITH_PRIMARY_KEY +
        primaryKey);
}
|
python
|
def as_iso8601(self):
    """
    example: 2016-08-13T00:38:05.210+00:00
    """
    # Date is stored DDMMYY and time HHMMSS...; None for either means
    # no timestamp is available.
    date, clock = self.__date, self.__time
    if date is None or clock is None:
        return None
    # NOTE(review): the literal "0Z" tail assumes the stored time carries a
    # trailing fraction digit — confirm against the producer of __time.
    return "20%s-%s-%sT%s:%s:%s0Z" % \
           (date[4:], date[2:4], date[:2], clock[:2], clock[2:4], clock[4:])
|
java
|
/**
 * Returns the total weight of all edges incident to this vertex,
 * summing both incoming and outgoing edge weights.
 */
public double sum() {
    double total = 0;
    for (WeightedDirectedTypedEdge<T> edge : inEdges.values())
        total += edge.weight();
    for (WeightedDirectedTypedEdge<T> edge : outEdges.values())
        total += edge.weight();
    return total;
}
|
python
|
def get_project(self, owner, id, **kwargs):
    """
    Retrieve a project
    Return details on a project.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_project(owner, id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str owner: User name and unique identifier of the creator of a project. (required)
    :param str id: Project unique identifier. (required)
    :return: ProjectSummaryResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always ask the lower-level call for the response data only.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: the request thread is returned.
        return self.get_project_with_http_info(owner, id, **kwargs)
    # Synchronous path: unwrap and return the data.
    data = self.get_project_with_http_info(owner, id, **kwargs)
    return data
|
java
|
/**
 * Returns the academic terms that start at or after the given instant.
 * Uses binary search against the sorted term list; a negative result is the
 * standard {@code Collections.binarySearch} insertion-point encoding.
 */
protected List<AcademicTermDetail> getAcademicTermsAfter(DateTime start) {
    final List<AcademicTermDetail> terms =
            this.eventAggregationManagementDao.getAcademicTermDetails();
    final AcademicTermDetailImpl probe =
            new AcademicTermDetailImpl(
                    start.toDateMidnight(), start.plusDays(1).toDateMidnight(), "");
    final int index = Collections.binarySearch(terms, probe);
    if (index == 0) {
        // Match at the head: every term qualifies.
        return terms;
    }
    // Exact match -> start there; otherwise decode the insertion point.
    final int from = (index > 0) ? index : -(index + 1);
    return terms.subList(from, terms.size());
}
|
python
|
def find_import_star(node):
    """Finds import stars"""
    # Only `from X import ...` nodes can carry a star import.
    if not isinstance(node, ast.ImportFrom):
        return False
    return '*' in h.importfrom_names(node.names)
|
java
|
/**
 * @deprecated delegates unchanged to the superclass implementation; kept only
 *             so the deprecation marker is carried at this level.
 */
@Deprecated
@Override
public SQLTransaction beginTransaction(IsolationLevel isolationLevel, boolean forUpdateOnly) {
    return super.beginTransaction(isolationLevel, forUpdateOnly);
}
|
python
|
def _compute_fluxes(self):
    ''' All fluxes are band by band'''
    # Emission from each atmospheric layer and from the surface.
    self.emission = self._compute_emission()
    self.emission_sfc = self._compute_emission_sfc()
    # Flux entering at the top of the atmosphere.
    fromspace = self._from_space()
    self.flux_down = self.trans.flux_down(fromspace, self.emission)
    self.flux_reflected_up = self.trans.flux_reflected_up(self.flux_down, self.albedo_sfc)
    # this ensure same dimensions as other fields
    self.flux_to_sfc = self.flux_down[..., -1, np.newaxis]
    # Upwelling flux leaving the surface: its own emission plus the
    # reflected part of the downwelling flux at the bottom level.
    self.flux_from_sfc = (self.emission_sfc +
                          self.flux_reflected_up[..., -1, np.newaxis])
    self.flux_up = self.trans.flux_up(self.flux_from_sfc,
                                      self.emission + self.flux_reflected_up[...,0:-1])
    self.flux_net = self.flux_up - self.flux_down
    # absorbed radiation (flux convergence) in W / m**2 (per band)
    self.absorbed = np.diff(self.flux_net, axis=-1)
    self.absorbed_total = np.sum(self.absorbed, axis=-1)
    self.flux_to_space = self._compute_flux_top()
|
java
|
/**
 * Builds a value-constraint query matching any of the given values,
 * weighted by the supplied relevance weight.
 */
public StructuredQueryDefinition valueConstraint(String constraintName, double weight, String... values) {
    StructuredQueryDefinition query = new ValueConstraintQuery(constraintName, weight, values);
    return query;
}
|
python
|
def column_to_bq_schema(self):
    """Convert a column to a bigquery schema object.
    """
    extra_kwargs = {}
    if len(self.fields) > 0:
        # Nested column: recursively convert the child fields first.
        children = [child.column_to_bq_schema() for child in self.fields]
        extra_kwargs = {"fields": children}
    return google.cloud.bigquery.SchemaField(
        self.name, self.dtype, self.mode, **extra_kwargs)
|
java
|
/**
 * Marshalls this request and flags it as a dry run so the service only
 * validates permissions instead of executing the operation.
 */
@Override
public Request<DescribeSpotPriceHistoryRequest> getDryRunRequest() {
    Request<DescribeSpotPriceHistoryRequest> dryRunRequest =
            new DescribeSpotPriceHistoryRequestMarshaller().marshall(this);
    dryRunRequest.addParameter("DryRun", Boolean.TRUE.toString());
    return dryRunRequest;
}
|
java
|
/**
 * Returns the names of all data rows, i.e. rows that are neither
 * meta-data rows nor marked hidden in their header info.
 */
public ArrayList<String> getDataRows() {
    ArrayList<String> dataRows = new ArrayList<String>();
    for (String row : rowLookup.keySet()) {
        HeaderInfo info = rowLookup.get(row);
        // Keep only visible, non-meta rows.
        if (!isMetaDataRow(row) && !info.isHide()) {
            dataRows.add(row);
        }
    }
    return dataRows;
}
|
python
|
def _parse_api_options(self, options, query_string=False):
"""Select API options out of the provided options object.
Selects API string options out of the provided options object and
formats for either request body (default) or query string.
"""
api_options = self._select_options(options, self.API_OPTIONS)
if query_string:
# Prefix all options with "opt_"
query_api_options = {}
for key in api_options:
# Transform list/tuples into comma separated list
if isinstance(api_options[key], (list, tuple)):
query_api_options[
'opt_' + key] = ','.join(api_options[key])
else:
query_api_options[
'opt_' + key] = api_options[key]
return query_api_options
else:
return api_options
|
java
|
// ANTLR-generated lexer rule: matches the literal 'true' and emits a
// token of type T__144 on the default channel. Generated from
// InternalSARL.g — do not edit by hand.
public final void mT__144() throws RecognitionException {
    try {
        int _type = T__144;
        int _channel = DEFAULT_TOKEN_CHANNEL;
        // InternalSARL.g:130:8: ( 'true' )
        // InternalSARL.g:130:10: 'true'
        {
        match("true");
        }

        state.type = _type;
        state.channel = _channel;
    }
    finally {
    }
}
|
python
|
def _parse_record(self, record_type):
    """Parse a record.

    Consumes tokens for one bibliography entry: the opening brace/paren,
    the citation key, then comma-separated fields. Parsed fields are
    normalized and stored into ``self.records[key]``.
    """
    if self._next_token() in ['{', '(']:
        # The token after the opening delimiter is the citation key.
        key = self._next_token()
        self.records[key] = {
            u'id': key,
            u'type': record_type.lower()
        }
        if self._next_token() == ',':
            # Consume fields until the separator is no longer a comma.
            while True:
                field = self._parse_field()
                if field:
                    k, v = field[0], field[1]
                    # Normalize field names via the keynorms alias table.
                    if k in self.keynorms:
                        k = self.keynorms[k]
                    if k == 'pages':
                        # Canonicalize page ranges: strip spaces, "--" -> "-".
                        v = v.replace(' ', '').replace('--', '-')
                    if k == 'author' or k == 'editor':
                        v = self.parse_names(v)
                    # Recapitalizing the title generally causes more problems than it solves
                    # elif k == 'title':
                    #     v = latex_to_unicode(v, capitalize='title')
                    else:
                        v = latex_to_unicode(v)
                    self.records[key][k] = v
                if self._token != ',':
                    break
|
python
|
def append(self, offset, timestamp, key, value, headers=None):
    """ Append message to batch.

    :param offset: int message offset; must be exactly int (bool rejected
        implicitly by the strict type check below)
    :param timestamp: int epoch-milliseconds, or None to use the current
        time (magic v1); ignored and replaced for magic v0
    :param key: bytes-like or None
    :param value: bytes-like or None
    :param headers: unsupported for legacy (v0/v1) record formats
    :return: LegacyRecordMetadata on success, or None when the batch has
        no room left for this message
    """
    assert not headers, "Headers not supported in v0/v1"
    # Check types
    if type(offset) != int:
        raise TypeError(offset)
    if self._magic == 0:
        # v0 records carry no timestamp at all.
        timestamp = self.NO_TIMESTAMP
    elif timestamp is None:
        timestamp = int(time.time() * 1000)
    elif type(timestamp) != int:
        raise TypeError(
            "`timestamp` should be int, but {} provided".format(
                type(timestamp)))
    if not (key is None or
            isinstance(key, (bytes, bytearray, memoryview))):
        raise TypeError(
            "Not supported type for key: {}".format(type(key)))
    if not (value is None or
            isinstance(value, (bytes, bytearray, memoryview))):
        raise TypeError(
            "Not supported type for value: {}".format(type(value)))

    # Check if we have room for another message
    pos = len(self._buffer)
    size = self.size_in_bytes(offset, timestamp, key, value)
    # We always allow at least one record to be appended
    if offset != 0 and pos + size >= self._batch_size:
        return None

    # Allocate proper buffer length
    self._buffer.extend(bytearray(size))

    # Encode message
    crc = self._encode_msg(pos, offset, timestamp, key, value)

    return LegacyRecordMetadata(offset, crc, size, timestamp)
|
python
|
def set_state(self, state, speed=None):
    """
    :param state: bool
    :param speed: a string one of ["lowest", "low",
        "medium", "high", "auto"] defaults to last speed
    :return: nothing
    """
    desired_state = {"powered": state}
    if state:
        # Fan speed is expressed to the API as a brightness value;
        # fall back to 0.33 for unknown speeds.
        chosen_speed = speed or self.current_fan_speed()
        desired_state["brightness"] = self._to_brightness.get(chosen_speed, 0.33)
    response = self.api_interface.set_device_state(
        self, {"desired_state": desired_state})
    self._update_state_from_response(response)
|
python
|
def shutdown_abort():
    '''
    Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        win32api.AbortSystemShutdown('127.0.0.1')
    except pywintypes.error as exc:
        # Log the Win32 error triple (number, context, message) and fail.
        number, context, message = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
|
python
|
def aggr(self, group, **named_attributes):
    """
    Aggregation of the type U('attr1','attr2').aggr(group, computation="QueryExpression")
    has the primary key ('attr1','attr2') and performs aggregation computations for all matching elements of `group`.

    :param group: The query expression to be aggregated.
    :param named_attributes: computations of the form new_attribute="sql expression on attributes of group"
    :return: The derived query expression
    """
    if self.primary_key:
        # Keyed universal set: group `group` by this set's primary key.
        return GroupBy.create(
            self, group=group, keep_all_rows=False,
            attributes=(), named_attributes=named_attributes)
    # No primary key: aggregate over all of `group` via a projection.
    return Projection.create(
        group, attributes=(), named_attributes=named_attributes,
        include_primary_key=False)
|
java
|
/**
 * Creates an N-by-N sparse diagonal matrix whose diagonal holds the
 * given values, delegating to the general {@code diag} overload.
 */
public static DMatrixSparseCSC diag(double... values ) {
    final int size = values.length;
    return diag(new DMatrixSparseCSC(size, size, size), values, 0, size);
}
|
python
|
def set_coordinates(self, x, y, z=None):
    """Set all coordinate dimensions at once.

    ``z`` defaults to None for 2-D points.
    """
    self.x, self.y, self.z = x, y, z
|
python
|
def visibleColumns(self):
    """
    Returns a list of the visible column names for this widget.

    :return [<str>, ..]
    """
    columns = []
    for index in range(self.columnCount()):
        if not self.isColumnHidden(index):
            columns.append(self.columnOf(index))
    return columns
|
java
|
/**
 * Rotates the texture corner coordinates in place according to the
 * requested mode; unrecognized modes leave the corners unchanged.
 */
public void rotation(TextureRotationMode mode){
    final float[][] previous = corner.clone();
    // Each mode is a cyclic shift: corner[i] = previous[(i + shift) % 4].
    final int shift;
    switch (mode) {
        case HALF:
            shift = 2;
            break;
        case CLOCKWIZE:
            shift = 3;
            break;
        case COUNTERCLOCKWIZE:
            shift = 1;
            break;
        default:
            return;
    }
    for (int i = 0; i < 4; i++) {
        corner[i] = previous[(i + shift) % 4];
    }
}
|
python
|
def rel_paths(self, *args, **kwargs):
    """
    Fix the paths in the given dictionary to get relative paths

    Parameters
    ----------
    %(ExperimentsConfig.rel_paths.parameters)s

    Returns
    -------
    %(ExperimentsConfig.rel_paths.returns)s

    Notes
    -----
    d is modified in place!"""
    # NOTE: the %(...)s placeholders above are filled in by a docstring
    # templating step (docrep-style); do not edit them.
    # Pure delegation to the experiments configuration object.
    return self.config.experiments.rel_paths(*args, **kwargs)
|
python
|
def accuracy(current, predicted):
    """
    Computes the accuracy of the TM at time-step t based on the prediction
    at time-step t-1 and the current active columns at time-step t.

    @param current (array) binary vector containing current active columns
    @param predicted (array) binary vector containing predicted active columns
    @return acc (float) prediction accuracy of the TM at time-step t
    """
    predicted_count = np.count_nonzero(predicted)
    if predicted_count == 0:
        # Nothing was predicted: accuracy is defined as 0.
        return 0
    # Fraction of predicted columns that actually became active.
    overlap = np.dot(current, predicted)
    return float(overlap) / float(predicted_count)
|
python
|
def alarm(
    cls,
    template,
    default_params={},
    stack_depth=0,
    log_context=None,
    **more_params
):
    """
    :param template: *string* human readable string with placeholders for parameters
    :param default_params: *dict* parameters to fill in template
    :param stack_depth: *int* how many calls you want popped off the stack to report the *true* caller
    :param log_context: *dict* extra key:value pairs for your convenience
    :param more_params: more parameters (which will overwrite default_params)
    :return:
    """
    timestamp = datetime.utcnow()
    # Surround the message with banner lines so the alarm stands out in logs.
    banner = "*" * 80
    format = banner + CR + indent(template, prefix="** ").strip() + CR + banner
    params = dict(default_params, **more_params)
    item = LogItem(
        context=exceptions.ALARM,
        format=format,
        template=template,
        params=params
    )
    Log._annotate(item, timestamp, stack_depth + 1)
|
python
|
def dependentItems(store, tableClass, comparisonFactory):
    """
    Collect all the items that should be deleted when an item or items
    of a particular item type are deleted.

    @param store: The store to query for dependent items.

    @param tableClass: An L{Item} subclass.

    @param comparisonFactory: A one-argument callable taking an attribute and
        returning an L{iaxiom.IComparison} describing the items to
        collect.

    @return: An iterable of items to delete.
    """
    # Cascades registered for this table, plus global cascades keyed by None.
    for cascadingAttr in (_cascadingDeletes.get(tableClass, []) +
                          _cascadingDeletes.get(None, [])):
        for cascadedItem in store.query(cascadingAttr.type,
                                        comparisonFactory(cascadingAttr)):
            yield cascadedItem
|
python
|
def static_partial_tile_sizes(width, height, tilesize, scale_factors):
    """Generator for partial tile sizes for zoomed in views.

    Positional arguments:
    width -- width of full size image
    height -- height of full size image
    tilesize -- width and height of tiles
    scale_factors -- iterable of scale factors, typically [1,2,4..]

    Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile
    """
    for sf in scale_factors:
        # Tile size expressed in full-resolution pixels at this scale.
        region_tile = tilesize * sf
        if region_tile >= width and region_tile >= height:
            continue  # avoid any full-region tiles
        tiles_x = (width - 1) // region_tile + 1
        tiles_y = (height - 1) // region_tile + 1
        for nx in range(tiles_x):
            rx = nx * region_tile
            # Edge tiles are clipped to the image bounds.
            rw = min(region_tile, width - rx)
            sw = (rw + sf - 1) // sf  # ceil(rw / sf)
            for ny in range(tiles_y):
                ry = ny * region_tile
                rh = min(region_tile, height - ry)
                sh = (rh + sf - 1) // sf  # ceil(rh / sf)
                yield ([rx, ry, rw, rh], [sw, sh])
|
python
|
def set_dict_none_default(dict_item, default_value):
    """
    Replace every value that is None in ``dict_item`` with ``default_value``.

    The dict is modified in place.

    :param dict_item: dict whose None values are replaced (mutated in place)
    :param default_value: value substituted for each None
    :return: None
    """
    # Plain dict.items() works on both Python 2 and 3, so the external
    # `iteritems` helper is unnecessary. Re-assigning an existing key while
    # iterating a view is safe because the dict's size never changes.
    for key, value in dict_item.items():
        if value is None:
            dict_item[key] = default_value
|
java
|
/**
 * Chooses the index of the get-cursor to use next. Index 0 is the default
 * cursor for non-classified messages; when message classifications exist,
 * a weighted selection is made over the remaining classes, excluding the
 * class tried previously ({@code previous}, or -1 on the first call).
 */
public synchronized int chooseGetCursorIndex(int previous)
{
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "chooseGetCursorIndex", new Object[] {Integer.valueOf(previous)});

    // The zeroth index represents the default cursor for non-classified messages.
    int classPos = 0;

    if(classifications.getNumberOfClasses()>0)
    {
        // Need to determine the class of message to process
        if(previous == -1)
        {
            // First time through, get the initial weightings table
            weightMap = classifications.getWeightings();
        }
        else
        {
            // Need to remove previous entries from the weightMap
            weightMap.remove(Integer.valueOf(previous));
        }

        if(!weightMap.isEmpty())
        {
            classPos = classifications.findClassIndex(weightMap);
        } // eof non-empty weightmap
        else if(unitTestOperation)
        {
            // In a production environment we'd return zero in this case so that the
            // default cursor would be used. In a Unit test environment, where we are
            // classifying messages and where we have configured the test so that a
            // cursor associated with a specific classification should be used then
            // we'll alert the test to an error by throwing this exception.
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.exit(tc, "chooseGetCursorIndex", "SIErrorException");
            throw new SIErrorException();
        }
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "chooseGetCursorIndex", classPos);
    return classPos;
}
|
python
|
def filterAcceptsRow(self, source_row, source_parent):
    """The filter method

    .. note:: This filter hides top-level items of unsupported branches
        and also leaf items containing xml files.

        Enabled root items: QgsDirectoryItem, QgsFavouritesItem,
        QgsPGRootItem.

        Disabled root items: QgsMssqlRootItem, QgsSLRootItem,
        QgsOWSRootItem, QgsWCSRootItem, QgsWFSRootItem, QgsWMSRootItem.

        Disabled leaf items: QgsLayerItem and QgsOgrLayerItem with path
        ending with '.xml'

    :param source_row: Parent widget of the model
    :type source_row: int

    :param source_parent: Parent item index
    :type source_parent: QModelIndex

    :returns: Item validation result
    :rtype: bool
    """
    index = self.sourceModel().index(source_row, 0, source_parent)
    item = self.sourceModel().dataItem(index)
    # Whitelist of supported browser item classes.
    supported = (
        'QgsPGRootItem',
        'QgsPGConnectionItem',
        'QgsPGSchemaItem',
        'QgsPGLayerItem',
        'QgsFavoritesItem',
        'QgsDirectoryItem',
        'QgsLayerItem',
        'QgsGdalLayerItem',
        'QgsOgrLayerItem',
    )
    if item.metaObject().className() not in supported:
        return False
    # Hide leaf items that point at xml files.
    return not item.path().endswith('.xml')
|
python
|
def deprecated_conditional(predicate,
                           removal_version,
                           entity_description,
                           hint_message=None,
                           stacklevel=4):
    """Marks a certain configuration as deprecated.

    The predicate is used to determine if that configuration is deprecated. It is a function that
    will be called, if true, then the deprecation warning will issue.

    :param () -> bool predicate: A function that returns True if the deprecation warning should be on.
    :param string removal_version: The pants version which will remove the deprecated functionality.
    :param string entity_description: A description of the deprecated entity.
    :param string hint_message: An optional hint pointing to alternatives to the deprecation.
    :param int stacklevel: How far up in the stack do we go to find the calling fn to report
    :raises DeprecationApplicationError if the deprecation is applied improperly.
    """
    # Always validate the version string, even when the predicate is off.
    validate_deprecation_semver(removal_version, 'removal version')
    if not predicate():
        return
    warn_or_error(removal_version, entity_description, hint_message, stacklevel=stacklevel)
|
python
|
def _parse_heading(self):
    """Parse a section heading at the head of the wikicode string."""
    self._global |= contexts.GL_HEADING
    # Remember where the heading started so we can backtrack on failure.
    reset = self._head
    self._head += 1
    # Count consecutive '=' characters to get the attempted heading level.
    best = 1
    while self._read() == "=":
        best += 1
        self._head += 1
    # Heading contexts are a bit-shift of the level, capped at level 6.
    context = contexts.HEADING_LEVEL_1 << min(best - 1, 5)
    try:
        title, level = self._parse(context)
    except BadRoute:
        # Not a valid heading: rewind and emit the '=' run as plain text.
        self._head = reset + best - 1
        self._emit_text("=" * best)
    else:
        self._emit(tokens.HeadingStart(level=level))
        if level < best:
            # The closing run was shorter: extra '=' become literal text.
            self._emit_text("=" * (best - level))
        self._emit_all(title)
        self._emit(tokens.HeadingEnd())
    finally:
        self._global ^= contexts.GL_HEADING
|
python
|
def send_at_point(self, what, row, col):
    """Ask the server to perform an operation at a given point."""
    point = self.get_position(row, col)
    # Request typehint is the operation name with the "AtPointReq" suffix.
    request = {
        "typehint": what + "AtPointReq",
        "file": self._file_info(),
        "point": point,
    }
    self.send_request(request)
|
python
|
def empty_mets():
    """
    Create an empty METS file from bundled template.
    """
    # Fill the template placeholders with the library version and the
    # current timestamp, then wrap the result as an OcrdMets document.
    content = METS_XML_EMPTY.decode('utf-8') \
        .replace('{{ VERSION }}', VERSION) \
        .replace('{{ NOW }}', '%s' % datetime.now())
    return OcrdMets(content=content.encode('utf-8'))
|
python
|
def surface2image(surface):
    """
    Convert a cairo surface into a PIL image
    """
    # TODO(Jflesch): Python 3 problem
    # cairo.ImageSurface.get_data() raises NotImplementedYet under Python 3,
    # so instead of reading the raw pixel buffer directly we round-trip the
    # surface through an in-memory PNG.
    global g_lock
    with g_lock:
        png_buffer = io.BytesIO()
        surface.write_to_png(png_buffer)
        png_buffer.seek(0)
        img = PIL.Image.open(png_buffer)
        img.load()

        if "A" not in img.getbands():
            return img

        # Flatten the alpha channel onto a white background.
        flattened = PIL.Image.new("RGB", img.size, (255, 255, 255))
        flattened.paste(img, mask=img.split()[3])  # 3 is the alpha channel
        return flattened
|
java
|
/**
 * The timer thread's main loop: repeatedly waits on the task queue,
 * fires the earliest task when its time arrives, reschedules repeating
 * tasks, and exits when the queue is empty and no new tasks may be
 * scheduled. Tasks are run without holding any locks.
 */
private void mainLoop() {
    for (@AutoreleasePool int i = 0;;) {
        try {
            TimerTask task;
            boolean taskFired;
            synchronized(queue) {
                // Wait for queue to become non-empty
                while (queue.isEmpty() && newTasksMayBeScheduled)
                    queue.wait();
                if (queue.isEmpty())
                    break; // Queue is empty and will forever remain; die

                // Queue nonempty; look at first evt and do the right thing
                long currentTime, executionTime;
                task = queue.getMin();
                synchronized(task.lock) {
                    if (task.state == TimerTask.CANCELLED) {
                        queue.removeMin();
                        continue; // No action required, poll queue again
                    }
                    currentTime = System.currentTimeMillis();
                    executionTime = task.nextExecutionTime;
                    if (taskFired = (executionTime<=currentTime)) {
                        if (task.period == 0) { // Non-repeating, remove
                            queue.removeMin();
                            task.state = TimerTask.EXECUTED;
                        } else { // Repeating task, reschedule
                            // Negative period => fixed-delay; positive => fixed-rate.
                            queue.rescheduleMin(
                                task.period<0 ? currentTime - task.period
                                              : executionTime + task.period);
                        }
                    }
                }
                if (!taskFired) // Task hasn't yet fired; wait
                    queue.wait(executionTime - currentTime);
            }
            if (taskFired) // Task fired; run it, holding no locks
                task.run();
        } catch(InterruptedException e) {
            // Interrupts are ignored; loop exit is governed by queue state.
        }
    }
}
|
python
|
def faces_unique_edges(self):
    """
    For each face return which indexes in mesh.unique_edges constructs
    that face.

    Returns
    ---------
    faces_unique_edges : (len(self.faces), 3) int
        Indexes of self.edges_unique that construct self.faces

    Examples
    ---------
    In [0]: mesh.faces[0:2]
    Out[0]:
    TrackedArray([[    1,  6946, 24224],
                  [ 6946,  1727, 24225]])

    In [1]: mesh.edges_unique[mesh.faces_unique_edges[0:2]]
    Out[1]:
    array([[[    1,  6946],
            [ 6946, 24224],
            [    1, 24224]],
           [[ 1727,  6946],
            [ 1727, 24225],
            [ 6946, 24225]]])
    """
    # Touching edges_unique populates the inverse-index cache.
    _ = self.edges_unique
    # Edges are stacked in triplets, one triplet per face, so reshaping
    # the cached inverse mapping yields one row of edge indices per face.
    return self._cache['edges_unique_inverse'].reshape((-1, 3))
|
python
|
def update_generic_password(client, path):
    """Will update a single key in a generic secret backend as
    thought it were a password"""
    vault_path, key = path_pieces(path)
    mount = mount_for_path(vault_path, client)
    if not mount:
        client.revoke_self_token()
        raise aomi.exceptions.VaultConstraint('invalid path')

    if backend_type(mount, client) != 'generic':
        client.revoke_self_token()
        raise aomi.exceptions.AomiData("Unsupported backend type")

    LOG.debug("Updating generic password at %s", path)
    current = client.read(vault_path)
    if current and 'data' in current:
        LOG.debug("Updating %s at %s", key, vault_path)
        secrets = current['data']
    else:
        LOG.debug("Nothing exists yet at %s!", vault_path)
        secrets = {}

    new_password = get_password()
    # Refuse a no-op write: the new password must differ from the stored one.
    if key in secrets and secrets[key] == new_password:
        client.revoke_self_token()
        raise aomi.exceptions.AomiData("Password is same as existing")

    secrets[key] = new_password
    client.write(vault_path, **secrets)
|
python
|
def get_R_mod(options, rho0):
    """Compute synthetic measurements over a homogeneous half-space
    """
    manager = tdManager.tdMan(
        elem_file=options.elem_file,
        elec_file=options.elec_file,
        config_file=options.config_file,
    )
    # Homogeneous half-space with the requested resistivity.
    manager.add_homogeneous_model(magnitude=rho0)
    # Only the magnitudes (first column) are of interest.
    return manager.measurements()[:, 0]
|
java
|
/**
 * Registers a callback invoked only when this future completes
 * exceptionally; successful results are passed through untouched.
 */
public CompletableFuture<T> except(Consumer<Throwable> consumer) {
    return whenComplete((result, error) -> {
        if (error == null) {
            return;
        }
        consumer.accept(error);
    });
}
|
java
|
/**
 * Parses a single JSON column-mapping node into a {@link FieldMapping}.
 *
 * Supported mapping types: "occVersion", "key", "column", "keyAsColumn"
 * and "counter". For backward compatibility a combined "value" field of
 * the form "family:qualifier" (or "family:prefix" for keyAsColumn) is
 * honored, but explicit "family"/"qualifier" entries take precedence.
 *
 * @param source the source field name this mapping applies to
 * @param mappingNode the JSON object describing the mapping
 * @return the parsed field mapping
 * @throws ValidationException if the node is malformed or the type is unknown
 */
public static FieldMapping parseFieldMapping(String source, JsonNode mappingNode) {
    ValidationException.check(mappingNode.isObject(),
        "A column mapping must be a JSON record");
    ValidationException.check(mappingNode.has(TYPE),
        "Column mappings must have a %s.", TYPE);
    String type = mappingNode.get(TYPE).asText();

    // return easy cases
    if ("occVersion".equals(type)) {
        return FieldMapping.version(source);
    } else if ("key".equals(type)) {
        return FieldMapping.key(source);
    }

    String family = null;
    String qualifier = null;
    String prefix = null;

    // for backward-compatibility, check for "value": "fam:qual"
    if (mappingNode.has(VALUE)) {
        // avoids String#split because of odd cases, like ":".split(":")
        String value = mappingNode.get(VALUE).asText();
        Iterator<String> values = VALUE_SPLITTER.split(value).iterator();
        if (values.hasNext()) {
            family = values.next();
        }
        if (values.hasNext()) {
            if ("keyAsColumn".equals(type)) {
                // keyAsColumn uses the second component as a column prefix.
                prefix = values.next();
                if (prefix.isEmpty()) {
                    prefix = null;
                }
            } else {
                qualifier = values.next();
            }
        }
    }

    // replace any existing values with explicit family and qualifier
    if (mappingNode.has(FAMILY)) {
        family = mappingNode.get(FAMILY).textValue();
    }
    if (mappingNode.has(QUALIFIER)) {
        qualifier = mappingNode.get(QUALIFIER).textValue();
    }

    if ("column".equals(type)) {
        ValidationException.check(family != null && !family.isEmpty(),
            "Column mapping %s must have a %s", source, FAMILY);
        ValidationException.check(qualifier != null && !qualifier.isEmpty(),
            "Column mapping %s must have a %s", source, QUALIFIER);
        return FieldMapping.column(source, family, qualifier);

    } else if ("keyAsColumn".equals(type)) {
        ValidationException.check(family != null && !family.isEmpty(),
            "Column mapping %s must have a %s", source, FAMILY);
        // A qualifier makes no sense here: keys become the qualifiers.
        ValidationException.check(qualifier == null,
            "Key-as-column mapping %s cannot have a %s", source, QUALIFIER);
        if (mappingNode.has(PREFIX)) {
            prefix = mappingNode.get(PREFIX).asText();
            if (prefix.isEmpty()) {
                prefix = null;
            }
        }
        return FieldMapping.keyAsColumn(source, family, prefix);

    } else if ("counter".equals(type)) {
        ValidationException.check(family != null && !family.isEmpty(),
            "Counter mapping %s must have a %s", source, FAMILY);
        ValidationException.check(qualifier != null && !qualifier.isEmpty(),
            "Counter mapping %s must have a %s", source, QUALIFIER);
        return FieldMapping.counter(source, family, qualifier);

    } else {
        throw new ValidationException("Invalid mapping type: " + type);
    }
}
|
java
|
/**
 * Converts a duration to nanoseconds, saturating at {@code Long.MIN_VALUE}
 * or {@code Long.MAX_VALUE} instead of overflowing.
 */
@FFDCIgnore(ArithmeticException.class)
public static long asClampedNanos(Duration duration) {
    try {
        return duration.toNanos();
    } catch (ArithmeticException overflow) {
        // toNanos overflowed a long: clamp toward the matching extreme.
        return duration.isNegative() ? Long.MIN_VALUE : Long.MAX_VALUE;
    }
}
|
java
|
/**
 * Returns the configured UFS block open options, or the proto default
 * instance when none has been set (protobuf getters never return null).
 */
public alluxio.proto.dataserver.Protocol.OpenUfsBlockOptions getOpenUfsBlockOptions() {
    return openUfsBlockOptions_ == null ? alluxio.proto.dataserver.Protocol.OpenUfsBlockOptions.getDefaultInstance() : openUfsBlockOptions_;
}
|
python
|
def GetMostRecentClient(client_list, token=None):
    """Return most recent client from list of clients."""
    # Track the latest LAST timestamp seen and the matching client URN.
    best_time = rdfvalue.RDFDatetime(0)
    best_urn = None
    for client in aff4.FACTORY.MultiOpen(client_list, token=token):
        last_seen = client.Get(client.Schema.LAST)
        if last_seen > best_time:
            best_time = last_seen
            best_urn = client.urn
    return best_urn
|
java
|
/**
 * Parses tile set descriptions from the supplied stream and stores every
 * named result into the given map; unnamed tile sets are logged and
 * skipped. Parse failures are logged rather than propagated.
 */
public void loadTileSets (InputStream source, Map<String, TileSet> tilesets)
    throws IOException
{
    // stick an array list on the top of the stack for collecting
    // parsed tilesets
    List<TileSet> parsed = Lists.newArrayList();
    _digester.push(parsed);

    // now fire up the digester to parse the stream
    try {
        _digester.parse(source);
    } catch (SAXException saxe) {
        log.warning("Exception parsing tile set descriptions.", saxe);
    }

    // stick the tilesets from the list into the hashtable
    for (TileSet set : parsed) {
        String name = set.getName();
        if (name == null) {
            log.warning("Tileset did not receive name during " +
                        "parsing process [set=" + set + "].");
        } else {
            tilesets.put(name, set);
        }
    }

    // and clear out the list for next time
    parsed.clear();
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.