Dataset schema: two columns — `language` (categorical string, 2 classes) and
`func_code_string` (string, 63 to 466k characters).

|---|---|
python
|
def _mkstemp_copy(path, preserve_inode=True):
    '''
    Create a temp file and move/copy the contents of ``path`` to the temp file.
    Return the path to the temp file.

    path
        The full path to the file whose contents will be moved/copied to a temp file.
        Whether it's moved or copied depends on the value of ``preserve_inode``.
    preserve_inode
        Preserve the inode of the file, so that any hard links continue to share the
        inode with the original filename. This works by *copying* the file, reading
        from the copy, and writing to the file at the original inode. If ``False``, the
        file will be *moved* rather than copied, and a new file will be written to a
        new inode, but using the original filename. Hard links will then share an inode
        with the backup, instead (if using ``backup`` to create a backup copy).
        Default is ``True``.
    '''
    # The temp file must exist before anything can be transferred into it.
    try:
        temp_file = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to create temp file. "
            "Exception: {0}".format(exc)
        )
    # Copying keeps the original inode (and thus its hard links) intact,
    # while moving means the replacement is later written to a fresh inode.
    transfer, verb = (shutil.copy2, "copy") if preserve_inode else (shutil.move, "move")
    try:
        transfer(path, temp_file)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to {0} file '{1}' to the "
            "temp file '{2}'. "
            "Exception: {3}".format(verb, path, temp_file, exc)
        )
    return temp_file
|
java
|
/** Return the keys of all cached image variations, sorted ascending. */
public List getAllCachedImages() {
    final List sorted = new ArrayList(m_variations.keySet());
    Collections.sort(sorted);
    return sorted;
}
|
java
|
/**
 * Rotate the given matrix by 90 degrees by transposing it and then
 * reversing each row of the transpose.
 *
 * NOTE(review): the rotated data is only visible to the caller if
 * {@code transpose()} returns a view backed by {@code toRotate}'s data, so
 * that the {@code putRow} calls mutate the original — confirm; if
 * {@code transpose()} copies, the result is silently discarded.
 *
 * @param toRotate matrix to rotate; must satisfy {@code isMatrix()}
 * @throws IllegalArgumentException if the argument is not a matrix
 */
@Override
public void rot90(INDArray toRotate) {
    if (!toRotate.isMatrix())
        throw new IllegalArgumentException("Only rotating matrices");
    INDArray start = toRotate.transpose();
    for (int i = 0; i < start.rows(); i++)
        start.putRow(i, reverse(start.getRow(i)));
}
|
python
|
def format_decimal(self, altitude=None):
    """
    Render the point as decimal degrees, optionally followed by altitude.

    altitude=None means "include altitude only if the point has one";
    a string value selects the altitude unit, any other truthy value
    falls back to kilometres.
    """
    parts = [str(self.latitude), str(self.longitude)]
    include_altitude = bool(self.altitude) if altitude is None else altitude
    if include_altitude:
        unit = include_altitude if isinstance(include_altitude, string_compare) else 'km'
        parts.append(self.format_altitude(unit))
    return ", ".join(parts)
|
java
|
/**
 * Extension hook for subclasses that want to generate custom to-JSON
 * conversion code for a column. The default implementation handles nothing
 * and returns {@code false} (presumably signalling the caller to apply its
 * default conversion — confirm against the calling generator).
 */
protected boolean handleCustomTypeToJson(TypedElementDefinition<?> column, String getter, String columnType, String javaMemberName, JavaWriter out) {
    return false;
}
|
python
|
def setProduct(self, cache=False, *args, **kwargs):
    """Adds the product for this loan to a 'product' field.

    Product is a MambuProduct object.

    cache argument allows to use AllMambuProducts singleton to
    retrieve the products. See mambuproduct.AllMambuProducts code
    and pydoc for further information.

    Returns the number of requests done to Mambu.
    """
    if cache:
        # Lazily bind the AllMambuProducts class on first use; the import
        # lives here (not at module top) — presumably to avoid a circular
        # import with mambuproduct — TODO confirm.
        try:
            prods = self.allmambuproductsclass(*args, **kwargs)
        except AttributeError as ae:
            from .mambuproduct import AllMambuProducts
            self.allmambuproductsclass = AllMambuProducts
            prods = self.allmambuproductsclass(*args, **kwargs)
        # Last matching product wins (keys are expected to be unique anyway).
        for prod in prods:
            if prod['encodedKey'] == self['productTypeKey']:
                self['product'] = prod
        try:
            # asked for cache, but cache was originally empty
            prods.noinit
        except AttributeError:
            # No 'noinit' attribute: the singleton had to hit Mambu once.
            return 1
        return 0
    # Non-cached path: fetch the single product directly from Mambu,
    # binding the MambuProduct class lazily just like above.
    try:
        product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs)
    except AttributeError as ae:
        from .mambuproduct import MambuProduct
        self.mambuproductclass = MambuProduct
        product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs)
    self['product'] = product
    return 1
|
java
|
/**
 * Build the CAS validation success view: populate it with the assertion and
 * service, the proxy ticket IOU and PGT id when present, the authentication
 * context attribute when a provider was involved, and any extra model
 * objects supplied by subclasses via {@code augmentSuccessViewModelObjects}.
 *
 * @param assertion           the validated assertion to expose to the view
 * @param proxyIou            proxy-granting ticket IOU, may be blank
 * @param service             the service being validated
 * @param request             current HTTP request
 * @param contextProvider     optional multifactor provider that satisfied the request
 * @param proxyGrantingTicket PGT whose id is exposed, may be null
 * @return the populated success model-and-view
 */
private ModelAndView generateSuccessView(final Assertion assertion, final String proxyIou,
                                         final WebApplicationService service, final HttpServletRequest request,
                                         final Optional<MultifactorAuthenticationProvider> contextProvider,
                                         final TicketGrantingTicket proxyGrantingTicket) {
    val modelAndView = serviceValidateConfigurationContext.getValidationViewFactory().getModelAndView(request, true, service, getClass());
    modelAndView.addObject(CasViewConstants.MODEL_ATTRIBUTE_NAME_ASSERTION, assertion);
    modelAndView.addObject(CasViewConstants.MODEL_ATTRIBUTE_NAME_SERVICE, service);
    if (StringUtils.isNotBlank(proxyIou)) {
        modelAndView.addObject(CasViewConstants.MODEL_ATTRIBUTE_NAME_PROXY_GRANTING_TICKET_IOU, proxyIou);
    }
    if (proxyGrantingTicket != null) {
        modelAndView.addObject(CasViewConstants.MODEL_ATTRIBUTE_NAME_PROXY_GRANTING_TICKET, proxyGrantingTicket.getId());
    }
    contextProvider.ifPresent(provider -> modelAndView.addObject(serviceValidateConfigurationContext.getAuthnContextAttribute(), provider.getId()));
    val augmentedModelObjects = augmentSuccessViewModelObjects(assertion);
    modelAndView.addAllObjects(augmentedModelObjects);
    return modelAndView;
}
|
java
|
/**
 * Return a new matrix equal to this one scaled by {@code c};
 * this matrix is left unmodified.
 */
public Matrix multiply(double c)
{
    final Matrix result = getThisSideMatrix(null);
    result.mutableMultiply(c);
    return result;
}
|
python
|
def from_msgpack(b, *, max_bin_len=MAX_BIN_LEN, max_str_len=MAX_STR_LEN):
    """
    Convert a msgpack byte array into Python objects (including rpcq objects)
    """
    # raw=False decodes msgpack raw data to str via UTF-8 instead of bytes.
    # msgpack >= 0.6 shrank the default max_*_len limits from 2 GB to 1 MB,
    # so the relevant ones are passed explicitly to accommodate large rpcq
    # payloads.
    return msgpack.loads(
        b,
        object_hook=_object_hook,
        raw=False,
        max_bin_len=max_bin_len,
        max_str_len=max_str_len,
    )
|
python
|
def _op(self, operation, other, *allowed):
    """A basic operation operating on a single value.

    Builds a Filter mapping this field's name to ``{operation: other}``,
    after casting ``other`` to its foreign (database) representation.
    """
    f = self._field
    if self._combining:  # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).
        # Distribute the operation over each combined field, then fold the
        # per-field fragments together with the combining operator.
        return reduce(self._combining,
                (q._op(operation, other, *allowed) for q in f))  # pylint:disable=protected-access
    # Optimize this away in production; diagnostic aide.
    if __debug__ and _complex_safety_check(f, {operation} | set(allowed)):  # pragma: no cover
        raise NotImplementedError("{self!r} does not allow {op} comparison.".format(self=self, op=operation))
    if other is not None:
        # Cast the operand to its stored representation before embedding it.
        other = f.transformer.foreign(other, (f, self._document))
    return Filter({self._name: {operation: other}})
|
python
|
def _parse(self, init_info):
"""Initialize a FCP device object from several lines of string
describing properties of the FCP device.
Here is a sample:
opnstk1: FCP device number: B83D
opnstk1: Status: Free
opnstk1: NPIV world wide port number: NONE
opnstk1: Channel path ID: 59
opnstk1: Physical world wide port number: 20076D8500005181
The format comes from the response of xCAT, do not support
arbitrary format.
"""
if isinstance(init_info, list) and (len(init_info) == 5):
self._dev_no = self._get_dev_number_from_line(init_info[0])
self._npiv_port = self._get_wwpn_from_line(init_info[2])
self._chpid = self._get_chpid_from_line(init_info[3])
self._physical_port = self._get_wwpn_from_line(init_info[4])
|
java
|
/**
 * Convert the given buffer to its ASCII form (presumably IDNA ToASCII —
 * confirm against the class this belongs to) by delegating to the
 * {@code UCharacterIterator} overload.
 *
 * @param src     text to convert
 * @param options option flags, passed through unchanged
 * @return the converted buffer
 * @throws StringPrepParseException if StringPrep processing of the input fails
 * @deprecated retained for backward compatibility; use the iterator overload
 */
@Deprecated
public static StringBuffer convertToASCII(StringBuffer src, int options)
    throws StringPrepParseException{
    UCharacterIterator iter = UCharacterIterator.getInstance(src);
    return convertToASCII(iter,options);
}
|
python
|
def _fetch_url_data(self, url, username, password, verify, custom_headers):
    ''' Hit a given http url and return the stats lines

    url: base URL of the haproxy instance; STATS_URL is appended to it.
    username/password: HTTP basic-auth credentials.
    verify: TLS certificate verification flag passed to requests.
    custom_headers: mutated in place with the agent's standard headers.

    Raises requests.HTTPError on non-2xx responses.
    '''
    auth = (username, password)
    url = "%s%s" % (url, STATS_URL)
    custom_headers.update(headers(self.agentConfig))
    # Lazy %-args: the message is only formatted when debug logging is on
    # (the old code formatted eagerly with the % operator).
    self.log.debug("Fetching haproxy stats from url: %s", url)

    response = requests.get(
        url, auth=auth, headers=custom_headers, verify=verify, timeout=self.default_integration_http_timeout
    )
    response.raise_for_status()

    # Content only needs decoding on py3, so skip it on py2.
    if PY2:
        return response.content.splitlines()

    content = response.content
    # If the content is a string, it can't be decoded again,
    # but if it's bytes it can — so check for a decode method.
    decode_fn = getattr(content, "decode", None)
    if callable(decode_fn):
        content = content.decode('utf-8')
    return content.splitlines()
|
python
|
def resetTimeout(self):
    """Reset the timeout count down"""
    # __timeoutCall is presumably a scheduled delayed call (e.g. a twisted
    # IDelayedCall) created elsewhere — confirm. Only reset when both the
    # pending call and a timeout period actually exist.
    if self.__timeoutCall is not None and self.timeOut is not None:
        self.__timeoutCall.reset(self.timeOut)
|
java
|
/**
 * Normalize each column of this matrix's upper-left 3x3 block to unit
 * length and store the result in {@code dest}; this matrix is unchanged.
 */
public Matrix4x3d normalize3x3(Matrix4x3d dest) {
    // Reciprocal lengths of the three basis vectors.
    double sx = 1.0 / Math.sqrt(m00 * m00 + m01 * m01 + m02 * m02);
    double sy = 1.0 / Math.sqrt(m10 * m10 + m11 * m11 + m12 * m12);
    double sz = 1.0 / Math.sqrt(m20 * m20 + m21 * m21 + m22 * m22);
    dest.m00 = m00 * sx;
    dest.m01 = m01 * sx;
    dest.m02 = m02 * sx;
    dest.m10 = m10 * sy;
    dest.m11 = m11 * sy;
    dest.m12 = m12 * sy;
    dest.m20 = m20 * sz;
    dest.m21 = m21 * sz;
    dest.m22 = m22 * sz;
    return dest;
}
|
java
|
/**
 * Parse raw XML bytes into an OpenSAML {@link XMLObject} of the requested type.
 *
 * @param configBean provides the parser pool and unmarshaller factory
 * @param data       raw XML document bytes
 * @param clazz      expected SAML object type
 * @param <T>        the expected type
 * @return the unmarshalled object, or {@code null} when no unmarshaller is
 *         registered for the document's root element
 * @throws SamlException wrapping any parse or unmarshal failure
 */
public static <T extends XMLObject> T transformSamlObject(final OpenSamlConfigBean configBean, final byte[] data,
                                                          final Class<T> clazz) {
    try (InputStream in = new ByteArrayInputStream(data)) {
        val document = configBean.getParserPool().parse(in);
        val root = document.getDocumentElement();
        // Renamed from "marshaller": this object unmarshalls XML into objects.
        val unmarshaller = configBean.getUnmarshallerFactory().getUnmarshaller(root);
        if (unmarshaller != null) {
            val result = unmarshaller.unmarshall(root);
            if (!clazz.isAssignableFrom(result.getClass())) {
                // Fixed message: the bracket around the result was never closed.
                throw new ClassCastException("Result [" + result + "] is of type " + result.getClass() + " when we were expecting " + clazz);
            }
            return (T) result;
        }
    } catch (final Exception e) {
        throw new SamlException(e.getMessage(), e);
    }
    return null;
}
|
java
|
/**
 * Synchronous wrapper around the async begin-update call for a route filter
 * rule: blocks until the first service response arrives and returns its body.
 *
 * @param resourceGroupName         resource group containing the route filter
 * @param routeFilterName           name of the route filter
 * @param ruleName                  name of the rule to update
 * @param routeFilterRuleParameters patch parameters to apply
 * @return the updated rule as returned by the service
 */
public RouteFilterRuleInner beginUpdate(String resourceGroupName, String routeFilterName, String ruleName, PatchRouteFilterRule routeFilterRuleParameters) {
    return beginUpdateWithServiceResponseAsync(resourceGroupName, routeFilterName, ruleName, routeFilterRuleParameters).toBlocking().single().body();
}
|
java
|
/**
 * Copy a zip archive to another file, dropping every empty directory entry.
 * Entries are sorted by name first (with META-INF/ and then
 * META-INF/MANIFEST.MF forced to the front), empty directories are pruned by
 * scanning the sorted list backwards, and the surviving entries are written
 * in sorted order.
 *
 * @param inputFile  source zip archive
 * @param outputFile destination zip archive (created/overwritten)
 * @throws IOException if reading the input or writing the output fails
 */
public static void copyZipWithoutEmptyDirectories(final File inputFile, final File outputFile) throws IOException
{
    final byte[] buf = new byte[0x2000];
    final ZipFile inputZip = new ZipFile(inputFile);
    final ZipOutputStream outputStream = new ZipOutputStream(new FileOutputStream(outputFile));
    try
    {
        // read the entries of the input zip file and sort them
        final Enumeration<? extends ZipEntry> e = inputZip.entries();
        final ArrayList<ZipEntry> sortedList = new ArrayList<ZipEntry>();
        while (e.hasMoreElements()) {
            final ZipEntry entry = e.nextElement();
            sortedList.add(entry);
        }
        Collections.sort(sortedList, new Comparator<ZipEntry>()
        {
            public int compare(ZipEntry o1, ZipEntry o2)
            {
                String n1 = o1.getName(), n2 = o2.getName();
                if (metaOverride(n1, n2)) {
                    return -1;
                }
                if (metaOverride(n2, n1)) {
                    return 1;
                }
                return n1.compareTo(n2);
            }
            // make sure that META-INF/MANIFEST.MF is always the first entry after META-INF/
            private boolean metaOverride(String n1, String n2) {
                return (n1.startsWith("META-INF/") && !n2.startsWith("META-INF/"))
                    || (n1.equals("META-INF/MANIFEST.MF") && !n2.equals(n1) && !n2.equals("META-INF/"))
                    || (n1.equals("META-INF/") && !n2.equals(n1));
            }
        });
        // walk the sorted list backwards and remove empty directory entries
        for (int i = sortedList.size()-1; i>=0; i--)
        {
            final ZipEntry inputEntry = sortedList.get(i);
            final String name = inputEntry.getName();
            final boolean isEmptyDirectory;
            if (inputEntry.isDirectory())
            {
                if (i == sortedList.size()-1)
                {
                    // no item afterwards; it was an empty directory
                    isEmptyDirectory = true;
                }
                else
                {
                    // a directory is empty iff the next sorted entry is not beneath it
                    final String nextName = sortedList.get(i+1).getName();
                    isEmptyDirectory = !nextName.startsWith(name);
                }
            }
            else
            {
                isEmptyDirectory = false;
            }
            if (isEmptyDirectory)
            {
                sortedList.remove(i);
            }
        }
        // finally write entries in normal order
        for (int i = 0; i < sortedList.size(); i++)
        {
            final ZipEntry inputEntry = sortedList.get(i);
            final ZipEntry outputEntry = new ZipEntry(inputEntry);
            outputStream.putNextEntry(outputEntry);
            // buffer the whole entry body, then copy it into the output stream
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            final InputStream is = inputZip.getInputStream(inputEntry);
            IoUtil.pipe(is, baos, buf);
            is.close();
            outputStream.write(baos.toByteArray());
        }
    } finally {
        outputStream.close();
        inputZip.close();
    }
}
|
java
|
/**
 * Marshall the given {@code RelationalDatabaseParameter} into the protocol
 * representation, one field binding at a time.
 *
 * @param relationalDatabaseParameter value to marshall; must not be null
 * @param protocolMarshaller          sink receiving the marshalled fields
 * @throws SdkClientException if the argument is null or marshalling fails
 */
public void marshall(RelationalDatabaseParameter relationalDatabaseParameter, ProtocolMarshaller protocolMarshaller) {
    if (relationalDatabaseParameter == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(relationalDatabaseParameter.getAllowedValues(), ALLOWEDVALUES_BINDING);
        protocolMarshaller.marshall(relationalDatabaseParameter.getApplyMethod(), APPLYMETHOD_BINDING);
        protocolMarshaller.marshall(relationalDatabaseParameter.getApplyType(), APPLYTYPE_BINDING);
        protocolMarshaller.marshall(relationalDatabaseParameter.getDataType(), DATATYPE_BINDING);
        protocolMarshaller.marshall(relationalDatabaseParameter.getDescription(), DESCRIPTION_BINDING);
        protocolMarshaller.marshall(relationalDatabaseParameter.getIsModifiable(), ISMODIFIABLE_BINDING);
        protocolMarshaller.marshall(relationalDatabaseParameter.getParameterName(), PARAMETERNAME_BINDING);
        protocolMarshaller.marshall(relationalDatabaseParameter.getParameterValue(), PARAMETERVALUE_BINDING);
    } catch (Exception e) {
        // Any marshalling failure is normalized to an SdkClientException.
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
|
java
|
/**
 * Resolve this element's name by dispatching a {@code NameableVisitor}
 * through the visitor protocol and reading the attribute it collected.
 */
@Override
public Name getName() {
    final NameableVisitor nameVisitor = new NameableVisitor();
    accept(nameVisitor);
    return nameVisitor.getNameAttribute();
}
|
java
|
/**
 * Deserialize JSON bytes into the given type; {@code null} input yields
 * {@code null} instead of a parse error.
 */
private static <T> T fromJSON(byte[] src, Class<T> valueType) throws IOException {
    return (src == null) ? null : Manager.getObjectMapper().readValue(src, valueType);
}
|
python
|
def _chglog(amend: bool = False, stage: bool = False, next_version: str = None, auto_next_version: bool = False):
    """
    Writes the changelog

    Args:
        amend: amend last commit with changes
        stage: stage changes
        next_version: version string to tag temporarily while the changelog is generated
        auto_next_version: infer the next version automatically (overrides ``next_version``)
    """
    if config.CHANGELOG_DISABLE():
        LOGGER.info('skipping changelog update as per config')
        return
    epab.utils.ensure_exe('git')
    epab.utils.ensure_exe('gitchangelog')
    LOGGER.info('writing changelog')
    if auto_next_version:
        next_version = epab.utils.get_next_version()
    with gitchangelog_config():
        with temporary_tag(next_version):
            changelog, _ = elib_run.run('gitchangelog', mute=True)
            # Strip the bogus lines gitchangelog emits before persisting.
            changelog = re.sub(BOGUS_LINE_PATTERN, '\\1\n', changelog)
            Path(config.CHANGELOG_FILE_PATH()).write_text(changelog, encoding='utf8')
    if amend:
        CTX.repo.amend_commit(
            append_to_msg='update changelog [auto]', files_to_add=str(config.CHANGELOG_FILE_PATH())
        )
    elif stage:
        CTX.repo.stage_subset(str(config.CHANGELOG_FILE_PATH()))
|
java
|
/**
 * Recursively create every missing ancestor value of the given xpath:
 * walk up one level; if that parent is absent, create its own ancestors
 * first, then add the parent itself.
 */
private void ensureParentValues(CmsObject cms, String valuePath, Locale locale) {
    final int cut = valuePath.lastIndexOf('/');
    if (cut < 0) {
        return; // top-level value: nothing above it
    }
    final String parentPath = valuePath.substring(0, cut);
    if (hasValue(parentPath, locale)) {
        return;
    }
    ensureParentValues(cms, parentPath, locale);
    final int index = CmsXmlUtils.getXpathIndexInt(parentPath) - 1;
    addValue(cms, parentPath, locale, index);
}
|
python
|
def indices_within_times(times, start, end):
    """
    Return an index array into times that lie within the durations defined by start end arrays

    Parameters
    ----------
    times: numpy.ndarray
        Array of times
    start: numpy.ndarray
        Array of duration start times
    end: numpy.ndarray
        Array of duration end times

    Returns
    -------
    indices: numpy.ndarray
        Array of indices into times
    """
    # coalesce the start/end segments
    start, end = segments_to_start_end(start_end_to_segments(start, end).coalesce())

    tsort = times.argsort()
    times_sorted = times[tsort]
    left = numpy.searchsorted(times_sorted, start)
    right = numpy.searchsorted(times_sorted, end)

    if len(left) == 0:
        return numpy.array([], dtype=numpy.uint32)

    # Build an explicit list of index ranges: passing a bare generator to
    # numpy.hstack is deprecated (NumPy >= 1.16) and rejected by newer
    # releases, so materialize the ranges and concatenate them.
    return tsort[numpy.concatenate([numpy.arange(s, e) for s, e in zip(left, right)])]
|
java
|
/**
 * Resolve the background painter for the given context: first consult the
 * per-values cache keyed by the extended state, then the runtime states in
 * order, finally the UI defaults; a successful lookup is cached.
 *
 * @param ctx the synth context being painted
 * @return the painter to use, or {@code null} if none is defined anywhere
 */
public SeaGlassPainter getBackgroundPainter(SynthContext ctx) {
    Values v = getValues(ctx);
    int xstate = getExtendedState(ctx, v);
    SeaGlassPainter p = null;
    // check the cache (tmpKey is a reusable key to avoid allocating on hits)
    tmpKey.init("backgroundPainter$$instance", xstate);
    p = (SeaGlassPainter) v.cache.get(tmpKey);
    if (p != null)
        return p;
    // not in cache, so lookup and store in cache
    RuntimeState s = null;
    int[] lastIndex = new int[] { -1 };
    while ((s = getNextState(v.states, lastIndex, xstate)) != null) {
        if (s.backgroundPainter != null) {
            p = s.backgroundPainter;
            break;
        }
    }
    if (p == null)
        p = (SeaGlassPainter) get(ctx, "backgroundPainter");
    if (p != null) {
        // store under a fresh key so tmpKey stays reusable
        v.cache.put(new CacheKey("backgroundPainter$$instance", xstate), p);
    }
    return p;
}
|
java
|
/**
 * Validate a BIC string: emptiness, length, letter case, bank code,
 * country code, location code and, when present, the branch code.
 *
 * @param bic the BIC to validate
 * @throws BicFormatException          if any structural rule is violated
 * @throws UnsupportedCountryException if the country code is not supported
 */
public static void validate(final String bic) throws BicFormatException,
        UnsupportedCountryException {
    try {
        validateEmpty(bic);
        validateLength(bic);
        validateCase(bic);
        validateBankCode(bic);
        validateCountryCode(bic);
        validateLocationCode(bic);
        if(hasBranchCode(bic)) {
            validateBranchCode(bic);
        }
    } catch (UnsupportedCountryException e) {
        // Country problems keep their specific exception type...
        throw e;
    } catch (RuntimeException e) {
        // ...every other validation failure is normalized to a format error.
        throw new BicFormatException(UNKNOWN, e.getMessage());
    }
}
|
java
|
/**
 * Run the SLINK-style single-pass agglomerative clustering over the
 * relation and return the dendrogram in pointer representation
 * (parent pointer {@code pi} and merge height {@code lambda} per object).
 *
 * @param database database to query distances from (fallback branch only)
 * @param relation relation containing the objects to cluster
 * @return the pointer hierarchy result
 */
public PointerHierarchyRepresentationResult run(Database database, Relation<O> relation) {
    DBIDs ids = relation.getDBIDs();
    WritableDBIDDataStore pi = DataStoreUtil.makeDBIDStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_STATIC);
    WritableDoubleDataStore lambda = DataStoreUtil.makeDoubleStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_STATIC, Double.POSITIVE_INFINITY);
    // Temporary storage for m.
    WritableDoubleDataStore m = DataStoreUtil.makeDoubleStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP);
    final Logging log = getLogger(); // To allow CLINK logger override
    FiniteProgress progress = log.isVerbose() ? new FiniteProgress("Running SLINK", ids.size(), log) : null;
    ArrayDBIDs aids = DBIDUtil.ensureArray(ids);
    // First element is trivial/special:
    DBIDArrayIter id = aids.iter(), it = aids.iter();
    // Step 1: initialize
    for(; id.valid(); id.advance()) {
        // P(n+1) = n+1:
        pi.put(id, id);
        // L(n+1) = infinity already.
    }
    // First element is finished already (start at seek(1) below!)
    log.incrementProcessed(progress);
    // Optimized branch: primitive distance functions skip the query abstraction.
    if(getDistanceFunction() instanceof PrimitiveDistanceFunction) {
        PrimitiveDistanceFunction<? super O> distf = (PrimitiveDistanceFunction<? super O>) getDistanceFunction();
        for(id.seek(1); id.valid(); id.advance()) {
            step2primitive(id, it, id.getOffset(), relation, distf, m);
            process(id, aids, it, id.getOffset(), pi, lambda, m); // SLINK or CLINK
            log.incrementProcessed(progress);
        }
    }
    else {
        // Fallback branch
        DistanceQuery<O> distQ = database.getDistanceQuery(relation, getDistanceFunction());
        for(id.seek(1); id.valid(); id.advance()) {
            step2(id, it, id.getOffset(), distQ, m);
            process(id, aids, it, id.getOffset(), pi, lambda, m); // SLINK or CLINK
            log.incrementProcessed(progress);
        }
    }
    log.ensureCompleted(progress);
    // We don't need m anymore.
    m.destroy();
    m = null;
    return new PointerHierarchyRepresentationResult(ids, pi, lambda, getDistanceFunction().isSquared());
}
|
java
|
/**
 * Submit a task for parallel execution while preserving FIFO result order:
 * a sequence id is assigned under the executor lock, submission blocks while
 * {@code maxThreads} tasks are active, and each worker records its outcome
 * (or throwable) into {@code resultMap} keyed by id before triggering
 * {@code processResults()}.
 *
 * @param task the task to run
 * @throws InterruptedException if interrupted while waiting for a free slot
 */
public void execute(final FifoTask<E> task) throws InterruptedException {
    final int id;
    synchronized (this) {
        // Assign the FIFO sequence number and register the task.
        id = idCounter++;
        taskMap.put(id, task);
        // Throttle: block the submitter until a worker slot frees up.
        while (activeCounter >= maxThreads) {
            wait();
        }
        activeCounter++;
    }
    this.threadPoolExecutor.execute(new Runnable() {
        public void run() {
            try {
                try {
                    final E outcome = task.runParallel();
                    synchronized (resultMap) {
                        resultMap.put(id, new Result(outcome));
                    }
                } catch (Throwable th) {
                    // Record the failure too, so FIFO ordering survives errors.
                    synchronized (resultMap) {
                        resultMap.put(id, new Result(null, th));
                    }
                } finally {
                    processResults();
                    synchronized (FifoTaskExecutor.this) {
                        // Release our slot and wake any blocked submitters.
                        activeCounter--;
                        FifoTaskExecutor.this.notifyAll();
                    }
                }
            } catch (Exception ex) {
                Logger.getLogger(FifoTaskExecutor.class.getName()).log(Level.SEVERE, ex.getMessage(), ex);
            }
        }
    });
}
|
python
|
def verify(
        self, headers, serialized_request_env, deserialized_request_env):
    # type: (Dict[str, Any], str, RequestEnvelope) -> None
    """Verify if the input request timestamp is in tolerated limits.

    The verify method retrieves the request timestamp and check if
    it falls in the limit set by the tolerance, by checking with
    the current timestamp in UTC.

    :param headers: headers of the input POST request
    :type headers: Dict[str, Any]
    :param serialized_request_env: raw request envelope in the
        input POST request
    :type serialized_request_env: str
    :param deserialized_request_env: deserialized request envelope
        instance of the input POST request
    :type deserialized_request_env:
        :py:class:`ask_sdk_model.request_envelope.RequestEnvelope`
    :raises: :py:class:`VerificationException` if difference between
        local timestamp and input request timestamp is more than
        specific tolerance limit
    """
    from datetime import timezone  # stdlib UTC tzinfo (equivalent to tz.tzutc())
    local_now = datetime.now(timezone.utc)
    request_timestamp = deserialized_request_env.request.timestamp
    # BUG FIX: ``timedelta.seconds`` is only the day-remainder component
    # (0..86399) — it drops whole days and mis-handles negative deltas
    # (future timestamps). ``total_seconds()`` is the full signed difference.
    if (abs((local_now - request_timestamp).total_seconds()) >
            (self._tolerance_in_millis / 1000)):
        raise VerificationException("Timestamp verification failed")
|
java
|
/**
 * Generate documentation files for all classes of every package in the
 * type-element catalog, one package at a time.
 *
 * @param classtree the class hierarchy passed through to per-class generation
 * @throws DocletException if generating any class file fails
 */
private void generateClassFiles(ClassTree classtree) throws DocletException {
    SortedSet<PackageElement> packages = configuration.typeElementCatalog.packages();
    for (PackageElement pkg : packages) {
        generateClassFiles(configuration.typeElementCatalog.allClasses(pkg), classtree);
    }
}
|
java
|
/**
 * Type-check each non-empty target of a destructuring pattern against the
 * type inferred for its slice of the assigned value.
 *
 * @param t          current traversal
 * @param nodeToWarn node to attach any warning to
 * @param pattern    the destructuring pattern being assigned
 * @param rightType  type of the right-hand side being destructured
 * @param msg        message used when reporting an assignment mismatch
 */
private void checkDestructuringAssignment(
    NodeTraversal t, Node nodeToWarn, Node pattern, JSType rightType, String msg) {
    for (DestructuredTarget target :
        DestructuredTarget.createAllNonEmptyTargetsInPattern(typeRegistry, rightType, pattern)) {
        // TODO(b/77597706): this is not very efficient because it re-infers the types below,
        // which we already did once in TypeInference. don't repeat the work.
        checkCanAssignToWithScope(
            t, nodeToWarn, target.getNode(), target.inferType(), /* info= */ null, msg);
    }
}
|
java
|
/**
 * Generated reflective accessor: returns this project's model checker
 * instances via {@code eGet}. The {@code true} flag presumably requests
 * proxy resolution (EMF convention) — confirm against the framework docs.
 */
@SuppressWarnings("unchecked")
@Override
public EList<ModelCheckerInstance> getModelCheckers() {
    return (EList<ModelCheckerInstance>) eGet(StorePackage.Literals.PROJECT__MODEL_CHECKERS, true);
}
|
python
|
def fetch(args):
    """fetch a feed

    Downloads each feed URL in ``args['feeds']`` and writes the body to the
    matching path in ``args['filenames']``; failed feeds are skipped.
    Returns ``args['name']``.
    """
    session = args['session']
    for feed_url, target_path in zip(args['feeds'], args['filenames']):
        try:
            body = session.get(feed_url, timeout=5).content
        except Exception:  # pragma: no cover
            # Best effort: a failing feed must not stop the remaining ones.
            continue
        with open(target_path, 'wb') as handle:
            handle.write(body)
    return args['name']
|
java
|
/**
 * Parse ffmpeg's textual output into a {@link MultimediaInfo}.
 *
 * A small state machine walks the output lines: step 0 looks for the
 * "Input #0, &lt;format&gt;" header (or an error line prefixed with the
 * source path), step 1 for the "Duration:" line, step 2 for per-stream
 * "Stream #..." lines whose comma-separated specs are parsed into video
 * or audio details. A line starting with "frame=" marks the start of
 * encoding progress and is pushed back onto the reader.
 *
 * @param source the input file, used to recognize error lines
 * @param reader reader over ffmpeg's output, supports line re-insertion
 * @return the populated info object
 * @throws InputFormatException if ffmpeg reported an input error or no
 *                              "Input #0" header was found
 * @throws EncoderException     on I/O errors while reading the output
 */
private MultimediaInfo parseMultimediaInfo(File source,
        RBufferedReader reader) throws InputFormatException,
        EncoderException {
    // "Input #0, <format>" header line
    Pattern p1 = Pattern.compile("^\\s*Input #0, (\\w+).+$\\s*",
            Pattern.CASE_INSENSITIVE);
    // "Duration: hh:mm:ss.cc" line
    Pattern p2 = Pattern.compile(
            "^\\s*Duration: (\\d\\d):(\\d\\d):(\\d\\d)\\.(\\d\\d).*$",
            Pattern.CASE_INSENSITIVE);
    // Stream description line (Audio/Video/Data)
    Pattern p3 = Pattern.compile(
            "^\\s*Stream #\\S+: ((?:Audio)|(?:Video)|(?:Data)): (.*)\\s*$",
            Pattern.CASE_INSENSITIVE);
    // Metadata block marker (matched but currently not acted upon)
    Pattern p4 = Pattern.compile(
            "^\\s*Metadata:",
            Pattern.CASE_INSENSITIVE);
    MultimediaInfo info = null;
    try
    {
        int step = 0;
        while (true)
        {
            String line = reader.readLine();
            LOG.debug("Output line: " + line);
            if (line == null)
            {
                break;
            }
            switch (step)
            {
                case 0:
                {
                    // An "<absolute path>: <message>" line is an ffmpeg input error.
                    String token = source.getAbsolutePath() + ": ";
                    if (line.startsWith(token))
                    {
                        String message = line.substring(token.length());
                        throw new InputFormatException(message);
                    }
                    Matcher m = p1.matcher(line);
                    if (m.matches())
                    {
                        String format = m.group(1);
                        info = new MultimediaInfo();
                        info.setFormat(format);
                        step++;
                    }
                    break;
                }
                case 1:
                {
                    Matcher m = p2.matcher(line);
                    if (m.matches())
                    {
                        long hours = Integer.parseInt(m.group(1));
                        long minutes = Integer.parseInt(m.group(2));
                        long seconds = Integer.parseInt(m.group(3));
                        long dec = Integer.parseInt(m.group(4));
                        // Duration in milliseconds; "dec" is centiseconds.
                        long duration = (dec * 10L) + (seconds * 1000L)
                                + (minutes * 60L * 1000L)
                                + (hours * 60L * 60L * 1000L);
                        info.setDuration(duration);
                        step++;
                    } else
                    {
                        // step = 3;
                    }
                    break;
                }
                case 2:
                {
                    Matcher m = p3.matcher(line);
                    Matcher m4 = p4.matcher(line);
                    if (m.matches())
                    {
                        String type = m.group(1);
                        String specs = m.group(2);
                        if ("Video".equalsIgnoreCase(type))
                        {
                            VideoInfo video = new VideoInfo();
                            StringTokenizer st = new StringTokenizer(specs, ",");
                            // First token is the decoder; the rest are probed
                            // for size, frame rate and bit rate in turn.
                            for (int i = 0; st.hasMoreTokens(); i++)
                            {
                                String token = st.nextToken().trim();
                                if (i == 0)
                                {
                                    video.setDecoder(token);
                                } else
                                {
                                    boolean parsed = false;
                                    // Video size.
                                    Matcher m2 = SIZE_PATTERN.matcher(token);
                                    if (!parsed && m2.find())
                                    {
                                        int width = Integer.parseInt(m2
                                                .group(1));
                                        int height = Integer.parseInt(m2
                                                .group(2));
                                        video.setSize(new VideoSize(width,
                                                height));
                                        parsed = true;
                                    }
                                    // Frame rate.
                                    m2 = FRAME_RATE_PATTERN.matcher(token);
                                    if (!parsed && m2.find())
                                    {
                                        try
                                        {
                                            float frameRate = Float
                                                    .parseFloat(m2.group(1));
                                            video.setFrameRate(frameRate);
                                        } catch (NumberFormatException e)
                                        {
                                            LOG.info("Invalid frame rate value: " + m2.group(1), e);
                                        }
                                        parsed = true;
                                    }
                                    // Bit rate.
                                    m2 = BIT_RATE_PATTERN.matcher(token);
                                    if (!parsed && m2.find())
                                    {
                                        int bitRate = Integer.parseInt(m2
                                                .group(1));
                                        // Pattern yields kbit/s; store bit/s.
                                        video.setBitRate(bitRate*1000);
                                        parsed = true;
                                    }
                                }
                            }
                            info.setVideo(video);
                        } else if ("Audio".equalsIgnoreCase(type))
                        {
                            AudioInfo audio = new AudioInfo();
                            StringTokenizer st = new StringTokenizer(specs, ",");
                            for (int i = 0; st.hasMoreTokens(); i++)
                            {
                                String token = st.nextToken().trim();
                                if (i == 0)
                                {
                                    audio.setDecoder(token);
                                } else
                                {
                                    boolean parsed = false;
                                    // Sampling rate.
                                    Matcher m2 = SAMPLING_RATE_PATTERN
                                            .matcher(token);
                                    if (!parsed && m2.find())
                                    {
                                        int samplingRate = Integer.parseInt(m2
                                                .group(1));
                                        audio.setSamplingRate(samplingRate);
                                        parsed = true;
                                    }
                                    // Channels.
                                    m2 = CHANNELS_PATTERN.matcher(token);
                                    if (!parsed && m2.find())
                                    {
                                        String ms = m2.group(1);
                                        if ("mono".equalsIgnoreCase(ms))
                                        {
                                            audio.setChannels(1);
                                        } else if ("stereo"
                                                .equalsIgnoreCase(ms))
                                        {
                                            audio.setChannels(2);
                                        } else if ("quad"
                                                .equalsIgnoreCase(ms))
                                        {
                                            audio.setChannels(4);
                                        }
                                        parsed = true;
                                    }
                                    // Bit rate.
                                    m2 = BIT_RATE_PATTERN.matcher(token);
                                    if (!parsed && m2.find())
                                    {
                                        int bitRate = Integer.parseInt(m2
                                                .group(1));
                                        audio.setBitRate(bitRate*1000);
                                        parsed = true;
                                    }
                                }
                            }
                            info.setAudio(audio);
                        }
                    } else // if (m4.matches())
                    {
                        // Stay on level 2
                    }
                    /*
                    else
                    {
                        step = 3;
                    }
                    */ break;
                }
                default:
                    break;
            }
            // "frame=" marks the start of encoding progress: push the line
            // back so the progress parser can consume it, and stop here.
            if (line.startsWith("frame="))
            {
                reader.reinsertLine(line);
                break;
            }
        }
    } catch (IOException e)
    {
        throw new EncoderException(e);
    }
    if (info == null)
    {
        throw new InputFormatException();
    }
    return info;
}
|
java
|
/**
 * Copy a configuration value into {@code props} under {@code propsKey},
 * trying {@code prefix + key} first and then each alternative prefix in
 * order (first match wins).
 *
 * @param conf      source configuration
 * @param prefix    primary key prefix
 * @param altPrefix alternative prefixes tried in order when the primary is absent
 * @param key       key suffix
 * @param props     destination properties
 * @param propsKey  destination property name
 * @param required  when true, a completely missing value is an error
 * @throws ConfigurationParseException if required and no value was found
 */
public static void updateProperty(Configuration conf, String prefix, String[] altPrefix,
                                  String key, Properties props, String propsKey,
                                  boolean required) throws ConfigurationParseException {
    String altKey = prefix + key;
    String val = conf.get(altKey);
    if (val == null) {
        // try alternative keys, stopping at the first hit
        for (String alternativePrefix : altPrefix) {
            altKey = alternativePrefix + key;
            val = conf.get(altKey);
            if (val != null) {
                // BUG FIX: previously the loop kept going, so a later
                // missing prefix would overwrite an earlier found value
                // with null.
                break;
            }
        }
    }
    if (required && val == null) {
        throw new ConfigurationParseException("Missing mandatory configuration: " + key);
    }
    if (val != null) {
        LOG.trace("Found alternative key {} value {}", altKey, val);
        props.setProperty(propsKey, val.trim());
    }
}
|
python
|
def _clear_cache(url, ts=None):
    '''
    Helper function used by precache and clearcache that clears the cache
    of a given URL and type
    '''
    if ts is None:
        # No type given: wipe the whole ForeignResource cache for the URL.
        resource = ForeignResource(url)
        if not os.path.exists(resource.cache_path_base):
            cli.printerr('%s is not cached (looked at %s)'
                         % (url, resource.cache_path_base))
            return
        cli.print('%s: clearing ALL at %s'
                  % (url, resource.cache_path_base))
        resource.cache_remove_all()
        return

    # Clear only the cache entry for the given type.
    resource = TypedResource(url, ts)
    if not resource.cache_exists():
        cli.printerr('%s is not cached for type %s (looked at %s)'
                     % (url, str(ts), resource.cache_path))
        return
    cli.print('%s: clearing "%s" at %s'
              % (url, str(ts), resource.cache_path))
    if os.path.isdir(resource.cache_path):
        resource.cache_remove_as_dir()
    else:
        resource.cache_remove()
|
java
|
/**
 * Build a Chunk for the given character: when a corresponding Symbol-font
 * glyph exists, render it in a Symbol font derived from the caller's font;
 * otherwise render the character as-is in the caller's font.
 */
public static Chunk get(char c, Font font) {
    final char greek = SpecialSymbol.getCorrespondingSymbol(c);
    if (greek == ' ') {
        // No Symbol-font equivalent for this character.
        return new Chunk(String.valueOf(c), font);
    }
    final Font symbolFont = new Font(Font.SYMBOL, font.getSize(), font.getStyle(), font.getColor());
    return new Chunk(String.valueOf(greek), symbolFont);
}
|
python
|
async def get(self, public_key):
    """ Receive account data

    Accepts:
        Query string:
            - "public_key" - str
        Query string params:
            - message ( signed dictionary ):
                - "timestamp" - str
    Returns:
        - "device_id" - str
        - "phone" - str
        - "public_key" - str
        - "count" - int ( wallets amount )
        - "level" - int (2 by default)
        - "news_count" - int (0 by default)
        - "email" - str
        - "wallets" - list
    Verified: True
    """
    # Signature verification
    if settings.SIGNATURE_VERIFICATION:
        super().verify()
    # Get users request source from the parenthesized part of the User-Agent
    compiler = re.compile(r"\((.*?)\)")
    match = compiler.search(self.request.headers.get("User-Agent"))
    try:
        source = match.group(1)
    except (AttributeError, IndexError):
        # BUG FIX: narrowed from a bare ``except`` — ``match`` is None when
        # the User-Agent has no parenthesized segment; anything else should
        # propagate instead of being silently swallowed.
        source = None
    # Write source to database
    await self.account.logsource(public_key=public_key, source=source)
    # Get account
    logging.debug("\n\n [+] -- Get account data.")
    response = await self.account.getaccountdata(public_key=public_key)
    logging.debug("\n")
    logging.debug(response)
    logging.debug("\n")
    if "error" in response.keys():
        self.set_status(response["error"])
        self.write(response)
        raise tornado.web.Finish
    # Receive balances from balance host
    wallets = await self.account.balance.get_wallets(uid=response["id"])
    if isinstance(wallets, dict):
        if "error" in wallets.keys():
            self.set_status(wallets["error"])
            self.write(wallets)
            raise tornado.web.Finish
    # Filter wallets: drop the majors handled elsewhere
    response.update({"wallets":json.dumps(
        [i for i in wallets["wallets"]
         if i.get("coinid") not in ["BTC", "LTC", "ETH"]])})
    # Return account data
    self.write(response)
|
java
|
/**
 * Join the values of all words in this sentence with the given delimiter,
 * flattening compound words into their constituent parts.
 *
 * @param delimiter separator between word values; {@code null} means ""
 * @return the joined text; empty string for an empty sentence
 */
public String text(String delimiter)
{
    if (delimiter == null) delimiter = "";
    StringBuilder sb = new StringBuilder(size() * 3);
    for (IWord word : this)
    {
        if (word instanceof CompoundWord)
        {
            // Compound words contribute each inner word separately.
            for (Word child : ((CompoundWord) word).innerList)
            {
                sb.append(child.getValue()).append(delimiter);
            }
        }
        else
        {
            sb.append(word.getValue()).append(delimiter);
        }
    }
    // Strip the trailing delimiter. BUG FIX: guard the empty-sentence case,
    // where setLength(0 - delimiter.length()) used to throw
    // StringIndexOutOfBoundsException for a non-empty delimiter.
    if (sb.length() >= delimiter.length())
    {
        sb.setLength(sb.length() - delimiter.length());
    }
    return sb.toString();
}
|
java
|
/**
 * Append a one-line trace summary of this message to the buffer: the common
 * control-message fields followed by this message's request id.
 *
 * @param buff buffer the summary is appended to
 */
public void getTraceSummaryLine(StringBuilder buff) {
    // Get the common fields for control messages
    super.getTraceSummaryLine(buff);
    buff.append("requestID=");
    buff.append(getRequestID());
}
|
python
|
def _init_orient(self):
    """Retrieve the quadrature points and weights if needed.
    """
    # Quadrature nodes/weights are only needed for fixed-point orientation
    # averaging; beta is sampled over [0, 180] degrees against the
    # orientation pdf with n_beta points.
    if self.orient == orientation.orient_averaged_fixed:
        (self.beta_p, self.beta_w) = quadrature.get_points_and_weights(
            self.or_pdf, 0, 180, self.n_beta)
    # Record the current orientation configuration (presumably for cache
    # invalidation — confirm against _set_orient_signature).
    self._set_orient_signature()
|
python
|
def get_identities(self, item):
    """Return the identities from an item

    Yields one identity per assignee/reporter/creator field and per
    comment author / update author.
    """
    fields = item['data']['fields']
    for role in ("assignee", "reporter", "creator"):
        if fields.get(role):
            yield self.get_sh_identity(fields[role])

    for comment in item['data'].get('comments_data', []):
        for author_key in ('author', 'updateAuthor'):
            if comment.get(author_key):
                yield self.get_sh_identity(comment[author_key])
|
python
|
def multiplyC(self, alpha):
    """multiply C with a scalar and update all related internal variables (dC, D,...)"""
    self.C *= alpha
    if self.dC is not self.C:  # dC may alias C; avoid scaling the same array twice
        self.dC *= alpha
    # D scales with the square root, consistent with the alpha**0.5 relation
    # between C and D used here (D presumably holds per-axis standard
    # deviations / sqrt-eigenvalues of C — confirm against the class docs).
    self.D *= alpha**0.5
|
python
|
def set_params(self, prog=None, params=""):
    """
    Add --params options for given command line programs
    """
    target = "to {0}".format(prog) if prog else ""
    help_text = "Extra parameters to pass {0} (these WILL NOT be validated)".format(target)
    self.add_option("--params", dest="extra", default=params, help=help_text)
|
java
|
/**
 * Print a labeled dump of an object: its class name, then one line per
 * field/getter entry that passes both predicates.
 *
 * @param name          label printed before the object's class name
 * @param obj           object whose fields and getters are dumped
 * @param evalPredicate selects which member names get evaluated at all
 * @param dumpPredicate selects which evaluated (name, value) entries are printed
 * @param printer       sink receiving the output lines
 */
public static void dumpIf(String name, Object obj, Predicate<String> evalPredicate, Predicate<Map.Entry<String, Object>> dumpPredicate, StringPrinter printer) {
    printer.println(name + ": " + obj.getClass().getName());
    fieldsAndGetters(obj, evalPredicate).filter(dumpPredicate).forEach(entry -> {
        printer.println("\t" + entry.getKey() + " = " + entry.getValue());
    });
}
|
python
|
def evaluate(self, reference_scene_list, estimated_scene_list=None, estimated_scene_probabilities=None):
    """Evaluate file pair (reference and estimated).

    Parameters
    ----------
    reference_scene_list : list of dict or dcase_util.containers.MetaDataContainer
        Reference scene list.

    estimated_scene_list : list of dict or dcase_util.containers.MetaDataContainer
        Estimated scene list.
        Default value None

    estimated_scene_probabilities : dcase_util.containers.ProbabilityContainer
        Estimated scene probabilities. Currently not used.
        Default value None

    Returns
    -------
    self

    Raises
    ------
    ValueError
        If nothing is given to evaluate, or an estimated item has no
        matching reference item.
    """
    if estimated_scene_list is None and estimated_scene_probabilities is None:
        raise ValueError("Nothing to evaluate, give at least estimated_scene_list or estimated_scene_probabilities")

    # Make sure reference_scene_list is dcase_util.containers.MetaDataContainer.
    # BUGFIX: the original tested estimated_scene_list here, so the reference
    # list was never actually converted.
    if not isinstance(reference_scene_list, dcase_util.containers.MetaDataContainer):
        reference_scene_list = dcase_util.containers.MetaDataContainer(reference_scene_list)

    # Make sure estimated_scene_list is dcase_util.containers.MetaDataContainer
    if not isinstance(estimated_scene_list, dcase_util.containers.MetaDataContainer):
        estimated_scene_list = dcase_util.containers.MetaDataContainer(estimated_scene_list)

    # Make sure estimated_scene_probabilities is dcase_util.containers.ProbabilityContainer
    if estimated_scene_probabilities is not None:
        if not isinstance(estimated_scene_probabilities, dcase_util.containers.ProbabilityContainer):
            estimated_scene_probabilities = dcase_util.containers.ProbabilityContainer(estimated_scene_probabilities)

    # Translate "file" field to "filename"
    for item in reference_scene_list:
        if 'filename' not in item and 'file' in item:
            item['filename'] = item['file']

    for item in estimated_scene_list:
        if 'filename' not in item and 'file' in item:
            item['filename'] = item['file']

    # Pair each estimated item with its reference item by filename.
    y_true = []
    y_pred = []
    for estimated_item in estimated_scene_list:
        reference_item_matched = {}
        for reference_item in reference_scene_list:
            if estimated_item['filename'] == reference_item['filename']:
                reference_item_matched = reference_item
                break

        if not reference_item_matched:
            # BUGFIX: report via 'filename' (always present after the
            # translation above); the original read the possibly-missing
            # 'file' key and could raise a masking KeyError instead.
            raise ValueError(
                "Cannot find reference_item for estimated item [{item}]".format(item=estimated_item['filename'])
            )

        y_true.append(reference_item_matched['scene_label'])
        y_pred.append(estimated_item['scene_label'])

    y_true = numpy.array(y_true)
    y_pred = numpy.array(y_pred)

    # Accumulate class-wise and overall counts.
    Ncorr_overall = 0
    for scene_label in self.scene_label_list:
        true_id = numpy.where(y_true == scene_label)[0]
        pred_id = numpy.where(y_pred == scene_label)[0]

        # Indices are unique, so the size of the intersection is the number
        # of correctly labelled items (replaces the original O(n^2)
        # membership loop).
        Ncorr = int(numpy.intersect1d(true_id, pred_id).size)
        Ncorr_overall += Ncorr

        self.scene_wise[scene_label]['Ncorr'] += Ncorr
        self.scene_wise[scene_label]['Nref'] += true_id.shape[0]
        self.scene_wise[scene_label]['Nsys'] += pred_id.shape[0]

    self.overall['Ncorr'] += Ncorr_overall
    self.overall['Nref'] += y_true.shape[0]
    self.overall['Nsys'] += y_pred.shape[0]

    return self
|
java
|
/**
 * Serializes this attribute: its name constant, then a length-prefixed
 * payload listing each bootstrap method's handle ref and the constant-pool
 * indices of its arguments.
 */
@Override
public void write(ByteCodeWriter out)
  throws IOException
{
  out.writeUTF8Const(getName());

  // Build the payload in a temp buffer first: the attribute length must be
  // written before the payload bytes themselves.
  TempOutputStream ts = new TempOutputStream();
  //ts.openWrite();
  //WriteStream ws = new WriteStream(ts);

  ByteCodeWriter o2 = new ByteCodeWriter(ts, out.getJavaClass());

  o2.writeShort(_methods.size());
  for (int i = 0; i < _methods.size(); i++) {
    BootstrapMethod method = _methods.get(i);
    o2.writeShort(method.getMethodRef());
    o2.writeShort(method.getArgumentSize());
    for (ConstantPoolEntry entry : method.getArguments()) {
      o2.writeShort(entry.getIndex());
    }
  }
  ts.close();

  // Length first, then copy the buffered payload chunk by chunk.
  out.writeInt(ts.getLength());

  TempBuffer ptr = ts.getHead();
  for (; ptr != null; ptr = ptr.next()) {
    out.write(ptr.buffer(), 0, ptr.length());
  }

  ts.destroy();
}
|
java
|
/**
 * Computes the Damerau-Levenshtein distance (insert/delete/substitute plus
 * adjacent transposition) between this instance's baseString and string2,
 * using a two-row dynamic program restricted to a window of maxDistance
 * around the diagonal.
 *
 * @param string2     string to compare against baseString
 * @param maxDistance early-out bound; 0 degenerates to an equality test
 * @return the edit distance, or -1 when it would exceed maxDistance
 */
public int DamerauLevenshteinDistance(String string2, int maxDistance) {
    if (baseString == null) return string2 == null ? 0 : string2.length(); //string2 ?? "").Length;
    if (string2 == null || string2.isEmpty()) return baseString.length();
    if(maxDistance == 0) return baseString.equals(string2) ? 0 : -1;
    // if strings of different lengths, ensure shorter string is in string1. This can result in a little
    // faster speed by spending more time spinning just the inner loop during the main processing.
    String string1;
    if (baseString.length() > string2.length()) {
        string1 = string2;
        string2 = baseString;
    } else {
        string1 = baseString;
    }
    int sLen = string1.length(); // this is also the minimun length of the two strings
    int tLen = string2.length();
    // suffix common to both strings can be ignored
    while ((sLen > 0) && (string1.charAt(sLen - 1) == string2.charAt(tLen - 1))) { sLen--; tLen--; }
    int start = 0;
    if ((string1.charAt(0) == string2.charAt(0)) || (sLen == 0)) { // if there'string1 a shared prefix, or all string1 matches string2'string1 suffix
        // prefix common to both strings can be ignored
        while ((start < sLen) && (string1.charAt(start) == string2.charAt(start))) start++;
        sLen -= start; // length of the part excluding common prefix and suffix
        tLen -= start;
        // if all of shorter string matches prefix and/or suffix of longer string, then
        // edit distance is just the delete of additional characters present in longer string
        if (sLen == 0) return tLen;
        string2 = string2.substring(start, start + tLen); // faster than string2[start+j] in inner loop below
    }
    int lenDiff = tLen - sLen;
    if ((maxDistance < 0) || (maxDistance > tLen)) {
        maxDistance = tLen;
    } else if (lenDiff > maxDistance) return -1;
    // v0 holds the previous DP row, v2 the transposition row from two
    // iterations back; both are reused across calls when large enough.
    if (tLen > v0.length)
    {
        v0 = new int[tLen];
        v2 = new int[tLen];
    } else {
        for(int i = 0; i < tLen; i++) v2[i] = 0; // Substituting Array.clear(v2, 0, tLen)
    }
    int j;
    for (j = 0; j < maxDistance; j++) v0[j] = j + 1;
    for (; j < tLen; j++) v0[j] = maxDistance + 1;
    int jStartOffset = maxDistance - (tLen - sLen);
    boolean haveMax = maxDistance < tLen;
    int jStart = 0;
    int jEnd = maxDistance;
    char sChar = string1.charAt(0);
    int current = 0;
    for (int i = 0; i < sLen; i++) {
        char prevsChar = sChar;
        sChar = string1.charAt(start+i);
        char tChar = string2.charAt(0);
        int left = i;
        current = left + 1;
        int nextTransCost = 0;
        // no need to look beyond window of lower right diagonal - maxDistance cells (lower right diag is i - lenDiff)
        // and the upper left diagonal + maxDistance cells (upper left is i)
        jStart += (i > jStartOffset) ? 1 : 0;
        jEnd += (jEnd < tLen) ? 1 : 0;
        for (j = jStart; j < jEnd; j++) {
            int above = current;
            int thisTransCost = nextTransCost;
            nextTransCost = v2[j];
            v2[j] = current = left; // cost of diagonal (substitution)
            left = v0[j]; // left now equals current cost (which will be diagonal at next iteration)
            char prevtChar = tChar;
            tChar = string2.charAt(j);
            if (sChar != tChar) {
                if (left < current) current = left; // insertion
                if (above < current) current = above; // deletion
                current++;
                if ((i != 0) && (j != 0)
                        && (sChar == prevtChar)
                        && (prevsChar == tChar)) {
                    thisTransCost++;
                    if (thisTransCost < current) current = thisTransCost; // transposition
                }
            }
            v0[j] = current;
        }
        // Early exit: the cell on the final diagonal already exceeds the bound.
        if (haveMax && (v0[i + lenDiff] > maxDistance)) return -1;
    }
    return (current <= maxDistance) ? current : -1;
}
|
python
|
def update_attribute_value_items(self):
    """
    Returns an iterator of items for an attribute value map to use for
    an UPDATE operation.

    The iterator ignores collection attributes as these are processed
    implicitly by the traversal algorithm.

    :returns: iterator yielding tuples with objects implementing
        :class:`everest.resources.interfaces.IResourceAttribute` as the
        first and the proxied attribute value as the second argument.
    """
    for attr in self._attribute_iterator():
        # Collections are handled implicitly by the traversal algorithm.
        if attr.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION:
            continue
        try:
            value = self._get_proxied_attribute_value(attr)
        except AttributeError:
            # Attribute not present on the proxy; skip it.
            continue
        yield (attr, value)
|
java
|
/**
 * Returns the index of the least recently used window, or INVALIDWINDOW
 * when no window has a timestamp below Integer.MAX_VALUE.
 */
private int getLRDefinedWindow()
{
    // Scan for the window with the oldest (smallest) timestamp; counting
    // down preserves the original iteration order.
    int oldestStamp = Integer.MAX_VALUE;
    int oldestWindow = INVALIDWINDOW;
    for (int w = NUMWINDOWS - 1; w >= 0; --w) {
        if (fTimeStamps[w] < oldestStamp) {
            oldestStamp = fTimeStamps[w];
            oldestWindow = w;
        }
    }
    return oldestWindow;
}
|
java
|
/**
 * Shows a modal text-input dialog with masked (password) input.
 *
 * @param textGUI        GUI to attach the dialog to
 * @param title          dialog title
 * @param description    descriptive text shown above the input field
 * @param initialContent text pre-filled into the input field
 * @return the entered text, as produced by the dialog's showDialog
 */
public static String showPasswordDialog(WindowBasedTextGUI textGUI, String title, String description, String initialContent) {
    // Same as a plain text-input dialog except the field masks keystrokes.
    return new TextInputDialogBuilder()
            .setTitle(title)
            .setDescription(description)
            .setInitialContent(initialContent)
            .setPasswordInput(true)
            .build()
            .showDialog(textGUI);
}
|
java
|
/**
 * Returns the shared empty map field, cast to the requested key/value
 * types. The unchecked cast is safe only if EMPTY_MAP_FIELD is never
 * mutated — presumably it is immutable, like Collections.emptyMap();
 * confirm at its declaration.
 */
@SuppressWarnings({ "unchecked", "cast" })
public static <K, V> MapFieldLite<K, V> emptyMapField() {
    return (MapFieldLite<K, V>) EMPTY_MAP_FIELD;
}
|
python
|
def list(self, cur_p=''):
    '''
    View the list of the Log.

    :param cur_p: current page number as a string; '' or values < 1
        fall back to page 1.
    '''
    # Normalise the requested page number.
    if cur_p == '':
        current_page_number = 1
    else:
        current_page_number = int(cur_p)
    current_page_number = 1 if current_page_number < 1 else current_page_number

    # NOTE(review): pager_num is computed but unused; kept for the
    # MLog.total_number() call in case it has side effects — confirm.
    pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
    kwd = {
        'pager': '',
        'title': '',
        'current_page': current_page_number,
    }
    # The admin (AJAX) and public views differ only in the template used,
    # so pick the template once instead of duplicating the render call.
    if self.is_p:
        template = 'admin/log_ajax/user_list.html'
    else:
        template = 'misc/log/user_list.html'
    self.render(template,
                kwd=kwd,
                user_list=MLog.query_all_user(),
                no_user_list=MLog.query_all(current_page_num=current_page_number),
                format_date=tools.format_date,
                userinfo=self.userinfo)
|
java
|
/**
 * Yields the first element of the iterable, if any.
 *
 * @param iterable source of elements; must not be null
 * @return an Optional holding the first element, empty when the iterable is
 */
public static <E> Optional<E> maybeFirst(Iterable<E> iterable) {
    // Fail fast on null rather than deep inside the element extractor.
    dbc.precondition(iterable != null, "cannot call maybeFirst with a null iterable");
    final MaybeFirstElement<E> extractor = new MaybeFirstElement<E>();
    return extractor.apply(iterable.iterator());
}
|
python
|
def listen_tta(self, target, timeout):
    """Listen as Type A Target.

    Waits up to *timeout* seconds to be discovered at the bitrate set by
    **target.brty**, answering with **target.sens_res** and, depending on
    the SENS_RES coding, the **rid_res** or **sdd_res**/**sel_res**
    attributes of *target*. Note that none of the currently supported
    hardware can actually receive an RID_CMD, thus Type 1 Tag emulation
    is impossible.

    Arguments:

      target (nfc.clf.LocalTarget): Supplies bitrate and mandatory
        response data to reply when being discovered.

      timeout (float): The maximum number of seconds to wait for a
        discovery command.

    Returns:

      nfc.clf.LocalTarget: Command data received from the remote
        Initiator, with the first post-discovery command in one of the
        **tt1_cmd**, **tt2_cmd** or **tt4_cmd** attributes (unset
        attributes are always None).

    Raises:

      nfc.clf.UnsupportedTargetError: The method is not supported or the
        *target* argument requested an unsupported bitrate (or has a
        wrong technology type identifier).

      ~exceptions.ValueError: A required target response attribute is
        not present or does not supply the number of bytes expected.

    """
    # Abstract hook: concrete device drivers must override this.
    cls = type(self)
    qualified = '.'.join((cls.__module__, cls.__name__))
    raise NotImplementedError("%s.%s() is required" % (qualified, "listen_tta"))
|
python
|
def info_gain_nominal(x, y, separate_max):
    """
    Calculate the information gain of the best binary split of a discrete
    feature's values.

    x: numpy array - numerical or discrete feature
    y: numpy array - labels
    separate_max: passed through to nominal_splits (controls whether the
        dominant value is split off separately)

    Returns (information gain as float, [left split values, right split
    values]), or None when the feature has fewer than 3 unique values.
    """
    x_vals = np.unique(x)  # unique values
    if len(x_vals) < 3:  # too few unique values to be worth splitting
        return None

    y_dist = Counter(y)  # label distribution
    h_y = h(y_dist.values())  # class entropy

    # Distributions and candidate split ordering for the nominal values.
    dist, splits = nominal_splits(x, y, x_vals, y_dist, separate_max)

    # Few candidates: scan all cut points once. Many candidates: scan a
    # coarse grid first, then refine around the best cut point.
    # BUGFIX: use floor division — len(dist) / 10 is a float on Python 3
    # and range() rejects float steps.
    if len(dist) < 50:
        indices, repeat = range(1, len(dist)), 1
    else:
        indices, repeat = range(1, len(dist), len(dist) // 10), 3
    interval = len(dist) // 10

    max_ig, max_i, iteration = 0, 1, 0
    while iteration < repeat:
        for i in indices:
            # NOTE(review): summed dist entries expose .values(); presumably
            # Counter-like objects — confirm in nominal_splits.
            dist0 = np.sum(dist[:i])  # left side of candidate cut
            dist1 = np.sum(dist[i:])  # right side of candidate cut
            coef = np.true_divide([np.sum(dist0.values()), np.sum(dist1.values())], len(y))
            ig = h_y - np.dot(coef, [h(dist0.values()), h(dist1.values())])  # information gain
            if ig > max_ig:
                max_ig, max_i = ig, i  # remember the best cut point
        iteration += 1
        if repeat > 1:
            # Halve the scan interval and zoom in around the current best.
            interval = int(interval * 0.5)
            if max_i in indices and interval > 0:
                middle_index = indices.index(max_i)
            else:
                break
            min_index = middle_index if middle_index == 0 else middle_index - 1
            max_index = middle_index if middle_index == len(indices) - 1 else middle_index + 1
            indices = range(indices[min_index], indices[max_index], interval)

    # Values on either side of the best cut point.
    return float(max_ig), [splits[:max_i], splits[max_i:]]
|
python
|
def decrypt(data, key):
    '''decrypt the data with the key'''
    data_len = len(data)
    # Hand raw buffers to the C implementation; the key is normalised to
    # bytes first.
    data = ffi.from_buffer(data)
    key = ffi.from_buffer(__tobytes(key))
    # out_len receives the decrypted length from the C side.
    out_len = ffi.new('size_t *')
    result = lib.xxtea_decrypt(data, data_len, key, out_len)
    # Copy the C buffer into a Python bytes object, then free the C-side
    # allocation so it does not leak.
    ret = ffi.buffer(result, out_len[0])[:]
    lib.free(result)
    return ret
|
python
|
def __convertIp6PrefixStringToIp6Address(self, strIp6Prefix):
    """convert IPv6 prefix string to colon-grouped IPv6 format

    for example:
    2001000000000000 -> 2001:0000:0000:0000:

    Args:
        strIp6Prefix: IPv6 prefix as a hex string, optionally with a
            leading '0x' and/or trailing 'L' (Python 2 long suffix)

    Returns:
        The first four 16-bit groups joined by ':' and terminated with ':'
    """
    # Drop a Python-2 long suffix, if present.
    hexStr = strIp6Prefix.rstrip('L')
    # BUGFIX: str.lstrip("0x") strips *any* run of leading '0'/'x'
    # characters, corrupting prefixes whose first hex digit is 0
    # (e.g. "0x0db8..." -> "db8..."). Remove exactly the "0x" prefix.
    if hexStr.startswith('0x'):
        hexStr = hexStr[2:]
    # Right-pad to a full 64-bit prefix (16 hex digits).
    hexPrefix = str(hexStr).ljust(16, '0')
    hexIter = iter(hexPrefix)
    # Group the hex digits in fours, joined by ':'.
    grouped = ':'.join(a + b + c + d for a, b, c, d in zip(hexIter, hexIter, hexIter, hexIter))
    # Keep at most the first four groups and terminate with ':'.
    return str(grouped)[:20] + ':'
|
python
|
def imagej_shape(shape, rgb=None):
    """Return shape normalized to 6D ImageJ hyperstack TZCYXS.

    Raise ValueError if not a valid ImageJ hyperstack shape.

    >>> imagej_shape((2, 3, 4, 5, 3), False)
    (2, 3, 4, 5, 3, 1)

    """
    shape = tuple(int(i) for i in shape)
    ndim = len(shape)
    # BUGFIX: the original test `1 > ndim > 6` can never be true, so
    # invalid ranks were silently accepted. Enforce 2..6 dimensions, as
    # the error message states.
    if ndim < 2 or ndim > 6:
        raise ValueError('invalid ImageJ hyperstack: not 2 to 6 dimensional')
    if rgb is None:
        # Auto-detect RGB: a trailing axis of size 3/4 on a >2-D stack is
        # treated as samples.
        rgb = shape[-1] in (3, 4) and ndim > 2
    if rgb and shape[-1] not in (3, 4):
        raise ValueError('invalid ImageJ hyperstack: not a RGB image')
    if not rgb and ndim == 6 and shape[-1] != 1:
        raise ValueError('invalid ImageJ hyperstack: not a non-RGB image')
    # Pad with leading singleton axes; non-RGB data also gets S=1 appended.
    if rgb or shape[-1] == 1:
        return (1, ) * (6 - ndim) + shape
    return (1, ) * (5 - ndim) + shape + (1,)
|
python
|
def _GetDatabaseConfig(self):
    """
    Get all configuration from database.

    This includes values from the Config table as well as populating lists
    for supported formats and ignored directories from their respective
    database tables.
    """
    goodlogging.Log.Seperator()
    goodlogging.Log.Info("CLEAR", "Getting configuration variables...")
    goodlogging.Log.IncreaseIndent()

    # SOURCE DIRECTORY
    # Only looked up when not already provided (e.g. via command line).
    if self._sourceDir is None:
        self._sourceDir = self._GetConfigValue('SourceDir', 'source directory')

    # TV DIRECTORY
    # Not needed when renaming files in place.
    if self._inPlaceRename is False and self._tvDir is None:
        self._tvDir = self._GetConfigValue('TVDir', 'tv directory')

    # ARCHIVE DIRECTORY
    # isDir=False: stored as a name relative to other dirs, not a full path
    # — presumably; confirm against _GetConfigValue.
    self._archiveDir = self._GetConfigValue('ArchiveDir', 'archive directory', isDir = False)

    # SUPPORTED FILE FORMATS
    self._supportedFormatsList = self._GetSupportedFormats()

    # IGNORED DIRECTORIES
    self._ignoredDirsList = self._GetIgnoredDirs()

    # Echo the resolved configuration to the log.
    goodlogging.Log.NewLine()
    goodlogging.Log.Info("CLEAR", "Configuation is:")
    goodlogging.Log.IncreaseIndent()
    goodlogging.Log.Info("CLEAR", "Source directory = {0}".format(self._sourceDir))
    goodlogging.Log.Info("CLEAR", "TV directory = {0}".format(self._tvDir))
    goodlogging.Log.Info("CLEAR", "Supported formats = {0}".format(self._supportedFormatsList))
    goodlogging.Log.Info("CLEAR", "Ignored directory list = {0}".format(self._ignoredDirsList))
    goodlogging.Log.ResetIndent()
|
java
|
/**
 * Builds the value(s) for a mapping by concatenating, in order, the value
 * parts described in mappingValues: literal strings, object names, object
 * attributes, object text (optionally split into multiple values), or
 * variable references.
 *
 * @param object            the parser object the mapping is applied to
 * @param mappingValues     ordered value-part descriptions from the mapping
 * @param currentList       currently open parser objects, keyed by type
 * @param containsVariables whether variable encoding is allowed/expected
 * @return the constructed value(s); an empty array when nothing was produced
 * @throws MtasParserException on unexpected variables or unknown part types
 * @throws MtasConfigException propagated from object lookup
 */
private String[] computeValueFromMappingValues(MtasParserObject object,
    List<Map<String, String>> mappingValues,
    Map<String, List<MtasParserObject>> currentList,
    boolean containsVariables)
    throws MtasParserException, MtasConfigException {
  String[] value = { "" };
  for (Map<String, String> mappingValue : mappingValues) {
    // directly
    if (mappingValue.get(MAPPING_VALUE_SOURCE)
        .equals(MtasParserMapping.SOURCE_STRING)) {
      if (mappingValue.get("type")
          .equals(MtasParserMapping.PARSER_TYPE_STRING)) {
        String subvalue = computeFilteredPrefixedValue(
            mappingValue.get(MAPPING_VALUE_TYPE),
            mappingValue.get(MAPPING_VALUE_TEXT), null, null);
        if (subvalue != null) {
          // Literal text is appended to every value built so far.
          for (int i = 0; i < value.length; i++) {
            value[i] = addAndEncodeValue(value[i], subvalue,
                containsVariables);
          }
        }
      }
      // from objects
    } else {
      MtasParserObject[] checkObjects = computeObjectFromMappingValue(object,
          mappingValue, currentList);
      // create value
      if (checkObjects != null && checkObjects.length > 0) {
        MtasParserType checkType = checkObjects[0].getType();
        // add name to value
        if (mappingValue.get(MAPPING_VALUE_TYPE)
            .equals(MtasParserMapping.PARSER_TYPE_NAME)) {
          String subvalue = computeFilteredPrefixedValue(
              mappingValue.get(MAPPING_VALUE_TYPE), checkType.getName(),
              mappingValue.get(MAPPING_VALUE_FILTER),
              mappingValue.get(MAPPING_VALUE_PREFIX) == null
                  || mappingValue.get(MAPPING_VALUE_PREFIX).isEmpty() ? null
                      : mappingValue.get(MAPPING_VALUE_PREFIX));
          if (subvalue != null) {
            for (int i = 0; i < value.length; i++) {
              value[i] = addAndEncodeValue(value[i], subvalue,
                  containsVariables);
            }
          }
          // add attribute to value
        } else if (mappingValue.get(MAPPING_VALUE_TYPE)
            .equals(MtasParserMapping.PARSER_TYPE_ATTRIBUTE)) {
          String tmpValue = null;
          // "#" selects the object's id instead of a named attribute.
          if (mappingValue.get(MAPPING_VALUE_NAME).equals("#")) {
            tmpValue = checkObjects[0].getId();
          } else {
            String namespace = mappingValue.get(MAPPING_VALUE_NAMESPACE);
            if(namespace==null) {
              tmpValue = checkObjects[0]
                  .getAttribute(mappingValue.get(MAPPING_VALUE_NAME));
            } else {
              tmpValue = checkObjects[0]
                  .getOtherAttribute(namespace, mappingValue.get(MAPPING_VALUE_NAME));
            }
          }
          String subvalue = computeFilteredPrefixedValue(
              mappingValue.get(MAPPING_VALUE_TYPE), tmpValue,
              mappingValue.get(MAPPING_VALUE_FILTER),
              mappingValue.get(MAPPING_VALUE_PREFIX) == null
                  || mappingValue.get(MAPPING_VALUE_PREFIX).isEmpty() ? null
                      : mappingValue.get(MAPPING_VALUE_PREFIX));
          if (subvalue != null) {
            for (int i = 0; i < value.length; i++) {
              value[i] = addAndEncodeValue(value[i], subvalue,
                  containsVariables);
            }
          }
          // value from text
        } else if (mappingValue.get("type")
            .equals(MtasParserMapping.PARSER_TYPE_TEXT)) {
          String subvalue = computeFilteredPrefixedValue(
              mappingValue.get(MAPPING_VALUE_TYPE), checkObjects[0].getText(),
              mappingValue.get(MAPPING_VALUE_FILTER),
              mappingValue.get(MAPPING_VALUE_PREFIX) == null
                  || mappingValue.get(MAPPING_VALUE_PREFIX).isEmpty() ? null
                      : mappingValue.get(MAPPING_VALUE_PREFIX));
          if (subvalue != null) {
            for (int i = 0; i < value.length; i++) {
              value[i] = addAndEncodeValue(value[i], subvalue,
                  containsVariables);
            }
          }
        } else if (mappingValue.get("type")
            .equals(MtasParserMapping.PARSER_TYPE_TEXT_SPLIT)) {
          // Split text fans out: each fragment is combined with every value
          // built so far (cartesian product).
          String[] textValues = checkObjects[0].getText()
              .split(Pattern.quote(mappingValue.get(MAPPING_VALUE_SPLIT)));
          textValues = computeFilteredSplitValues(textValues,
              mappingValue.get(MAPPING_VALUE_FILTER));
          if (textValues != null && textValues.length > 0) {
            String[] nextValue = new String[value.length * textValues.length];
            boolean nullValue = false;
            int number = 0;
            for (int k = 0; k < textValues.length; k++) {
              String subvalue = computeFilteredPrefixedValue(
                  mappingValue.get(MAPPING_VALUE_TYPE), textValues[k],
                  mappingValue.get(MAPPING_VALUE_FILTER),
                  mappingValue.get(MAPPING_VALUE_PREFIX) == null
                      || mappingValue.get(MAPPING_VALUE_PREFIX).isEmpty()
                          ? null : mappingValue.get(MAPPING_VALUE_PREFIX));
              if (subvalue != null) {
                for (int i = 0; i < value.length; i++) {
                  nextValue[number] = addAndEncodeValue(value[i], subvalue,
                      containsVariables);
                  number++;
                }
              } else if (!nullValue) {
                // Keep the unmodified values once when a fragment filters out.
                for (int i = 0; i < value.length; i++) {
                  nextValue[number] = value[i];
                  number++;
                }
                nullValue = true;
              }
            }
            value = new String[number];
            System.arraycopy(nextValue, 0, value, 0, number);
          }
        } else if (mappingValue.get("type")
            .equals(MtasParserMapping.PARSER_TYPE_VARIABLE)) {
          if (containsVariables) {
            String variableName = mappingValue.get(MAPPING_VALUE_NAME);
            String variableValue = mappingValue.get(MAPPING_VALUE_VALUE);
            String prefix = mappingValue.get(MAPPING_VALUE_PREFIX);
            if (variableName != null && variableValue != null
                && mappingValue.get(MAPPING_VALUE_SOURCE)
                    .equals(MtasParserMapping.SOURCE_OWN)) {
              String subvalue = object.getAttribute(variableValue);
              // Strip a leading "#" reference marker from the attribute.
              if (subvalue != null && subvalue.startsWith("#")) {
                subvalue = subvalue.substring(1);
              }
              if (subvalue != null) {
                for (int i = 0; i < value.length; i++) {
                  if (prefix != null && !prefix.isEmpty()) {
                    value[i] = addAndEncodeValue(value[i], prefix,
                        containsVariables);
                  }
                  value[i] = addAndEncodeVariable(value[i], variableName,
                      subvalue, containsVariables);
                }
              }
            }
          } else {
            throw new MtasParserException("unexpected variable");
          }
        } else {
          throw new MtasParserException(
              "unknown type " + mappingValue.get("type"));
        }
      }
    }
  }
  // A single empty string means no part produced output.
  if (value.length == 1 && value[0].isEmpty()) {
    return new String[] {};
  } else {
    return value;
  }
}
|
python
|
async def _set_state(self, state: int) -> bool:
    """Set the state of the device.

    Issues a PUT to the device-set endpoint requesting the given desired
    door state. Returns True only when the API confirms the change
    (ReturnCode 0); request failures and error responses yield False
    rather than raising.
    """
    try:
        set_state_resp = await self.api._request(
            'put',
            DEVICE_SET_ENDPOINT,
            json={
                'attributeName': 'desireddoorstate',
                'myQDeviceId': self.device_id,
                'AttributeValue': state,
            })
    except RequestError as err:
        _LOGGER.error('%s: Setting state failed (and halting): %s',
                      self.name, err)
        return False

    if set_state_resp is None:
        return False

    # ReturnCode 0 signals success; anything else carries an error message.
    # Missing ReturnCode defaults to 1 (failure).
    if int(set_state_resp.get('ReturnCode', 1)) != 0:
        _LOGGER.error(
            '%s: Error setting the device state: %s', self.name,
            set_state_resp.get('ErrorMessage', 'Unknown Error'))
        return False

    return True
|
python
|
def check_data(labels, data, args):
    """Check that all data were inserted correctly. Return the colors.

    Exits the program (status 1) with an error message on any
    inconsistency: label/data size mismatch, ragged category rows,
    color/category size mismatch, or an unsupported vertical multi-series
    graph.
    """
    len_categories = len(data[0])

    # Check that there are data for all labels.
    if len(labels) != len(data):
        print(">> Error: Label and data array sizes don't match")
        sys.exit(1)

    # Check that there are data for all categories per label.
    for dat in data:
        if len(dat) != len_categories:
            print(">> Error: There are missing values")
            sys.exit(1)

    colors = []
    # If user inserts colors, they should be as many as the categories.
    if args['color'] is not None:
        if len(args['color']) != len_categories:
            print(">> Error: Color and category array sizes don't match")
            sys.exit(1)
        # NOTE(review): unknown color names map to None here — confirm
        # callers validate color names beforehand.
        colors = [AVAILABLE_COLORS.get(color) for color in args['color']]

    # Vertical graph for multiple series of same scale is not supported yet.
    if args['vertical'] and len_categories > 1 and not args['different_scale']:
        print(">> Error: Vertical graph for multiple series of same "
              "scale is not supported yet.")
        sys.exit(1)

    # If user hasn't inserted colors, pick the first n colors
    # from the dict (n = number of categories).
    if args['stacked'] and not colors:
        colors = list(AVAILABLE_COLORS.values())[:len_categories]

    return colors
|
java
|
/**
 * Marshalls a ProjectStatus (state and reason fields) via the given
 * protocol marshaller.
 *
 * @throws SdkClientException on a null argument or any marshalling failure
 */
public void marshall(ProjectStatus projectStatus, ProtocolMarshaller protocolMarshaller) {
    // Null input is a caller error, reported as a client exception.
    if (projectStatus == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(projectStatus.getState(), STATE_BINDING);
        protocolMarshaller.marshall(projectStatus.getReason(), REASON_BINDING);
    } catch (Exception e) {
        // Wrap every failure in the SDK's client exception type, preserving
        // the original cause.
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
|
python
|
def number_of_records_per_hour(self, value=None):
    """Corresponds to IDD Field `number_of_records_per_hour`

    Args:
        value (int): value for IDD Field `number_of_records_per_hour`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None explicitly marks the field as missing; skip coercion.
        self._number_of_records_per_hour = None
        return
    try:
        coerced = int(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type int '
            'for field `number_of_records_per_hour`'.format(value))
    self._number_of_records_per_hour = coerced
|
python
|
def obtain_access_token(self):
    """Returns an OAuth 2 access token to make OAuth 2 authenticated
    read-only calls.

    :rtype: string

    Raises TwythonError when called on a non-OAuth-2 client and
    TwythonAuthError when the token request fails or the response
    cannot be parsed.
    """
    if self.oauth_version != 2:
        raise TwythonError('This method can only be called when your \
OAuth version is 2.0.')

    # Client-credentials grant with HTTP basic auth (app key/secret).
    data = {'grant_type': 'client_credentials'}
    basic_auth = HTTPBasicAuth(self.app_key, self.app_secret)
    try:
        response = self.client.post(self.request_token_url,
                                    data=data, auth=basic_auth)
        content = response.content.decode('utf-8')
        try:
            # Compatibility shim: if content is a response-like object with
            # .json() use it; plain strings raise AttributeError and are
            # parsed with json.loads below. Presumably covers differing
            # requests versions — confirm before simplifying.
            content = content.json()
        except AttributeError:
            content = json.loads(content)
            access_token = content['access_token']
    except (KeyError, ValueError, requests.exceptions.RequestException):
        raise TwythonAuthError('Unable to obtain OAuth 2 access token.')
    else:
        return access_token
|
java
|
/**
 * Returns a - b as a new DiyFp, leaving both operands untouched.
 */
static DiyFp minus(DiyFp a, DiyFp b) {
    // Copy a first, then subtract b in place on the copy.
    final DiyFp difference = new DiyFp(a.f, a.e);
    difference.subtract(b);
    return difference;
}
|
java
|
/**
 * Returns the rule-set display names for the given locale, falling back to
 * the plain rule-set names with their first character removed when no
 * localized list exists.
 */
public String[] getRuleSetDisplayNames(ULocale loc) {
    String[] names = getNameListForLocale(loc);
    if (names != null) {
        // Defensive copy so callers cannot mutate the localized list.
        return names.clone();
    }
    names = getRuleSetNames();
    // NOTE(review): this mutates the array returned by getRuleSetNames()
    // in place; assumes that method hands out a fresh array rather than
    // internal state — confirm.
    for (int i = 0; i < names.length; ++i) {
        names[i] = names[i].substring(1);
    }
    return names;
}
|
java
|
/**
 * Updates the current position from a GLL sentence; invalid or malformed
 * sentences are silently ignored.
 *
 * @param gll geographic position sentence to consume
 */
public void addGLL( GLLSentence gll ) {
    try {
        if (gll.isValid())
            position = gll.getPosition();
    } catch (Exception e) {
        // ignore it, this should be handled in the isValid,
        // if an exception is thrown, we can't deal with it here.
    }
}
|
python
|
def break_iterable(iterable, pred):
    """Break a iterable on the item that matches the predicate into lists.

    The item that matched the predicate is not included in the result.

    >>> list(break_iterable([1, 2, 3, 4], lambda x: x == 3))
    [[1, 2], [4]]
    """
    chunk = []
    for element in iterable:
        if not pred(element):
            chunk.append(element)
            continue
        # Separator found: emit the accumulated chunk and start a new one.
        yield chunk
        chunk = []
    # Always emit the trailing chunk (possibly empty).
    yield chunk
|
python
|
def parse_lcov_file_info(args, filepath, line_iter, line_coverage_re, file_end_string):
    """Parse the per-file content of an lcov info file.

    Args:
        args: parsed CLI options; only ``max_cov_count`` is used, to clamp
            excessive hit counts.
        filepath: path of the source file the coverage data refers to.
        line_iter: iterator over the remaining lines of the lcov info file.
        line_coverage_re: regex matching a line-coverage record; group 1 is
            the line number, group 2 the hit count.
        file_end_string: marker line terminating this file's records.

    Returns:
        A list with one entry per source line: the (possibly clamped) hit
        count, or None for lines without a coverage record.
    """
    lines_covered = []
    for line in line_iter:
        # BUGFIX: honour the file_end_string parameter; the original
        # compared against a hard-coded "end_of_record" literal.
        if line == file_end_string:
            break
        match = line_coverage_re.match(line)
        if not match:
            continue
        line_no = match.group(1)
        cov_count = int(match.group(2))
        # Clamp runaway hit counts so one hot line doesn't skew reports.
        if args.max_cov_count and cov_count > args.max_cov_count:
            cov_count = args.max_cov_count + 1
        lines_covered.append((line_no, cov_count))

    # Count source lines without leaking the file handle (the original used
    # a bare open() that was never closed).
    with open(filepath, 'r') as source_file:
        num_code_lines = sum(1 for _ in source_file)

    coverage = [None] * num_code_lines
    for line_no, cov_count in lines_covered:
        coverage[int(line_no) - 1] = cov_count
    return coverage
|
java
|
/**
 * Sets (or overwrites) one environment variable for the child process.
 *
 * @return this launcher, for call chaining
 */
public Launcher env(String key, String value) {
    builder.environment().put(key, value);
    return this;
}
|
python
|
def requiv_to_pot_contact(requiv, q, sma, compno=1):
    """Convert an equivalent radius into a contact-envelope potential.

    :param requiv: user-provided equivalent radius
    :param q: mass ratio
    :param sma: semi-major axis (d = sma because we explicitly assume
        circular orbits for contacts)
    :param compno: component number; 1 for primary, 2 for secondary
    :return: potential and fillout factor
    """
    logger.debug("requiv_to_pot_contact(requiv={}, q={}, sma={}, compno={})".format(requiv, q, sma, compno))

    # The library routine works with normalized radii, so express requiv in
    # units of the (circular-orbit) separation; d is then 1 by construction.
    # NOTE: an unused `F = 1.` local in the original was removed as dead code.
    requiv = requiv/sma
    vequiv = 4./3*np.pi*requiv**3
    d = 1.

    logger.debug("libphoebe.roche_contact_Omega_at_partial_vol(vol={}, phi=pi/2, q={}, d={}, choice={})".format(vequiv, q, d, compno-1))
    return libphoebe.roche_contact_Omega_at_partial_vol(vequiv, np.pi/2, q, d, choice=compno-1)
|
java
|
/**
 * Maps a primary collation weight to its reordering group.
 *
 * @param p primary weight; only bits above the low 16 are significant here
 * @return a script index, a Collator.ReorderCodes special value, or -1 when
 *         the primary lies outside all known group boundaries
 */
public int getGroupForPrimary(long p) {
    // Compare by the primary's high bits only.
    p >>= 16;
    if(p < scriptStarts[1] || scriptStarts[scriptStarts.length - 1] <= p) {
        return -1;
    }
    // Locate the scriptStarts range that contains p.
    int index = 1;
    while(p >= scriptStarts[index + 1]) { ++index; }
    // Ordinary scripts first...
    for(int i = 0; i < numScripts; ++i) {
        if(scriptsIndex[i] == index) {
            return i;
        }
    }
    // ...then the special reorder codes stored after the scripts.
    for(int i = 0; i < MAX_NUM_SPECIAL_REORDER_CODES; ++i) {
        if(scriptsIndex[numScripts + i] == index) {
            return Collator.ReorderCodes.FIRST + i;
        }
    }
    return -1;
}
|
java
|
/**
 * Executes a script in the engine and returns the result when it is a
 * JSObject; any other result (including primitives) yields null.
 */
@Override
public JSObject execute(String command) {
    final Object result = engine.executeScript(command);
    return (result instanceof JSObject) ? (JSObject) result : null;
}
|
java
|
/**
 * Sets the board content view's alpha, notifying registered state
 * listeners first (before the view's alpha actually changes).
 *
 * @param alpha the alpha value applied to the content view
 */
public void setBoardAlpha(float alpha) {
    if (mListeners != null) {
        for (StateListener l : mListeners) {
            l.onBoardAlpha(this, alpha);
        }
    }
    mContentView.setAlpha(alpha);
}
|
java
|
/**
 * Java-friendly wrapper around pairRDDFunctions.spanByKey(): each key maps
 * to a java.util.Collection of its spanned values. (The exact spanning
 * semantics — presumably grouping of consecutive equal keys — are defined
 * by the underlying Scala spanByKey; confirm there.)
 *
 * @param keyClassTag class tag for the key type, required by the Scala API
 * @return a JavaPairRDD of key to collection of values
 */
public JavaPairRDD<K, Collection<V>> spanByKey(ClassTag<K> keyClassTag) {
    // Class tags the Scala RDD machinery needs for tuple and value types.
    ClassTag<Tuple2<K, Collection<V>>> tupleClassTag = classTag(Tuple2.class);
    ClassTag<Collection<V>> vClassTag = classTag(Collection.class);
    // Convert each Scala Seq of values into a Java Collection.
    RDD<Tuple2<K, Collection<V>>> newRDD = pairRDDFunctions.spanByKey()
        .map(JavaApiHelper.<K, V, Seq<V>>valuesAsJavaCollection(), tupleClassTag);
    return new JavaPairRDD<>(newRDD, keyClassTag, vClassTag);
}
|
java
|
/**
 * Masks a string by dispatching on which of the four length parameters are
 * non-zero, delegating to the matching deidentify* helper:
 * all zero -> fully masked; left only / right only / left+right; fromLeft
 * only / fromRight only / fromLeft+fromRight. Any other combination
 * returns the text unchanged. (The precise keep-vs-mask semantics of each
 * parameter live in the respective helpers — see them for details.)
 */
public static String deidentify(String text, int left, int right, int fromLeft, int fromRight) {
    // All zeros: replace every character with '*'.
    if (left == 0 && right == 0 && fromLeft == 0 && fromRight == 0) {
        return StringUtils.repeat('*', text.length());
    } else if (left > 0 && right == 0 && fromLeft == 0 && fromRight == 0) {
        return deidentifyLeft(text, left);
    } else if (left == 0 && right > 0 && fromLeft == 0 && fromRight == 0) {
        return deidentifyRight(text, right);
    }else if (left > 0 && right > 0 && fromLeft == 0 && fromRight == 0) {
        return deidentifyEdge(text, left, right);
    }else if (left == 0 && right == 0 && fromLeft > 0 && fromRight == 0) {
        return deidentifyFromLeft(text, fromLeft);
    }else if (left == 0 && right == 0 && fromLeft == 0 && fromRight > 0) {
        return deidentifyFromRight(text, fromRight);
    }else if (left == 0 && right == 0 && fromLeft > 0 && fromRight > 0) {
        return deidentifyMiddle(text, fromLeft, fromRight);
    }
    // Mixed left/right and fromLeft/fromRight parameters: no-op.
    return text;
}
|
java
|
/**
 * Renders a byte array as a PostgreSQL-style escaped string: printable
 * 7-bit ASCII passes through, a backslash is doubled, and every other
 * byte (controls and all 8-bit values) becomes a \nnn octal escape so it
 * survives conversion to any server character set.
 *
 * @param buf bytes to render; may be null
 * @return the escaped string, or null when buf is null
 */
public static String toPGString(byte[] buf) {
    if (buf == null) {
        return null;
    }
    StringBuilder out = new StringBuilder(2 * buf.length);
    for (byte b : buf) {
        // Unsigned value of the byte (0..255).
        int value = b & 0xFF;
        if (value < 040 || value > 0176) {
            // Octal escape: backslash plus three octal digits.
            out.append('\\');
            out.append((char) (((value >> 6) & 0x3) + '0'));
            out.append((char) (((value >> 3) & 0x7) + '0'));
            out.append((char) ((value & 0x07) + '0'));
        } else if (value == '\\') {
            // A literal backslash is doubled.
            out.append("\\\\");
        } else {
            // Printable ASCII passes through unchanged.
            out.append((char) value);
        }
    }
    return out.toString();
}
|
java
|
/**
 * Returns true when the final varna of the given string is a khar.
 */
public static boolean is_kharanta(String str)
{
    return is_khar(VarnaUtil.getAntyaVarna(str));
}
|
python
|
def weave_layers(infiles, output_file, log, context):
    """Apply text layer and/or image layer changes to baseline file

    This is where the magic happens. infiles will be the main PDF to modify,
    and optional .text.pdf and .image-layer.pdf files, organized however ruffus
    organizes them.
    From .text.pdf, we copy the content stream (which contains the Tesseract
    OCR results), and rotate it into place. The first time we do this, we also
    copy the GlyphlessFont, and then reference that font again.
    For .image-layer.pdf, we check if this is a "pointer" to the original file,
    or a new file. If a new file, we replace the page and remember that we
    replaced this page.
    Every 100 open files, we save intermediate results, to avoid any resource
    limits, since pikepdf/qpdf need to keep a lot of open file handles in the
    background. When objects are copied from one file to another qpdf, qpdf
    doesn't actually copy the data until asked to write, so all the resources
    it may need to remain available.
    For completeness, we set up a /ProcSet on every page, although it's
    unlikely any PDF viewer cares about this anymore.

    Args:
        infiles: grouped input paths (the base PDF plus optional per-page
            ``.text.pdf`` / ``.image-layer.pdf`` files) as produced upstream.
        output_file: path the woven PDF is saved to.
        log: logger used for debug output.
        context: job context providing ``get_pdfinfo()``, ``get_rotation()``
            and ``get_options()``.
    """
    # Sort key: page number parsed from the filename; the base PDF (no page
    # number) sorts first with -1.
    def input_sorter(key):
        try:
            return page_number(key)
        except ValueError:
            return -1
    flat_inputs = sorted(flatten_groups(infiles), key=input_sorter)
    groups = groupby(flat_inputs, key=input_sorter)
    # Extract first item
    _, basegroup = next(groups)
    base = list(basegroup)[0]
    path_base = Path(base).resolve()
    pdf_base = pikepdf.open(path_base)
    font, font_key, procset = None, None, None
    pdfinfo = context.get_pdfinfo()
    # Maps old page objgen -> new objgen for pages we replace, so the table
    # of contents can be repaired afterwards by _fix_toc.
    pagerefs = {}
    procset = pdf_base.make_indirect(
        pikepdf.Object.parse(b'[ /PDF /Text /ImageB /ImageC /ImageI ]')
    )
    replacements = 0
    # Iterate rest
    for page_num, layers in groups:
        layers = list(layers)
        log.debug(page_num)
        log.debug(layers)
        # Each group may contain a text layer, an image layer, both, or neither.
        text = next((ii for ii in layers if ii.endswith('.text.pdf')), None)
        image = next((ii for ii in layers if ii.endswith('.image-layer.pdf')), None)
        if text and not font:
            # First text layer seen: copy the OCR font into the base PDF once
            # and reuse it for all subsequent pages.
            font, font_key = _find_font(text, pdf_base)
        replacing = False
        content_rotation = pdfinfo[page_num - 1].rotation
        path_image = Path(image).resolve() if image else None
        if path_image is not None and path_image != path_base:
            # We are replacing the old page with a rasterized PDF of the new
            # page
            log.debug("Replace")
            old_objgen = pdf_base.pages[page_num - 1].objgen
            with pikepdf.open(image) as pdf_image:
                replacements += 1
                image_page = pdf_image.pages[0]
                pdf_base.pages[page_num - 1] = image_page
            # We're adding a new page, which will get a new objgen number pair,
            # so we need to update any references to it. qpdf did not like
            # my attempt to update the old object in place, but that is an
            # option to consider
            pagerefs[old_objgen] = pdf_base.pages[page_num - 1].objgen
            replacing = True
        autorotate_correction = context.get_rotation(page_num - 1)
        if replacing:
            # A rasterized replacement page is already upright; its content
            # rotation is just the autorotate correction.
            content_rotation = autorotate_correction
        text_rotation = autorotate_correction
        # Extra rotation needed to align the text layer with the page content.
        text_misaligned = (text_rotation - content_rotation) % 360
        log.debug(
            '%r',
            [text_rotation, autorotate_correction, text_misaligned, content_rotation],
        )
        if text and font:
            # Graft the text layer onto this page, whether new or old
            strip_old = context.get_options().redo_ocr
            _weave_layers_graft(
                pdf_base=pdf_base,
                page_num=page_num,
                text=text,
                font=font,
                font_key=font_key,
                rotation=text_misaligned,
                procset=procset,
                strip_old_text=strip_old,
                log=log,
            )
        # Correct the rotation if applicable
        pdf_base.pages[page_num - 1].Rotate = (
            content_rotation - autorotate_correction
        ) % 360
        # NOTE(review): when replacements == 0, `0 % MAX_REPLACE_PAGES == 0`
        # is True, so this save/reload branch runs on every page until the
        # first image replacement occurs — confirm whether
        # `replacements > 0 and ...` was intended.
        if replacements % MAX_REPLACE_PAGES == 0:
            # Periodically save and reload the Pdf object. This will keep a
            # lid on our memory usage for very large files. Attach the font to
            # page 1 even if page 1 doesn't use it, so we have a way to get it
            # back.
            # TODO refactor this to outside the loop
            page0 = pdf_base.pages[0]
            _update_page_resources(
                page=page0, font=font, font_key=font_key, procset=procset
            )
            # assumes output_file is a str — TODO confirm it is never a Path
            interim = output_file + f'_working{page_num}.pdf'
            pdf_base.save(interim)
            pdf_base.close()
            pdf_base = pikepdf.open(interim)
            # Reacquire the ProcSet from the reloaded file; objects from the
            # closed Pdf must not be reused.
            procset = pdf_base.pages[0].Resources.ProcSet
            font, font_key = None, None  # Reacquire this information
    # Repair outline/TOC destinations that pointed at replaced pages.
    _fix_toc(pdf_base, pagerefs, log)
    pdf_base.save(output_file)
    pdf_base.close()
|
python
|
def in6_cidr2mask(m):
    """
    Return the mask (bitstring) associated with provided length
    value. For instance if function is called on 48, return value is
    '\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'.
    """
    if m < 0 or m > 128:
        raise Kamene_Exception("value provided to in6_cidr2mask outside [0, 128] domain (%d)" % m)
    # Build the 128-bit mask as four 32-bit words, consuming up to 32
    # prefix bits per word.
    words = []
    remaining = m
    for _ in range(4):
        bits = min(32, max(0, remaining))
        words.append(((1 << bits) - 1) << (32 - bits))
        remaining -= 32
    return b"".join(struct.pack('!I', w) for w in words)
|
java
|
public void addLine(String line) {
    // Parse a single "key=value" line; lines with no '=' or an empty key
    // are silently ignored.
    final int eq = line.indexOf('=');
    if (eq <= 0) {
        return;
    }
    put(line.substring(0, eq), line.substring(eq + 1));
}
|
java
|
public int setBytes(final long pos, final byte[] bytes) throws SQLException {
    // pos is 1-based (JDBC Blob convention); convert to a 0-based offset.
    final int offset = (int) pos - 1;
    final int written;
    if (blobContent == null) {
        // No backing array yet: allocate one just large enough.
        // Note: the reported count includes any leading zero padding.
        blobContent = new byte[offset + bytes.length];
        written = blobContent.length;
        actualSize = written;
    } else if (blobContent.length > offset + bytes.length) {
        // Existing array already has room; size bookkeeping is unchanged.
        written = bytes.length;
    } else {
        // Grow the array to exactly fit the new data.
        blobContent = Utils.copyWithLength(blobContent, offset + bytes.length);
        actualSize = blobContent.length;
        written = bytes.length;
    }
    System.arraycopy(bytes, 0, blobContent, offset, bytes.length);
    return written;
}
|
java
|
/**
 * Enables static mocking for the given class.
 *
 * @param type    the class whose static methods should be mocked
 * @param methods optionally, the specific methods to mock; presumably all
 *                static methods are mocked when none are given — behavior is
 *                decided by {@code doMock}, verify against its implementation
 */
public static synchronized void mockStatic(Class<?> type, Method... methods) {
    // Delegates to the shared mock entry point with staticMocking = true and
    // the default mock strategy.
    doMock(type, true, new DefaultMockStrategy(), null, methods);
}
|
java
|
/**
 * Registers every additional viewer with the first viewer's synchronizer,
 * so they all follow the same synchronization group.
 */
public static void synchronizeViewers(ImageViewer first, ImageViewer... others) {
    final Synchronizer sync = first.getSynchronizer();
    for (ImageViewer viewer : others) {
        sync.add(viewer);
    }
}
|
java
|
/**
 * Appends the given bullet points to this object's list, lazily creating the
 * list (sized to the input) on first use.
 *
 * <p>Improvement: replaced the manual element-copy loop with
 * {@code java.util.Collections.addAll}, the idiomatic way to append varargs
 * to a collection.</p>
 *
 * @param bulletPoints bullet points to append
 * @return this object, for call chaining
 */
public SkillDetails withBulletPoints(String... bulletPoints) {
    if (this.bulletPoints == null) {
        setBulletPoints(new java.util.ArrayList<String>(bulletPoints.length));
    }
    java.util.Collections.addAll(this.bulletPoints, bulletPoints);
    return this;
}
|
java
|
public String printShortLocaleTime()
{
    // Lazily build the SHORT-style locale time formatter on first use,
    // then cache it in the field for subsequent calls.
    DateFormat fmt = _shortTimeFormat;
    if (fmt == null) {
        fmt = DateFormat.getTimeInstance(DateFormat.SHORT);
        _shortTimeFormat = fmt;
    }
    // Reuse the shared Date instance rather than allocating a new one.
    _date.setTime(_localTimeOfEpoch);
    return fmt.format(_date);
}
|
python
|
def transform_generator(fn):
    """A decorator that marks transform pipes that should be called to create the real transform.

    Args:
        fn: the function to mark.

    Returns:
        The same function, with ``is_transform_generator`` set in its
        attribute dict.
    """
    # `fn.__dict__` is the same mapping `six`'s `func_dict` exposed on
    # Python 2, so the old PY2/PY3 branch (and the `six` dependency) was
    # unnecessary: this single statement behaves identically on both.
    fn.__dict__['is_transform_generator'] = True
    return fn
|
python
|
def prep_parallel(self, binary_args, other_args):
    """Prepare the parallel calculations.

    Splits the binary arguments into ``num_splits``-sized chunks and stores
    one argument tuple per chunk in ``self.args`` for later parallel
    dispatch. Scalars in ``binary_args`` are passed through unsliced.

    Args:
        binary_args (list): List of binary arguments for input into the SNR function.
        other_args (tuple of obj): tuple of other args for input into parallel snr function.
    """
    if self.length < 100:
        raise Exception("Run this across 1 processor by setting num_processors kwarg to None.")
    # -1 means "use every available core".
    if self.num_processors == -1:
        self.num_processors = mp.cpu_count()
    num_chunks = int(np.ceil(self.length / self.num_splits))
    boundaries = [self.num_splits * j for j in np.arange(1, num_chunks)]
    chunks = np.split(np.arange(self.length), boundaries)
    self.args = []
    for chunk_index, chunk in enumerate(chunks):
        sliced = []
        for arg in binary_args:
            try:
                # Array-like arguments are sliced per chunk ...
                sliced.append(arg[chunk])
            except TypeError:
                # ... scalars are forwarded unchanged.
                sliced.append(arg)
        self.args.append((chunk_index, tuple(sliced)) + other_args)
    return
|
java
|
/**
 * Creates a file-backed {@link ListStore} for elements of the given type.
 *
 * @param file      the file backing the list store
 * @param converter the converter used to (de)serialize elements
 * @param type      the element type the converter should target
 * @param <T>       the element type
 * @return a new {@code RealListStore} wrapping the given file
 */
public static <T> ListStore<T> list(@NonNull File file, @NonNull Converter converter,
        @NonNull Type type) {
    return new RealListStore<T>(file, converter, type);
}
|
java
|
private void scaleToOutputResolution(Image image) {
    // Scale the image by the context's dots-per-pixel factor; a factor of
    // exactly 1 means no scaling is needed, so skip the call.
    final float dotsPerPixel = m_sharedContext.getDotsPerPixel();
    if (dotsPerPixel == 1.0f) {
        return;
    }
    image.scaleAbsolute(image.getPlainWidth() * dotsPerPixel,
            image.getPlainHeight() * dotsPerPixel);
}
|
python
|
def Initialize(api_key, api_secret, api_host="localhost", api_port=443, api_ssl=True, asyncblock=False, timeout=10, req_method="get"):
    """ Initializes the Cloudstack API

    Discovers the server's API (via the Api Discovery plugin) the first time,
    caches the result on disk, and attaches one method per discovered API
    call to ``CSApi`` before returning a configured instance.

    Accepts arguments:
        api_host (localhost)
        api_port (443)
        api_ssl (True)
        api_key
        api_secret
        asyncblock (False)
        timeout (10)
        req_method ("get")
    """
    config = Config()
    if api_ssl:
        proto = "https"
    else:
        proto = "http"
    api_url = "%s://%s:%s/client/api" % (proto, api_host, api_port)
    # Cache directory: the user's home if writable, else the temp dir.
    # (Fix: expanduser was previously computed twice.)
    home = os.path.expanduser("~")
    if os.access(home, os.W_OK):
        d = home
    else:
        d = tempfile.gettempdir()
    cache_file = os.getenv('MOLNCTRL_CACHE') or '.molnctrl_cache'
    cache_path = os.path.join(d, cache_file)
    if os.path.exists(cache_path):
        # NOTE: unpickling a user-writable cache file executes arbitrary
        # code if the file is tampered with — consider a safer format.
        # (Fix: the file handle was previously opened and never closed.)
        with open(cache_path, "rb") as cache_fh:
            apicache = pickle.load(cache_fh)
    else:
        # Bootstrap with a hand-written descriptor for listApis, so we can
        # ask the server to describe every other call.
        method = {'description': u'lists all available apis on the server, provided by the Api Discovery plugin',
                  'isasync': False,
                  'name': u'listApis',
                  'params': [{'description': u'API name',
                              'length': 255,
                              'name': u'name',
                              'related': [],
                              'required': False,
                              'type': u'string'}],
                  'related': [],
                  'requiredparams': []}
        _create_api_method(CSApi, "list_apis", method)
        c = CSApi(api_url, api_key, api_secret, asyncblock)
        apicache = cachemaker.monkeycache(c.list_apis())
        # (Fix: the file handle was previously opened and never closed.)
        with open(cache_path, "wb") as cache_fh:
            pickle.dump(apicache, cache_fh)
    # Attach one method per discovered (verb, name) pair, e.g. list_users.
    for verb, methods in six.iteritems(apicache):
        if isinstance(methods, dict):
            for method in six.iterkeys(methods):
                _create_api_method(CSApi, "%s_%s" % (verb, method), methods[method])
    return CSApi(api_url, api_key, api_secret, asyncblock, timeout, req_method)
|
java
|
/**
 * Resets all scanner/lexer state so parsing can restart from the beginning
 * of the buffer. Allocates the buffer lazily on first use.
 */
public void resetCursor() {
    if(buffer == null) {
        // Lazily allocate the backing buffer; bufsize is configured elsewhere.
        buffer = Pointer.create(new byte[bufsize], 0);
    }
    // Zero the byte at the buffer's start position.
    buffer.buffer[buffer.start] = 0;
    // -1 marks each position/offset as "unset".
    cursor = -1;
    lineptr = -1;
    linectptr = -1;
    token = -1;
    toktmp = -1;
    marker = -1;
    limit = -1;
    // Clear parse-tree roots and counters.
    root = null;
    root_on_error = null;
    linect = 0;
    eof = false;
    last_token = 0;
    force_token = 0;
}
|
java
|
protected boolean isFloatingPointType(Object left, Object right) {
    // True when either operand is a boxed Float or Double.
    final boolean leftIsFloating = left instanceof Float || left instanceof Double;
    final boolean rightIsFloating = right instanceof Float || right instanceof Double;
    return leftIsFloating || rightIsFloating;
}
|
python
|
def delta_E( reactants, products, check_balance=True ):
    """
    Calculate the change in energy for reactants --> products.

    Args:
        reactants (list(vasppy.Calculation): A list of vasppy.Calculation objects. The initial state.
        products (list(vasppy.Calculation): A list of vasppy.Calculation objects. The final state.
        check_balance (bool:optional): Check that the reaction stoichiometry is balanced. Default: True.

    Returns:
        (float) The change in energy.

    Raises:
        ValueError: If ``check_balance`` is True and the reaction
            stoichiometry is not balanced.
    """
    if check_balance:
        # Fix: compute the stoichiometry difference once; the original
        # evaluated delta_stoichiometry twice on the failure path.
        imbalance = delta_stoichiometry( reactants, products )
        if imbalance != {}:
            raise ValueError( "reaction is not balanced: {}".format( imbalance ) )
    return sum( c.energy for c in products ) - sum( c.energy for c in reactants )
|
python
|
def delete_vault(self, vault_id):
    """Deletes a ``Vault``.

    arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault`` to
            remove
    raise:  NotFound - ``vault_id`` not found
    raise:  NullArgument - ``vault_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinAdminSession.delete_bin_template
    if self._catalog_session is not None:
        # Delegate to the configured catalog session when one exists.
        return self._catalog_session.delete_catalog(catalog_id=vault_id)
    vault_collection = JSONClientValidated('authorization',
                                           collection='Vault',
                                           runtime=self._runtime)
    if not isinstance(vault_id, ABCId):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    # A vault may only be deleted when no objects are still assigned to it.
    for catalog_name in ('Authorization', 'Function', 'Qualifier', 'Vault'):
        members = JSONClientValidated('authorization',
                                      collection=catalog_name,
                                      runtime=self._runtime)
        if members.find({'assignedVaultIds': {'$in': [str(vault_id)]}}).count() != 0:
            raise errors.IllegalState('catalog is not empty')
    vault_collection.delete_one({'_id': ObjectId(vault_id.get_identifier())})
|
java
|
/**
 * Returns the default cache by delegating to the keyed overload with the
 * {@code DEFAULT} cache name.
 *
 * @param <K> the key type (must be serializable)
 * @param <V> the value type
 * @return the default cache's backing map
 */
@Override
public <K extends Serializable, V> Map<K, V> getCache() {
    return this.getCache(DEFAULT);
}
|
java
|
/**
 * Creates a {@link JAXBElement} wrapping a GML {@code Ring} element
 * (substitution head {@code _Ring}). Standard JAXB ObjectFactory method.
 *
 * @param value the ring payload to wrap
 * @return a {@code JAXBElement<RingType>} for the GML {@code Ring} element
 */
@XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "Ring", substitutionHeadNamespace = "http://www.opengis.net/gml", substitutionHeadName = "_Ring")
public JAXBElement<RingType> createRing(RingType value) {
    return new JAXBElement<RingType>(_Ring_QNAME, RingType.class, null, value);
}
|
python
|
def getGenomeList() :
    """Return the names of all imported genomes.

    Returns:
        list: the ``name`` of every ``Genome_Raba`` record in the database.
    """
    import rabaDB.filters as rfilt
    # Idiom fix: replaced the manual append loop with a list comprehension.
    query = rfilt.RabaQuery(Genome_Raba)
    return [genome.name for genome in query.iterRun()]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.