language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
|---|---|
python
|
def plot_distance_landscape_projection(self, x_axis, y_axis, ax=None, *args, **kwargs):
    """
    Plot the projection of the distance landscape (if it was returned)
    onto the two specified parameters.

    :param x_axis: symbol to plot on x axis
    :param y_axis: symbol to plot on y axis
    :param ax: axis object to plot onto
    :param args: arguments to pass to :func:`matplotlib.pyplot.contourf`
    :param kwargs: keyword arguments to pass to :func:`matplotlib.pyplot.contourf`
    :return:
    """
    # Project the landscape onto the requested parameter pair, then contour it.
    grid_x, grid_y, grid_z = self.distance_landscape_as_3d_data(x_axis, y_axis)
    plot_contour(grid_x, grid_y, grid_z, x_axis, y_axis, ax=ax, *args, **kwargs)
|
java
|
/**
 * Marshalls the given {@link DescribeAssociationRequest} onto the protocol
 * marshaller, one field/binding pair at a time.
 *
 * @param describeAssociationRequest the request to marshall; must not be null
 * @param protocolMarshaller the target marshaller
 * @throws SdkClientException if the request is null or marshalling fails
 */
public void marshall(DescribeAssociationRequest describeAssociationRequest, ProtocolMarshaller protocolMarshaller) {
    if (describeAssociationRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        // Each getter is paired with its static marshalling binding constant.
        protocolMarshaller.marshall(describeAssociationRequest.getName(), NAME_BINDING);
        protocolMarshaller.marshall(describeAssociationRequest.getInstanceId(), INSTANCEID_BINDING);
        protocolMarshaller.marshall(describeAssociationRequest.getAssociationId(), ASSOCIATIONID_BINDING);
        protocolMarshaller.marshall(describeAssociationRequest.getAssociationVersion(), ASSOCIATIONVERSION_BINDING);
    } catch (Exception e) {
        // Wrap any marshalling failure in the SDK's client exception type.
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
|
python
|
def make_query(args, other=None, limit=None, strand=None, featuretype=None,
               extra=None, order_by=None, reverse=False,
               completely_within=False):
    """
    Multi-purpose, bare-bones ORM function.

    This function composes queries given some commonly-used kwargs that can be
    passed to FeatureDB methods (like .parents(), .children(), .all_features(),
    .features_of_type()). It handles, in one place, things like restricting to
    featuretype, limiting to a genomic range, limiting to one strand, or
    returning results ordered by different criteria.

    Additional filtering/subsetting/sorting behavior should be added here.

    (Note: this ended up having better performance (and flexibility) than
    sqlalchemy)

    This function also provides support for additional JOINs etc (supplied via
    the `other` kwarg) and extra conditional clauses (`extra` kwarg). See the
    `_QUERY` var below for the order in which they are used.

    For example, FeatureDB._relation uses `other` to supply the JOIN
    substatment, and that same method also uses `extra` to supply the
    "relations.level = ?" substatment (see the source for FeatureDB._relation
    for more details).

    `args` contains the arguments that will ultimately be supplied to the
    sqlite3.connection.execute function. It may be further populated below --
    for example, if strand="+", then the query will include a strand clause,
    and the strand will be appended to the args.

    `args` can be pre-filled with args that are passed to `other` and `extra`.

    Parameters
    ----------
    args : list
        Mutable list of SQL placeholder values; it is mutated in place and
        returned alongside the query string.
    other : str, optional
        Extra JOIN/subquery SQL placed right after the SELECT.
    limit : str or (seqid, start, end) tuple, optional
        Genomic restriction, either "chrom:start-end" or a 3-tuple.
    strand : str, optional
        Restrict results to this strand.
    featuretype : str or iterable of str, optional
        Restrict results to one or several feature types.
    extra : str, optional
        Additional conditional SQL clause.
    order_by : str or iterable of str, optional
        Column(s) to order by; 'length' is translated to (end - start).
    reverse : bool
        If True, order descending instead of ascending.
    completely_within : bool
        If True, only features fully contained in `limit` match; otherwise
        any overlap matches.

    Returns
    -------
    tuple
        (query_string, args) ready for sqlite3 execution.
    """
    _QUERY = ("{_SELECT} {OTHER} {EXTRA} {FEATURETYPE} "
              "{LIMIT} {STRAND} {ORDER_BY}")
    # Construct a dictionary `d` that will be used later as _QUERY.format(**d).
    # Default is just _SELECT, which returns all records in the features table.
    # (Recall that constants._SELECT gets the fields in the order needed to
    # reconstruct a Feature)
    d = dict(_SELECT=constants._SELECT, OTHER="", FEATURETYPE="", LIMIT="",
             STRAND="", ORDER_BY="", EXTRA="")
    if other:
        d['OTHER'] = other
    if extra:
        d['EXTRA'] = extra
    # If `other` and `extra` take args (that is, they have "?" in them), then
    # they should have been provided in `args`.
    required_args = (d['EXTRA'] + d['OTHER']).count('?')
    if len(args) != required_args:
        raise ValueError('Not enough args (%s) for subquery' % args)
    # Below, if a kwarg is specified, then we create sections of the query --
    # appending to args as necessary.
    #
    # IMPORTANT: the order in which things are processed here is the same as
    # the order of the placeholders in _QUERY. That is, we need to build the
    # args in parallel with the query to avoid putting the wrong args in the
    # wrong place.
    if featuretype:
        # Handle single or iterables of featuretypes.
        #
        # e.g., "featuretype = 'exon'"
        #
        # or, "featuretype IN ('exon', 'CDS')"
        if isinstance(featuretype, six.string_types):
            d['FEATURETYPE'] = "features.featuretype = ?"
            args.append(featuretype)
        else:
            d['FEATURETYPE'] = (
                "features.featuretype IN (%s)"
                % (','.join(["?" for _ in featuretype]))
            )
            args.extend(featuretype)
    if limit:
        # Restrict to a genomic region. Makes use of the UCSC binning strategy
        # for performance.
        #
        # `limit` is a string or a tuple of (chrom, start, stop)
        #
        # e.g., "seqid = 'chr2L' AND start > 1000 AND end < 5000"
        if isinstance(limit, six.string_types):
            seqid, startstop = limit.split(':')
            start, end = startstop.split('-')
        else:
            seqid, start, end = limit
        # Identify possible bins
        _bins = bins.bins(int(start), int(end), one=False)
        # Use different overlap conditions
        if completely_within:
            d['LIMIT'] = (
                "features.seqid = ? AND features.start >= ? "
                "AND features.end <= ?"
            )
            args.extend([seqid, start, end])
        else:
            # Any-overlap test: feature.start <= query.end AND
            # feature.end >= query.start.
            d['LIMIT'] = (
                "features.seqid = ? AND features.start <= ? "
                "AND features.end >= ?"
            )
            # Note order (end, start)
            args.extend([seqid, end, start])
        # Add bin clause. See issue #45.
        if len(_bins) < 900:
            d['LIMIT'] += " AND features.bin IN (%s)" % (','.join(map(str, _bins)))
    if strand:
        # e.g., "strand = '+'"
        d['STRAND'] = "features.strand = ?"
        args.append(strand)
    # TODO: implement file_order!
    valid_order_by = constants._gffkeys_extra + ['file_order', 'length']
    _order_by = []
    if order_by:
        # Default is essentially random order.
        #
        # e.g. "ORDER BY seqid, start DESC"
        #
        # NOTE(review): a single-string order_by is not validated against
        # valid_order_by and gets no 'length' translation -- presumably
        # intentional (raw SQL pass-through); confirm.
        if isinstance(order_by, six.string_types):
            _order_by.append(order_by)
        else:
            for k in order_by:
                if k not in valid_order_by:
                    raise ValueError("%s not a valid order-by value in %s"
                                     % (k, valid_order_by))
                # There's no length field, so order by end - start
                if k == 'length':
                    k = '(end - start)'
                _order_by.append(k)
        _order_by = ','.join(_order_by)
        if reverse:
            direction = 'DESC'
        else:
            direction = 'ASC'
        d['ORDER_BY'] = 'ORDER BY %s %s' % (_order_by, direction)
    # Ensure only one "WHERE" is included; the rest get "AND ". This is ugly.
    where = False
    if "where" in d['OTHER'].lower():
        where = True
    for i in ['EXTRA', 'FEATURETYPE', 'LIMIT', 'STRAND']:
        if d[i]:
            if not where:
                d[i] = "WHERE " + d[i]
                where = True
            else:
                d[i] = "AND " + d[i]
    return _QUERY.format(**d), args
|
python
|
async def route_msg(self, msg):
    """Given a message, route it either to the incoming queue, or to the
    future associated with its correlation_id.
    """
    cid = msg.correlation_id
    # No future waiting for this correlation id: it is unsolicited traffic.
    if cid not in self._futures:
        await self._push_incoming(msg)
    else:
        self._set_reply(cid, msg)
|
java
|
/**
 * Looks up the property at the given path and renders it as a string.
 *
 * @param propertyPath path of the property to resolve
 * @return the property value's {@code toString()} representation
 * @throws PropertyException if the property cannot be resolved
 */
public String getString(String propertyPath) throws PropertyException {
    return getValue(propertyPath).toString();
}
|
java
|
public static Geometry ringBuffer(Geometry geom, double bufferDistance,
int numBuffer, String parameters, boolean doDifference) throws SQLException {
if(geom==null){
return null;
}
if (geom.getNumGeometries() > 1) {
throw new SQLException("This function supports only single geometry : point, linestring or polygon.");
} else {
String[] buffParemeters = parameters.split("\\s+");
BufferParameters bufferParameters = new BufferParameters();
for (String params : buffParemeters) {
String[] keyValue = params.split("=");
if (keyValue[0].equalsIgnoreCase("endcap")) {
String param = keyValue[1];
if (param.equalsIgnoreCase("round")) {
bufferParameters.setEndCapStyle(BufferParameters.CAP_ROUND);
} else if (param.equalsIgnoreCase("square")) {
bufferParameters.setEndCapStyle(BufferParameters.CAP_SQUARE);
} else {
throw new IllegalArgumentException("Supported join values are round or square.");
}
} else if (keyValue[0].equalsIgnoreCase("join")) {
String param = keyValue[1];
if (param.equalsIgnoreCase("bevel")) {
bufferParameters.setJoinStyle(BufferParameters.JOIN_BEVEL);
} else if (param.equalsIgnoreCase("mitre") || param.equalsIgnoreCase("miter")) {
bufferParameters.setJoinStyle(BufferParameters.JOIN_MITRE);
} else if (param.equalsIgnoreCase("round")) {
bufferParameters.setJoinStyle(BufferParameters.JOIN_ROUND);
} else {
throw new IllegalArgumentException("Supported join values are bevel, mitre, miter or round.");
}
} else if (keyValue[0].equalsIgnoreCase("mitre_limit") || keyValue[0].equalsIgnoreCase("miter_limit")) {
bufferParameters.setMitreLimit(Double.valueOf(keyValue[1]));
} else if (keyValue[0].equalsIgnoreCase("quad_segs")) {
bufferParameters.setQuadrantSegments(Integer.valueOf(keyValue[1]));
} else {
throw new IllegalArgumentException("Unknown parameters. Please read the documentation.");
}
}
if (bufferDistance > 0) {
return computePositiveRingBuffer(geom, bufferDistance, numBuffer, bufferParameters, doDifference);
} else if (bufferDistance < 0) {
if (geom instanceof Point) {
throw new SQLException("Cannot compute a negative ring buffer on a point.");
} else {
return computeNegativeRingBuffer(geom, bufferDistance, numBuffer, bufferParameters, doDifference);
}
} else {
return geom;
}
}
}
|
java
|
/**
 * Installs a header/footer for one of the four display positions, switching
 * this group into multiple-headers mode. Unknown displayAt values are
 * silently ignored.
 *
 * @param headerFooter the header/footer content to wrap
 * @param displayAt one of the RtfHeaderFooter.DISPLAY_* constants
 */
public void setHeaderFooter(HeaderFooter headerFooter, int displayAt) {
    this.mode = MODE_MULTIPLE;
    switch(displayAt) {
        case RtfHeaderFooter.DISPLAY_ALL_PAGES:
            headerAll = new RtfHeaderFooter(this.document, headerFooter, this.type, displayAt);
            break;
        case RtfHeaderFooter.DISPLAY_FIRST_PAGE:
            headerFirst = new RtfHeaderFooter(this.document, headerFooter, this.type, displayAt);
            break;
        case RtfHeaderFooter.DISPLAY_LEFT_PAGES:
            headerLeft = new RtfHeaderFooter(this.document, headerFooter, this.type, displayAt);
            break;
        case RtfHeaderFooter.DISPLAY_RIGHT_PAGES:
            headerRight = new RtfHeaderFooter(this.document, headerFooter, this.type, displayAt);
            break;
    }
}
|
python
|
def call_somatic(tumor_name, normal_name):
    """Call SOMATIC variants from tumor/normal calls, adding REJECT filters and SOMATIC flag.

    Works from stdin and writes to stdout, finding positions of tumor and normal samples.

    Uses MuTect like somatic filter based on implementation in speedseq:
    https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L62

    Extracts the genotype likelihoods (GLs) from FreeBayes, which are like phred scores
    except not multiplied by 10.0 (https://en.wikipedia.org/wiki/Phred_quality_score).
    For tumors, we retrieve the best likelihood to not be reference (the first GL) and
    for normal, the best likelhood to be reference.

    After calculating the likelihoods, we compare these to thresholds to pass variants
    at tuned sensitivity/precision. Tuning done on DREAM synthetic 3 dataset evaluations.

    We also check that the frequency of the tumor exceeds the frequency of the normal by
    a threshold to avoid calls that are low frequency in both tumor and normal. This supports
    both FreeBayes and VarDict output frequencies.

    :param tumor_name: sample name of the tumor column in the VCF header
    :param normal_name: sample name of the normal column in the VCF header
    """
    # Thresholds are like phred scores, so 3.5 = phred35
    tumor_thresh, normal_thresh = 3.5, 3.5
    # Extra VCF header lines describing the SOMATIC flag and REJECT filter.
    new_headers = ['##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="Somatic event">\n',
                   ('##FILTER=<ID=REJECT,Description="Not somatic due to normal call frequency '
                    'or phred likelihoods: tumor: %s, normal %s.">\n')
                   % (int(tumor_thresh * 10), int(normal_thresh * 10))]
    def _output_filter_line(line, indexes):
        # Annotate a single record: SOMATIC flag on pass, REJECT filter otherwise.
        parts = line.split("\t")
        if _check_lods(parts, tumor_thresh, normal_thresh, indexes) and _check_freqs(parts, indexes):
            parts[7] = parts[7] + ";SOMATIC"
        else:
            if parts[6] in set([".", "PASS"]):
                parts[6] = "REJECT"
            else:
                parts[6] += ";REJECT"
        line = "\t".join(parts)
        sys.stdout.write(line)
    def _write_header(header):
        # Insert the new header lines just before the final (#CHROM) line.
        for hline in header[:-1] + new_headers + [header[-1]]:
            sys.stdout.write(hline)
    header = []
    indexes = None
    for line in sys.stdin:
        if not indexes:
            if line.startswith("#"):
                header.append(line)
            else:
                # First data line: locate sample columns from the last header
                # line, emit the augmented header, then process this record.
                parts = header[-1].rstrip().split("\t")
                indexes = {"tumor": parts.index(tumor_name), "normal": parts.index(normal_name)}
                _write_header(header)
                _output_filter_line(line, indexes)
        else:
            _output_filter_line(line, indexes)
    # no calls, only output the header
    if not indexes:
        _write_header(header)
|
java
|
/**
 * Sets the local address of this RPC context. A null host is ignored;
 * out-of-range ports are normalized to 0.
 *
 * @param host local host name or IP; ignored when null
 * @param port local port; values outside [0, 65535] are replaced by 0
 * @return this context, for chaining
 * @deprecated retained for backward compatibility
 */
@Deprecated
public RpcInternalContext setLocalAddress(String host, int port) {
    if (host == null) {
        return this;
    }
    if (port < 0 || port > 0xFFFF) {
        port = 0;
    }
    // Check for null in advance to keep createUnresolved from throwing and
    // hurting performance.
    this.localAddress = InetSocketAddress.createUnresolved(host, port);
    return this;
}
|
python
|
def outcount(dset,fraction=0.1):
    '''gets outlier count and returns ``(list of proportion of outliers by timepoint, total percentage of outlier time points)``

    Runs AFNI's ``3dToutcount`` on ``dset``. A timepoint counts as an outlier
    timepoint when its outlier fraction is >= ``fraction``.

    NOTE(review): implicitly returns ``None`` when 3dToutcount fails or
    produces no output -- callers must handle that case.
    '''
    # Detrending polynomial order chosen automatically for this dataset.
    polort = nl.auto_polort(dset)
    info = nl.dset_info(dset)
    # -fraction: report the per-timepoint outlier fraction rather than raw counts.
    o = nl.run(['3dToutcount','-fraction','-automask','-polort',polort,dset],stderr=None,quiet=None)
    if o.return_code==0 and o.output:
        oc = [float(x) for x in o.output.split('\n') if x.strip()!='']
        # True for timepoints whose outlier fraction stays below the threshold.
        binary_outcount = [x<fraction for x in oc]
        # NOTE(review): assumes info.reps == len(oc) -- confirm against dset_info.
        perc_outliers = 1 - (sum(binary_outcount)/float(info.reps))
        return (oc,perc_outliers)
|
python
|
def get_parent_element(self):
    """Signatures and Audit elements share sub-elements, we need to know which to set attributes on"""
    # Map each reference state to the context element attributes belong on.
    parent_by_state = {
        AUDIT_REF_STATE: self.context.audit_record,
        SIGNATURE_REF_STATE: self.context.signature,
    }
    return parent_by_state[self.ref_state]
|
java
|
/**
 * Builds the on-disk path of the key file for the given key type:
 * {@code <dataDirectory>/keys/<type>.key} (type name lower-cased).
 *
 * @param keyType the key type whose file path is requested
 * @return the key file location
 */
private File getKeyPath(final KeyType keyType) {
    final String fileName = keyType.name().toLowerCase() + ".key";
    final String keysDir = Config.getInstance().getDataDirectorty()
            + File.separator + "keys";
    return new File(keysDir + File.separator + fileName);
}
|
java
|
/**
 * Compresses and visits a stack map frame: counts the significant local and
 * stack entries, then copies them into the output between startFrame and
 * endFrame.
 *
 * @param f the frame to visit (reads its inputLocals / inputStack)
 */
private void visitFrame(final Frame f) {
    int i, t;
    int nTop = 0;
    int nLocal = 0;
    int nStack = 0;
    int[] locals = f.inputLocals;
    int[] stacks = f.inputStack;
    // computes the number of locals (ignores TOP types that are just after
    // a LONG or a DOUBLE, and all trailing TOP types)
    for (i = 0; i < locals.length; ++i) {
        t = locals[i];
        if (t == Frame.TOP) {
            // Defer counting TOPs: they only count if a real type follows.
            ++nTop;
        } else {
            nLocal += nTop + 1;
            nTop = 0;
        }
        if (t == Frame.LONG || t == Frame.DOUBLE) {
            // Category-2 types occupy two slots; skip the implicit TOP.
            ++i;
        }
    }
    // computes the stack size (ignores TOP types that are just after
    // a LONG or a DOUBLE)
    for (i = 0; i < stacks.length; ++i) {
        t = stacks[i];
        ++nStack;
        if (t == Frame.LONG || t == Frame.DOUBLE) {
            ++i;
        }
    }
    // visits the frame and its content
    startFrame(f.owner.position, nLocal, nStack);
    for (i = 0; nLocal > 0; ++i, --nLocal) {
        t = locals[i];
        frame[frameIndex++] = t;
        if (t == Frame.LONG || t == Frame.DOUBLE) {
            ++i;
        }
    }
    for (i = 0; i < stacks.length; ++i) {
        t = stacks[i];
        frame[frameIndex++] = t;
        if (t == Frame.LONG || t == Frame.DOUBLE) {
            ++i;
        }
    }
    endFrame();
}
|
python
|
def cmdline(argv, flags):
    """A cmdopts wrapper that takes a list of flags and builds the
    corresponding cmdopts rules to match those flags.

    :param argv: argument list to parse
    :param flags: iterable of long-option names (without the ``--`` prefix)
    :returns: the result of :func:`parse` for ``argv`` under the built rules
    """
    # Each flag accepts only its long-option form, e.g. "verbose" -> "--verbose".
    # (dict comprehension replaces the older dict([...]) idiom.)
    rules = {flag: {'flags': ["--%s" % flag]} for flag in flags}
    return parse(argv, rules)
|
java
|
/**
 * Pauses the job identified by the provided trigger key. Scheduler failures
 * are logged and swallowed rather than propagated.
 *
 * @param aTriggerKey key of the trigger to pause; must not be null
 */
public void pauseJob (@Nonnull final TriggerKey aTriggerKey)
{
  ValueEnforcer.notNull (aTriggerKey, "TriggerKey");
  try
  {
    m_aScheduler.pauseTrigger (aTriggerKey);
    // Fixed typo in log message ("Succesfully" -> "Successfully").
    LOGGER.info ("Successfully paused job with TriggerKey " + aTriggerKey.toString ());
  }
  catch (final SchedulerException ex)
  {
    LOGGER.error ("Failed to pause job with TriggerKey " + aTriggerKey.toString (), ex);
  }
}
|
java
|
/**
 * Reacts to a data rehash by notifying registered SegmentListeners of the
 * segments this node owned before the rehash but no longer owns after it.
 * Newly gained segments are deliberately ignored.
 *
 * @param event the rehash event carrying the consistent hashes before/after
 */
@DataRehashed
public void dataRehashed(DataRehashedEvent<K, V> event) {
    ConsistentHash startHash = event.getConsistentHashAtStart();
    ConsistentHash endHash = event.getConsistentHashAtEnd();
    boolean trace = log.isTraceEnabled();
    if (startHash != null && endHash != null) {
        if (trace) {
            log.tracef("Data rehash occurred startHash: %s and endHash: %s with new topology %s and was pre %s", startHash, endHash,
                    event.getNewTopologyId(), event.isPre());
        }
        if (!changeListener.isEmpty()) {
            if (trace) {
                log.tracef("Previous segments %s ", startHash.getSegmentsForOwner(localAddress));
                log.tracef("After segments %s ", endHash.getSegmentsForOwner(localAddress));
            }
            // we don't care about newly added segments, since that means our run wouldn't include them anyways
            IntSet beforeSegments = IntSets.mutableFrom(startHash.getSegmentsForOwner(localAddress));
            // Now any that were there before but aren't there now should be added - we don't care about new segments
            // since our current request shouldn't be working on it - it will have to retrieve it later
            beforeSegments.removeAll(endHash.getSegmentsForOwner(localAddress));
            if (!beforeSegments.isEmpty()) {
                // We have to make sure all current listeners get the newest hashes updated. This has to occur for
                // new nodes and nodes leaving as the hash segments will change in both cases.
                for (Map.Entry<Object, SegmentListener> entry : changeListener.entrySet()) {
                    if (trace) {
                        log.tracef("Notifying %s through SegmentChangeListener", entry.getKey());
                    }
                    entry.getValue().lostSegments(beforeSegments);
                }
            } else if (trace) {
                log.tracef("No segments have been removed from data rehash, no notification required");
            }
        } else if (trace) {
            log.tracef("No change listeners present!");
        }
    }
}
|
java
|
/**
 * Sets the ocpDate header value, wrapping it in the RFC 1123 representation.
 * Passing null clears the value.
 *
 * @param ocpDate the date to set, or null to clear
 * @return this options object, for chaining
 */
public ApplicationGetOptions withOcpDate(DateTime ocpDate) {
    this.ocpDate = (ocpDate == null) ? null : new DateTimeRfc1123(ocpDate);
    return this;
}
|
java
|
/**
 * Shuts this container down: fires the before-shutdown event, then -- even if
 * that event observer throws -- discards the container, releases dependent
 * beans, fires the shutdown event and stops the bootstrap.
 *
 * @throws IllegalStateException if the container is not running (via checkIsRunning)
 */
public synchronized void shutdown() {
    checkIsRunning();
    try {
        beanManager().fireEvent(new ContainerBeforeShutdown(id), BeforeDestroyed.Literal.APPLICATION);
    } finally {
        // The teardown sequence below must run regardless of observer failures.
        discard(id);
        // Destroy all the dependent beans correctly
        creationalContext.release();
        beanManager().fireEvent(new ContainerShutdown(id), Destroyed.Literal.APPLICATION);
        bootstrap.shutdown();
        WeldSELogger.LOG.weldContainerShutdown(id);
    }
}
|
java
|
/**
 * Selects the cells of the row that participate in the action check:
 * only the cell at position (0, 1).
 *
 * @param row the table row to inspect
 * @return a single-element list holding that cell
 */
public List<Example> actionCells(Example row)
{
    final List<Example> cells = new ArrayList<Example>();
    cells.add(row.at(0, 1));
    return cells;
}
|
java
|
/**
 * Records each statement of the given block as a child STATEMENT node of
 * {@code blockNode}, storing the statement text and its source reference.
 * Null or empty blocks are ignored.
 *
 * @param block the parsed block whose statements are recorded (may be null)
 * @param blockNode the node under which statement nodes are created
 * @throws Exception if node creation or property setting fails
 */
protected void record( final Block block,
                       final Node blockNode ) throws Exception {
    if (block == null) {
        return;
    }
    @SuppressWarnings( "unchecked" )
    final List<Statement> statements = block.statements();
    if ((statements == null) || statements.isEmpty()) {
        return;
    }
    for (final Statement statement : statements) {
        // TODO handle each type of statement
        final Node stmtNode = blockNode.addNode(ClassFileSequencerLexicon.STATEMENT,
                                                ClassFileSequencerLexicon.STATEMENT);
        stmtNode.setProperty(ClassFileSequencerLexicon.CONTENT, statement.toString());
        recordSourceReference(statement, stmtNode);
    }
}
|
python
|
def cpu_halt_reasons(self):
    """Retrieves the reasons that the CPU was halted.

    Args:
      self (JLink): the ``JLink`` instance

    Returns:
      A list of ``JLinkMOEInfo`` instances specifying the reasons for which
      the CPU was halted. This list may be empty in the case that the CPU
      is not halted.

    Raises:
      JLinkException: on hardware error.
    """
    max_moes = self.MAX_NUM_MOES
    moe_buffer = (structs.JLinkMOEInfo * max_moes)()
    # Negative return values from the DLL indicate a hardware error.
    result = self._dll.JLINKARM_GetMOEs(moe_buffer, max_moes)
    if result < 0:
        raise errors.JLinkException(result)
    return list(moe_buffer)[:result]
|
python
|
def do_POST(self):
    """Handles the HTTP POST request.

    Attempts to interpret all HTTP POST requests as XML-RPC calls,
    which are forwarded to the server's _dispatch method for handling.
    """
    # Check that the path is legal
    if not self.is_rpc_path_valid():
        self.report_404()
        return
    try:
        # Get arguments by reading body of request.
        # We read this in chunks to avoid straining
        # socket.read(); around the 10 or 15Mb mark, some platforms
        # begin to have problems (bug #792570).
        max_chunk_size = 10 * 1024 * 1024
        size_remaining = int(self.headers["content-length"])
        L = []
        while size_remaining:
            chunk_size = min(size_remaining, max_chunk_size)
            L.append(self.rfile.read(chunk_size))
            size_remaining -= len(L[-1])
        data = ''.join(L)
        # In previous versions of SimpleXMLRPCServer, _dispatch
        # could be overridden in this class, instead of in
        # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
        # check to see if a subclass implements _dispatch and dispatch
        # using that method if present.
        response = self.server._marshaled_dispatch(
            data, getattr(self, '_dispatch', None)
        )
    except: # This should only happen if the module is buggy
        # internal error, report as HTTP server error
        # NOTE(review): the bare except is deliberate so any dispatch failure
        # yields a 500 -- but it also swallows KeyboardInterrupt/SystemExit.
        self.send_response(500)
        self.end_headers()
    else:
        # got a valid XML RPC response
        self.send_response(200)
        self.send_header("Content-type", "text/xml")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        # shut down the connection
        self.wfile.flush()
        self.connection.shutdown(1)
|
python
|
def returns_cumulative(returns, geometric=True, expanding=False):
    """ return the cumulative return

    Parameters
    ----------
    returns : DataFrame or Series
    geometric : bool, default is True
        If True, geometrically link returns
    expanding : bool default is False
        If True, return expanding series/frame of returns
        If False, return the final value(s)
    """
    if geometric:
        # Geometric linking: compound (1 + r) across periods.
        linked = 1. + returns
        if expanding:
            return linked.cumprod() - 1.
        return linked.prod() - 1.
    # Arithmetic linking: simple sums.
    if expanding:
        return returns.cumsum()
    return returns.sum()
|
python
|
def setup_launch_parser(self, parser):
    """Setup the given parser for the launch command

    :param parser: the argument parser to setup
    :type parser: :class:`argparse.ArgumentParser`
    :returns: None
    :rtype: None
    :raises: None
    """
    # Declare the positional argument first, then bind the handler.
    parser.add_argument("addon", help="The jukebox addon to launch. The addon should be a standalone plugin.")
    parser.set_defaults(func=self.launch)
|
java
|
/**
 * Convenience overload: applies the given properties using the default
 * DISPLAY and SCREEN_MOVE flags.
 *
 * @param properties the property name/value pairs to apply
 * @return the result of the full setProperties overload
 */
public int setProperties(Map<String,Object> properties)
{
    return this.setProperties(properties, DBConstants.DISPLAY, DBConstants.SCREEN_MOVE);
}
|
java
|
/**
 * Returns the subset of key columns that are flagged as updated.
 *
 * @return a new list containing only the updated key columns (possibly empty)
 */
public List<EventColumn> getUpdatedKeys() {
    final List<EventColumn> updated = new ArrayList<EventColumn>();
    for (final EventColumn key : this.keys) {
        if (!key.isUpdate()) {
            continue;
        }
        updated.add(key);
    }
    return updated;
}
|
java
|
/**
 * Blocks until the given thread terminates, retrying the join whenever the
 * calling thread is interrupted (interrupts are deliberately swallowed).
 *
 * @param thread the thread to wait for
 */
public static void waitForDie(Thread thread) {
    while (true) {
        try {
            thread.join();
            return;
        } catch (InterruptedException e) {
            // ignore and retry the join
        }
    }
}
|
python
|
def _get_lantern_format(self, df):
""" Feature slice view browser expects data in the format of:
{"metricValues": {"count": 12, "accuracy": 1.0}, "feature": "species:Iris-setosa"}
{"metricValues": {"count": 11, "accuracy": 0.72}, "feature": "species:Iris-versicolor"}
...
This function converts a DataFrame to such format.
"""
if ('count' not in df) or ('feature' not in df):
raise Exception('No "count" or "feature" found in data.')
if len(df.columns) < 3:
raise Exception('Need at least one metrics column.')
if len(df) == 0:
raise Exception('Data is empty')
data = []
for _, row in df.iterrows():
metric_values = dict(row)
feature = metric_values.pop('feature')
data.append({'feature': feature, 'metricValues': metric_values})
return data
|
java
|
/**
 * Builds the JSON response describing the given models, optionally
 * (when find_compatible_frames is set) including summaries of all frames
 * referenced as compatible by those models.
 *
 * @param modelsMap the models to summarize, keyed by name
 * @return a done Response wrapping the serialized result map
 */
private Response serveOneOrAll(Map<String, Model> modelsMap) {
    // returns empty sets if !this.find_compatible_frames
    Pair<Map<String, Frame>, Map<String, Set<String>>> frames_info = fetchFrames();
    Map<String, Frame> all_frames = frames_info.getFirst();
    Map<String, Set<String>> all_frames_cols = frames_info.getSecond();
    Map<String, ModelSummary> modelSummaries = Models.generateModelSummaries(null, modelsMap, find_compatible_frames, all_frames, all_frames_cols);
    Map resultsMap = new LinkedHashMap();
    resultsMap.put("models", modelSummaries);
    // If find_compatible_frames then include a map of the Frame summaries. Should we put this on a separate switch?
    if (this.find_compatible_frames) {
        // Collect the union of frames referenced by any model summary.
        Set<String> all_referenced_frames = new TreeSet<String>();
        for (Map.Entry<String, ModelSummary> entry: modelSummaries.entrySet()) {
            ModelSummary summary = entry.getValue();
            all_referenced_frames.addAll(summary.compatible_frames);
        }
        Map<String, FrameSummary> frameSummaries = Frames.generateFrameSummaries(all_referenced_frames, all_frames, false, null, null);
        resultsMap.put("frames", frameSummaries);
    }
    // TODO: temporary hack to get things going
    // Round-trip through JSON text to obtain a JsonObject for the Response.
    String json = gson.toJson(resultsMap);
    JsonObject result = gson.fromJson(json, JsonElement.class).getAsJsonObject();
    return Response.done(result);
}
|
java
|
/**
 * Generated EMF switch dispatch: calls the caseXXX method matching the
 * classifier id. Concrete expression kinds fall back to caseExpression and
 * then defaultCase when the more specific case returns null.
 *
 * @param classifierID the metamodel classifier id of theEObject
 * @param theEObject the object to dispatch on
 * @return the first non-null case result, or defaultCase's result
 */
@Override
protected T doSwitch(int classifierID, EObject theEObject)
{
    switch (classifierID)
    {
        case SimpleExpressionsPackage.IF_CONDITION:
        {
            IfCondition ifCondition = (IfCondition)theEObject;
            T result = caseIfCondition(ifCondition);
            if (result == null) result = defaultCase(theEObject);
            return result;
        }
        case SimpleExpressionsPackage.EXPRESSION:
        {
            Expression expression = (Expression)theEObject;
            T result = caseExpression(expression);
            if (result == null) result = defaultCase(theEObject);
            return result;
        }
        case SimpleExpressionsPackage.NUMBER_LITERAL:
        {
            NumberLiteral numberLiteral = (NumberLiteral)theEObject;
            T result = caseNumberLiteral(numberLiteral);
            // Fall back through the supertype case before the default.
            if (result == null) result = caseExpression(numberLiteral);
            if (result == null) result = defaultCase(theEObject);
            return result;
        }
        case SimpleExpressionsPackage.BOOLEAN_LITERAL:
        {
            BooleanLiteral booleanLiteral = (BooleanLiteral)theEObject;
            T result = caseBooleanLiteral(booleanLiteral);
            if (result == null) result = caseExpression(booleanLiteral);
            if (result == null) result = defaultCase(theEObject);
            return result;
        }
        case SimpleExpressionsPackage.METHOD_CALL:
        {
            MethodCall methodCall = (MethodCall)theEObject;
            T result = caseMethodCall(methodCall);
            if (result == null) result = caseExpression(methodCall);
            if (result == null) result = defaultCase(theEObject);
            return result;
        }
        case SimpleExpressionsPackage.OR_EXPRESSION:
        {
            OrExpression orExpression = (OrExpression)theEObject;
            T result = caseOrExpression(orExpression);
            if (result == null) result = caseExpression(orExpression);
            if (result == null) result = defaultCase(theEObject);
            return result;
        }
        case SimpleExpressionsPackage.AND_EXPRESSION:
        {
            AndExpression andExpression = (AndExpression)theEObject;
            T result = caseAndExpression(andExpression);
            if (result == null) result = caseExpression(andExpression);
            if (result == null) result = defaultCase(theEObject);
            return result;
        }
        case SimpleExpressionsPackage.COMPARISON:
        {
            Comparison comparison = (Comparison)theEObject;
            T result = caseComparison(comparison);
            if (result == null) result = caseExpression(comparison);
            if (result == null) result = defaultCase(theEObject);
            return result;
        }
        case SimpleExpressionsPackage.NOT_EXPRESSION:
        {
            NotExpression notExpression = (NotExpression)theEObject;
            T result = caseNotExpression(notExpression);
            if (result == null) result = caseExpression(notExpression);
            if (result == null) result = defaultCase(theEObject);
            return result;
        }
        default: return defaultCase(theEObject);
    }
}
|
java
|
/**
 * Converts a bulk nitro API JSON response into an array of ns_config_diff
 * resources (taking the first ns_config_diff of each per-item response).
 *
 * @param service the nitro service the request was made on
 * @param response raw JSON response payload
 * @return one ns_config_diff per response item
 * @throws nitro_exception if the response carries a non-zero error code
 * @throws Exception on payload parsing failure
 */
protected base_resource[] get_nitro_bulk_response(nitro_service service, String response) throws Exception
{
    ns_config_diff_responses result = (ns_config_diff_responses) service.get_payload_formatter().string_to_resource(ns_config_diff_responses.class, response);
    if(result.errorcode != 0)
    {
        // An expired session is cleared locally before surfacing the error.
        if (result.errorcode == SESSION_NOT_EXISTS)
            service.clear_session();
        throw new nitro_exception(result.message, result.errorcode, (base_response [])result.ns_config_diff_response_array);
    }
    ns_config_diff[] result_ns_config_diff = new ns_config_diff[result.ns_config_diff_response_array.length];
    for(int i = 0; i < result.ns_config_diff_response_array.length; i++)
    {
        result_ns_config_diff[i] = result.ns_config_diff_response_array[i].ns_config_diff[0];
    }
    return result_ns_config_diff;
}
|
python
|
def pdf_preprocess(pdf, batch=False):
    """
    Load pdfs from local filepath if not already b64 encoded

    :param pdf: a filepath or an already-b64-encoded document (or, when
        ``batch`` is True, an iterable of such values)
    :param batch: if True, process ``pdf`` as an iterable of documents
    :returns: b64-encoded bytes for filepaths; other inputs pass through
    """
    if batch:
        return [pdf_preprocess(doc, batch=False) for doc in pdf]
    if os.path.isfile(pdf):
        # a filepath is provided, read and encode
        # (use a context manager so the file handle is closed promptly --
        # the original leaked the handle via open(...).read())
        with open(pdf, 'rb') as pdf_file:
            return b64encode(pdf_file.read())
    # assume pdf is already b64 encoded
    return pdf
|
python
|
def _validate(self):
    """
    Validate the input data.

    Dispatches on ``self.data_format``: PYTHON data is accepted as-is,
    while JSON and YAML inputs are routed to their dedicated validators.
    """
    fmt = self.data_format
    if fmt is FormatType.PYTHON:
        # Native Python objects need no parsing.
        self.data = self.raw_data
    elif fmt is FormatType.JSON:
        self._validate_json()
    elif fmt is FormatType.YAML:
        self._validate_yaml()
|
java
|
/**
 * Resolves the parameter names of the given method via Javassist's debug
 * (LocalVariable) attribute. Returns an array of nulls on lookup failure,
 * and an array of possibly-null entries when name resolution goes wrong.
 *
 * NOTE(review): codeAttribute/attr may be null for abstract/native methods
 * or classes compiled without debug info -- confirm callers never pass those.
 *
 * @param clazz declaring class
 * @param targetMethodName name of the method to inspect
 * @param types parameter types of the method (used to disambiguate overloads)
 * @return the parameter names, index-aligned with {@code types}
 */
public static String[] getMethodVariableNames(final Class<?> clazz, final String targetMethodName, final Class<?>[] types) {
    CtClass cc;
    CtMethod cm = null;
    try {
        // Make sure the class is reachable from the shared pool.
        if (null == CLASS_POOL.find(clazz.getName())) {
            CLASS_POOL.insertClassPath(new ClassClassPath(clazz));
        }
        cc = CLASS_POOL.get(clazz.getName());
        final CtClass[] ptypes = new CtClass[types.length];
        for (int i = 0; i < ptypes.length; i++) {
            ptypes[i] = CLASS_POOL.get(types[i].getName());
        }
        cm = cc.getDeclaredMethod(targetMethodName, ptypes);
    } catch (final NotFoundException e) {
        LOGGER.log(Level.ERROR, "Get method variable names failed", e);
    }
    if (null == cm) {
        // Method lookup failed: return placeholder nulls, one per parameter.
        return new String[types.length];
    }
    final MethodInfo methodInfo = cm.getMethodInfo();
    final CodeAttribute codeAttribute = methodInfo.getCodeAttribute();
    final LocalVariableAttribute attr = (LocalVariableAttribute) codeAttribute.getAttribute(LocalVariableAttribute.tag);
    String[] variableNames = new String[0];
    try {
        variableNames = new String[cm.getParameterTypes().length];
    } catch (final NotFoundException e) {
        LOGGER.log(Level.ERROR, "Get method variable names failed", e);
    }
    // final int staticIndex = Modifier.isStatic(cm.getModifiers()) ? 0 : 1;
    // Scan the local variable table until "this" is found; parameters follow it.
    int j = -1;
    String variableName = null;
    Boolean ifkill = false;
    while (!"this".equals(variableName)) {
        j++;
        variableName = attr.variableName(j);
        // to prevent heap error when there being some unknown reasons to resolve the VariableNames
        if (j > 99) {
            LOGGER.log(Level.WARN,
                "Maybe resolve to VariableNames error [class=" + clazz.getName() + ", targetMethodName=" + targetMethodName + ']');
            ifkill = true;
            break;
        }
    }
    if (!ifkill) {
        // The entries right after "this" are the declared parameters, in order.
        for (int i = 0; i < variableNames.length; i++) {
            variableNames[i] = attr.variableName(++j);
        }
    }
    return variableNames;
}
|
java
|
/**
 * Retrieves the lifecycle policies, running the standard pre-execution hook
 * before delegating to the generated executor.
 *
 * @param request the GetLifecyclePolicies request
 * @return the service result
 */
@Override
public GetLifecyclePoliciesResult getLifecyclePolicies(GetLifecyclePoliciesRequest request) {
    request = beforeClientExecution(request);
    return executeGetLifecyclePolicies(request);
}
|
python
|
def read_tpld_stats(self):
    """Read statistics for every TPLD on this object.

    :return: dictionary {tpld index {group name {stat name: value}}}.
        See XenaTpld.stats_captions.
    """
    # Preserve TPLD iteration order in the result.
    return OrderedDict((tpld, tpld.read_stats())
                       for tpld in self.tplds.values())
|
java
|
/**
 * Returns the EClass for IfcFace, lazily resolving and caching it from the
 * registered Ifc2x3tc1 package on first access.
 *
 * NOTE(review): index 220 is the generated classifier position for IfcFace
 * in this metamodel -- regenerate rather than hand-edit.
 *
 * @return the IfcFace EClass
 */
public EClass getIfcFace() {
    if (ifcFaceEClass == null) {
        ifcFaceEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc2x3tc1Package.eNS_URI).getEClassifiers()
                .get(220);
    }
    return ifcFaceEClass;
}
|
java
|
/**
 * Writes a single character asynchronously, preserving write ordering by
 * chaining onto the previous pending write (lastWrite).
 *
 * @param c the character to write
 * @return a synchronization point completed when this write finishes
 */
public ISynchronizationPoint<IOException> write(char c) {
    // Non-buffered streams go through the array-based path.
    if (!(stream instanceof ICharacterStream.Writable.Buffered))
        return write(new char[] { c }, 0, 1);
    ISynchronizationPoint<IOException> last = lastWrite;
    if (last.isUnblocked()) {
        // No write in flight: start immediately and expose its completion.
        lastWrite = ((ICharacterStream.Writable.Buffered)stream).writeAsync(c);
        return lastWrite;
    }
    // A write is pending: queue ours to start when it completes, and make
    // `ours` the new tail of the chain so later writes queue behind us.
    SynchronizationPoint<IOException> ours = new SynchronizationPoint<>();
    lastWrite = ours;
    last.listenInline(() -> { ((ICharacterStream.Writable.Buffered)stream).writeAsync(c).listenInline(ours); }, ours);
    return ours;
}
|
java
|
/**
 * Composes this element: composes its sort elements, then either fixes up
 * variable references in the select expression or, when none was given,
 * falls back to the stylesheet root's default select expression.
 *
 * @param sroot the stylesheet root providing compose state
 * @throws TransformerException if composition fails
 */
public void compose(StylesheetRoot sroot) throws TransformerException
{
    super.compose(sroot);
    int length = getSortElemCount();
    for (int i = 0; i < length; i++)
    {
        getSortElem(i).compose(sroot);
    }
    java.util.Vector vnames = sroot.getComposeState().getVariableNames();
    if (null != m_selectExpression)
        // Resolve variable references against the current compose scope.
        m_selectExpression.fixupVariables(
            vnames, sroot.getComposeState().getGlobalsSize());
    else
    {
        // No select attribute: use the stylesheet-wide default selection.
        m_selectExpression =
            getStylesheetRoot().m_selectDefault.getExpression();
    }
}
|
python
|
def _remove_bound_conditions(agent, keep_criterion):
"""Removes bound conditions of agent such that keep_criterion is False.
Parameters
----------
agent: Agent
The agent whose bound conditions we evaluate
keep_criterion: function
Evaluates removal_criterion(a) for each agent a in a bound condition
and if it evaluates to False, removes a from agent's bound_conditions
"""
new_bc = []
for ind in range(len(agent.bound_conditions)):
if keep_criterion(agent.bound_conditions[ind].agent):
new_bc.append(agent.bound_conditions[ind])
agent.bound_conditions = new_bc
|
python
|
def process_affinity(affinity=None):
    """Get or set the CPU affinity set for the current process.

    This will affect all future threads spawned by this process. It is
    implementation-defined whether it will also affect previously-spawned
    threads.
    """
    # Plain getter: no affinity requested.
    if affinity is None:
        return system_affinity()
    # Validate the requested set against what the system actually offers.
    requested = CPUSet(affinity)
    if not requested.issubset(system_affinity()):
        raise ValueError("unknown cpus: %s" % requested)
    return system_affinity()
|
java
|
/**
 * Converts a numeric value to an Integer, range-checking against the target
 * SQL type (INTEGER, SMALLINT or TINYINT).
 *
 * @param session session, used to decide truncate-vs-round for floating input
 * @param a the value to convert (Integer, Long, BigDecimal, Double or Float)
 * @param type target SQL type code
 * @return the converted value
 * @throws org.hsqldb.HsqlException X_22003 on overflow, X_42561 on bad type
 */
static Integer convertToInt(SessionInterface session, Object a, int type) {
    int value;
    if (a instanceof Integer) {
        // Already an Integer: reuse the instance when no narrowing is needed.
        if (type == Types.SQL_INTEGER) {
            return (Integer) a;
        }
        value = ((Integer) a).intValue();
    } else if (a instanceof Long) {
        long temp = ((Long) a).longValue();
        if (Integer.MAX_VALUE < temp || temp < Integer.MIN_VALUE) {
            throw Error.error(ErrorCode.X_22003);
        }
        value = (int) temp;
    } else if (a instanceof BigDecimal) {
        BigDecimal bd = ((BigDecimal) a);
        if (bd.compareTo(MAX_INT) > 0 || bd.compareTo(MIN_INT) < 0) {
            throw Error.error(ErrorCode.X_22003);
        }
        value = bd.intValue();
    } else if (a instanceof Double || a instanceof Float) {
        double d = ((Number) a).doubleValue();
        if (session instanceof Session) {
            // SQL mode decides whether floating input is rounded or truncated.
            if (!((Session) session).database.sqlConvertTruncate) {
                d = java.lang.Math.rint(d);
            }
        }
        if (Double.isInfinite(d) || Double.isNaN(d)
                || d >= (double) Integer.MAX_VALUE + 1
                || d <= (double) Integer.MIN_VALUE - 1) {
            throw Error.error(ErrorCode.X_22003);
        }
        value = (int) d;
    } else {
        throw Error.error(ErrorCode.X_42561);
    }
    // Narrower integer targets get an extra range check.
    if (type == Types.TINYINT) {
        if (Byte.MAX_VALUE < value || value < Byte.MIN_VALUE) {
            throw Error.error(ErrorCode.X_22003);
        }
    } else if (type == Types.SQL_SMALLINT) {
        if (Short.MAX_VALUE < value || value < Short.MIN_VALUE) {
            throw Error.error(ErrorCode.X_22003);
        }
    }
    return Integer.valueOf(value);
}
|
java
|
/**
 * Queries the Windows fax server for the status of the given fax job and
 * translates the native status string into the SPI status enum.
 *
 * @param faxJob the fax job to query
 * @return the translated fax job status
 */
@Override
protected FaxJobStatus getFaxJobStatusImpl(FaxJob faxJob)
{
    int jobID = WindowsFaxClientSpiHelper.getFaxJobID(faxJob);
    String statusString = this.winGetFaxJobStatus(this.faxServerName, jobID);
    return WindowsFaxClientSpiHelper.getFaxJobStatusFromWindowsFaxJobStatusString(statusString);
}
|
java
|
/**
 * Replaces this entry's contents with a single content element holding
 * the given string; when {@code type} is null, HTML is assumed.
 *
 * @param contentString the content body
 * @param type          the content type, or null for HTML
 */
public void setContent(final String contentString, final String type) {
    final Content content = new Content();
    if (type == null) {
        content.setType(Content.HTML);
    } else {
        content.setType(type);
    }
    content.setValue(contentString);
    final ArrayList<Content> contents = new ArrayList<Content>();
    contents.add(content);
    setContents(contents);
}
|
java
|
/**
 * Returns the child pages of {@code page} that the current remote user
 * is permitted to access, as decided by the content permission manager.
 *
 * @param page the parent page
 * @return the permitted child pages
 */
@SuppressWarnings("unchecked")
public List<Page> getPermittedChildren(Page page)
{
    return gpUtil.getContentPermissionManager().getPermittedChildren(page, getRemoteUser());
}
|
java
|
/**
 * Creates a builder for a freshly created HTML element of the given tag.
 *
 * @param tag  the HTML tag name
 * @param type the element type the tag is expected to produce
 * @return a content builder wrapping the new element
 */
public static <E extends HTMLElement> HtmlContentBuilder<E> htmlElement(String tag, Class<E> type) {
    return new HtmlContentBuilder<>(createElement(tag, type));
}
|
java
|
/**
 * Returns true when the key is present either in the sorted array
 * portion (binary search hit) or in the overflow map.
 */
@Override
public boolean containsKey(Object o) {
    @SuppressWarnings("unchecked")
    final K key = (K) o;
    // Non-negative search result = found in the array portion.
    return binarySearchInArray(key) >= 0 || overflowEntries.containsKey(key);
}
|
java
|
/**
 * Stores tracer and application information in the outgoing request
 * headers so that the server side can pick them up.
 *
 * @param requestContext the outgoing JAX-RS client request context
 */
public static void storeTracerInfo(ClientRequestContext requestContext) {
    // Put the tracer info into the request so it is sent to the server side.
    SofaTraceContext sofaTraceContext = SofaTraceContextHolder.getSofaTraceContext();
    SofaTracerSpan clientSpan = sofaTraceContext.getCurrentSpan();
    RpcInternalContext context = RpcInternalContext.getContext();
    if (clientSpan != null) {
        requestContext.getHeaders().add(RemotingConstants.NEW_RPC_TRACE_NAME,
            clientSpan.getSofaTracerSpanContext().serializeSpanContext());
    }
    // The client also sends its own application name.
    String appName = (String) context.getAttachment(INTERNAL_KEY_APP_NAME);
    if (appName != null) {
        requestContext.getHeaders().add(RemotingConstants.HEAD_APP_NAME, appName);
    }
}
|
python
|
def _request_helper(self, url, params, method):
'''API request helper method'''
try:
if method == 'POST':
return self._request_post_helper(url, params)
elif method == 'GET':
return self._request_get_helper(url, params)
raise VultrError('Unsupported method %s' % method)
except requests.RequestException as ex:
raise RuntimeError(ex)
|
python
|
def logpdf(self, f, y, Y_metadata=None):
    """
    Evaluate the log likelihood (log pdf) of the data given the latent
    function values, applying the link function first when needed.

    .. math:
        \\log p(y|\\lambda(f))

    :param f: latent variables f
    :type f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: metadata passed through to logpdf_link (unused here)
    :returns: log likelihood evaluated for this point
    :rtype: float
    """
    # An identity link means the latent values can be used directly;
    # otherwise transform them through the link function first.
    if not isinstance(self.gp_link, link_functions.Identity):
        f = self.gp_link.transf(f)
    return self.logpdf_link(f, y, Y_metadata=Y_metadata)
|
python
|
def get_gateway_url(self, request):
    """
    Routes a payment to Gateway, should return URL for redirection.

    Builds the Dotpay request parameters from the payment and the
    backend settings, optionally enriched with user email/language
    (collected via the user_data_query signal). Depending on the
    configured 'method' setting, returns either
    (gateway_url, 'POST', params) or (url_with_query_string, 'GET', {}).
    Raises ImproperlyConfigured for any other method.
    """
    params = {
        'id': self.get_backend_setting('id'),
        'description': self.get_order_description(self.payment, self.payment.order),
        'amount': self.payment.amount,
        'currency': self.payment.currency,
        'type': 0,  # 0 = show "return" button after finished payment
        'control': self.payment.pk,
        'URL': self.get_URL(self.payment.pk),
        'URLC': self.get_URLC(),
        'api_version': 'dev',
    }
    user_data = {
        'email': None,
        'lang': None,
    }
    # Let the application fill in email/lang for the paying user.
    signals.user_data_query.send(sender=None, order=self.payment.order, user_data=user_data)
    if user_data['email']:
        params['email'] = user_data['email']
    # Language preference: user-provided value wins over backend setting.
    if user_data['lang'] and user_data['lang'].lower() in self._ACCEPTED_LANGS:
        params['lang'] = user_data['lang'].lower()
    elif self.get_backend_setting('lang', False
                                  ) and self.get_backend_setting('lang').lower() in self._ACCEPTED_LANGS:
        params['lang'] = self.get_backend_setting('lang').lower()
    # Optional backend-configured flags and merchant info.
    if self.get_backend_setting('onlinetransfer', False):
        params['onlinetransfer'] = 1
    if self.get_backend_setting('p_email', False):
        params['p_email'] = self.get_backend_setting('p_email')
    if self.get_backend_setting('p_info', False):
        params['p_info'] = self.get_backend_setting('p_info')
    if self.get_backend_setting('tax', False):
        params['tax'] = 1
    gateway_url = self.get_backend_setting('gateway_url', self._GATEWAY_URL)
    if self.get_backend_setting('method', 'get').lower() == 'post':
        return gateway_url, 'POST', params
    elif self.get_backend_setting('method', 'get').lower() == 'get':
        # For GET, all values must be utf-8 encoded before urlencoding.
        for key in params.keys():
            params[key] = six.text_type(params[key]).encode('utf-8')
        return gateway_url + '?' + urlencode(params), "GET", {}
    else:
        raise ImproperlyConfigured('Dotpay payment backend accepts only GET or POST')
|
java
|
/**
 * Looks up the conventional JavaBean setter ("set" + capitalized
 * property name) on the given type.
 *
 * @param type         the class to search
 * @param propertyName the bean property name
 * @return the setter method found by BeanUtils
 */
public static Method getSetterPropertyMethod(Class<?> type,
        String propertyName) {
    return BeanUtils.getMethod(type,
            "set" + BeanUtils.capitalizePropertyName(propertyName));
}
|
python
|
def feed_data(self, data: bytes) -> None:
    """
    Proxy ``feed_data`` to the underlying parser; a no-op when no
    parser is attached.
    """
    parser = self._parser
    if parser is None:
        return
    parser.feed_data(data)
|
java
|
/**
 * Handles a cache hit on an entry in the admission window: records the
 * access with the admittor and moves the entry to the tail (MRU
 * position) of the window deque.
 */
private void onWindowHit(Node node) {
    admittor.record(node.key);
    node.moveToTail(headWindow);
}
|
python
|
def _write_result_handler(self, routine):
    """
    Generates code for calling the stored routine in the wrapper method.

    The generated wrapper builds a nested dict keyed by the routine's
    column values (one nesting level per column) and raises on duplicate
    key combinations. The first loop below emits the chained
    'if key in ret[...]' duplicate checks; the second loop emits the
    matching 'else' branches that create the nested dict levels.
    """
    self._write_line('ret = {}')
    self._write_execute_rows(routine)
    self._write_line('for row in rows:')
    num_of_dict = len(routine['columns'])
    # Emit one nested 'if <col value> in ret[...]' per key column.
    i = 0
    while i < num_of_dict:
        value = "row['{0!s}']".format(routine['columns'][i])
        stack = ''
        j = 0
        while j < i:
            stack += "[row['{0!s}']]".format(routine['columns'][j])
            j += 1
        line = 'if {0!s} in ret{1!s}:'.format(value, stack)
        self._write_line(line)
        i += 1
    # Innermost branch: all keys already present means a duplicate row.
    line = "raise Exception('Duplicate key for %s.' % str(({0!s})))". \
        format(", ".join(["row['{0!s}']".format(column_name) for column_name in routine['columns']]))
    self._write_line(line)
    self._indent_level_down()
    # Emit the 'else' branches from innermost to outermost, each one
    # building the remaining nesting levels as dict literals.
    i = num_of_dict
    while i > 0:
        self._write_line('else:')
        part1 = ''
        j = 0
        while j < i - 1:
            part1 += "[row['{0!s}']]".format(routine['columns'][j])
            j += 1
        part1 += "[row['{0!s}']]".format(routine['columns'][j])
        part2 = ''
        j = i - 1
        while j < num_of_dict:
            if j + 1 != i:
                part2 += "{{row['{0!s}']: ".format(routine['columns'][j])
            j += 1
        part2 += "row" + ('}' * (num_of_dict - i))
        line = "ret{0!s} = {1!s}".format(part1, part2)
        self._write_line(line)
        self._indent_level_down()
        if i > 1:
            self._indent_level_down()
        i -= 1
    self._write_line()
    self._write_line('return ret')
|
java
|
/**
 * Subscribes to the ticker channel for the given symbol. The future is
 * registered with the pending subscribes BEFORE the command is sent so
 * the confirmation cannot race past an unregistered listener.
 *
 * @param tickerSymbol the ticker symbol to subscribe to
 * @return a future completed when the subscription is acknowledged
 * @throws BitfinexClientException if sending the command fails
 */
public FutureOperation subscribeTicker(final BitfinexTickerSymbol tickerSymbol)
        throws BitfinexClientException {
    final FutureOperation future = new FutureOperation(tickerSymbol);
    pendingSubscribes.registerFuture(future);
    final SubscribeTickerCommand command = new SubscribeTickerCommand(tickerSymbol);
    client.sendCommand(command);
    return future;
}
|
java
|
/**
 * Copies project-level properties (last-saved date, creation date and
 * project name) from the GanttProject file element into the MPXJ
 * project properties.
 *
 * @param gantt the parsed GanttProject document
 */
private void readProjectProperties(Gantt gantt)
{
    Gantt.File file = gantt.getFile();
    ProjectProperties props = m_projectFile.getProjectProperties();
    props.setLastSaved(file.getSaved());
    props.setCreationDate(file.getCreated());
    props.setName(file.getName());
}
|
java
|
/**
 * Parses a CSV string (newline-separated rows, comma/space-separated
 * values) into a matrix. The working matrix starts at 10x10 and is
 * grown by a factor of 1.5 whenever a row or column index would
 * overflow; the result is finally trimmed to the actual shape.
 *
 * @param csv the CSV text to parse
 * @return the parsed matrix
 */
public static Matrix fromCSV(String csv) {
    StringTokenizer lines = new StringTokenizer(csv, "\n");
    Matrix result = DenseMatrix.zero(10, 10);
    int rows = 0;
    int columns = 0;
    while (lines.hasMoreTokens()) {
        // Grow row capacity by ~1.5x when exhausted.
        if (result.rows() == rows) {
            result = result.copyOfRows((rows * 3) / 2 + 1);
        }
        StringTokenizer elements = new StringTokenizer(lines.nextToken(), ", ");
        int j = 0;
        while (elements.hasMoreElements()) {
            // Grow column capacity by ~1.5x when exhausted.
            if (j == result.columns()) {
                result = result.copyOfColumns((j * 3) / 2 + 1);
            }
            double x = Double.parseDouble(elements.nextToken());
            result.set(rows, j++, x);
        }
        rows++;
        // Track the widest row seen so far.
        columns = j > columns ? j : columns;
    }
    // Trim the over-allocated working matrix to the real dimensions.
    return result.copyOfShape(rows, columns);
}
|
python
|
def reply_to(self) -> Optional[Sequence[AddressHeader]]:
    """The ``Reply-To`` header, or ``None`` when the header is absent."""
    try:
        header = self[b'reply-to']
    except KeyError:
        return None
    return cast(Sequence[AddressHeader], header)
|
java
|
/**
 * Command-line entry point: runs the command-line runner and exits with
 * status 1 (after printing the error and a usage hint) on any failure,
 * or status 0 on success.
 *
 * @param args the command-line arguments
 */
public static void main(String... args) {
    CommandLineRunner runner = new CommandLineRunner();
    try {
        runner.run(args);
    } catch (Exception t) {
        System.err.println(t.getMessage());
        System.err.println("Try '--help' for more information.");
        t.printStackTrace(System.err);
        System.exit(1);
    }
    System.exit(0);
}
|
python
|
def get_namespace_by_preorder( self, preorder_hash ):
    """
    Given a namespace preorder hash, get the associated namespace
    reveal or ready (it may be expired).
    """
    return namedb_get_namespace_by_preorder_hash( self.db.cursor(), preorder_hash )
|
java
|
/**
 * Convenience method: starts the upload, streams the given input as the
 * request body, and finishes the request in one call.
 *
 * @param in the input stream providing the data to upload
 * @return the result of the finished upload
 * @throws X           the route-specific exception
 * @throws DbxException on general Dropbox API failures
 * @throws IOException  on read/transport failures
 */
public R uploadAndFinish(InputStream in) throws X, DbxException, IOException
{
    return start().uploadAndFinish(in);
}
|
python
|
def peek(self, offset=0):
    """Return the character ``offset`` positions ahead of the current
    position without advancing it, or ``None`` when that position lies
    at or beyond the end of the input."""
    target = self.pos + offset
    return self.text[target] if target < self.end else None
|
java
|
/**
 * Infers the data type of a literal string.
 *
 * Return codes: 1 = boolean, 2 = integer, 4 = float/double,
 * 5 = long, 9 = string (anything else).
 *
 * Fixes over the previous version:
 * - both boolean branches compared against the misspelling "treu",
 *   so "true" returned 9 and "false" was never matched; they now
 *   compare against "true" and "false" respectively;
 * - inputs consisting only of a sign or a dot ("+", "-", ".") threw
 *   StringIndexOutOfBoundsException; they now return 9.
 *
 * @param content the literal text to classify
 * @return the type code described above
 */
public static int getDataType(String content) {
    String text = content.trim();
    if(text.length() < 1) {
        return 9;
    }
    int i = 0;
    int d = 0;  // a decimal point has been seen
    int e = 0;  // an exponent marker has been seen
    char c = text.charAt(0);
    int length = text.length();
    if(c == '+' || c == '-') {
        i++;
    }
    if(c == 't') {
        // Fixed: previously compared against "treu".
        return text.equals("true") ? 1 : 9;
    }
    if(c == 'f') {
        // Fixed: previously compared against "treu" instead of "false".
        return text.equals("false") ? 1 : 9;
    }
    if(c == '.') {
        d = 1;
        i++;
    }
    if(i >= length) {
        // Fixed: a lone sign or dot used to run past the end of the string.
        return 9;
    }
    c = text.charAt(i);
    if(!Character.isDigit(c)) {
        return 9;
    }
    for(; i < length; i++) {
        c = text.charAt(i);
        if(Character.isDigit(c)) {
            continue;
        }
        if(c == '.') {
            // A second dot, or a dot after an exponent, is not numeric.
            if(d == 1 || e == 1) {
                return 9;
            }
            d = 1;
            continue;
        }
        if(c == 'e' || c == 'E') {
            // Only one exponent marker is allowed.
            if(e == 1) {
                return 9;
            }
            e = 1;
            continue;
        }
        if(c == 'f' || c == 'F') {
            // 'f' suffix is only valid as the last character.
            return (i == length - 1) ? 4 : 9;
        }
        if(c == 'd' || c == 'D') {
            // 'd' suffix is only valid as the last character.
            return (i == length - 1) ? 4 : 9;
        }
        if(c == 'l' || c == 'L') {
            // 'L' suffix: only valid on a plain integer, as last character.
            return (d == 0 && e == 0 && i == length - 1) ? 5 : 9;
        }
        return 9;
    }
    // No dot and no exponent means integer; otherwise floating point.
    return ((d == 0 && e == 0) ? 2 : 4);
}
|
java
|
/**
 * Updates required-option bookkeeping after an option has been parsed:
 * removes the option (or its required group) from the pending required
 * list, and marks the option as the selected member of its group.
 *
 * @param opt the option that was just encountered
 * @throws ParseException if a different option of the same group was
 *                        already selected
 */
private void updateRequiredOptions(Option opt) throws ParseException
{
    // if the option is a required option remove the option from
    // the requiredOptions list
    if (opt.isRequired())
    {
        getRequiredOptions().remove(opt.getKey());
    }
    // if the option is in an OptionGroup make that option the selected
    // option of the group
    if (getOptions().getOptionGroup(opt) != null)
    {
        OptionGroup group = getOptions().getOptionGroup(opt);
        if (group.isRequired())
        {
            getRequiredOptions().remove(group);
        }
        group.setSelected(opt);
    }
}
|
java
|
/**
 * Returns a random variable equal (pathwise) to
 * {@code valueIfTriggerNonNegative} where {@code trigger >= 0} and to
 * {@code valueIfTriggerNegative} otherwise.
 *
 * @param trigger                    the branching random variable
 * @param valueIfTriggerNonNegative  value used on paths with trigger >= 0
 * @param valueIfTriggerNegative     value used on paths with trigger < 0
 * @return the pathwise selection of the two values
 */
public RandomVariableInterface barrier(RandomVariableInterface trigger, RandomVariableInterface valueIfTriggerNonNegative, RandomVariableInterface valueIfTriggerNegative) {
    // Set time of this random variable to maximum of time with respect to which measurability is known.
    double newTime = Math.max(time, trigger.getFiltrationTime());
    newTime = Math.max(newTime, valueIfTriggerNonNegative.getFiltrationTime());
    newTime = Math.max(newTime, valueIfTriggerNegative.getFiltrationTime());
    if(isDeterministic() && trigger.isDeterministic() && valueIfTriggerNonNegative.isDeterministic() && valueIfTriggerNegative.isDeterministic()) {
        // All deterministic: a single value suffices.
        double newValueIfNonStochastic = trigger.get(0) >= 0 ? valueIfTriggerNonNegative.get(0) : valueIfTriggerNegative.get(0);
        return new RandomVariableLowMemory(newTime, newValueIfNonStochastic);
    }
    else {
        // Select per path; size is the maximum of the operands' sizes.
        int numberOfPaths = Math.max(Math.max(trigger.size(), valueIfTriggerNonNegative.size()), valueIfTriggerNegative.size());
        double[] newRealizations = new double[numberOfPaths];
        for(int i=0; i<newRealizations.length; i++) {
            newRealizations[i] = trigger.get(i) >= 0.0 ? valueIfTriggerNonNegative.get(i) : valueIfTriggerNegative.get(i);
        }
        return new RandomVariableLowMemory(newTime, newRealizations);
    }
}
|
java
|
/**
 * Creates an empty HTML SCRIPT tree with its type attribute preset to
 * "text/javascript".
 *
 * @return the new SCRIPT tree node
 */
public static HtmlTree SCRIPT() {
    HtmlTree htmltree = new HtmlTree(HtmlTag.SCRIPT);
    htmltree.addAttr(HtmlAttr.TYPE, "text/javascript");
    return htmltree;
}
|
python
|
def explain_prediction_df(estimator, doc, **kwargs):
    # type: (...) -> pd.DataFrame
    """ Explain prediction and export explanation to ``pandas.DataFrame``
    All keyword arguments are passed to :func:`eli5.explain_prediction`.
    Weights of all features are exported by default.
    """
    explanation = eli5.explain_prediction(estimator, doc, **_set_defaults(kwargs))
    return format_as_dataframe(explanation)
|
python
|
def onStart(self):
    """ Override onStart method for npyscreen.

    Disables mouse input, prepares host config paths, optionally runs
    first-time system initialization in a background thread (with a
    countdown notification), and registers all application forms.
    """
    curses.mousemask(0)
    self.paths.host_config()
    version = Version()
    # setup initial runtime stuff
    # NOTE(review): first_time appears to be a (flag, state) pair where
    # state == 'exists' means setup already happened — confirm upstream.
    if self.first_time[0] and self.first_time[1] != 'exists':
        system = System()
        # Run the (potentially slow) system start off the UI thread and
        # keep the user informed with a counting-down notification.
        thr = Thread(target=system.start, args=(), kwargs={})
        thr.start()
        countdown = 60
        while thr.is_alive():
            npyscreen.notify_wait('Completing initialization:...' + str(countdown),
                                  title='Setting up things...')
            time.sleep(1)
            countdown -= 1
        thr.join()
    # Common key-binding hints appended to every form title.
    quit_s = '\t'*4 + '^Q to quit'
    tab_esc = '\t'*4 + 'ESC to close menu popup'
    self.addForm('MAIN',
                 MainForm,
                 name='Vent ' + version +
                 '\t\t\t\t\t^T for help' + quit_s + tab_esc,
                 color='IMPORTANT')
    self.addForm('HELP',
                 HelpForm,
                 name='Help\t\t\t\t\t\t\t\t^T to toggle previous' +
                 quit_s,
                 color='DANGER')
    self.addForm('TUTORIALINTRO',
                 TutorialIntroForm,
                 name='Vent Tutorial' + quit_s,
                 color='DANGER')
    self.addForm('TUTORIALBACKGROUND',
                 TutorialBackgroundForm,
                 name='About Vent' + quit_s,
                 color='DANGER')
    self.addForm('TUTORIALTERMINOLOGY',
                 TutorialTerminologyForm,
                 name='About Vent' + quit_s,
                 color='DANGER')
    self.addForm('TUTORIALGETTINGSETUP',
                 TutorialGettingSetupForm,
                 name='About Vent' + quit_s,
                 color='DANGER')
    self.addForm('TUTORIALSTARTINGCORES',
                 TutorialStartingCoresForm,
                 name='Working with Cores' + quit_s,
                 color='DANGER')
    self.addForm('TUTORIALADDINGPLUGINS',
                 TutorialAddingPluginsForm,
                 name='Working with Plugins' + quit_s,
                 color='DANGER')
    self.addForm('TUTORIALADDINGFILES',
                 TutorialAddingFilesForm,
                 name='Files' + quit_s,
                 color='DANGER')
    self.addForm('TUTORIALTROUBLESHOOTING',
                 TutorialTroubleshootingForm,
                 name='Troubleshooting' + quit_s,
                 color='DANGER')
|
python
|
def add_to_space_size(self, addition_bytes):
    # type: (int) -> None
    '''
    A method to add bytes to the space size tracked by this Volume
    Descriptor. The space size is stored in extents, so the byte count
    is rounded up to the next whole extent before being added.

    Parameters:
     addition_bytes - The number of bytes to add to the space size.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')
    # The 'addition' parameter is expected to be in bytes, but the space
    # size we track is in extents. Round up to the next extent.
    self.space_size += utils.ceiling_div(addition_bytes, self.log_block_size)
|
java
|
/**
 * Puts a session into the cache and, when appropriate, writes it
 * through to the backing SessionDataStore. The session is only stored
 * (and possibly passivated/evicted) once its last concurrent request
 * has finished; while requests are still active it is merely kept
 * resident in the cache.
 *
 * @param id      the session id
 * @param session the session to cache
 * @throws Exception on store failures
 * @throws IllegalArgumentException if id or session is null
 */
@Override
public void put(String id, Session session) throws Exception {
    if (id == null || session == null) {
        throw new IllegalArgumentException("Put key=" + id + " session=" + (session == null ? "null" : session.getId()));
    }
    try (Lock ignored = session.lock()) {
        if (!session.isValid()) {
            return;
        }
        if (sessionDataStore == null) {
            // No backing store: keep the session in the cache only.
            if (log.isDebugEnabled()) {
                log.debug("No SessionDataStore, putting into SessionCache only id=" + id);
            }
            session.setResident(true);
            if (doPutIfAbsent(id, session) == null) { // ensure it is in our map
                session.updateInactivityTimer();
            }
            return;
        }
        // don't do anything with the session until the last request for it has finished
        if ((session.getRequests() <= 0)) {
            // save the session
            if (!sessionDataStore.isPassivating()) {
                // if our backing datastore isn't the passivating kind, just save the session
                sessionDataStore.store(id, session.getSessionData());
                // if we evict on session exit, boot it from the cache
                if (getEvictionPolicy() == EVICT_ON_SESSION_EXIT) {
                    if (log.isDebugEnabled()) {
                        log.debug("Eviction on request exit id=" + id);
                    }
                    doDelete(session.getId());
                    session.setResident(false);
                } else {
                    session.setResident(true);
                    if (doPutIfAbsent(id,session) == null) { // ensure it is in our map
                        session.updateInactivityTimer();
                    }
                    if (log.isDebugEnabled()) {
                        log.debug("Non passivating SessionDataStore, session in SessionCache only id=" + id);
                    }
                }
            } else {
                // backing store supports passivation, call the listeners
                sessionHandler.willPassivate(session);
                if (log.isDebugEnabled()) {
                    log.debug("Session passivating id=" + id);
                }
                sessionDataStore.store(id, session.getSessionData());
                if (getEvictionPolicy() == EVICT_ON_SESSION_EXIT) {
                    // throw out the passivated session object from the map
                    doDelete(id);
                    session.setResident(false);
                    if (log.isDebugEnabled()) {
                        log.debug("Evicted on request exit id=" + id);
                    }
                } else {
                    // reactivate the session
                    sessionHandler.didActivate(session);
                    session.setResident(true);
                    if (doPutIfAbsent(id,session) == null) // ensure it is in our map
                        session.updateInactivityTimer();
                    if (log.isDebugEnabled()) {
                        log.debug("Session reactivated id=" + id);
                    }
                }
            }
        } else {
            // Requests still in flight: keep the session resident only.
            if (log.isDebugEnabled()) {
                log.debug("Req count=" + session.getRequests() + " for id=" + id);
            }
            session.setResident(true);
            if (doPutIfAbsent(id, session) == null) {
                // ensure it is the map, but don't save it to the backing store until the last request exists
                session.updateInactivityTimer();
            }
        }
    }
}
|
java
|
/**
 * Recursively collects all rule-root nodes from the subtree rooted at
 * {@code node} into {@code dest}. Recursion stops at a rule root
 * because rules cannot nest.
 *
 * @param dest the list receiving the rule root nodes
 * @param node the subtree to scan; may be null
 */
void addRuleRootNodes(List<RBBINode> dest, RBBINode node) {
    if (node == null) {
        return;
    }
    if (node.fRuleRoot) {
        dest.add(node);
        // Note: rules cannot nest. If we found a rule start node,
        // no child node can also be a start node.
        return;
    }
    addRuleRootNodes(dest, node.fLeftChild);
    addRuleRootNodes(dest, node.fRightChild);
}
|
java
|
/**
 * Returns true when a subdocument result exists for the given path and
 * that result is not an error (its value is not an Exception).
 *
 * @param path the subdocument path to look for; null yields false
 * @return whether a successful result for the path exists
 */
public boolean exists(String path) {
    if (path == null) {
        return false;
    }
    for (SubdocOperationResult<OPERATION> result : resultList) {
        // A value of type Exception marks a failed operation for the path.
        if (path.equals(result.path()) && !(result.value() instanceof Exception)) {
            return true;
        }
    }
    return false;
}
|
java
|
/**
 * Resolves a class symbol from its (possibly nested) name string.
 * In a non-modular environment the lookup uses the no-module scope;
 * otherwise every known module is searched and the first match wins.
 *
 * @param symStr the class name to resolve
 * @return the symbol, or null when it cannot be found
 */
@Nullable
public Symbol getSymbolFromString(String symStr) {
    symStr = inferBinaryName(symStr);
    Name name = getName(symStr);
    Modules modules = Modules.instance(context);
    boolean modular = modules.getDefaultModule() != getSymtab().noModule;
    if (!modular) {
        return getSymbolFromString(getSymtab().noModule, name);
    }
    for (ModuleSymbol msym : Modules.instance(context).allModules()) {
        ClassSymbol result = getSymbolFromString(msym, name);
        if (result != null) {
            // TODO(cushon): the path where we iterate over all modules is probably slow.
            // Try to learn some lessons from JDK-8189747, and consider disallowing this case and
            // requiring users to call the getSymbolFromString(ModuleSymbol, Name) overload instead.
            return result;
        }
    }
    return null;
}
|
java
|
/**
 * Determines whether trimming applies to the response's current content
 * type (HTML/XHTML/XML variants, or no content type at all). The last
 * answer is cached and reused as long as the response returns the very
 * same content-type String instance.
 */
private boolean isTrimEnabled() {
    String contentType = response.getContentType();
    // If the contentType is the same string (by identity), return the previously determined value.
    // This assumes the same string instance is returned by the response when content type not changed between calls.
    if(contentType!=isTrimEnabledCacheContentType) {
        isTrimEnabledCacheResult =
            contentType==null
            || contentType.equals("application/xhtml+xml")
            || contentType.startsWith("application/xhtml+xml;")
            || contentType.equals("text/html")
            || contentType.startsWith("text/html;")
            || contentType.equals("application/xml")
            || contentType.startsWith("application/xml;")
            || contentType.equals("text/xml")
            || contentType.startsWith("text/xml;")
        ;
        isTrimEnabledCacheContentType = contentType;
    }
    return isTrimEnabledCacheResult;
}
|
python
|
def _get_storage_api(retry_params, account_id=None):
    """Returns storage_api instance for API methods.

    Args:
      retry_params: An instance of api_utils.RetryParams. If none,
       thread's default will be used.
      account_id: Internal-use only.

    Returns:
      A storage_api instance to handle urlfetch work to GCS.
      On dev appserver, this instance will talk to a local stub by default.
      However, if you pass the arguments --appidentity_email_address and
      --appidentity_private_key_path to dev_appserver.py it will attempt to use
      the real GCS with these credentials.  Alternatively, you can set a specific
      access token with common.set_access_token.  You can also pass
      --default_gcs_bucket_name to set the default bucket.
    """
    api = _StorageApi(_StorageApi.full_control_scope,
                      service_account_id=account_id,
                      retry_params=retry_params)
    # when running local unit tests, the service account is test@localhost
    # from google.appengine.api.app_identity.app_identity_stub.APP_SERVICE_ACCOUNT_NAME
    # Point the API at the local stub only when there is no explicit
    # access token and no real service account configured.
    service_account = app_identity.get_service_account_name()
    if (common.local_run() and not common.get_access_token()
            and (not service_account or service_account.endswith('@localhost'))):
        api.api_url = common.local_api_url()
    # An explicitly-set access token always wins.
    if common.get_access_token():
        api.token = common.get_access_token()
    return api
|
java
|
/**
 * Decrypts the given SignMessage by delegating to the underlying
 * (reloadable) decryption service component. The component is pinned
 * for the duration of the call and always unpinned afterwards.
 *
 * @param signMessage the message to decrypt
 * @return the decrypted message
 * @throws DecryptionException if the component is unavailable or
 *                             decryption fails
 */
@Override
public Message decrypt(SignMessage signMessage) throws DecryptionException {
    ServiceableComponent<SignMessageDecryptionService> component = null;
    try {
        component = service.getServiceableComponent();
        if (null == component) {
            throw new DecryptionException("SignMessageDecryptionService: Error accessing underlying component: Invalid configuration");
        }
        else {
            final SignMessageDecryptionService svc = component.getComponent();
            return svc.decrypt(signMessage);
        }
    }
    finally {
        // Always release the pinned component, even on failure.
        if (null != component) {
            component.unpinComponent();
        }
    }
}
|
python
|
def _init_humidity(self):
"""
Internal. Initialises the humidity sensor via RTIMU
"""
if not self._humidity_init:
self._humidity_init = self._humidity.humidityInit()
if not self._humidity_init:
raise OSError('Humidity Init Failed')
|
java
|
/**
 * Rewrites a SOAP address so that it points at the configured
 * web-service host/port (and optionally a rewritten path). If the new
 * address cannot be parsed, the original address is returned unchanged.
 *
 * @param sarm        rewrite configuration (host, port, path rule)
 * @param origAddress the address found in the original document
 * @param newAddress  the candidate address to rewrite
 * @param uriScheme   the URI scheme to use for the rewritten address
 * @return the rewritten address, or origAddress on parse failure
 */
private static String rewriteSoapAddress(SOAPAddressRewriteMetadata sarm, String origAddress, String newAddress, String uriScheme)
{
    try
    {
        URL url = new URL(newAddress);
        String path = url.getPath();
        String host = sarm.getWebServiceHost();
        String port = getDotPortNumber(uriScheme, sarm);
        StringBuilder sb = new StringBuilder(uriScheme);
        sb.append("://");
        sb.append(host);
        sb.append(port);
        if (isPathRewriteRequired(sarm)) {
            // Apply the configured sed-style rewrite rule to the path.
            sb.append(SEDProcessor.newInstance(sarm.getWebServicePathRewriteRule()).processLine(path));
        }
        else
        {
            sb.append(path);
        }
        final String urlStr = sb.toString();
        ADDRESS_REWRITE_LOGGER.addressRewritten(origAddress, urlStr);
        return urlStr;
    }
    catch (MalformedURLException e)
    {
        // Unparseable candidate: keep the original address untouched.
        ADDRESS_REWRITE_LOGGER.invalidAddressProvidedUseItWithoutRewriting(newAddress, origAddress);
        return origAddress;
    }
}
|
java
|
/**
 * Serializes the given value into a ByteBuffer using the supplied
 * serializer; the intermediate writer is closed automatically.
 *
 * @param serializer the serializer that writes the value
 * @param value      the value to serialize
 * @return a ByteBuffer containing the serialized bytes
 * @throws IOException if serialization fails
 */
@Override
public <T> ByteBuffer serialize(final Serializer<T> serializer, final T value)
    throws IOException {
    try (final BytesSerialWriter writer = writeBytes()) {
        serializer.serialize(writer, value);
        return writer.toByteBuffer();
    }
}
|
python
|
def send_error_response(self, msgid, methodname, status_code, status_desc,
                        error_insts=None):
    """Send a CIM-XML response message back to the WBEM server that
    indicates error.

    The CIM-level error (status code, description and optional error
    instances) is wrapped in an EXPMETHODRESPONSE; the HTTP status is
    nevertheless 200, as the failure is expressed at the CIM level.
    """
    resp_xml = cim_xml.CIM(
        cim_xml.MESSAGE(
            cim_xml.SIMPLEEXPRSP(
                cim_xml.EXPMETHODRESPONSE(
                    methodname,
                    cim_xml.ERROR(
                        str(status_code),
                        status_desc,
                        error_insts),
                    ),  # noqa: E123
                ),  # noqa: E123
            msgid, IMPLEMENTED_PROTOCOL_VERSION),
        IMPLEMENTED_CIM_VERSION, IMPLEMENTED_DTD_VERSION)
    resp_body = '<?xml version="1.0" encoding="utf-8" ?>\n' + \
                resp_xml.toxml()
    # The wire format must be utf-8 encoded bytes.
    if isinstance(resp_body, six.text_type):
        resp_body = resp_body.encode("utf-8")
    http_code = 200
    self.send_response(http_code, http_client.responses.get(http_code, ''))
    self.send_header("Content-Type", "text/html")
    self.send_header("Content-Length", str(len(resp_body)))
    self.send_header("CIMExport", "MethodResponse")
    self.end_headers()
    self.wfile.write(resp_body)
    self.log('%s: HTTP status %s; CIM error response: %s: %s',
             (self._get_log_prefix(), http_code,
              _statuscode2name(status_code), status_desc),
             logging.WARNING)
|
java
|
/**
 * Removes the form bean associated with the given action mapping from
 * its configured scope: request scope removes only the request
 * attribute; any other scope removes it from both the session and the
 * request.
 *
 * @param mapping the action mapping describing the form attribute/scope
 * @param request the current HTTP request
 */
public static void removeActionForm(ActionMapping mapping, HttpServletRequest request) {
    if (mapping.getAttribute() != null) {
        if ("request".equals(mapping.getScope()))
            request.removeAttribute(mapping.getAttribute());
        else {
            HttpSession session = request.getSession();
            session.removeAttribute(mapping.getAttribute());
            request.removeAttribute(mapping.getAttribute());
        }
    }
}
|
java
|
/**
 * Loads the application settings from the storage root.
 * Falls back to a fresh default {@link Settings} instance when the
 * settings node is missing (which should never happen in a properly
 * initialized store).
 *
 * @return the persisted settings, or defaults when not found
 */
@Transactional(readOnly = true)
public Settings getSettings() {
    try {
        return (Settings) getEntity(StorageConstants.SETTINGS_ROOT);
    } catch (NotFoundException e) {
        // Should never happen. Log through the logger only; the former
        // additional e.printStackTrace() duplicated this on stderr.
        LOG.error("Could not read Settings node", e);
        return new Settings();
    }
}
|
java
|
/**
 * Copies the HTTP response body into the preconfigured output stream
 * and returns that stream.
 *
 * @param response the HTTP response whose body is consumed
 * @return the output stream the body was copied to
 * @throws IOException on read or write failure
 */
public OutputStream extractData(
    final ClientHttpResponse response
)
    throws IOException
{
    IoUtil.copy( response.getBody(), _output );
    return _output;
}
|
java
|
/**
 * Copies each property from {@code value} into this builder. A property
 * is copied when it has no default, or when the incoming value differs
 * from the default — so values equal to the defaults do not overwrite
 * anything already set on this builder.
 *
 * @param value the BuildableType to merge from
 * @return this builder, for chaining
 */
public BuildableType.Builder mergeFrom(BuildableType value) {
    BuildableType_Builder defaults = new BuildableType.Builder();
    if (defaults._unsetProperties.contains(Property.TYPE)
        || !Objects.equals(value.type(), defaults.type())) {
        type(value.type());
    }
    if (defaults._unsetProperties.contains(Property.BUILDER_TYPE)
        || !Objects.equals(value.builderType(), defaults.builderType())) {
        builderType(value.builderType());
    }
    if (defaults._unsetProperties.contains(Property.MERGE_BUILDER)
        || !Objects.equals(value.mergeBuilder(), defaults.mergeBuilder())) {
        mergeBuilder(value.mergeBuilder());
    }
    if (defaults._unsetProperties.contains(Property.PARTIAL_TO_BUILDER)
        || !Objects.equals(value.partialToBuilder(), defaults.partialToBuilder())) {
        partialToBuilder(value.partialToBuilder());
    }
    if (defaults._unsetProperties.contains(Property.BUILDER_FACTORY)
        || !Objects.equals(value.builderFactory(), defaults.builderFactory())) {
        builderFactory(value.builderFactory());
    }
    if (defaults._unsetProperties.contains(Property.SUPPRESS_UNCHECKED)
        || !Objects.equals(value.suppressUnchecked(), defaults.suppressUnchecked())) {
        suppressUnchecked(value.suppressUnchecked());
    }
    return (BuildableType.Builder) this;
}
|
java
|
/**
 * Sets this matrix to a left-handed perspective projection.
 *
 * @param fovy       the vertical field of view, in radians
 * @param aspect     the width/height aspect ratio
 * @param zNear      the near clipping plane distance (may be infinite)
 * @param zFar       the far clipping plane distance (may be infinite)
 * @param zZeroToOne true for an NDC z-range of [0..1], false for [-1..1]
 * @return this matrix
 */
public Matrix4f setPerspectiveLH(float fovy, float aspect, float zNear, float zFar, boolean zZeroToOne) {
    MemUtil.INSTANCE.zero(this);
    float h = (float) Math.tan(fovy * 0.5f);
    this._m00(1.0f / (h * aspect));
    this._m11(1.0f / h);
    boolean farInf = zFar > 0 && Float.isInfinite(zFar);
    boolean nearInf = zNear > 0 && Float.isInfinite(zNear);
    if (farInf) {
        // See: "Infinite Projection Matrix" (http://www.terathon.com/gdc07_lengyel.pdf)
        float e = 1E-6f;
        this._m22(1.0f - e);
        this._m32((e - (zZeroToOne ? 1.0f : 2.0f)) * zNear);
    } else if (nearInf) {
        float e = 1E-6f;
        this._m22((zZeroToOne ? 0.0f : 1.0f) - e);
        this._m32(((zZeroToOne ? 1.0f : 2.0f) - e) * zFar);
    } else {
        this._m22((zZeroToOne ? zFar : zFar + zNear) / (zFar - zNear));
        this._m32((zZeroToOne ? zFar : zFar + zFar) * zNear / (zNear - zFar));
    }
    this._m23(1.0f);
    _properties(PROPERTY_PERSPECTIVE);
    return this;
}
|
java
|
/**
 * Loads a Conway pattern into the grid. The pattern is centered in the
 * available grid (or clipped to the grid size when larger); all cells
 * are killed first, then the pattern's live cells are set.
 *
 * @param pattern the pattern to apply
 */
public void setPattern(final ConwayPattern pattern) {
    final boolean[][] gridData = pattern.getPattern();
    int gridWidth = gridData[0].length;
    int gridHeight = gridData.length;
    int columnOffset = 0;
    int rowOffset = 0;
    // Clip to the grid when the pattern is too large, otherwise center it.
    if ( gridWidth > getNumberOfColumns() ) {
        gridWidth = getNumberOfColumns();
    } else {
        columnOffset = (getNumberOfColumns() - gridWidth) / 2;
    }
    if ( gridHeight > getNumberOfRows() ) {
        gridHeight = getNumberOfRows();
    } else {
        rowOffset = (getNumberOfRows() - gridHeight) / 2;
    }
    this.delegate.killAll();
    for ( int column = 0; column < gridWidth; column++ ) {
        for ( int row = 0; row < gridHeight; row++ ) {
            if ( gridData[row][column] ) {
                final Cell cell = getCellAt( row + rowOffset,
                                             column + columnOffset );
                updateCell( cell, CellState.LIVE );
            }
        }
    }
    //this.delegate.setPattern();
}
|
java
|
/**
 * Cleans up all given destroyable objects; a null or empty varargs
 * array is silently ignored.
 *
 * @param destroys the objects to destroy
 * @param <D>      the destroyable type
 */
@SafeVarargs
public static <D extends DestroyInterface> void cleanUp(D... destroys) {
    JMOptional.getOptional(destroys).map(Arrays::asList)
            .ifPresent(Destroyer::cleanUp);
}
|
java
|
/**
 * Marks the stoppable session as stopped (see defect 471642). Entry and
 * exit are traced when tracing is enabled.
 */
public void stopStoppableSession () { //471642
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(this, tc, "stopStoppableSession");
    stoppableSessionStopped = true;
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(this, tc, "stopStoppableSession");
}
|
python
|
def load_json_or_yaml(string, is_path=False, file_type='json',
                      exception=ScriptWorkerTaskException,
                      message="Failed to load %(file_type)s: %(exc)s"):
    """Load json or yaml from a filehandle or string, and raise a custom exception on failure.

    Args:
        string (str): json/yaml body or a path to open
        is_path (bool, optional): if ``string`` is a path. Defaults to False.
        file_type (str, optional): either "json" or "yaml". Defaults to "json".
        exception (exception, optional): the exception to raise on failure.
            If None, don't raise an exception.  Defaults to ScriptWorkerTaskException.
        message (str, optional): the message to use for the exception.
            Defaults to "Failed to load %(file_type)s: %(exc)s"

    Returns:
        dict: the data from the string.

    Raises:
        Exception: as specified, on failure

    """
    if file_type == 'json':
        _load_fh = json.load
        _load_str = json.loads
    else:
        _load_fh = yaml.safe_load
        _load_str = yaml.safe_load
    try:
        if is_path:
            with open(string, 'r') as fh:
                contents = _load_fh(fh)
        else:
            contents = _load_str(string)
        return contents
    # Catch yaml.YAMLError (the base class) rather than only
    # yaml.scanner.ScannerError: parser/composer errors such as
    # yaml.parser.ParserError previously escaped unhandled.
    except (OSError, ValueError, yaml.YAMLError) as exc:
        if exception is not None:
            repl_dict = {'exc': str(exc), 'file_type': file_type}
            raise exception(message % repl_dict)
|
java
|
/**
 * Returns the files of the given add-on that can safely be removed on
 * uninstall: its own files minus any file also shipped by another
 * installed add-on.
 *
 * @param addOn           the add-on being uninstalled
 * @param installedAddOns all currently installed add-ons
 * @return the files only this add-on provides (possibly empty)
 */
private static List<String> getFilesSafeForUninstall(AddOn addOn, Set<AddOn> installedAddOns) {
    if (addOn.getFiles() == null || addOn.getFiles().isEmpty()) {
        return Collections.emptyList();
    }
    List<String> files = new ArrayList<>(addOn.getFiles());
    installedAddOns.forEach(installedAddOn -> {
        // Skip the add-on being uninstalled itself.
        if (installedAddOn == addOn) {
            return;
        }
        List<String> addOnFiles = installedAddOn.getFiles();
        if (addOnFiles == null || addOnFiles.isEmpty()) {
            return;
        }
        // Any file shared with another add-on must be kept on disk.
        files.removeAll(addOnFiles);
    });
    return files;
}
|
java
|
/**
 * Marshalls this request and flags it as a dry run, so the service
 * validates permissions without actually creating DHCP options.
 *
 * @return the marshalled request with DryRun=true
 */
@Override
public Request<CreateDhcpOptionsRequest> getDryRunRequest() {
    Request<CreateDhcpOptionsRequest> request = new CreateDhcpOptionsRequestMarshaller().marshall(this);
    request.addParameter("DryRun", Boolean.toString(true));
    return request;
}
|
python
|
def reset_stats(self):
"""Reset accumulated profiler statistics."""
# Note: not using self.Profile, since pstats.Stats() fails then
self.stats = pstats.Stats(Profile())
self.ncalls = 0
self.skipped = 0
|
python
|
def delete(context, sequence):
    """Delete jobs events from a given sequence"""
    return context.session.delete(
        '%s/%s/%s' % (context.dci_cs_api, RESOURCE, sequence))
|
python
|
def _make_sj_out_panel(sj_outD, total_jxn_cov_cutoff=20):
    """Filter junctions from many sj_out files and make panel.

    NOTE(review): this relies on ``pandas.Panel`` and the ``.ix`` indexer,
    both removed in pandas 1.0 — a legacy pandas version is required.
    NOTE(review): ``total_jxn_cov_cutoff`` is currently unused: the code that
    applied it is commented out below, so no coverage filtering happens.
    NOTE(review): ``reduce``, ``ANNOTATION_COLS`` and ``COUNT_COLS`` must be
    defined at module level (``functools.reduce`` on Python 3) — confirm.

    Parameters
    ----------
    sj_outD : dict
        Dict whose keys are sample names and values are sj_out dataframes
    total_jxn_cov_cutoff : int
        If the unique read coverage of a junction summed over all samples is not
        greater than or equal to this value, the junction will not be included
        in the final output.
    Returns
    -------
    sj_outP : pandas.Panel
        Panel where each dataframe corresponds to an sj_out file filtered to
        remove low coverage junctions. Each dataframe has COUNT_COLS =
        ('unique_junction_reads', 'multimap_junction_reads', 'max_overhang')
    annotDF : pandas.DataFrame
        Dataframe with values ANNOTATION_COLS = ('chrom', 'start',
        'end', 'intron_motif', 'annotated') that are otherwise
        duplicated in the panel.
    """
    # Disabled coverage-cutoff filtering (kept for reference):
    # num_jxns = dict()
    # # set of all junctions
    # jxnS = reduce(lambda x,y: set(x) | set(y),
    #               [ sj_outD[k].index for k in sj_outD.keys() ])
    # jxn_keepS = set()
    # jxn_setsD = dict()
    # for k in sj_outD.keys():
    #     jxn_setsD[k] = frozenset(sj_outD[k].index)
    # for j in jxnS:
    #     if sum([ sj_outD[k].ix[j,'unique_junction_reads'] for k in sj_outD.keys()
    #              if j in jxn_setsD[k] ]) >= total_jxn_cov_cutoff:
    #         jxn_keepS.add(j)
    # for k in sj_outD.keys():
    #     sj_outD[k] = sj_outD[k].ix[jxn_keepS]
    # Stack the per-sample dataframes into one 3D panel; junctions missing
    # from a sample get NaN counts, replaced with 0 below.
    sj_outP = pd.Panel(sj_outD)
    for col in ['unique_junction_reads', 'multimap_junction_reads',
                'max_overhang']:
        sj_outP.ix[:,:,col] = sj_outP.ix[:,:,col].fillna(0)
    # Some dataframes will be missing information like intron_motif etc. for
    # junctions that were not observed in that sample. The info is somewhere in
    # the panel though so we can get it.
    annotDF = reduce(pd.DataFrame.combine_first,
                     [ sj_outP.ix[item,:,ANNOTATION_COLS].dropna() for item in
                      sj_outP.items ])
    # combine_first promotes ints to floats where NaNs were present; restore
    # the intended dtypes.
    annotDF['start'] = annotDF['start'].astype(int)
    annotDF['end'] = annotDF['end'].astype(int)
    annotDF['annotated'] = annotDF['annotated'].astype(bool)
    # Sort annotation and panel
    annotDF = annotDF.sort_values(by=['chrom', 'start', 'end'])
    sj_outP = sj_outP.ix[:, annotDF.index, :]
    sj_outP = sj_outP.ix[:,:,COUNT_COLS].astype(int)
    return sj_outP, annotDF
|
python
|
def find_vlans(
        self,
        number,
        name,
        iexact,
        environment,
        net_type,
        network,
        ip_version,
        subnet,
        acl,
        pagination):
    """Search vlans matching all of the given filters, with pagination.

    :param number: Filter by vlan number column
    :param name: Filter by vlan name column
    :param iexact: Filter by name will be exact?
    :param environment: Filter by environment ID related
    :param net_type: Filter by network_type ID related
    :param network: Filter by each octs in network
    :param ip_version: Get only version (0:ipv4, 1:ipv6, 2:all)
    :param subnet: Filter by octs will search by subnets?
    :param acl: Filter by vlan acl column
    :param pagination: Class with all data needed to paginate
    :return: Dictionary with a 'vlan' list (each entry carrying id, nome,
        num_vlan, id_ambiente, descricao, acl_file_name, acl_valida,
        acl_file_name_v6, acl_valida_v6, ativada, ambiente_name and the
        related redeipv4/redeipv6 networks) and a 'total' record count.
    :raise InvalidParameterError: Some parameter was invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not isinstance(pagination, Pagination):
        raise InvalidParameterError(
            u"Invalid parameter: pagination must be a class of type 'Pagination'.")
    # Request payload: pagination fields plus every search filter.
    vlan_map = {
        'start_record': pagination.start_record,
        'end_record': pagination.end_record,
        'asorting_cols': pagination.asorting_cols,
        'searchable_columns': pagination.searchable_columns,
        'custom_search': pagination.custom_search,
        'numero': number,
        'nome': name,
        'exato': iexact,
        'ambiente': environment,
        'tipo_rede': net_type,
        'rede': network,
        'versao': ip_version,
        'subrede': subnet,
        'acl': acl,
    }
    code, xml = self.submit({'vlan': vlan_map}, 'POST', 'vlan/find/')
    parsed = self.response(
        code, xml, ['vlan', 'redeipv4', 'redeipv6', 'equipamentos'])
    return get_list_map(parsed, 'vlan')
|
java
|
/**
 * Emits a {@code RawSupport.getLocator} interface call for the given LOB property,
 * leaving the locator (a {@code long}) on the operand stack.
 *
 * @param a assembler receiving the generated instruction.
 * @param info property being accessed; must describe a LOB.
 * @throws IllegalArgumentException if the property is not a LOB.
 */
private void getLobLocator(CodeAssembler a, StorablePropertyInfo info) {
    if (!info.isLob()) {
        throw new IllegalArgumentException();
    }
    TypeDesc rawSupport = TypeDesc.forClass(RawSupport.class);
    TypeDesc[] params = {info.getStorageType()};
    a.invokeInterface(rawSupport, "getLocator", TypeDesc.LONG, params);
}
|
java
|
/**
 * Obtains the number-picker and unit attributes from the given attribute set and styles.
 *
 * @param attributeSet the attribute set to read from, may be null.
 * @param defaultStyle the default style attribute.
 * @param defaultStyleResource the default style resource.
 */
private void obtainStyledAttributes(@Nullable final AttributeSet attributeSet,
                                    @AttrRes final int defaultStyle,
                                    @StyleRes final int defaultStyleResource) {
    TypedArray numberPickerTypedArray = getContext()
            .obtainStyledAttributes(attributeSet, R.styleable.AbstractNumberPickerPreference,
                    defaultStyle, defaultStyleResource);
    TypedArray unitTypedArray = getContext()
            .obtainStyledAttributes(attributeSet, R.styleable.AbstractUnitPreference,
                    defaultStyle, defaultStyleResource);
    try {
        obtainUseInputMethod(numberPickerTypedArray);
        obtainWrapSelectorWheel(numberPickerTypedArray);
        obtainUnit(unitTypedArray);
    } finally {
        numberPickerTypedArray.recycle();
        // BUGFIX: unitTypedArray was previously never recycled, leaking the
        // TypedArray (Android requires recycle() after obtainStyledAttributes).
        unitTypedArray.recycle();
    }
}
|
java
|
/**
 * Verifies that every object in the list participates in the given transaction when the
 * current participation mode demands it on read. For each non-participating object, waits
 * on its exclusive write transaction.
 *
 * @param list objects to check, may be null.
 * @param tx the transaction to check participation against.
 * @return the list itself when all objects participate (or participation is not required
 *         on read), or {@code null} when the list was null or any object had to be waited on.
 */
private List checkTransactionParticipationAndWaitForOtherTransactions(List list, MithraTransaction tx)
{
    if (list == null)
    {
        return null;
    }
    if (!this.getTxParticipationMode(tx).mustParticipateInTxOnRead())
    {
        return list;
    }
    List result = list;
    // size() is re-read each iteration on purpose: waiting may let other
    // transactions run, so the list contents are not assumed stable.
    for (int index = 0; index < list.size(); index++)
    {
        MithraTransactionalObject txObject = (MithraTransactionalObject) list.get(index);
        if (!txObject.zIsParticipatingInTransaction(tx))
        {
            result = null;
            txObject.zWaitForExclusiveWriteTx(tx);
        }
    }
    return result;
}
|
python
|
def update_ref(self, new_ref_info, repository_id, filter, project=None, project_id=None):
    """UpdateRef.
    [Preview API] Lock or Unlock a branch.
    :param :class:`<GitRefUpdate> <azure.devops.v5_1.git.models.GitRefUpdate>` new_ref_info: The ref update action (lock/unlock) to perform
    :param str repository_id: The name or ID of the repository.
    :param str filter: The name of the branch to lock/unlock
    :param str project: Project ID or project name
    :param str project_id: ID or name of the team project. Optional if specifying an ID for repository.
    :rtype: :class:`<GitRef> <azure.devops.v5_1.git.models.GitRef>`
    """
    # Serialize the payload up front; it is independent of the routing data.
    body_content = self._serialize.body(new_ref_info, 'GitRefUpdate')
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if repository_id is not None:
        route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
    query_parameters = {}
    if filter is not None:
        query_parameters['filter'] = self._serialize.query('filter', filter, 'str')
    if project_id is not None:
        query_parameters['projectId'] = self._serialize.query('project_id', project_id, 'str')
    response = self._send(http_method='PATCH',
                          location_id='2d874a60-a811-4f62-9c9f-963a6ea0a55b',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=body_content)
    return self._deserialize('GitRef', response)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.