language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
|---|---|
java
|
/**
 * Converts a CexIO-specific trade DTO into a generic {@link Trade}.
 *
 * @param trade the CexIO trade to adapt
 * @param currencyPair the currency pair the trade belongs to
 * @return the adapted generic trade
 */
public static Trade adaptTrade(CexIOTrade trade, CurrencyPair currencyPair) {
    // CexIO reports the timestamp in seconds; convert to milliseconds.
    Date timestamp = DateUtils.fromMillisUtc(trade.getDate() * 1000L);
    OrderType orderType = trade.getType().equals(ORDER_TYPE_BUY) ? OrderType.BID : OrderType.ASK;
    return new Trade(
        orderType,
        trade.getAmount(),
        currencyPair,
        trade.getPrice(),
        timestamp,
        String.valueOf(trade.getTid()));
}
|
python
|
def __calculate_audio_frames(self):
    """Realign the audio buffer schedule with the video clock.

    Call this after a seek or after resuming from a pause so the queued
    audio buffer offsets match the current video frame.
    """
    # Nothing to do when the clip carries no audio track.
    if self.audioformat is None:
        return
    current = self.clock.current_frame
    total = int(self.clip.audio.fps * self.clip.audio.duration)
    step = self.audioformat['buffersize']
    # One entry per audio buffer, plus the final (possibly partial) chunk.
    self.audio_times = list(range(0, total, step)) + [total]
    # Drop the audio segments that precede the starting frame.
    del self.audio_times[:current]
|
java
|
/**
 * Invokes {@code operation} on the given partition and attaches
 * {@code mergeCallback} to the resulting future. {@code operationCount} is
 * incremented for every attempted invocation; any failure is rethrown
 * unchecked via {@code rethrow}.
 */
protected void invoke(String serviceName, Operation operation, int partitionId) {
    try {
        operationCount++;
        operationService
            .invokeOnPartition(serviceName, operation, partitionId)
            .andThen(mergeCallback);
    } catch (Throwable t) {
        throw rethrow(t);
    }
}
|
python
|
def get_left_child(self, n):
    '''
    API: get_left_child(self, n)
    Description:
        Return the left child of node n, where n may be a Node()
        instance or the name of a node.
    Pre:
        Node n should be present in the tree.
    Input:
        n: Node name or Node() instance.
    Return:
        Name of the left child of n.
    '''
    # Node instances carry their own attributes; plain names are resolved
    # through the tree's attribute lookup.
    if not isinstance(n, Node):
        return self.get_node_attr(n, 'Lchild')
    return n.get_attr('Lchild')
|
python
|
def make_anchor(file_path: pathlib.Path,
                offset: int,
                width: int,
                context_width: int,
                metadata,
                encoding: str = 'utf-8',
                handle=None):
    """Construct a new `Anchor`.
    Args:
        file_path: The absolute path to the target file for the anchor.
        offset: The offset of the anchored text in codepoints in `file_path`'s
            contents.
        width: The width in codepoints of the anchored text.
        context_width: The width in codepoints of context on either side of the
            anchor.
        metadata: The metadata to attach to the anchor. Must be json-serializeable.
        encoding: The encoding of the contents of `file_path`.
        handle: If not `None`, this is a file-like object the contents of which
            are used to calculate the context of the anchor. If `None`, then
            the file indicated by `file_path` is opened instead.
    Raises:
        ValueError: `width` characters can't be read at `offset`.
        ValueError: `file_path` is not absolute.
    """
    @contextmanager
    def _open_source():
        # Prefer a caller-supplied handle; otherwise open file_path ourselves
        # (and close it when done).
        if handle is not None:
            yield handle
        else:
            with file_path.open(mode='rt', encoding=encoding) as fp:
                yield fp

    with _open_source() as source:
        context = _make_context(source, offset, width, context_width)

    return Anchor(
        file_path=file_path,
        encoding=encoding,
        context=context,
        metadata=metadata)
|
python
|
def tls_feature(self):
    """The :py:class:`~django_ca.extensions.TLSFeature` extension, or ``None`` if it doesn't exist."""
    extensions = self.x509.extensions
    try:
        raw = extensions.get_extension_for_oid(ExtensionOID.TLS_FEATURE)
    except x509.ExtensionNotFound:
        # The certificate simply does not carry a TLSFeature extension.
        return None
    return TLSFeature(raw)
|
python
|
def _start_selecting(self, event):
    """Begin the rectangle-selection process at the event's position."""
    self._selecting = True
    canvas = self._canvas
    # Translate window coordinates into canvas coordinates.
    start_x = canvas.canvasx(event.x)
    start_y = canvas.canvasy(event.y)
    self._sstart = (start_x, start_y)
    # Lazily create the dashed selection rectangle on first use.
    if not self._sobject:
        self._sobject = canvas.create_rectangle(
            self._sstart[0], self._sstart[1], start_x, start_y,
            dash=(3, 5), outline='#0000ff'
        )
    canvas.itemconfigure(self._sobject, state=tk.NORMAL)
|
java
|
/**
 * Splits a combined date/time skeleton into its date and time halves and, in
 * addition, produces a normalized form of each half.
 *
 * <p>The first pass routes each pattern character: y/M/E/d go to the date
 * skeleton (with counts kept for normalization), h/H/m/v/z go to the time
 * skeleton (again with counts), and the remaining recognized fields are
 * copied verbatim into both the raw and the normalized skeletons.
 * Unrecognized characters are dropped.
 *
 * @param skeleton the input skeleton to split
 * @param dateSkeleton receives the date fields of {@code skeleton}
 * @param normalizedDateSkeleton receives the normalized date fields
 * @param timeSkeleton receives the time fields of {@code skeleton}
 * @param normalizedTimeSkeleton receives the normalized time fields
 */
private static void getDateTimeSkeleton(String skeleton,
        StringBuilder dateSkeleton,
        StringBuilder normalizedDateSkeleton,
        StringBuilder timeSkeleton,
        StringBuilder normalizedTimeSkeleton)
{
    // dateSkeleton follows the sequence of y*M*E*d*
    // timeSkeleton follows the sequence of hm*[v|z]?
    int i;
    int ECount = 0;
    int dCount = 0;
    int MCount = 0;
    int yCount = 0;
    int hCount = 0;
    int HCount = 0;
    int mCount = 0;
    int vCount = 0;
    int zCount = 0;
    // First pass: classify every character and count the fields that need
    // special normalization below.
    for (i = 0; i < skeleton.length(); ++i) {
        char ch = skeleton.charAt(i);
        switch ( ch ) {
          case 'E':
            dateSkeleton.append(ch);
            ++ECount;
            break;
          case 'd':
            dateSkeleton.append(ch);
            ++dCount;
            break;
          case 'M':
            dateSkeleton.append(ch);
            ++MCount;
            break;
          case 'y':
            dateSkeleton.append(ch);
            ++yCount;
            break;
          // Date fields copied through unchanged to both forms.
          case 'G':
          case 'Y':
          case 'u':
          case 'Q':
          case 'q':
          case 'L':
          case 'l':
          case 'W':
          case 'w':
          case 'D':
          case 'F':
          case 'g':
          case 'e':
          case 'c':
          case 'U':
          case 'r':
            normalizedDateSkeleton.append(ch);
            dateSkeleton.append(ch);
            break;
          case 'a':
            // 'a' is implicitly handled
            timeSkeleton.append(ch);
            break;
          case 'h':
            timeSkeleton.append(ch);
            ++hCount;
            break;
          case 'H':
            timeSkeleton.append(ch);
            ++HCount;
            break;
          case 'm':
            timeSkeleton.append(ch);
            ++mCount;
            break;
          case 'z':
            ++zCount;
            timeSkeleton.append(ch);
            break;
          case 'v':
            ++vCount;
            timeSkeleton.append(ch);
            break;
          // Time fields copied through unchanged to both forms.
          case 'V':
          case 'Z':
          case 'k':
          case 'K':
          case 'j':
          case 's':
          case 'S':
          case 'A':
            timeSkeleton.append(ch);
            normalizedTimeSkeleton.append(ch);
            break;
        }
    }
    /* generate normalized form for date*/
    // Year keeps its full count; month/weekday collapse short runs to a
    // single character and cap long runs at 5; day collapses to one 'd'.
    if ( yCount != 0 ) {
        for (i = 0; i < yCount; i++) {
            normalizedDateSkeleton.append('y');
        }
    }
    if ( MCount != 0 ) {
        if ( MCount < 3 ) {
            normalizedDateSkeleton.append('M');
        } else {
            for ( i = 0; i < MCount && i < 5; ++i ) {
                 normalizedDateSkeleton.append('M');
            }
        }
    }
    if ( ECount != 0 ) {
        if ( ECount <= 3 ) {
            normalizedDateSkeleton.append('E');
        } else {
            for ( i = 0; i < ECount && i < 5; ++i ) {
                 normalizedDateSkeleton.append('E');
            }
        }
    }
    if ( dCount != 0 ) {
        normalizedDateSkeleton.append('d');
    }
    /* generate normalized form for time */
    // 24-hour 'H' wins over 12-hour 'h' when both appeared.
    if ( HCount != 0 ) {
        normalizedTimeSkeleton.append('H');
    }
    else if ( hCount != 0 ) {
        normalizedTimeSkeleton.append('h');
    }
    if ( mCount != 0 ) {
        normalizedTimeSkeleton.append('m');
    }
    if ( zCount != 0 ) {
        normalizedTimeSkeleton.append('z');
    }
    if ( vCount != 0 ) {
        normalizedTimeSkeleton.append('v');
    }
}
|
java
|
/**
 * Returns whether the given list item is (at least partially) covered by the
 * sticky header, taking item and header margins into account.
 */
private boolean itemIsObscuredByHeader(RecyclerView parent, View item, View header, int orientation) {
    RecyclerView.LayoutParams lp = (RecyclerView.LayoutParams) item.getLayoutParams();
    mDimensionCalculator.initMargins(mTempRect1, header);

    int position = parent.getChildAdapterPosition(item);
    if (position == RecyclerView.NO_POSITION || mHeaderProvider.getHeader(parent, position) != header) {
        // Resolves https://github.com/timehop/sticky-headers-recyclerview/issues/36
        // Handles an edge case where a trailing header is smaller than the current sticky header.
        return false;
    }

    if (orientation == LinearLayoutManager.VERTICAL) {
        int itemTop = item.getTop() - lp.topMargin;
        int headerBottom = getListTop(parent) + header.getBottom() + mTempRect1.bottom + mTempRect1.top;
        return itemTop < headerBottom;
    }

    int itemLeft = item.getLeft() - lp.leftMargin;
    int headerRight = getListLeft(parent) + header.getRight() + mTempRect1.right + mTempRect1.left;
    return itemLeft < headerRight;
}
|
java
|
/**
 * Fetches the HTTPS health check identified by the given resource name by
 * building the request object and delegating to the request-based overload.
 */
@BetaApi
public final HttpsHealthCheck2 getHttpsHealthCheck(
    ProjectGlobalHttpsHealthCheckName httpsHealthCheck) {
  // A null name is forwarded as a null request field.
  String resourceName = httpsHealthCheck == null ? null : httpsHealthCheck.toString();
  GetHttpsHealthCheckHttpRequest request =
      GetHttpsHealthCheckHttpRequest.newBuilder().setHttpsHealthCheck(resourceName).build();
  return getHttpsHealthCheck(request);
}
|
python
|
def _resolve(self):
    """resolve the type symbol from name by doing a lookup"""
    self.__is_resolved = True
    if not self.is_complex:
        return
    # Complex symbols may wrap a nested type; resolve whichever applies.
    target = self.nested or self
    target.__reference = self.module.lookup(target.name)
|
python
|
def get_access_key(self):
    """
    Gets the application secret key.
    The value can be stored in parameters "access_key", "client_key" or "secret_key".
    :return: the application secret key.
    """
    access_key = self.get_as_nullable_string("access_key")
    # Fall back to the alternative parameter names documented above. The
    # previous implementation re-read "access_key" a second time, which made
    # the documented "client_key"/"secret_key" fallbacks dead code.
    if access_key is None:
        access_key = self.get_as_nullable_string("client_key")
    if access_key is None:
        access_key = self.get_as_nullable_string("secret_key")
    return access_key
|
java
|
/**
 * Replaces all data of an existing OHLC series.
 *
 * <p>If {@code newXData} is null, sequential x-values are generated to match
 * the length of {@code newCloseData}.
 *
 * @return the updated series
 * @throws IllegalArgumentException if no series with the given name exists
 */
public OHLCSeries updateOHLCSeries(
    String seriesName,
    double[] newXData,
    double[] newOpenData,
    double[] newHighData,
    double[] newLowData,
    double[] newCloseData) {

  sanityCheck(seriesName, newOpenData, newHighData, newLowData, newCloseData);

  OHLCSeries series = getSeriesMap().get(seriesName);
  if (series == null) {
    throw new IllegalArgumentException("Series name >" + seriesName + "< not found!!!");
  }

  double[] xData;
  if (newXData == null) {
    // No x-data supplied: generate sequential values matching the close data.
    xData = Utils.getGeneratedDataAsArray(newCloseData.length);
  } else {
    // Sanity check
    checkDataLengths(seriesName, "X-Axis", "Close", newXData, newCloseData);
    xData = newXData;
  }

  series.replaceData(xData, newOpenData, newHighData, newLowData, newCloseData);
  return series;
}
|
python
|
def calculate_fee(self, input_values):
    '''
    Tx, list(int) -> int
    Inputs don't know their value without the whole chain.
    '''
    # Fee is whatever value the inputs carry beyond the declared outputs.
    total_in = sum(input_values)
    total_out = sum(utils.le2i(o.value) for o in self.tx_outs)
    return total_in - total_out
|
python
|
def encode(self,
           data: mx.sym.Symbol,
           data_length: mx.sym.Symbol,
           seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
    """
    Encodes data given sequence lengths of individual examples and maximum sequence length,
    by threading the triple through every encoder in order.
    :param data: Input data.
    :param data_length: Vector with sequence lengths.
    :param seq_len: Maximum sequence length.
    :return: Encoded versions of input data (data, data_length, seq_len).
    """
    state = (data, data_length, seq_len)
    # Each encoder consumes the previous encoder's output triple.
    for encoder in self.encoders:
        state = encoder.encode(*state)
    return state
|
python
|
def Idelchik(dp, voidage, vs, rho, mu, L=1):
    r'''Calculates pressure drop across a packed bed of spheres as in [2]_,
    originally in [1]_.
    .. math::
        \frac{\Delta P}{L\rho v_s^2} d_p = \frac{0.765}{\epsilon^{4.2}}
        \left(\frac{30}{Re_l} + \frac{3}{Re_l^{0.7}} + 0.3\right)
    .. math::
        Re_l = (0.45/\epsilon^{0.5})Re_{Erg}
    .. math::
        Re_{Erg} = \frac{\rho v_s D_p}{\mu(1-\epsilon)}
    Parameters
    ----------
    dp : float
        Particle diameter of spheres [m]
    voidage : float
        Void fraction of bed packing [-]
    vs : float
        Superficial velocity of the fluid (volumetric flow rate/cross-sectional
        area)[m/s]
    rho : float
        Density of the fluid [kg/m^3]
    mu : float
        Viscosity of the fluid, [Pa*s]
    L : float, optional
        Length the fluid flows in the packed bed [m]
    Returns
    -------
    dP : float
        Pressure drop across the bed [Pa]
    Notes
    -----
    :math:`0.001 < Re_{Erg} <1000`
    This equation is valid for void fractions between 0.3 and 0.8. Cited as
    by Bernshtein.
    Examples
    --------
    >>> Idelchik(dp=8E-4, voidage=0.4, vs=1E-3, rho=1E3, mu=1E-3)
    1571.909125999067
    References
    ----------
    .. [1] Idelchik, I. E. Flow Resistance: A Design Guide for Engineers.
       Hemisphere Publishing Corporation, New York, 1989.
    .. [2] Allen, K. G., T. W. von Backstrom, and D. G. Kroger. "Packed Bed
       Pressure Drop Dependence on Particle Shape, Size Distribution, Packing
       Arrangement and Roughness." Powder Technology 246 (September 2013):
       590-600. doi:10.1016/j.powtec.2013.06.022.
    '''
    # Ergun-style Reynolds number, then Idelchik's modified Reynolds number.
    Re_Erg = rho*vs*dp/(mu*(1. - voidage))
    Re_l = (0.45/voidage**0.5)*Re_Erg
    # Right-hand side of the correlation, then solve for dP:
    # dP/(L rho vs^2) * dp = rhs  =>  dP = rhs*L*rho*vs^2/dp
    rhs = 0.765/voidage**4.2*(30./Re_l + 3./Re_l**0.7 + 0.3)
    return rhs*L*rho*vs**2/dp
|
java
|
/**
 * Converts an array of soap models into a list of model instances.
 *
 * @param soapModels the soap models to convert; may be null
 * @return the converted models, or null when the input is null
 */
public static List<CommerceWishList> toModels(
    CommerceWishListSoap[] soapModels) {

    if (soapModels == null) {
        return null;
    }

    // Presize to avoid intermediate growth.
    List<CommerceWishList> models = new ArrayList<CommerceWishList>(soapModels.length);

    for (int i = 0; i < soapModels.length; i++) {
        models.add(toModel(soapModels[i]));
    }

    return models;
}
|
java
|
/**
 * Appends the subtitle elements to the HTML canvas: elements carrying a
 * kanji reading are rendered in a dedicated span, all others as plain text.
 */
protected void appendElements(RendersnakeHtmlCanvas html,
        List<SubtitleItem.Inner> elements) throws IOException {
    for (SubtitleItem.Inner element : elements) {
        String kanji = element.getKanji();
        if (kanji == null) {
            html.write(element.getText());
        } else {
            html.spanKanji(kanji);
        }
    }
}
|
python
|
def text_entry(self):
    """ Relay literal text entry from user to Roku until
    <Enter> or <Esc> pressed. """
    # Only these named key sequences are acted on; other sequences are dropped.
    allowed_sequences = set(['KEY_ENTER', 'KEY_ESCAPE', 'KEY_DELETE'])
    sys.stdout.write('Enter text (<Esc> to abort) : ')
    sys.stdout.flush()
    # Track start column to ensure user doesn't backspace too far
    start_column = self.term.get_location()[1]
    cur_column = start_column
    with self.term.cbreak():
        val = ''
        while val != 'KEY_ENTER' and val != 'KEY_ESCAPE':
            val = self.term.inkey()
            if not val:
                continue
            elif val.is_sequence:
                # Named key: replace with its name, skip anything not allowed.
                val = val.name
                if val not in allowed_sequences:
                    continue
            if val == 'KEY_ENTER':
                self.roku.enter()
            elif val == 'KEY_ESCAPE':
                # Abort: no command sent; the loop condition ends the session.
                pass
            elif val == 'KEY_DELETE':
                self.roku.backspace()
                if cur_column > start_column:
                    # Erase the character locally: back, overwrite, back.
                    sys.stdout.write(u'\b \b')
                    cur_column -= 1
            else:
                # Ordinary character: forward to the Roku and echo locally.
                self.roku.literal(val)
                sys.stdout.write(val)
                cur_column += 1
            sys.stdout.flush()
    # Clear to beginning of line
    sys.stdout.write(self.term.clear_bol)
    sys.stdout.write(self.term.move(self.term.height, 0))
    sys.stdout.flush()
|
java
|
/**
 * Deletes the given commerce shipping fixed option by delegating to the
 * wrapped local service.
 *
 * @param commerceShippingFixedOption the option to delete
 * @return the deleted option, as returned by the delegate service
 */
@Override
public com.liferay.commerce.shipping.engine.fixed.model.CommerceShippingFixedOption deleteCommerceShippingFixedOption(
    com.liferay.commerce.shipping.engine.fixed.model.CommerceShippingFixedOption commerceShippingFixedOption) {
    return _commerceShippingFixedOptionLocalService.deleteCommerceShippingFixedOption(commerceShippingFixedOption);
}
|
python
|
def train_async(train_dataset,
                eval_dataset,
                analysis_dir,
                output_dir,
                features,
                model_type,
                max_steps=5000,
                num_epochs=None,
                train_batch_size=100,
                eval_batch_size=16,
                min_eval_frequency=100,
                top_n=None,
                layer_sizes=None,
                learning_rate=0.01,
                epsilon=0.0005,
                job_name=None,  # cloud param
                job_name_prefix='',  # cloud param
                cloud=None,  # cloud param
                ):
  # NOTE: if you make a change to this doc string, you MUST COPY it 4 TIMES in
  # mltoolbox.{classification|regression}.{dnn|linear}, but you must remove
  # the model_type parameter, and maybe change the layer_sizes and top_n
  # parameters!
  # Datalab does some tricky things and messing with train.__doc__ will
  # not work!
  """Train model locally or in the cloud.
  Local Training:
  Args:
    train_dataset: CsvDataSet
    eval_dataset: CsvDataSet
    analysis_dir: The output directory from local_analysis
    output_dir: Output directory of training.
    features: file path or features object. Example:
        {
          "col_A": {"transform": "scale", "default": 0.0},
          "col_B": {"transform": "scale","value": 4},
          # Note col_C is missing, so default transform used.
          "col_D": {"transform": "hash_one_hot", "hash_bucket_size": 4},
          "col_target": {"transform": "target"},
          "col_key": {"transform": "key"}
        }
        The keys correspond to the columns in the input files as defined by the
        schema file during preprocessing. Some notes
        1) The "key" and "target" transforms are required.
        2) Default values are optional. These are used if the input data has
           missing values during training and prediction. If not supplied for a
           column, the default value for a numerical column is that column's
           mean value, and for a categorical column the empty string is used.
        3) For numerical columns, the following transforms are supported:
           i) {"transform": "identity"}: does nothing to the number. (default)
           ii) {"transform": "scale"}: scales the column values to -1, 1.
           iii) {"transform": "scale", "value": a}: scales the column values
              to -a, a.
           For categorical columns, the following transforms are supported:
           i) {"transform": "one_hot"}: A one-hot vector using the full
              vocabulary is used. (default)
           ii) {"transform": "embedding", "embedding_dim": d}: Each label is
              embedded into an d-dimensional space.
    model_type: One of 'linear_classification', 'linear_regression',
        'dnn_classification', 'dnn_regression'.
    max_steps: Int. Number of training steps to perform.
    num_epochs: Maximum number of training data epochs on which to train.
        The training job will run for max_steps or num_epochs, whichever occurs
        first.
    train_batch_size: number of rows to train on in one step.
    eval_batch_size: number of rows to eval in one step. One pass of the eval
        dataset is done. If eval_batch_size does not perfectly divide the number
        of eval instances, the last fractional batch is not used.
    min_eval_frequency: Minimum number of training steps between evaluations.
    top_n: Int. For classification problems, the output graph will contain the
        labels and scores for the top n classes with a default of n=1. Use
        None for regression problems.
    layer_sizes: List. Represents the layers in the connected DNN.
        If the model type is DNN, this must be set. Example [10, 3, 2], this
        will create three DNN layers where the first layer will have 10 nodes,
        the middle layer will have 3 nodes, and the last layer will have 2
        nodes.
    learning_rate: tf.train.AdamOptimizer's learning rate,
    epsilon: tf.train.AdamOptimizer's epsilon value.
  Cloud Training:
  Args:
    All local training arguments are valid for cloud training. Cloud training
    contains two additional args:
    cloud: A CloudTrainingConfig object.
    job_name: Training job name. A default will be picked if None.
    job_name_prefix: If job_name is None, the job will be named
        '<job_name_prefix>_<timestamp>'.
  Returns:
    A google.datalab.utils.Job object that can be used to query state from or wait.
  """
  import google.datalab.utils as du
  # Validate the model type up front so a typo fails fast.
  if model_type not in ['linear_classification', 'linear_regression', 'dnn_classification',
                        'dnn_regression']:
    raise ValueError('Unknown model_type %s' % model_type)
  with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    if cloud:
      # Cloud path: submit the job with the cloud-specific parameters.
      return cloud_train(
          train_dataset=train_dataset,
          eval_dataset=eval_dataset,
          analysis_dir=analysis_dir,
          output_dir=output_dir,
          features=features,
          model_type=model_type,
          max_steps=max_steps,
          num_epochs=num_epochs,
          train_batch_size=train_batch_size,
          eval_batch_size=eval_batch_size,
          min_eval_frequency=min_eval_frequency,
          top_n=top_n,
          layer_sizes=layer_sizes,
          learning_rate=learning_rate,
          epsilon=epsilon,
          job_name=job_name,
          job_name_prefix=job_name_prefix,
          config=cloud,
      )
    else:
      # Local path: wrap the synchronous trainer in a lambda job so the
      # caller gets the same async Job interface as the cloud path.
      def fn():
        return local_train(
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            analysis_dir=analysis_dir,
            output_dir=output_dir,
            features=features,
            model_type=model_type,
            max_steps=max_steps,
            num_epochs=num_epochs,
            train_batch_size=train_batch_size,
            eval_batch_size=eval_batch_size,
            min_eval_frequency=min_eval_frequency,
            top_n=top_n,
            layer_sizes=layer_sizes,
            learning_rate=learning_rate,
            epsilon=epsilon)
      return du.LambdaJob(fn, job_id=None)
|
java
|
/**
 * Serializes a transaction header and an optional transaction record into a
 * single byte array, writing the "hdr" record first and then (if present)
 * the "txn" record.
 *
 * @param hdr the mandatory transaction header
 * @param txn the transaction body; may be null
 * @return the serialized bytes
 * @throws IOException if serialization fails
 */
public static byte[] marshallTxnEntry(TxnHeader hdr, Record txn)
        throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    OutputArchive archive = BinaryOutputArchive.getArchive(buffer);
    hdr.serialize(archive, "hdr");
    // Only the header is mandatory; the txn body may be absent.
    if (txn != null) {
        txn.serialize(archive, "txn");
    }
    return buffer.toByteArray();
}
|
python
|
def plot_dos(self, sigma=0.05):
    """
    Plot the density of states.
    Args:
        sigma: Gaussian smearing applied to the DOS.
    Returns:
        a matplotlib object
    """
    dos_plotter = DosPlotter(sigma=sigma)
    # Register this object's total DOS under the label "t" and render it.
    dos_plotter.add_dos("t", self._bz.dos)
    return dos_plotter.get_plot()
|
python
|
def return_obj(cols, df, return_cols=False):
    """Construct a DataFrameHolder and then return either that or the DataFrame."""
    holder = DataFrameHolder(cols=cols, df=df)
    # The holder decides its own representation based on return_cols.
    return holder.return_self(return_cols=return_cols)
|
java
|
/**
 * Estimates the latitude at time {@code et} by dead reckoning from the most
 * recent computed state. With zero rate of turn the position advances along
 * a straight bearing; otherwise it follows a circular arc of {@code radius}
 * around the turn center.
 *
 * @param et the time (passed through {@code correctedTime}) to estimate for
 * @return the estimated latitude
 */
public final double estimatedLatitude(long et)
{
    et = correctedTime(et);
    lock.lock();  // guards the navigation state refreshed by calc()
    try
    {
        calc();
        if (rateOfTurn == 0)
        {
            // Straight-line motion: advance by distance along the bearing.
            double dist = calcDist(et);
            return latitude+deltaLatitude(dist, bearing);
        }
        else
        {
            // Turning: position lies on a circle around the turn center.
            double deg = calcDeg(et);
            return centerLatitude+deltaLatitude(radius, deg);
        }
    }
    finally
    {
        lock.unlock();
    }
}
|
python
|
def to_bytes(self, exclude=tuple(), disable=None, **kwargs):
    """Serialize the current state to a binary string.
    exclude (list): Names of components or serialization fields to exclude.
    disable (list): Deprecated alias for `exclude` (emits a warning).
    RETURNS (bytes): The serialized form of the `Language` object.
    DOCS: https://spacy.io/api/language#to_bytes
    """
    # `disable` is the deprecated name for `exclude`; warn and treat it the same.
    if disable is not None:
        deprecation_warning(Warnings.W014)
        exclude = disable
    # Serializers are evaluated lazily; insertion order is preserved so the
    # output layout is deterministic.
    serializers = OrderedDict()
    serializers["vocab"] = lambda: self.vocab.to_bytes()
    serializers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"])
    serializers["meta.json"] = lambda: srsly.json_dumps(self.meta)
    for name, proc in self.pipeline:
        if name in exclude:
            continue
        if not hasattr(proc, "to_bytes"):
            continue
        # Bind `proc` via a default argument so each lambda captures its own
        # pipeline component rather than the loop variable's final value.
        serializers[name] = lambda proc=proc: proc.to_bytes(exclude=["vocab"])
    exclude = util.get_serialization_exclude(serializers, exclude, kwargs)
    return util.to_bytes(serializers, exclude)
|
python
|
def readSheet(self, sheet):
    """Reads a sheet into the sheet dictionary.

    Stores each sheet as an array (rows) of arrays (columns). Cells whose
    text begins with "#" are treated as comments and skipped.
    """
    name = sheet.getAttribute("name")
    rows = sheet.getElementsByType(TableRow)
    arrRows = []
    # for each row
    for row in rows:
        row_comment = ""
        arrCells = GrowingList()
        cells = row.getElementsByType(TableCell)
        # for each cell
        count = 0
        for cell in cells:
            # repeated value?
            repeat = cell.getAttribute("numbercolumnsrepeated")
            if(not repeat):
                repeat = 1
            spanned = \
                int(cell.getAttribute('numbercolumnsspanned') or 0)
            # clone spanned cells
            if self.clonespannedcolumns is not None and spanned > 1:
                repeat = spanned
            ps = cell.getElementsByType(P)
            textContent = ""
            # for each text/text:span node
            for p in ps:
                for n in p.childNodes:
                    if (n.nodeType == 1 and n.tagName == "text:span"):
                        for c in n.childNodes:
                            if (c.nodeType == 3):
                                # BUG FIX: read the text from the span's own
                                # text child (c.data); the previous code read
                                # n.data from the span *element*, which has no
                                # character data.
                                textContent = u'{}{}'.format(textContent,
                                                             c.data)
                    if (n.nodeType == 3):
                        textContent = u'{}{}'.format(textContent, n.data)
            if(textContent):
                if(textContent[0] != "#"):  # ignore comments cells
                    for rr in xrange(int(repeat)):  # repeated?
                        arrCells[count] = textContent
                        count += 1
                else:
                    row_comment = row_comment + textContent + " "
            else:
                # Empty cell: still advance the column index for each repeat.
                for rr in xrange(int(repeat)):
                    count += 1
        # if row contained something
        if(len(arrCells)):
            arrRows.append(arrCells)
        #else:
        #    print ("Empty or commented row (", row_comment, ")")
    self.sheets.append(arrRows)
    self.sheet_names.append(name)
|
python
|
def buildDefaultRefWCS(self):
    """ Generate a default reference WCS for this image. """
    self.default_refWCS = None
    if not self.use_wcs:
        return
    # Collect the WCS of every chip catalog and merge them into one frame.
    wcs_list = [self.chip_catalogs[scichip]['wcs']
                for scichip in self.chip_catalogs]
    self.default_refWCS = utils.output_wcs(wcs_list)
|
java
|
/**
 * Removes the commerce subscription entry with the given primary key.
 *
 * @param commerceSubscriptionEntryId the primary key of the entry to remove
 * @return the entry that was removed
 * @throws com.liferay.commerce.exception.NoSuchSubscriptionEntryException if no entry with that key exists
 */
public static CommerceSubscriptionEntry remove(
    long commerceSubscriptionEntryId)
    throws com.liferay.commerce.exception.NoSuchSubscriptionEntryException {
    return getPersistence().remove(commerceSubscriptionEntryId);
}
|
java
|
/**
 * Runs the request through {@code beforeClientExecution} and then executes
 * the ModifyVolumeAttribute call.
 *
 * @param request the modify-volume-attribute request
 * @return the result returned by the execution
 */
@Override
public ModifyVolumeAttributeResult modifyVolumeAttribute(ModifyVolumeAttributeRequest request) {
    request = beforeClientExecution(request);
    return executeModifyVolumeAttribute(request);
}
|
python
|
def sort(iterable):
    """
    Given an IP address list, this function sorts the list.
    :type iterable: Iterator
    :param iterable: An IP address list.
    :rtype: list
    :return: The sorted IP address list.
    """
    # Normalize first so addresses compare consistently, then clean each
    # address back into its canonical output form.
    normalized = [normalize_ip(ip) for ip in iterable]
    normalized.sort()
    return [clean_ip(ip) for ip in normalized]
|
java
|
/**
 * Builds the final {@code LBiCharFunction}: the returned function evaluates
 * the registered cases in order and applies the first whose predicate
 * matches, falling back to the "eventually" function when none match.
 * {@link Error}s propagate untouched; any other {@code Throwable} is routed
 * through the configured handling. If a consumer was registered it is
 * notified with the built function before returning.
 */
@Nonnull
public final LBiCharFunction<R> build() {
    final LBiCharFunction<R> eventuallyFinal = this.eventually;
    LBiCharFunction<R> retval;
    // Snapshot the cases so the built function is immune to later mutation.
    final Case<LBiCharPredicate, LBiCharFunction<R>>[] casesArray = cases.toArray(new Case[cases.size()]);
    retval = LBiCharFunction.<R> biCharFunc((a1, a2) -> {
        try {
            // First matching case wins.
            for (Case<LBiCharPredicate, LBiCharFunction<R>> aCase : casesArray) {
                if (aCase.casePredicate().test(a1, a2)) {
                    return aCase.caseFunction().apply(a1, a2);
                }
            }
            return eventuallyFinal.apply(a1, a2);
        } catch (Error e) { // NOSONAR
            throw e;
        } catch (Throwable e) { // NOSONAR
            throw Handler.handleOrPropagate(e, handling);
        }
    });
    if (consumer != null) {
        consumer.accept(retval);
    }
    return retval;
}
|
java
|
/**
 * Wraps the UTF-8 encoding of the given string in an input stream.
 *
 * @param string the text to expose as a stream
 * @return a stream over the string's UTF-8 bytes
 */
public static InputStream getStream(String string) {
    // StandardCharsets.UTF_8 is guaranteed to be present on every JVM, so the
    // checked UnsupportedEncodingException (and its dead catch block in the
    // previous version) cannot occur and is no longer needed.
    return new ByteArrayInputStream(string.getBytes(java.nio.charset.StandardCharsets.UTF_8));
}
|
python
|
def text_justification(words, max_width):
    '''
    Fully justify `words` into lines of exactly `max_width` characters
    (LeetCode-style text justification): words are packed greedily, extra
    spaces are distributed left-to-right between words, and the last line is
    left-justified and padded on the right.

    :type words: list
    :type max_width: int
    :rtype: list
    :raises ValueError: if any single word is longer than max_width
    '''
    ret = []  # return value
    row_len = 0  # current length of strs in a row
    row_words = []  # current words in a row
    index = 0  # the index of current word in words
    is_first_word = True  # is current word the first in a row
    while index < len(words):
        # Greedily pull words into the current row while they still fit.
        while row_len <= max_width and index < len(words):
            if len(words[index]) > max_width:
                raise ValueError("there exists word whose length is larger than max_width")
            tmp = row_len
            row_words.append(words[index])
            tmp += len(words[index])
            if not is_first_word:
                tmp += 1  # except for the first word, each word should have at least a ' ' before it.
            if tmp > max_width:
                # The word does not fit; undo the append and close the row.
                row_words.pop()
                break
            row_len = tmp
            index += 1
            is_first_word = False
        # here we have already got a row of str , then we should supplement enough ' ' to make sure the length is max_width.
        row = ""
        # if the row is the last
        if index == len(words):
            # Last line: single spaces between words, pad the remainder.
            for word in row_words:
                row += (word + ' ')
            row = row[:-1]
            row += ' ' * (max_width - len(row))
        # not the last row and more than one word
        elif len(row_words) != 1:
            # Distribute surplus spaces evenly; leftmost gaps get one extra.
            space_num = max_width - row_len
            space_num_of_each_interval = space_num // (len(row_words) - 1)
            space_num_rest = space_num - space_num_of_each_interval * (len(row_words) - 1)
            for j in range(len(row_words)):
                row += row_words[j]
                if j != len(row_words) - 1:
                    row += ' ' * (1 + space_num_of_each_interval)
                    if space_num_rest > 0:
                        row += ' '
                        space_num_rest -= 1
        # row with only one word
        else:
            row += row_words[0]
            row += ' ' * (max_width - len(row))
        ret.append(row)
        # after a row , reset those value
        row_len = 0
        row_words = []
        is_first_word = True
    return ret
|
java
|
/**
 * Orders buildable items by the time they became buildable (earliest first),
 * delegating to the numeric {@code compare} overload.
 */
public int compare(BuildableItem lhs, BuildableItem rhs) {
    return compare(lhs.buildableStartMilliseconds,rhs.buildableStartMilliseconds);
}
|
python
|
def limits(self,variable):
    """Return minimum and maximum of variable across all rows of data.

    NOTE(review): `variable` is interpolated directly into the SQL text via
    string formatting, so callers must only pass a trusted column name —
    untrusted input here would be a SQL injection vector.
    """
    # The trailing-comma unpacking asserts the query yields exactly one row.
    (vmin,vmax), = self.SELECT('min(%(variable)s), max(%(variable)s)' % vars())
    return vmin,vmax
|
python
|
def get_login_redirect_url(self, request):
    """
    Returns the default URL to redirect to after logging in. Note
    that URLs passed explicitly (e.g. by passing along a `next`
    GET parameter) take precedence over the value returned here.
    """
    assert request.user.is_authenticated
    url = getattr(settings, "LOGIN_REDIRECT_URLNAME", None)
    if not url:
        url = settings.LOGIN_REDIRECT_URL
    else:
        # The deprecated setting still wins for backwards compatibility.
        warnings.warn("LOGIN_REDIRECT_URLNAME is deprecated, simply"
                      " use LOGIN_REDIRECT_URL with a URL name",
                      DeprecationWarning)
    return resolve_url(url)
|
java
|
/**
 * Interprets a flat list as alternating key/value pairs and builds a map
 * from it.
 *
 * @param list the values, laid out as key1, value1, key2, value2, ...
 * @return the resulting map
 * @throws VoldemortException if the list has an odd number of elements
 */
public static <V> Map<V, V> convertListToMap(List<V> list) {
    if(list.size() % 2 != 0)
        throw new VoldemortException("Failed to convert list to map.");
    Map<V, V> map = new HashMap<V, V>();
    // Consume the list pairwise: even index is the key, odd is the value.
    for(int index = 0; index + 1 < list.size(); index += 2) {
        map.put(list.get(index), list.get(index + 1));
    }
    return map;
}
|
java
|
/**
 * Removes all isotopes of the given element from the formula.
 *
 * @param formula the molecular formula to modify in place
 * @param element the element whose isotopes should be removed
 * @return the (mutated) input formula, for convenience
 */
public static IMolecularFormula removeElement(IMolecularFormula formula, IElement element) {
    for (IIsotope isotope : getIsotopes(formula, element)) {
        formula.removeIsotope(isotope);
    }
    return formula;
}
|
python
|
def output(self, _in, out, **kwargs):
    """Wrap translation in Angular module."""
    # Emit the module prologue, then the translated catalog, then close the
    # run block. The doubled braces survive .format() as literal braces.
    prologue = (
        'angular.module("{0}", ["gettext"]).run('
        '["gettextCatalog", function (gettextCatalog) {{'.format(
            self.catalog_name
        )
    )
    out.write(prologue)
    out.write(_in.read())
    out.write('}]);')
|
java
|
/**
 * Fetches the counter value stored under {@code this.key}.
 *
 * <p>The value may have been stored either as a {@code Long} or as a numeric
 * string; both representations are accepted.
 *
 * @return the counter value
 * @throws MemcachedClientException if the key does not exist
 */
public long get() throws MemcachedException, InterruptedException, TimeoutException {
    Object value = this.memcachedClient.get(this.key);
    if (value == null) {
        throw new MemcachedClientException("key is not existed.");
    }
    if (value instanceof Long) {
        return (Long) value;
    }
    // Stored as text: trim whitespace before parsing.
    return Long.valueOf(((String) value).trim());
}
|
python
|
def absent(name, profile='grafana'):
    '''
    Ensure that a org is present.
    name
        Name of the org to remove.
    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    '''
    # A profile given by name is resolved through the minion configuration.
    if isinstance(profile, string_types):
        profile = __salt__['config.option'](profile)
    ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
    existing_org = __salt__['grafana4.get_org'](name, profile)
    if not existing_org:
        # Nothing to delete: report success without changes.
        ret['result'] = True
        ret['comment'] = 'Org {0} already absent'.format(name)
        return ret
    if __opts__['test']:
        # Dry run: describe the pending deletion only.
        ret['comment'] = 'Org {0} will be deleted'.format(name)
        return ret
    __salt__['grafana4.delete_org'](existing_org['id'], profile=profile)
    ret['result'] = True
    ret['changes'][name] = 'Absent'
    ret['comment'] = 'Org {0} was deleted'.format(name)
    return ret
|
java
|
/**
 * Builds a snapshot of the event-time and processing-time timers belonging
 * to the given key group, bundled with the serializers needed to restore
 * them later.
 *
 * @param keyGroupIdx index of the key group to snapshot
 * @return the timers snapshot for that key group
 */
public InternalTimersSnapshot<K, N> snapshotTimersForKeyGroup(int keyGroupIdx) {
    return new InternalTimersSnapshot<>(
        keySerializer,
        namespaceSerializer,
        eventTimeTimersQueue.getSubsetForKeyGroup(keyGroupIdx),
        processingTimeTimersQueue.getSubsetForKeyGroup(keyGroupIdx));
}
|
java
|
/**
 * Serializes this object to JSON: the parent's JSON plus an array of the
 * serialized child geometries under the {@code JSON_GEOMETRIES} key.
 */
@Override
public JSONObject toJSON() throws JSONException {
    JSONObject result = super.toJSON();
    // Serialize each child geometry into a single array.
    JSONArray geometryArray = new JSONArray();
    for (Geometry geometry : this.mGeometries) {
        geometryArray.put(geometry.toJSON());
    }
    result.put(JSON_GEOMETRIES, geometryArray);
    return result;
}
|
java
|
/**
 * Convenience overload of {@code set} that applies the given diff callback,
 * forwarding {@code true} for the final boolean parameter of the
 * four-argument overload.
 */
public static <A extends ModelAdapter<Model, Item>, Model, Item extends IItem> A set(final A adapter, final List<Item> items, final DiffCallback<Item> callback) {
    return set(adapter, items, callback, true);
}
|
java
|
/**
 * Renders the given image into a new 8-bit grayscale image of the same size.
 *
 * @param image the source image (left unmodified)
 * @return a new {@code TYPE_BYTE_GRAY} image containing the grayscale rendering
 */
public static BufferedImage convertImageToGrayscale(BufferedImage image) {
    BufferedImage gray = new BufferedImage(image.getWidth(), image.getHeight(), BufferedImage.TYPE_BYTE_GRAY);
    // Drawing onto a TYPE_BYTE_GRAY surface performs the color conversion.
    Graphics2D graphics = gray.createGraphics();
    try {
        graphics.drawImage(image, 0, 0, null);
    } finally {
        // Fix: dispose the graphics context even if drawImage throws, so the
        // native resources are never leaked.
        graphics.dispose();
    }
    return gray;
}
|
java
|
/**
 * Reads a single table entry from {@code input}, hashes its key and records
 * the corresponding key update in {@code keyUpdateCollection}.
 *
 * @param input stream positioned at the start of the entry
 * @param entryOffset offset of the entry within its segment
 * @param keyUpdateCollection collector that receives the key update
 * @return the entry's total serialized length, as reported by its header
 * @throws IOException if the entry cannot be read
 */
private int indexSingleKey(InputStream input, long entryOffset, KeyUpdateCollection keyUpdateCollection) throws IOException {
    // Retrieve the next entry, get its Key and hash it.
    val e = AsyncTableEntryReader.readEntryComponents(input, entryOffset, this.connector.getSerializer());
    HashedArray key = new HashedArray(e.getKey());
    // Index the Key. If it was used before, then their versions will be compared to determine which one prevails.
    val update = new BucketUpdate.KeyUpdate(key, entryOffset, e.getVersion(), e.getHeader().isDeletion());
    keyUpdateCollection.add(update, e.getHeader().getTotalLength(), e.getHeader().getEntryVersion());
    return e.getHeader().getTotalLength();
}
|
java
|
/**
 * Returns a {@code Preference<String>} for the given key, using
 * {@code DEFAULT_STRING} as the default value.
 *
 * @param key the preference key
 * @return the preference wrapper for that key
 */
@CheckResult @NonNull
public Preference<String> getString(@NonNull String key) {
    return getString(key, DEFAULT_STRING);
}
|
java
|
/**
 * Performs an iterative depth-first scan of the files under
 * {@code rootFolder} and invokes {@code callback} for every class file that
 * declares a main method. Traversal stops at the first non-null callback
 * result, which is returned.
 *
 * @param rootFolder root of the tree to scan; a missing folder yields null
 * @param callback invoked with each discovered main class
 * @return the first non-null callback result, or null if none
 * @throws IOException if a file cannot be read
 * @throws IllegalArgumentException if rootFolder exists but is not a directory
 */
static <T> T doWithMainClasses(File rootFolder, MainClassCallback<T> callback)
        throws IOException {
    if (!rootFolder.exists()) {
        return null; // nothing to do
    }
    if (!rootFolder.isDirectory()) {
        throw new IllegalArgumentException(
                "Invalid root folder '" + rootFolder + "'");
    }
    // Class names are derived from paths relative to this prefix.
    String prefix = rootFolder.getAbsolutePath() + "/";
    // Explicit stack for the iterative depth-first traversal.
    Deque<File> stack = new ArrayDeque<>();
    stack.push(rootFolder);
    while (!stack.isEmpty()) {
        File file = stack.pop();
        if (file.isFile()) {
            try (InputStream inputStream = new FileInputStream(file)) {
                ClassDescriptor classDescriptor = createClassDescriptor(inputStream);
                if (classDescriptor != null && classDescriptor.isMainMethodFound()) {
                    String className = convertToClassName(file.getAbsolutePath(),
                            prefix);
                    T result = callback.doWith(new MainClass(className,
                            classDescriptor.getAnnotationNames()));
                    if (result != null) {
                        // Short-circuit: the caller found what it wanted.
                        return result;
                    }
                }
            }
        }
        if (file.isDirectory()) {
            // Package folders are pushed before class files; with LIFO
            // popping, class files of the current directory are visited first.
            pushAllSorted(stack, file.listFiles(PACKAGE_FOLDER_FILTER));
            pushAllSorted(stack, file.listFiles(CLASS_FILE_FILTER));
        }
    }
    return null;
}
|
java
|
/**
 * Applies the configured sub-comparator type to a super-column family.
 * Composite keys (indicated by a non-null builder) do not support super
 * columns, so the setting is skipped in that case.
 */
private void onSetSubComparator(CfDef cfDef, Properties cfProperties, StringBuilder builder)
{
    String subComparatorType = cfProperties.getProperty(CassandraConstants.SUBCOMPARATOR_TYPE);
    if (subComparatorType == null)
    {
        return;
    }
    if (ColumnFamilyType.valueOf(cfDef.getColumn_type()) != ColumnFamilyType.Super)
    {
        return;
    }
    if (builder == null)
    {
        cfDef.setSubcomparator_type(subComparatorType);
    }
    // super columns are not supported for composite keys as of now, so when a
    // builder is present this is intentionally a no-op.
}
|
python
|
def agreement_weighted(ci, wts):
    '''
    D = AGREEMENT_WEIGHTED(CI,WTS) is identical to AGREEMENT, with the
    exception that each partitions contribution is weighted according to
    the corresponding scalar value stored in the vector WTS. As an example,
    suppose CI contained partitions obtained using some heuristic for
    maximizing modularity. A possible choice for WTS might be the Q metric
    (Newman's modularity score). Such a choice would add more weight to
    higher modularity partitions.
    NOTE: Unlike AGREEMENT, this script does not have the input argument
    BUFFSZ.
    Parameters
    ----------
    ci : MxN np.ndarray
        set of M (possibly degenerate) partitions of N nodes
    wts : Mx1 np.ndarray
        relative weight of each partition
    Returns
    -------
    D : NxN np.ndarray
        weighted agreement matrix
    '''
    partitions = np.array(ci)
    num_partitions, n = partitions.shape
    # Normalize the weights so they sum to one.
    weights = np.array(wts) / np.sum(wts)
    D = np.zeros((n, n))
    # Accumulate each partition's co-assignment matrix, scaled by its weight.
    for idx in range(num_partitions):
        indicator = dummyvar(partitions[idx, :].reshape(1, n))
        D += np.dot(indicator, indicator.T) * weights[idx]
    return D
|
java
|
@Override
public void setRadii(float[] radii) {
  // null resets every corner to a sharp (zero-radius) corner.
  if (radii == null) {
    Arrays.fill(mCornerRadii, 0);
    mRadiiNonZero = false;
  } else {
    Preconditions.checkArgument(radii.length == 8, "radii should have exactly 8 values");
    System.arraycopy(radii, 0, mCornerRadii, 0, 8);
    // Track whether any corner actually needs rounding.
    boolean anyPositive = false;
    for (float radius : radii) {
      anyPositive |= (radius > 0);
    }
    mRadiiNonZero = anyPositive;
  }
  mIsPathDirty = true;
  invalidateSelf();
}
|
java
|
/**
 * Returns the index of the largest element of {@code values} (argmax).
 * For a histogram of counts this index is the mode.
 *
 * <p>Fix: the accumulator was previously seeded with 0, so arrays whose
 * values are all negative incorrectly returned index 0. It is now seeded
 * with the first element. An empty array still returns 0 (legacy behavior).
 *
 * @param values the values to scan (e.g. histogram bin counts)
 * @return index of the first occurrence of the maximum value
 */
public static int Mode( int[] values ){
    if ( values.length == 0 )
    {
        return 0; // preserve legacy behavior for empty input
    }
    int mode = 0;
    int curMax = values[0];
    for ( int i = 1, length = values.length; i < length; i++ )
    {
        if ( values[i] > curMax )
        {
            curMax = values[i];
            mode = i;
        }
    }
    return mode;
}
|
python
|
def read_toml(self, encoding=None, errors=None, newline=None, **kwargs):
    """Read this path as a TOML document.

    Parsing of `TOML <https://github.com/toml-lang/toml>`_ is delegated to
    the :mod:`pytoml` module. *encoding*, *errors*, and *newline* are
    forwarded to :meth:`open`; any remaining *kwargs* go to
    :meth:`pytoml.load`.

    Returns the decoded data structure.
    """
    import pytoml
    handle = self.open(mode='rt', encoding=encoding, errors=errors, newline=newline)
    with handle as f:
        return pytoml.load(f, **kwargs)
|
python
|
def handle_stderr(stderr_pipe):
    """
    Read the command's stderr and display it AFTER run_command() has printed
    the stdout. Always returns True.
    """
    stderr_output = stderr_pipe.read()
    if not stderr_output:
        return True
    header = "\n__ Error Output {0}".format('_' * 62)
    click.secho(header, fg='white', bold=True)
    click.echo(stderr_output)
    return True
|
python
|
def detach_model(vmssvm_model, lun):
    '''Detach a data disk from a VMSS VM model'''
    # Rewrite the disk list in place, dropping the disk with the given LUN.
    disks = vmssvm_model['properties']['storageProfile']['dataDisks']
    disks[:] = [disk for disk in disks if disk.get('lun') != lun]
    vmssvm_model['properties']['storageProfile']['dataDisks'] = disks
    return vmssvm_model
|
python
|
def _process_attachments(self, attachments):
    """
    Create attachments suitable for delivery to Hectane from the provided
    list of attachments.

    Each attachment may be either a local filename, a file object, or a
    dict describing the content (in the same format as Hectane). Note that
    if the filename cannot be determined, it will be set to "untitled".

    Fix: file handles opened here (from filename inputs) are now closed
    after reading instead of being leaked. Caller-provided file objects
    are left open, since the caller owns them.
    """
    for a in attachments:
        if not isinstance(a, dict):
            opened_here = False
            if isinstance(a, string_types):
                a = open(a, 'rb')
                opened_here = True
            filename = basename(getattr(a, 'name', 'untitled'))
            fileobj = a
            try:
                content = fileobj.read()
            finally:
                # Only close handles this function opened itself.
                if opened_here:
                    fileobj.close()
            a = {
                "filename": filename,
                "content_type": guess_type(filename)[0] or 'application/octet-stream',
                "content": content,
            }
        if isinstance(a['content'], binary_type):
            a['content'] = b64encode(a['content']).decode()
            a['encoded'] = True
        yield a
|
java
|
// True when |value| is smaller than one ULP of itself, i.e. the value is
// indistinguishable from zero at its own magnitude.
// NOTE: the @Inline expression must stay textually in sync with the body.
@Pure
@Inline(value = "Math.abs($1) < Math.ulp($1)", imported = Math.class)
public static boolean isEpsilonZero(double value) {
    return Math.abs(value) < Math.ulp(value);
}
|
python
|
def calculate_sources(self, targets):
    """Collect the Python source files owned by ``targets``.

    Returns the set of buildroot-relative paths that end with the
    Python source extension.
    """
    python_sources = set()
    for target in targets:
        for source in target.sources_relative_to_buildroot():
            if source.endswith(self._PYTHON_SOURCE_EXTENSION):
                python_sources.add(source)
    return python_sources
|
java
|
private boolean shouldMergeTo(Address thisAddress, Address targetAddress) {
    // Deterministic tie-break between two cluster members: the side whose
    // "[host]:port" string is lexicographically greater merges into the other.
    String localKey = "[" + thisAddress.getHost() + "]:" + thisAddress.getPort();
    String targetKey = "[" + targetAddress.getHost() + "]:" + targetAddress.getPort();
    if (localKey.equals(targetKey)) {
        throw new IllegalArgumentException("Addresses should be different! This: "
                + thisAddress + ", Target: " + targetAddress);
    }
    // The strings are guaranteed different here, so compareTo is non-zero.
    return localKey.compareTo(targetKey) > 0;
}
|
java
|
protected SMailPostalPersonnel createPostalPersonnel() {
    final SMailDogmaticPostalPersonnel personnel = createDogmaticPostalPersonnel();
    // In mock mode hand back a training instance so no real mail is sent.
    if (fessConfig.isMailSendMock()) {
        return personnel.asTraining();
    }
    return personnel;
}
|
python
|
def calculate_mag_calibration(self, mag_samples):
    """Performs magnetometer calibration.

    Assumes ``mag_samples`` contains samples in the order
    [+x, -x, +y, -y, +z, -z]. Calculates per-axis scale/offset values,
    persists them into the calibration data and refreshes the display.
    """
    # Per-axis extremes by sample index: +x/-x are 0/1, +y/-y are 2/3,
    # +z/-z are 4/5 (per the documented ordering above).
    # Fix: the +z maximum previously read mag_samples[3][2] (the -y sample)
    # instead of mag_samples[4][2] (the +z sample).
    max_vals = [mag_samples[0][0], mag_samples[2][1], mag_samples[4][2]]
    min_vals = [mag_samples[1][0], mag_samples[3][1], mag_samples[5][2]]
    # Hard-iron bias: midpoint of each axis' extremes.
    magbiases = [int((max_vals[i] + min_vals[i]) / 2.0) for i in range(3)]
    # Soft-iron scaling: normalize each axis radius to the average radius.
    magscalings = [(max_vals[i] - min_vals[i]) / 2.0 for i in range(3)]
    avg_rads = sum(magscalings) / 3.0
    magscalings = [avg_rads / magscalings[i] for i in range(3)]
    data = self.calibration_data[self.current_imuid]
    data[self.MAGX_OFFSET] = str(int(magbiases[0]))
    data[self.MAGY_OFFSET] = str(int(magbiases[1]))
    data[self.MAGZ_OFFSET] = str(int(magbiases[2]))
    data[self.MAGX_SCALE] = str(magscalings[0])
    data[self.MAGY_SCALE] = str(magscalings[1])
    data[self.MAGZ_SCALE] = str(magscalings[2])
    data[self.MAG_TIMESTAMP] = datetime.now().isoformat()
    self.write_calibration_data()
    self.update_data_display(self.calibration_data[self.current_imuid])
    self.calibration_state = self.CAL_NONE
|
java
|
/**
 * Finds the extension whose runtime class is exactly {@code type}.
 * Subclasses of {@code type} are deliberately NOT matched.
 *
 * @return the matching extension, or {@code null} if none is registered
 */
public @CheckForNull <U extends T> U get(@Nonnull Class<U> type) {
    for (T extension : this) {
        if (extension.getClass() == type) {
            return type.cast(extension);
        }
    }
    return null;
}
|
java
|
// Advances spans1 until one of its start positions coincides with an end
// position of spans2 (i.e. a spans2 match ends exactly where a spans1 match
// begins). Maintains a set of recently-seen spans2 end positions so that
// several spans1 starts can match the same spans2 ends without rewinding.
// Returns true when such an alignment is found, false when spans1 is
// exhausted; sets noMorePositions when spans2 can no longer contribute.
private boolean goToNextStartPosition() throws IOException {
    int nextSpans1StartPosition;
    while ((nextSpans1StartPosition = spans1.spans
        .nextStartPosition()) != NO_MORE_POSITIONS) {
      if (nextSpans1StartPosition == lastSpans2EndPosition) {
        return true;
      } else {
        // clean up: all remembered spans2 end positions are behind this
        // spans1 start and can never match again.
        if (maximumSpans2EndPosition < nextSpans1StartPosition) {
          previousSpans2EndPositions.clear();
          maximumSpans2EndPosition = -1;
        } else if (previousSpans2EndPositions
            .contains(nextSpans1StartPosition)) {
          return true;
        }
        // try to find match: pull spans2 forward while it still starts
        // before the current spans1 start.
        while (lastSpans2StartPosition < nextSpans1StartPosition) {
          if (lastSpans2StartPosition != NO_MORE_POSITIONS) {
            lastSpans2StartPosition = spans2.spans.nextStartPosition();
          }
          if (lastSpans2StartPosition == NO_MORE_POSITIONS) {
            // spans2 exhausted; without remembered end positions no
            // further match is possible at all.
            if (previousSpans2EndPositions.isEmpty()) {
              noMorePositions = true;
              return false;
            }
          } else {
            lastSpans2EndPosition = spans2.spans.endPosition();
            // Remember end positions at or beyond the current spans1
            // start — they may match this or a later spans1 start.
            if (lastSpans2EndPosition >= nextSpans1StartPosition) {
              previousSpans2EndPositions.add(lastSpans2EndPosition);
              maximumSpans2EndPosition = Math.max(maximumSpans2EndPosition,
                  lastSpans2EndPosition);
            }
            if (nextSpans1StartPosition == lastSpans2EndPosition) {
              return true;
            }
          }
        }
      }
    }
    return false;
}
|
python
|
def get_default_config(self):
    """
    Returns the default collector settings
    """
    # Start from the parent collector's defaults and override the path.
    config = super(ExampleCollector, self).get_default_config()
    config['path'] = 'example'
    return config
|
python
|
def filetypes_info_to_rows_header(infos, attrnames=None, header=None, flag_wrap_description=False,
                                  description_width=40):
    """
    Converts filetype information to a (multiline_rows, header) tuple suitable
    for tabulation.

    **Attention** uses ReST syntax with a "|br|" marker for line breaks; the
    .rst source file must therefore contain:

    .. |br| raw:: html

       <br />

    Args:
        infos: list of FileTypeInfo
        attrnames: list of attribute names (keys of FILE_TYPE_INFO_ATTRS).
            Defaults to all attributes
        header: list of header strings. Defaults to the standard names
        flag_wrap_description: whether to wrap the description text
        description_width: wrap width for the description (only used when
            flag_wrap_description is True)

    Returns:
        tuple: (rows, header): rows is a list of lists
    """
    if attrnames is None:
        attrnames = FILE_TYPE_INFO_ATTRS.keys()
    if header is None:
        header = [FILE_TYPE_INFO_ATTRS[key] for key in attrnames]
    wrapper = None
    if flag_wrap_description:
        wrapper = textwrap.TextWrapper(width=description_width, subsequent_indent="|br| ")
    rows = []
    for info in infos:
        row = []
        for attrname in attrnames:
            value = info[attrname]
            # Only the description column is wrapped, and only on request.
            if attrname == "description" and wrapper is not None:
                value = wrapper.wrap(value)
            row.append(value)
        rows.append(row)
    return rows, header
|
python
|
def plot_vs_mass(dataset, vars, filename, bins=60):
    """ Plot 2D marginalised posteriors of the 'vars' vs the dark matter mass.

    One sigma and two sigma filled contours are drawn (additional contour
    levels give something closer to a heatmap). For anything fancier, write a
    custom plotting function by extending the default plot() method.
    """
    n_vars = len(vars)
    fig, axes = plt.subplots(nrows=n_vars,
                             ncols=1,
                             sharex='col',
                             sharey=False)
    plt.subplots_adjust(wspace=0, hspace=0)
    mass = 'log(m_{\chi})'
    for i, var in enumerate(vars):
        ax = axes[i]
        P = posterior.twoD(dataset + '.h5', mass, var,
                           xlimits=limits(mass), ylimits=limits(var),
                           xbins=bins, ybins=bins)
        # Gaussian smoothing (one bin width per axis) to smooth the contours.
        sigmas = (np.diff(P.ycenters)[0], np.diff(P.xcenters)[0])
        P.pdf = gaussian_filter(P.pdf, sigmas, mode='nearest')
        P.plot(ax, levels=np.linspace(0.9, 0.1, 9))
        ax.set_xlabel(labels('log(m_{\chi})'))
        ax.set_ylabel(labels(var))
    fig.set_size_inches(4, n_vars * 3)
    fig.savefig(filename, dpi=200, bbox_inches='tight')
    plt.close(fig)
|
java
|
// Caches the entity by primary key and pre-populates the two unique finder
// caches (uuid+groupId and classNameId+classPK) so that subsequent
// fetchByUUID_G / fetchByC_C lookups hit the cache directly.
@Override
public void cacheResult(CPDisplayLayout cpDisplayLayout) {
    entityCache.putResult(CPDisplayLayoutModelImpl.ENTITY_CACHE_ENABLED,
        CPDisplayLayoutImpl.class, cpDisplayLayout.getPrimaryKey(),
        cpDisplayLayout);

    finderCache.putResult(FINDER_PATH_FETCH_BY_UUID_G,
        new Object[] { cpDisplayLayout.getUuid(), cpDisplayLayout.getGroupId() },
        cpDisplayLayout);

    finderCache.putResult(FINDER_PATH_FETCH_BY_C_C,
        new Object[] {
            cpDisplayLayout.getClassNameId(), cpDisplayLayout.getClassPK()
        }, cpDisplayLayout);

    // Reset modified-value tracking so the cached copy is treated as clean.
    cpDisplayLayout.resetOriginalValues();
}
|
python
|
def zoom_in_area(self, area):
    """Zoom one level into ``area`` ([corner_x, corner_y]).

    Each corner is [col, row, level]. The area is returned unchanged when
    the corners disagree on level or level 0 is already reached.
    """
    corner_a, corner_b = area[0], area[1]
    level = corner_a[2]
    logger.debug("x = {}".format(corner_a))
    logger.debug("y = {}".format(corner_b))
    logger.debug("level = {}".format(level))
    if level != corner_b[2] or level <= 0:
        return area
    new_level = level - 1
    zoomed_a = self.zoom_in_pixel(corner_a)
    zoomed_b = self.zoom_in_pixel(corner_b)
    # Re-normalize so the first corner holds the minima, the second the maxima.
    new_min = [min(zoomed_a[0], zoomed_b[0]),
               min(zoomed_a[1], zoomed_b[1]),
               new_level]
    new_max = [max(zoomed_a[0], zoomed_b[0]),
               max(zoomed_a[1], zoomed_b[1]),
               new_level]
    return [new_min, new_max]
|
java
|
// Opens the FTP control channel. Tries every resolved IP for 'host', making
// two full passes over the address list before giving up; on success the
// 'host' field is rewritten to the address actually connected to and the
// initial server replies are consumed.
public void open() throws IOException, ServerException {
    if (hasBeenOpened()) {
        throw new IOException("Attempt to open an already opened connection");
    }
    InetAddress allIPs[];

    // depending on constructor used, we may already have streams
    if (!haveStreams()) {
        boolean found = false;
        int i = 0;
        boolean firstPass = true;
        allIPs = InetAddress.getAllByName(host);

        while (!found) {
            try {
                logger.debug("opening control channel to "
                    + allIPs[i] + " : " + port);
                InetSocketAddress isa =
                    new InetSocketAddress(allIPs[i], port);

                socket = new Socket();
                socket.setSoTimeout(CoGProperties.getDefault().getSocketTimeout());
                socket.connect(isa, CoGProperties.getDefault().getSocketTimeout());
                found = true;
            } catch (IOException ioEx) {
                logger.debug("failed connecting to "
                    + allIPs[i] + " : " + port + ":" + ioEx);
                i++;
                if (i == allIPs.length) {
                    // Exhausted the list: retry once from the top, then
                    // propagate the last failure.
                    if (firstPass) {
                        firstPass = false;
                        i = 0;
                    } else {
                        throw ioEx;
                    }
                }
            }
        }

        // Record which address we actually connected to; the property
        // selects numeric IP vs. canonical host name form.
        String pv = System.getProperty("org.globus.ftp.IPNAME");
        if (pv != null) {
            host = socket.getInetAddress().getHostAddress();
        } else {
            host = socket.getInetAddress().getCanonicalHostName();
        }

        setInputStream(socket.getInputStream());
        setOutputStream(socket.getOutputStream());
    }
    // Consume the server greeting before marking the connection usable.
    readInitialReplies();
    hasBeenOpened = true;
}
|
python
|
def get_texts_box(texts, fs):
    """Approximation of multiple texts bounds"""
    # Bounds are driven by the longest text at the given font size.
    longest = max(len(text) for text in texts)
    return (fs, text_len(longest, fs))
|
java
|
/**
 * Restores 2D coordinates for every atom index recorded on the stack,
 * copying them back from the saved snapshot {@code src}.
 */
private void restoreCoords(IntStack stack, Point2d[] src) {
    for (int i = 0; i < stack.len; i++) {
        final int idx = stack.xs[i];
        final Point2d point = atoms[idx].getPoint2d();
        point.x = src[idx].x;
        point.y = src[idx].y;
    }
}
|
java
|
/**
 * Parses {@code fields[...]} query parameters into per-resource-type sets of
 * sparse field names.
 *
 * <p>Improvement: repeated resource types are merged via a single
 * get-or-create of the mutable set, replacing the previous
 * containsKey/get/addAll/re-put sequence (the re-put was redundant since the
 * set is mutated in place).
 *
 * @param context the query parameters to parse
 * @return immutable map of resource type to its requested fields
 * @throws ParametersDeserializationException if more than one nesting level
 *         is used, e.g. {@code fields[tasks][name]}
 */
protected TypedParams<IncludedFieldsParams> parseIncludedFieldsParameters(final QueryParamsParserContext context) {
    String sparseKey = RestrictedQueryParamsMembers.fields.name();
    Map<String, Set<String>> sparse = filterQueryParamsByKey(context, sparseKey);
    Map<String, Set<String>> temporarySparseMap = new LinkedHashMap<>();
    for (Map.Entry<String, Set<String>> entry : sparse.entrySet()) {
        List<String> propertyList = buildPropertyListFromEntry(entry, sparseKey);
        // Only one nesting level is permitted: fields[resourceType]=a,b,...
        if (propertyList.size() > 1) {
            throw new ParametersDeserializationException("Exceeded maximum level of nesting of 'fields' " +
                "parameter (1) eg. fields[tasks][name] <-- #2 level and more are not allowed");
        }
        String resourceType = propertyList.get(0);
        Set<String> resourceParams = temporarySparseMap.get(resourceType);
        if (resourceParams == null) {
            resourceParams = new LinkedHashSet<>();
            temporarySparseMap.put(resourceType, resourceParams);
        }
        resourceParams.addAll(entry.getValue());
    }
    Map<String, IncludedFieldsParams> decodedSparseMap = new LinkedHashMap<>();
    for (Map.Entry<String, Set<String>> resourceTypesMap : temporarySparseMap.entrySet()) {
        Set<String> sparseSet = Collections.unmodifiableSet(resourceTypesMap.getValue());
        decodedSparseMap.put(resourceTypesMap.getKey(), new IncludedFieldsParams(sparseSet));
    }
    return new TypedParams<>(Collections.unmodifiableMap(decodedSparseMap));
}
|
python
|
def discover_group(group, separator="/", exclude=None):
    """Produce a list of all services and their addresses in a group

    A group is an optional form of namespace within the discovery mechanism.
    An advertised name of the form <group><sep><name> is deemed to belong to
    <group>, although the service's full name remains <group><sep><name>.
    Groups exist purely to assist discovery and differentiation (e.g. within
    a classroom group).

    :param group: the name of a group prefix
    :param separator: the separator character [/]
    :param exclude: an iterable of names to exclude (or None)
    :returns: a list of 2-tuples [(name, address), ...]
    """
    _start_beacon()
    excluded_names = set() if exclude is None else set(exclude)
    prefix = "%s%s" % (group, separator)
    return [(name, address)
            for name, address in _rpc("discover_all")
            if name.startswith(prefix) and name not in excluded_names]
|
java
|
// Marks the extension as installed on the given namespace, persists the
// updated descriptor, validates it, and refreshes the in-memory caches.
// The ordering matters: setInstalled must run before the namespace
// properties are populated, and persistence before validation/caching.
private void applyInstallExtension(DefaultInstalledExtension installedExtension, String namespace,
    boolean dependency, Map<String, Object> properties, Map<String, ExtensionDependency> managedDependencies)
    throws InstallException
{
    // INSTALLED
    installedExtension.setInstalled(true, namespace);
    installedExtension.setInstallDate(new Date(), namespace);

    // DEPENDENCY
    installedExtension.setDependency(dependency, namespace);

    // Add custom install properties for the specified namespace. The map holding the namespace properties should
    // not be null because it is initialized by the InstalledExtension#setInstalled(true, namespace) call above.
    installedExtension.getNamespaceProperties(namespace).putAll(properties);

    // Save properties
    try {
        this.localRepository.setProperties(installedExtension.getLocalExtension(),
            installedExtension.getProperties());
    } catch (Exception e) {
        throw new InstallException("Failed to modify extension descriptor", e);
    }

    // VALID
    installedExtension.setValid(namespace, isValid(installedExtension, namespace, managedDependencies));

    // Update caches
    addInstalledExtension(installedExtension, namespace);
}
|
python
|
def html_print_single_text(self, catalog, cdli_number, destination):
    """
    Prints text_file in html.

    :param catalog: CDLICorpus().catalog
    :param cdli_number: which text you want printed
    :param destination: where you wish to save the HTML data
    :return: output in html_file.html.

    Fix: the destination is now opened with mode 'w' instead of 'r+'.
    'r+' raised FileNotFoundError when the destination did not exist and
    left stale trailing bytes when overwriting a longer existing file.
    """
    if cdli_number in catalog:
        pnum = catalog[cdli_number]['pnum']
        edition = catalog[cdli_number]['edition']
        metadata = '<br>\n'.join(catalog[cdli_number]['metadata'])
        transliteration = '<br>\n'.join(catalog[cdli_number]['transliteration'])
        normalization = '<br>\n'.join(catalog[cdli_number]['normalization'])
        translation = '<br>\n'.join(catalog[cdli_number]['translation'])
        self.html_single = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{edition}</title>
</head>
<body><table cellpadding="10"; border="1">
<tr><th>
<h2>{edition}<br>{pnum}</h2>
</th><th>
<h3>transliteration</h3>
</th><th>
<h3>normalization</h3>
</th><th>
<h3>translation</h3>
</tr><tr><td>
{metadata}</td><td>
<p>{trans}
</td><td>
<p>{norm}
</td><td>
<font size='2'>
{translation}
</font></td></tr>
</table>
<br>
</body>
</html>""".format(
            pnum=pnum, edition=edition, metadata=metadata,
            trans=transliteration, norm=normalization,
            translation=translation)
        with open(destination, mode='w', encoding='utf8') as t_f:
            t_f.write(self.html_single)
|
java
|
/**
 * Looks up a built-in function by name.
 *
 * <p>Rewritten from a 23-branch if/else chain to a {@code switch} on the
 * name (Java 7+). Behavior is identical, including throwing a
 * {@code NullPointerException} for a {@code null} name and returning
 * {@code null} for unknown names.
 *
 * @param name the function name, e.g. {@code "sin"}
 * @return the matching built-in {@code Function}, or {@code null}
 */
public static Function getBuiltinFunction(final String name) {
    switch (name) {
        case "sin":    return builtinFunctions[INDEX_SIN];
        case "cos":    return builtinFunctions[INDEX_COS];
        case "tan":    return builtinFunctions[INDEX_TAN];
        case "cot":    return builtinFunctions[INDEX_COT];
        case "asin":   return builtinFunctions[INDEX_ASIN];
        case "acos":   return builtinFunctions[INDEX_ACOS];
        case "atan":   return builtinFunctions[INDEX_ATAN];
        case "sinh":   return builtinFunctions[INDEX_SINH];
        case "cosh":   return builtinFunctions[INDEX_COSH];
        case "tanh":   return builtinFunctions[INDEX_TANH];
        case "abs":    return builtinFunctions[INDEX_ABS];
        case "log":    return builtinFunctions[INDEX_LOG];
        case "log10":  return builtinFunctions[INDEX_LOG10];
        case "log2":   return builtinFunctions[INDEX_LOG2];
        case "log1p":  return builtinFunctions[INDEX_LOG1P];
        case "ceil":   return builtinFunctions[INDEX_CEIL];
        case "floor":  return builtinFunctions[INDEX_FLOOR];
        case "sqrt":   return builtinFunctions[INDEX_SQRT];
        case "cbrt":   return builtinFunctions[INDEX_CBRT];
        case "pow":    return builtinFunctions[INDEX_POW];
        case "exp":    return builtinFunctions[INDEX_EXP];
        case "expm1":  return builtinFunctions[INDEX_EXPM1];
        case "signum": return builtinFunctions[INDEX_SGN];
        default:       return null;
    }
}
|
java
|
// Debug helper: prints every RBBI state table, the character categories
// and the source rules to the given stream.
void dump(java.io.PrintStream out) {
    if (fFTable.length == 0) {
        // There is no table. Fail early for testing purposes.
        throw new NullPointerException();
    }
    out.println("RBBI Data Wrapper dump ...");
    out.println();
    out.println("Forward State Table");
    dumpTable(out, fFTable);
    out.println("Reverse State Table");
    dumpTable(out, fRTable);
    out.println("Forward Safe Points Table");
    dumpTable(out, fSFTable);
    out.println("Reverse Safe Points Table");
    dumpTable(out, fSRTable);

    dumpCharCategories(out);
    out.println("Source Rules: " + fRuleSource);
}
|
python
|
def get_vnetwork_dvs_output_vnetwork_dvs_interface_name(self, **kwargs):
    """Auto Generated Code

    Builds the <get_vnetwork_dvs> request element with the given
    ``interface_name`` and dispatches it through ``callback``
    (defaults to ``self._callback``).
    """
    get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
    config = get_vnetwork_dvs
    output = ET.SubElement(get_vnetwork_dvs, "output")
    vnetwork_dvs = ET.SubElement(output, "vnetwork-dvs")
    interface_name = ET.SubElement(vnetwork_dvs, "interface-name")
    interface_name.text = kwargs.pop('interface_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
java
|
/**
 * Returns the average of all recorded timings in milliseconds.
 *
 * <p>Fix: with no samples the previous implementation computed 0.0/0
 * and returned {@code NaN}; it now returns 0.0 for an empty history.
 */
public synchronized double getAverageMS()
{
    if (m_times.size() == 0)
    {
        return 0.0; // no samples yet; avoid 0/0 = NaN
    }
    double sum = 0.0;
    for (Double d : m_times.m_values)
    {
        sum += d;
    }
    return sum / m_times.size();
}
|
java
|
/**
 * Extracts the client certificate value from the configured request header.
 * Returns {@code null} when the header is blank or carries the literal
 * placeholder {@code "(null)"} that some proxies send when no cert exists.
 */
private String getCertFromHeader(final HttpServletRequest request) {
    val certHeaderValue = request.getHeader(sslClientCertHeader);
    val missing = StringUtils.isBlank(certHeaderValue)
        || "(null)".equalsIgnoreCase(certHeaderValue);
    return missing ? null : StringUtils.trim(certHeaderValue);
}
|
java
|
/**
 * Resolves the Windup home directory from the {@code WINDUP_HOME} system
 * property, falling back (with a warning) to the current working directory.
 */
public static Path getWindupHome()
{
    String windupHome = System.getProperty(WINDUP_HOME);
    if (windupHome != null)
    {
        return Paths.get(windupHome);
    }
    Path fallback = new File("").toPath();
    LOG.warning("$WINDUP_HOME not set, using [" + fallback.toAbsolutePath().toString() + "] instead.");
    return fallback;
}
|
python
|
def to_dict(self, include_null=True):
    """
    Convert this instance to a plain ``dict``.

    With ``include_null`` the mapping view ``self.items()`` is used as-is;
    otherwise only the instance attributes are returned, skipping
    SQLAlchemy-internal ones (``_sa_*`` prefix).
    """
    if include_null:
        return dict(self.items())
    result = {}
    for attr, value in self.__dict__.items():
        if attr.startswith("_sa_"):
            continue
        result[attr] = value
    return result
|
java
|
/**
 * Appends the given warnings, lazily creating the backing list on first use.
 *
 * @return this object for call chaining
 */
public ValidationWarning withWarnings(String... warnings) {
    if (this.warnings == null) {
        setWarnings(new com.amazonaws.internal.SdkInternalList<String>(warnings.length));
    }
    for (int i = 0; i < warnings.length; i++) {
        this.warnings.add(warnings[i]);
    }
    return this;
}
|
java
|
// ANTLR-generated lexer rule: matches the literal keyword 'DURATION' and
// emits it as a DURATION token on the default channel. Do not hand-edit
// the matching logic; regenerate from the grammar instead.
public final void mDURATION() throws RecognitionException {
    try {
        int _type = DURATION;
        int _channel = DEFAULT_TOKEN_CHANNEL;
        // druidG.g:616:9: ( ( 'DURATION' ) )
        // druidG.g:616:11: ( 'DURATION' )
        {
        // druidG.g:616:11: ( 'DURATION' )
        // druidG.g:616:12: 'DURATION'
        {
        match("DURATION");

        }

        }

        state.type = _type;
        state.channel = _channel;
    }
    finally {
        // do for sure before leaving
    }
}
|
java
|
// Deprecated ICU entry point: wraps the char[] ranges in a CharBuffer and a
// bounded appendable, then delegates to the replacement Normalizer2 API.
// Returns the number of chars written to 'dest'.
@Deprecated
public static int normalize(char[] src,int srcStart, int srcLimit,
                            char[] dest,int destStart, int destLimit,
                            Mode mode, int options) {
    CharBuffer srcBuffer = CharBuffer.wrap(src, srcStart, srcLimit - srcStart);
    CharsAppendable app = new CharsAppendable(dest, destStart, destLimit);
    mode.getNormalizer2(options).normalize(srcBuffer, app);
    return app.length();
}
|
python
|
def createCluster(self, clusterName, machineNames="", tcpClusterPort=""):
    """
    Creates a new cluster on the site.

    Creating a cluster defines a clustering protocol shared by all server
    machines participating in it. Both the protocol and the initial machine
    list are optional; when omitted, the server picks the default protocol
    and selects non-conflicting port numbers. Machines can later be added
    (to increase compute power) or removed (to reduce it) dynamically.

    Inputs:
        clusterName - The name of the cluster. Must be unique within a site.
        machineNames - Optional comma-separated list of server machines to
            add to this cluster.
        tcpClusterPort - TCP port used by all server machines to talk to
            each other under the (default) TCP clustering protocol. When
            missing, a suitable default is used.
    """
    params = {
        "f": "json",
        "clusterName": clusterName,
        "machineNames": machineNames,
        "tcpClusterPort": tcpClusterPort,
    }
    return self._post(url=self._url + "/create",
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
|
java
|
// Compares a single code point against a string under the given options,
// by converting the code point to its String form and delegating to the
// shared comparison implementation.
public static int compare(int char32a, String str2, int options) {
    return internalCompare(UTF16.valueOf(char32a), str2, options);
}
|
java
|
// Returns the cached unboxed runtime representation for the given Soy type,
// or absent when the type has no primitive form.
public static Optional<SoyRuntimeType> getUnboxedType(SoyType soyType) {
    // Optional is immutable so Optional<Subclass> can always be safely cast to Optional<SuperClass>
    @SuppressWarnings({"unchecked", "rawtypes"})
    Optional<SoyRuntimeType> typed = (Optional) primitiveTypeCache.getUnchecked(soyType);
    return typed;
}
|
java
|
// Converts a duration to nanoseconds; a null unit marks "not configured"
// and yields the UNSET_INT sentinel instead.
static long durationInNanos(long duration, @Nullable TimeUnit unit) {
    if (unit == null) {
        return UNSET_INT;
    }
    return unit.toNanos(duration);
}
|
python
|
def filter_by_status(weather_list, status, weather_code_registry):
    """
    Filters the provided list of *Weather* objects down to those whose
    status corresponds to the provided one. The lookup is performed against
    the provided *WeatherCodeRegistry* object.

    :param weather_list: a list of *Weather* objects
    :type weather_list: list
    :param status: a string indicating a detailed weather status
    :type status: str
    :param weather_code_registry: a *WeatherCodeRegistry* object
    :type weather_code_registry: *WeatherCodeRegistry*
    :returns: a list of the matching *Weather* objects
    """
    return [weather for weather in weather_list
            if status_is(weather, status, weather_code_registry)]
|
python
|
def get_text(self, position_from, position_to):
    """
    Return text between *position_from* and *position_to*.

    Positions may be integer positions or one of 'sol', 'eol', 'sof',
    'eof' or 'cursor'. Trailing newlines / paragraph separators are
    stripped unless the whole document ('sof' -> 'eof') was requested.
    """
    cursor = self.__select_text(position_from, position_to)
    text = to_text_string(cursor.selectedText())
    whole_document = position_from == 'sof' and position_to == 'eof'
    if not text or whole_document:
        return text
    while text.endswith("\n"):
        text = text[:-1]
    # Qt represents line breaks in selections as U+2029 (paragraph separator).
    while text.endswith(u"\u2029"):
        text = text[:-1]
    return text
|
java
|
// Delegates to the generic property setter; no String-specific handling
// is required for JMS string properties.
@Override
public final void setStringProperty(String name, String value) throws JMSException
{
    setProperty(name,value);
}
|
python
|
def _setProperty(self, _type, data, win=None, mask=None):
    """
    Send a ClientMessage event to the root window.

    ``data`` may be a string (8-bit format) or a list of up to five
    integers (32-bit format, zero-padded to exactly five entries).
    """
    target = win if win else self.root
    if type(data) is str:
        data_size = 8
    else:
        # ClientMessage requires exactly five 32-bit data entries.
        data = (data + [0] * (5 - len(data)))[:5]
        data_size = 32
    event = protocol.event.ClientMessage(
        window=target,
        client_type=self.display.get_atom(_type),
        data=(data_size, data))
    if not mask:
        mask = (X.SubstructureRedirectMask | X.SubstructureNotifyMask)
    self.root.send_event(event, event_mask=mask)
|
python
|
def from_bytes(cls, bitstream):
    r'''
    Parse the given packet and update properties accordingly

    >>> data_hex = ('80000000'
    ...             '6e000000004811402a0086400001ffff'
    ...             '000000000000000a2a02000000000000'
    ...             '0000000000000000'
    ...             '10f610f600487396'
    ...             '10000201ee924adef97a97d700000001'
    ...             '57c3c44d00015f61535d0002200109e0'
    ...             '85000b000000000000000001000f0002'
    ...             '2a020000000000000000000000000000')
    >>> data = data_hex.decode('hex')
    >>> message = EncapsulatedControlMessage.from_bytes(data)
    >>> message.security
    False
    >>> message.ddt_originated
    False
    >>> bytes(message.payload)
    ... # doctest: +ELLIPSIS
    'n\x00\x00\x00\x00H\x11...\x00\x00'
    '''
    packet = cls()

    # Convert to ConstBitStream (if not already provided)
    if not isinstance(bitstream, ConstBitStream):
        if isinstance(bitstream, Bits):
            bitstream = ConstBitStream(auto=bitstream)
        else:
            bitstream = ConstBitStream(bytes=bitstream)

    # Read the type (4 bits) and reject mismatched message types early.
    type_nr = bitstream.read('uint:4')
    if type_nr != packet.message_type:
        msg = 'Invalid bitstream for a {0} packet'
        class_name = packet.__class__.__name__
        raise ValueError(msg.format(class_name))

    # Read the flags (security bit, ddt-originated bit)
    (packet.security,
     packet.ddt_originated) = bitstream.readlist('2*bool')

    # Read reserved bits (kept verbatim for round-tripping)
    packet._reserved1 = bitstream.read(26)

    # If the security flag is set then there should be security data here
    # TODO: deal with security flag [LISP-Security]
    if packet.security:
        raise NotImplementedError('Handling security data is not ' +
                                  'implemented yet')

    # The rest of the packet is payload
    remaining = bitstream[bitstream.pos:]

    # Parse IP packet: dispatch on the IP version nibble; anything that is
    # neither IPv4 nor IPv6 is kept as raw bytes.
    if len(remaining):
        ip_version = remaining.peek('uint:4')
        if ip_version == 4:
            packet.payload = IPv4Packet.from_bytes(remaining)
        elif ip_version == 6:
            packet.payload = IPv6Packet.from_bytes(remaining)
        else:
            packet.payload = remaining.bytes

    # Verify that the properties make sense
    packet.sanitize()

    return packet
|
java
|
// Translates a VDM pattern into an equivalent expression via the
// PatternToExpVisitor.
// NOTE(review): an AnalysisException from the visitor is swallowed and
// signalled only by a null return — callers must null-check; consider
// logging the failure upstream.
protected PExp patternToExp(PPattern pattern, IPogAssistantFactory af,
    UniqueNameGenerator unq)
{
    PatternToExpVisitor visitor = new PatternToExpVisitor(unq, af);
    try
    {
        return pattern.apply(visitor);
    } catch (AnalysisException e)
    {
        return null;
    }
}
|
python
|
def datasets_create_new(self, dataset_new_request, **kwargs):  # noqa: E501
    """Create a new dataset  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.datasets_create_new(dataset_new_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param DatasetNewRequest dataset_new_request: Information for creating a new dataset (required)
    :return: Result
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return the delegate's result directly:
    # the underlying call yields the data synchronously, or the request
    # thread when async_req is set.
    return self.datasets_create_new_with_http_info(dataset_new_request, **kwargs)  # noqa: E501
|
python
|
def __execute_bsh(self, instr):
    """Execute BSH instruction.

    Shifts op0 left when the shift operand is non-negative, otherwise
    right by the magnitude of the (two's-complement) negative amount.
    """
    shiftee = self.read_operand(instr.operands[0])
    shift_amount = self.read_operand(instr.operands[1])
    shift_size = instr.operands[1].size

    # The sign bit of the shift operand selects the direction.
    if extract_sign_bit(shift_amount, shift_size) == 0:
        result = shiftee << shift_amount
    else:
        result = shiftee >> twos_complement(shift_amount, shift_size)

    self.write_operand(instr.operands[2], result)

    return None
|
java
|
// Recovery path for a failed method dispatch on a GroovyObject: unwrap a
// MissingMethodExecutionFailed, or fall back to the receiver's own
// invokeMethod() when the failure refers to exactly this receiver/method;
// otherwise the original exception is rethrown unchanged.
public static Object invokeGroovyObjectInvoker(MissingMethodException e, Object receiver, String name, Object[] args) {
    if (e instanceof MissingMethodExecutionFailed) {
        throw (MissingMethodException)e.getCause();
    } else if (receiver.getClass() == e.getType() && e.getMethod().equals(name)) {
        //TODO: we should consider calling this one directly for MetaClassImpl,
        //      then we save the new method selection

        // in case there's nothing else, invoke the object's own invokeMethod()
        return ((GroovyObject)receiver).invokeMethod(name, args);
    } else {
        throw e;
    }
}
|
python
|
def deprecated(will_be=None, on_version=None, name=None):
    """
    Function decorator that warns about deprecation upon function invocation.

    :param will_be: str describing what will happen to the deprecated callable
    :param on_version: tuple representing a software version
    :param name: explicit name of the deprecated entity (useful when
        decorating ``__init__`` so the class name can be shown)
    :return: callable decorator
    """
    def outer_function(function):
        deprecated_name = name if name is not None else function.__name__
        warning_msg = '"%s" is deprecated.' % deprecated_name
        if will_be is not None and on_version is not None:
            version_str = '.'.join(map(str, on_version))
            warning_msg += " It will be %s on version %s" % (will_be, version_str)

        @wraps(function)
        def inner_function(*args, **kwargs):
            # stacklevel=2 points the warning at the caller, not this wrapper.
            warnings.warn(warning_msg,
                          category=DeprecationWarning,
                          stacklevel=2)
            return function(*args, **kwargs)
        return inner_function
    return outer_function
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.