language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
|---|---|
python
|
def StatFS(self, path=None):
    """Run os.statvfs for a path (OS X and Linux only).

    Note that a statvfs call for a network filesystem (e.g. NFS) that is
    unavailable, e.g. due to no network, will result in the call blocking.

    Args:
      path: a Unicode string containing the path or None. If path is None the
        value in self.path is used.

    Returns:
      posix.statvfs_result object

    Raises:
      RuntimeError: if called on Windows
    """
    if platform.system() == "Windows":
        raise RuntimeError("os.statvfs not available on Windows")
    # Fall back to the instance's own path when none is supplied.
    target = path if path else self.path
    return os.statvfs(client_utils.CanonicalPathToLocalPath(target))
|
python
|
def to_pandas(self):
    """Return a Pandas dataframe of the minimum spanning tree.

    Each row is an edge in the tree; the columns are `from`,
    `to`, and `distance` giving the two vertices of the edge
    which are indices into the dataset, and the distance
    between those datapoints.
    """
    try:
        from pandas import DataFrame
    except ImportError:
        raise ImportError('You must have pandas installed to export pandas DataFrames')

    # Columns of the (n_edges, 3) MST array: source, target, weight.
    edges = self._mst.T
    return DataFrame({'from': edges[0].astype(int),
                      'to': edges[1].astype(int),
                      'distance': edges[2]})
|
java
|
@Override
public void acquire(HLock lock) {
    // Delegate to the timed overload with an effectively infinite timeout:
    // Long.MAX_VALUE minus the current wall-clock millis, with a 10s margin
    // so a downstream "now + timeout" deadline computation cannot overflow.
    acquire(lock, Long.MAX_VALUE - System.currentTimeMillis() - 10000);
}
|
python
|
def unreduce_array(array, shape, axis, keepdims):
    """Reverse summing over a dimension, NumPy implementation.

    Args:
      array: The array that was reduced.
      shape: The original shape of the array before reduction.
      axis: The axis or axes that were summed.
      keepdims: Whether these axes were kept as singleton axes.

    Returns:
      An array with axes broadcast to match the shape of the original array.
    """
    # NumPy uses a special default value for keepdims, which is equivalent to
    # False.
    axes_were_dropped = not keepdims or keepdims is numpy._NoValue  # pylint: disable=protected-access
    if axis is not None and axes_were_dropped:
        # Re-insert each reduced axis as a singleton so broadcasting lines up.
        axes = (axis,) if isinstance(axis, int) else axis
        for ax in sorted(axes):
            array = numpy.expand_dims(array, ax)
    return numpy.broadcast_to(array, shape)
|
java
|
/**
 * Serializes a runtime event into a ByteBuffer. Built-in event types are
 * encoded as a 4-byte tag (three zero bytes plus the event id constant);
 * unknown types are tagged OTHER_EVENT followed by the class name and the
 * event's own serialization.
 */
public static ByteBuffer toSerializedEvent(AbstractEvent event) throws IOException {
    final Class<?> eventClass = event.getClass();
    if (eventClass == EndOfPartitionEvent.class) {
        return ByteBuffer.wrap(new byte[] { 0, 0, 0, END_OF_PARTITION_EVENT });
    }
    else if (eventClass == CheckpointBarrier.class) {
        return serializeCheckpointBarrier((CheckpointBarrier) event);
    }
    else if (eventClass == EndOfSuperstepEvent.class) {
        return ByteBuffer.wrap(new byte[] { 0, 0, 0, END_OF_SUPERSTEP_EVENT });
    }
    else if (eventClass == CancelCheckpointMarker.class) {
        // 4-byte tag then the 8-byte checkpoint id; absolute puts keep the
        // buffer position at 0, so no flip is needed.
        CancelCheckpointMarker marker = (CancelCheckpointMarker) event;
        ByteBuffer buf = ByteBuffer.allocate(12);
        buf.putInt(0, CANCEL_CHECKPOINT_MARKER_EVENT);
        buf.putLong(4, marker.getCheckpointId());
        return buf;
    }
    else {
        // Generic path: tag, fully-qualified class name, event payload.
        try {
            final DataOutputSerializer serializer = new DataOutputSerializer(128);
            serializer.writeInt(OTHER_EVENT);
            serializer.writeUTF(event.getClass().getName());
            event.write(serializer);
            return serializer.wrapAsByteBuffer();
        }
        catch (IOException e) {
            // Re-wrap so the caller sees a uniform failure message.
            throw new IOException("Error while serializing event.", e);
        }
    }
}
|
java
|
/**
 * Builds a function that invokes the named method on its input object and
 * returns the result as a Set of the given element type. Delegates to
 * methodForSetOf; see that method for the parameter semantics.
 */
public static <R> Function<Object,Set<R>> setOf(final Type<R> resultType, final String methodName, final Object... optionalParameters) {
    return methodForSetOf(resultType, methodName, optionalParameters);
}
|
python
|
def log_spewer(self, gconfig, fd):
    '''Child process to manage logging.

    This reads pairs of lines from `fd`, which are alternating
    priority (Python integer) and message (unformatted string).

    :param gconfig: global configuration passed to yakonfig.
    :param fd: file descriptor to read packed log records from.
    '''
    setproctitle('rejester fork_worker log task')
    yakonfig.set_default_config([yakonfig, dblogger], config=gconfig)
    try:
        while True:
            # Fixed-size header: log level and message length, packed 'ii'.
            prefix = os.read(fd, struct.calcsize('ii'))
            level, msglen = struct.unpack('ii', prefix)
            msg = os.read(fd, msglen)
            logger.log(level, msg)
    except Exception as e:
        # Fixed: the old 'except Exception, e:' comma form is a syntax
        # error on Python 3; 'as' works on Python 2.6+ and 3.
        logger.critical('log writer failed', exc_info=e)
        raise
|
java
|
/**
 * Appends the Ekstazi marker line plus all non-affected classes to the
 * plugin's configured excludes file, after first restoring the file's
 * original content in case a previous run left it modified.
 */
private void appendExcludesListToExcludesFile(Plugin plugin, List<String> nonAffectedClasses) throws MojoExecutionException {
    String excludesFileName = extractParamValue(plugin, EXCLUDES_FILE_PARAM_NAME);
    File excludesFile = new File(excludesFileName);
    // First restore file in case it has been modified by this
    // plugin before (if 'restore' was not run, or VM crashed).
    restoreExcludesFile(plugin);
    PrintWriter writer = null;
    try {
        // Append mode: the original content has to be restored on
        // shutdown; see RestoreMojo.
        writer = new PrintWriter(new FileOutputStream(excludesFile, true), true);
        writer.println(EKSTAZI_LINE_MARKER);
        for (String excludedClass : nonAffectedClasses) {
            writer.println(excludedClass);
        }
        // If "exclude(s)" is not present, also add the default pattern
        // that excludes inner classes.
        if (!isAtLeastOneExcludePresent(plugin)) {
            writer.println("**/*$*");
        }
    } catch (IOException ex) {
        throw new MojoExecutionException("Could not access excludesFile", ex);
    } finally {
        if (writer != null) {
            writer.close();
        }
    }
}
|
java
|
/**
 * Transcodes a clone of the current SVG document into the given file.
 *
 * @param file destination file for the transcoded output
 * @param transcoder the Batik transcoder to use
 * @throws IOException if the file cannot be written
 * @throws TranscoderException if transcoding fails
 */
protected void transcode(File file, Transcoder transcoder) throws IOException, TranscoderException {
    // Disable validation, performance is more important here (thumbnails!)
    transcoder.addTranscodingHint(XMLAbstractTranscoder.KEY_XML_PARSER_VALIDATING, Boolean.FALSE);
    SVGDocument doc = cloneDocument();
    TranscoderInput input = new TranscoderInput(doc);
    OutputStream out = new BufferedOutputStream(new FileOutputStream(file));
    try {
        TranscoderOutput output = new TranscoderOutput(out);
        transcoder.transcode(input, output);
        out.flush();
    } finally {
        // Fixed: the original leaked the stream (and its file handle)
        // whenever transcode() threw; always close it.
        out.close();
    }
}
|
java
|
/**
 * Looks up the summoner id for the given name through the Riot API and
 * adds the resulting "sum&lt;id&gt;@pvp.net" JID as a friend in the given
 * group. Returns true on success, false when the API is unavailable or
 * the lookup fails.
 */
public boolean addFriendByName(String name, FriendGroup friendGroup) {
    if (getRiotApi() == null) {
        return false;
    }
    try {
        // Summoner JIDs have the form "sum<id>@pvp.net".
        final String jid = "sum" + getRiotApi().getSummonerId(name) + "@pvp.net";
        addFriendById(jid, name, friendGroup);
        return true;
    } catch (IOException | URISyntaxException e) {
        e.printStackTrace();
        return false;
    }
}
|
python
|
def cli(env, identifier, count):
    """Get details for a ticket.

    :param env: CLI environment providing the API client and output sink.
    :param identifier: ticket id (or resolvable name) to look up.
    :param count: number of ticket updates to include in the output.
    """
    mgr = SoftLayer.TicketManager(env.client)
    # Resolve a possibly-symbolic identifier to a concrete ticket id.
    ticket_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'ticket')
    # NOTE(review): 'ticket' is presumably a module imported at file level
    # (not visible in this chunk) -- confirm it is in scope.
    env.fout(ticket.get_ticket_results(mgr, ticket_id, update_count=count))
|
python
|
def epcr_parse(self):
    """
    Parse the ePCR outputs
    """
    logging.info('Parsing ePCR outputs')
    for sample in self.metadata:
        if sample.general.bestassemblyfile == 'NA':
            # No assembly: attach an empty results object and mark the
            # toxin profile as not applicable.
            setattr(sample, self.analysistype, GenObject())
            sample[self.analysistype].toxinprofile = 'NA'
            continue
        # Unique verotoxin subtypes detected for this sample
        toxin_set = set()
        if os.path.isfile(sample[self.analysistype].resultsfile):
            with open(sample[self.analysistype].resultsfile) as epcrresults:
                for result in epcrresults:
                    # Lines containing a '#' are comments, not results
                    if "#" in result:
                        continue
                    # Tab-delimited; the subtyping primer pair is the first
                    # field, and its subtype precedes the first underscore
                    vttype = result.split('\t')[0].split('_')[0]
                    toxin_set.add(vttype)
        # Semicolon-joined sorted subtypes, or 'ND' when none were detected
        sample[self.analysistype].toxinprofile = ";".join(sorted(toxin_set)) if toxin_set else 'ND'
|
java
|
/**
 * Rewrites a string character by character via the given transform.
 * A null return from the transform means "keep the character as-is".
 * Returns the original string instance when nothing changed.
 */
public static String collectReplacements(String orig, @ClosureParams(value=SimpleType.class, options="char") Closure<String> transform) {
    if (orig == null) return orig;

    // Buffer is created lazily: while every character maps to itself we
    // allocate nothing and can return the original string unchanged.
    StringBuilder out = null;
    final int len = orig.length();
    for (int idx = 0; idx < len; idx++) {
        final char current = orig.charAt(idx);
        final String mapped = transform.call(current);
        if (mapped == null) {
            // Unchanged character: only copy it once we have diverged.
            if (out != null) {
                out.append(current);
            }
        } else {
            if (out == null) {
                // First divergence: allocate with ~10% headroom and copy
                // the untouched prefix.
                out = new StringBuilder((int) (1.1 * len));
                out.append(orig.substring(0, idx));
            }
            out.append(mapped);
        }
    }
    return out == null ? orig : out.toString();
}
|
python
|
def trigger(self, event, *args):
    """Trigger event by name.

    Invokes every handler registered under ``event`` with ``*args``.
    """
    registered = self._event_handlers[event]
    for callback in registered:
        callback(*args)
|
java
|
/**
 * Tests whether two term occurrences overlap: they must come from the
 * same source document and their [begin, end) spans must intersect.
 */
public static boolean overlaps(TermOccurrence o1, TermOccurrence o2) {
    // Occurrences in different documents can never overlap.
    if (!o1.getSourceDocument().equals(o2.getSourceDocument())) {
        return false;
    }
    // Interval intersection: each span starts before the other ends.
    final boolean firstStartsBeforeSecondEnds = o1.getBegin() < o2.getEnd();
    final boolean secondStartsBeforeFirstEnds = o2.getBegin() < o1.getEnd();
    return firstStartsBeforeSecondEnds && secondStartsBeforeFirstEnds;
}
|
java
|
/**
 * Binds this object to a new inbound HTTP exchange: stores the request
 * and connection, derives the response and its body stream from the
 * connection, and resets the locale to the JVM default.
 */
public void init(HttpInboundConnection conn, RequestMessage req) {
    this.request = req;
    this.response = conn.getResponse();
    this.connection = conn;
    // Wrap the raw response body so callers write through ResponseBody.
    this.outStream = new ResponseBody(this.response.getBody());
    this.locale = Locale.getDefault();
}
|
java
|
/**
 * Appends the execution outcome to the log entry being built, rendered
 * as "Success:True, " or "Success:False, ".
 */
protected void writeResultEntry(StringBuilder sb, ExecutionInfo execInfo, List<QueryInfo> queryInfoList) {
    final String outcome = execInfo.isSuccess() ? "True" : "False";
    sb.append("Success:").append(outcome).append(", ");
}
|
java
|
/**
 * Logs a TRACE message formatted from the given pattern and single
 * argument. Skips all formatting work when TRACE is disabled or the
 * pattern is null.
 */
public void trace( Object messagePattern, Object arg )
{
    // Guard first so the format call is never paid when tracing is off.
    if( !m_delegate.isTraceEnabled() || messagePattern == null )
    {
        return;
    }
    String formatted = MessageFormatter.format( (String) messagePattern, arg );
    m_delegate.trace( formatted, null );
}
|
python
|
def chdir(path):
    """Change the working directory to `path` for the duration of this context
    manager.

    :param str path: The path to change to
    """
    original = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Always restore the previous working directory, even on error.
        os.chdir(original)
|
java
|
/**
 * Builds the table of processed collation elements (PCEs) for the current
 * search pattern and stores the table plus its logical length on pattern_.
 *
 * @return always 0 (the result variable is never modified)
 */
private int initializePatternPCETable() {
    long[] pcetable = new long[INITIAL_ARRAY_SIZE_];
    int pcetablesize = pcetable.length;
    int patternlength = pattern_.text_.length();
    // Reuse the cached iterator when available to avoid reallocating one.
    CollationElementIterator coleiter = utilIter_;
    if (coleiter == null) {
        coleiter = new CollationElementIterator(pattern_.text_, collator_);
        utilIter_ = coleiter;
    } else {
        coleiter.setText(pattern_.text_);
    }
    int offset = 0;
    int result = 0;
    long pce;
    CollationPCE iter = new CollationPCE(coleiter);
    // ** Should processed CEs be signed or unsigned?
    // ** (the rest of the code in this file seems to play fast-and-loose with
    // ** whether a CE is signed or unsigned. For example, look at routine above this one.)
    while ((pce = iter.nextProcessed(null)) != CollationPCE.PROCESSED_NULLORDER) {
        // addToLongArray grows the table as needed and returns the
        // (possibly reallocated) backing array.
        long[] temp = addToLongArray(pcetable, offset, pcetablesize, pce, patternlength - coleiter.getOffset() + 1);
        offset++;
        pcetable = temp;
    }
    // Zero-terminate the table; 'offset' is its logical length.
    pcetable[offset] = 0;
    pattern_.PCE_ = pcetable;
    pattern_.PCELength_ = offset;
    return result;
}
|
java
|
/**
 * Fetches the top-level lists directory from the API and returns the
 * name-to-URL link map, or an empty map when the response has no links.
 */
public Map<String, String> getListsDirectory() throws RottenTomatoesException {
    properties.clear();
    properties.put(ApiBuilder.PROPERTY_URL, URL_LISTS_DIRECTORY);

    WrapperLists wrapper = response.getResponse(WrapperLists.class, properties);
    // Guard both a missing wrapper and a wrapper without links.
    if (wrapper == null || wrapper.getLinks() == null) {
        return Collections.emptyMap();
    }
    return wrapper.getLinks();
}
|
python
|
def describeTopics(self, maxTermsPerTopic=None):
    """Return the topics described by weighted terms.

    WARNING: If vocabSize and k are large, this can return a large object!

    :param maxTermsPerTopic:
      Maximum number of terms to collect for each topic.
      (default: vocabulary size)
    :return:
      Array over topics. Each topic is represented as a pair of
      matching arrays: (term indices, term weights in topic).
      Each topic's terms are sorted in order of decreasing weight.
    """
    # Only forward the limit when the caller supplied one.
    if maxTermsPerTopic is None:
        return self.call("describeTopics")
    return self.call("describeTopics", maxTermsPerTopic)
|
python
|
def last(self, n=1):
    """
    Get the last element of an array. Passing **n** will return the last N
    values in the array.

    :param n: number of trailing elements to return (default 1).
    :returns: the wrapped last element when exactly one element results,
        otherwise the wrapped list of the last n elements.
    """
    res = self.obj[-n:]
    # Fixed: 'len(res) is 1' compared by identity, relying on CPython's
    # small-int caching (an implementation detail that raises a
    # SyntaxWarning on Python 3.8+). Use value equality.
    if len(res) == 1:
        res = res[0]
    return self._wrap(res)
|
java
|
@Override
protected void setProfile(List<Step> sx, List<Step> sy) {
    // Build a simple profile pair from the query/target plus the two step
    // lists, storing it in both the 'profile' and 'pair' fields.
    profile = pair = new SimpleProfilePair<S, C>(getQuery(), getTarget(), sx, sy);
}
|
python
|
def _get_boll(cls, df):
""" Get Bollinger bands.
boll_ub means the upper band of the Bollinger bands
boll_lb means the lower band of the Bollinger bands
boll_ub = MA + Kσ
boll_lb = MA − Kσ
M = BOLL_PERIOD
K = BOLL_STD_TIMES
:param df: data
:return: None
"""
moving_avg = df['close_{}_sma'.format(cls.BOLL_PERIOD)]
moving_std = df['close_{}_mstd'.format(cls.BOLL_PERIOD)]
df['boll'] = moving_avg
moving_avg = list(map(np.float64, moving_avg))
moving_std = list(map(np.float64, moving_std))
# noinspection PyTypeChecker
df['boll_ub'] = np.add(moving_avg,
np.multiply(cls.BOLL_STD_TIMES, moving_std))
# noinspection PyTypeChecker
df['boll_lb'] = np.subtract(moving_avg,
np.multiply(cls.BOLL_STD_TIMES,
moving_std))
|
python
|
def paths_from_version(version):
    """Get the EnergyPlus install directory and executable path.

    Parameters
    ----------
    version : str, optional
        EnergyPlus version in the format "X-X-X", e.g. "8-7-0".

    Returns
    -------
    eplus_exe : str
        Full path to the EnergyPlus executable.
    eplus_home : str
        Full path to the EnergyPlus install directory.
    """
    system = platform.system()
    if system == 'Windows':
        eplus_home = "C:/EnergyPlusV{version}".format(version=version)
        binary_name = 'energyplus.exe'
    elif system == "Linux":
        eplus_home = "/usr/local/EnergyPlus-{version}".format(version=version)
        binary_name = 'energyplus'
    else:
        # Any other platform is treated as macOS.
        eplus_home = "/Applications/EnergyPlus-{version}".format(version=version)
        binary_name = 'energyplus'
    return os.path.join(eplus_home, binary_name), eplus_home
|
python
|
def item_id(response):
    """
    Parse the item ids, will be available as ``item_0_name``, ``item_1_name``,
    ``item_2_name`` and so on
    """
    # Six inventory slots per player: item_0 .. item_5.
    for player in response['players']:
        for slot in range(6):
            slot_key = 'item_{}'.format(slot)
            name_key = 'item_{}_name'.format(slot)
            for item in items['items']:
                if item['id'] == player[slot_key]:
                    player[name_key] = item['localized_name']
    return response
|
java
|
/**
 * Adjusts this channel's blocking mode under the registration lock.
 * No-op when the mode is already the requested one; refuses to enter
 * blocking mode while valid selection keys exist (channel is registered
 * with a selector).
 */
public final SelectableChannel configureBlocking(boolean block)
    throws IOException
{
    synchronized (regLock) {
        if (!isOpen())
            throw new ClosedChannelException();
        if (blocking == block)
            return this;
        if (block && haveValidKeys())
            throw new IllegalBlockingModeException();
        // Let the concrete channel implementation switch the mode before
        // the cached flag is updated.
        implConfigureBlocking(block);
        blocking = block;
    }
    return this;
}
|
java
|
/**
 * Binds a texture to the given target, translating the GL-style integer
 * id into the backing WebGL texture object of the current context.
 */
public static void glBindTexture(int target, int textureID)
{
    checkContextCompatibility();
    nglBindTexture(target, WebGLObjectMap.get().toTexture(textureID));
}
|
java
|
/**
 * Rebuilds every configured search index, reporting progress through a
 * shell report bound to the current request-context locale.
 *
 * @throws Exception if rebuilding any index fails
 */
public void rebuildAllIndexes() throws Exception {
    I_CmsReport report = new CmsShellReport(m_cms.getRequestContext().getLocale());
    OpenCms.getSearchManager().rebuildAllIndexes(report);
}
|
java
|
/**
 * Formats the given cell for the locale and returns only the plain text
 * of the formatted result.
 */
public String formatAsString(final ObjectCell<?> cell, final Locale locale) {
    return format(cell, locale).getText();
}
|
java
|
/**
 * Pages through all site identifiers assigned to the host name: fetches
 * the page behind nextPageLink, then recursively concatenates the
 * remaining pages while the response carries another next-page link.
 *
 * @param nextPageLink link of the page to fetch
 * @return an Observable emitting one ServiceResponse per page
 */
public Observable<ServiceResponse<Page<IdentifierInner>>> listSiteIdentifiersAssignedToHostNameNextWithServiceResponseAsync(final String nextPageLink) {
    return listSiteIdentifiersAssignedToHostNameNextSinglePageAsync(nextPageLink)
        .concatMap(new Func1<ServiceResponse<Page<IdentifierInner>>, Observable<ServiceResponse<Page<IdentifierInner>>>>() {
            @Override
            public Observable<ServiceResponse<Page<IdentifierInner>>> call(ServiceResponse<Page<IdentifierInner>> page) {
                String nextPageLink = page.body().nextPageLink();
                if (nextPageLink == null) {
                    // Last page: terminate the recursion.
                    return Observable.just(page);
                }
                return Observable.just(page).concatWith(listSiteIdentifiersAssignedToHostNameNextWithServiceResponseAsync(nextPageLink));
            }
        });
}
|
java
|
/**
 * Closes every layered input in the chain, detaching one wrapper at a
 * time, until the underlying Transport is reached (which is kept open).
 */
protected void closeInputs() throws IOException {
    for (;;) {
        // Stop as soon as the raw transport is exposed; it stays open.
        if (in instanceof Transport) {
            return;
        }
        in.close();
        in = in.detachInput();
    }
}
|
java
|
/**
 * Flattens an iterable of iterators into a single iterator over all of
 * their elements, in order.
 *
 * @param iteratorsIterable the iterators to combine; must not be null
 * @return a combining iterator over the elements of every inner iterator
 * @throws NullPointerException if iteratorsIterable is null
 */
public static <S extends Iterator<? extends T>, T> Iterator<T>
iteratorOverIterators(Iterable<S> iteratorsIterable)
{
    Objects.requireNonNull(iteratorsIterable,
        "The iteratorsIterable is null");
    return new CombiningIterator<T>(iteratorsIterable.iterator());
}
|
python
|
def parse_url(request, url):
    """Parse url URL parameter.

    Returns the value unchanged when it is already a valid absolute URL;
    otherwise builds one from a leading-slash path or a named URL pattern.
    """
    validator = URLValidator()
    try:
        validator(url)
        return url
    except ValidationError:
        pass
    if url.startswith('/'):
        # Absolute path on this host: rebuild a full URL from the request.
        scheme = 'https' if request.is_secure() else 'http'
        return '{scheme}://{host}{uri}'.format(scheme=scheme,
                                               host=request.get_host(),
                                               uri=url)
    # Otherwise treat the value as a named URL pattern to reverse.
    return request.build_absolute_uri(reverse(url))
|
python
|
def native(self):
    """
    The native Python datatype representation of this value

    :return:
        An integer or None
    """
    if self.contents is None:
        return None
    if self._native is None:
        # Compute once, translating through the value map when one is
        # defined for this instance, then cache the result.
        native_value = self.__int__()
        if self._map is not None and native_value in self._map:
            native_value = self._map[native_value]
        self._native = native_value
    return self._native
|
java
|
/**
 * Always fails: retrieving UDT attribute metadata is not supported by
 * this driver. Verifies the connection is still open first.
 */
@Override
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) throws SQLException
{
    checkClosed();
    throw SQLError.noSupport();
}
|
java
|
/**
 * Serializes a single Character into a 2-byte buffer, rewound so the
 * caller can read it from position 0.
 */
@Override
public ByteBuffer serialize(Character object) {
    // A Java char is exactly two bytes.
    ByteBuffer buffer = ByteBuffer.allocate(2);
    buffer.putChar(object);
    buffer.flip();
    return buffer;
}
|
python
|
def add_target_with_env(self, environment, name=None):
    """Add an SCons target to this nest, with an SCons Environment

    The function decorated will be immediately called with three arguments:

    * ``environment``: A clone of the SCons environment, with variables
      populated for all values in the control dictionary, plus a variable
      ``OUTDIR``.
    * ``outdir``: The output directory
    * ``control``: The control dictionary

    Each result will be added to the respective control dictionary for
    later nests to access.

    Differs from :meth:`SConsWrap.add_target` only by the addition of the
    ``Environment`` clone.
    """
    def deco(func):
        def nestfunc(control):
            env = environment.Clone()
            # Push every control value into the cloned environment,
            # warning when a previously bound name gets replaced.
            for key, value in control.items():
                if key in env:
                    logger.warn("Overwriting previously bound value %s=%s",
                                key, env[key])
                env[key] = value
            destdir = os.path.join(self.dest_dir, control['OUTDIR'])
            env['OUTDIR'] = destdir
            return [func(env, destdir, control)]
        target_name = name or func.__name__
        self.nest.add(target_name, nestfunc, create_dir=False)
        self._register_alias(target_name)
        return func
    return deco
|
python
|
def handle_m2m_field(self, obj, field):
    """
    while easymode follows inverse relations for foreign keys,
    for manytomayfields it follows the forward relation.

    While easymode excludes all relations to "self" you could
    still create a loop if you add one extra level of indirection.
    """
    # Only serialize auto-created through tables (plain M2M fields, not
    # explicit intermediary models).  NOTE(review): field.rel is the
    # pre-Django-2.0 relation API -- confirm the targeted Django version.
    if field.rel.through._meta.auto_created:# and obj.__class__ is not field.rel.to:
        # keep approximate recursion level
        with recursion_depth('handle_m2m_field') as recursion_level:
            # a stack trace is better than python crashing.
            if recursion_level > getattr(settings, 'RECURSION_LIMIT', sys.getrecursionlimit() / 10):
                raise Exception(MANY_TO_MANY_RECURSION_LIMIT_ERROR %
                    (field.name, obj.__class__.__name__, field.rel.to.__name__))
            self._start_relational_field(field)
            # Recursively serialize the related objects into the same XML
            # stream, then close the <field> element opened above.
            s = RecursiveXmlSerializer()
            s.serialize( getattr(obj, field.name).iterator(), xml=self.xml, stream=self.stream)
            self.xml.endElement("field")
|
python
|
def to_string(address, dns_format=False):
    """ Convert address to string

    :param address: WIPV4Address to convert
    :param dns_format: whether to use arpa-format or not
    :return:
    """
    if isinstance(address, WIPV4Address) is False:
        raise TypeError('Invalid address type')
    # NOTE(review): '__address' here is accessed literally; inside a class
    # body this name would be mangled -- confirm the intended attribute.
    octets = [str(int(octet)) for octet in address.__address]
    # Preserve the original's exact check: only a literal False selects the
    # plain dotted form; any other value yields the arpa form.
    if dns_format is False:
        return '.'.join(octets)
    return '.'.join(reversed(octets)) + '.in-addr.arpa'
|
python
|
def add(self, num):
    """
    Adds num to the current value, clamped to [self.fmin, self.fmax].

    If the current value cannot be read or combined with ``num``, ``num``
    itself is used as the new value (best-effort fallback).
    """
    try:
        val = self.value() + num
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; ordinary errors still fall back to num.
        val = num
    self.set(min(self.fmax, max(self.fmin, val)))
|
java
|
/**
 * Steps this combination generator back to the previous combination.
 * bins[0..c] holds the current combination in strictly increasing order;
 * N is the number of selectable items.
 *
 * @return true when a previous combination was produced; false when the
 *         generator was already at the start (in which case the first
 *         combination 0..c is restored into bins).
 */
public boolean previous()
{
    // see if it is back to the start already
    if( state == 0 )
        return false;
    else
        state = 1;
    // Decrement from the last position, borrowing leftward on underflow.
    for( int i = c; i >= 0; i-- ) {
        bins[i]--;
        if( i == 0 ) {
            if( bins[0] < 0 ) {
                state = 0;
                // put it back into its first combination
                for( int j = 0; j < bins.length; j++ ) {
                    bins[j] = j;
                }
                return false;
            }
            break;
        } else if( bins[i] <= bins[i-1] ) {
            // Underflowed past the left neighbour: reset this position to
            // its maximum value and borrow from the next digit left.
            bins[i] = N-1-(c-i);
        } else {
            break;
        }
    }
    return true;
}
|
java
|
/**
 * Scales the image at imagePath down to at most 816x612 while keeping its
 * aspect ratio, corrects EXIF rotation, and writes the result as an 80%
 * quality JPEG into destDirectory.
 *
 * @param imagePath path of the source image
 * @param destDirectory directory the compressed file is written into
 * @return path of the compressed output file
 */
private String compressImage(String imagePath, File destDirectory) {
    Bitmap scaledBitmap = null;
    BitmapFactory.Options options = new BitmapFactory.Options();
    // by setting this field as true, the actual bitmap pixels are not loaded in the memory. Just the bounds are loaded. If
    // you try the use the bitmap here, you will get null.
    options.inJustDecodeBounds = true;
    Bitmap bmp = BitmapFactory.decodeFile(imagePath, options);
    int actualHeight = options.outHeight;
    int actualWidth = options.outWidth;
    // max Height and width values of the compressed image is taken as 816x612
    float maxHeight = 816.0f;
    float maxWidth = 612.0f;
    // Fixed: the original divided two ints (actualWidth / actualHeight),
    // truncating the aspect ratio to 0 or 1 and corrupting the resize
    // decision below. Cast before dividing to get the real ratio.
    float imgRatio = (float) actualWidth / actualHeight;
    float maxRatio = maxWidth / maxHeight;
    // width and height values are set maintaining the aspect ratio of the image
    if (actualHeight > maxHeight || actualWidth > maxWidth) {
        if (imgRatio < maxRatio) {
            // Height is the binding constraint.
            imgRatio = maxHeight / actualHeight;
            actualWidth = (int) (imgRatio * actualWidth);
            actualHeight = (int) maxHeight;
        } else if (imgRatio > maxRatio) {
            // Width is the binding constraint.
            imgRatio = maxWidth / actualWidth;
            actualHeight = (int) (imgRatio * actualHeight);
            actualWidth = (int) maxWidth;
        } else {
            actualHeight = (int) maxHeight;
            actualWidth = (int) maxWidth;
        }
    }
    // setting inSampleSize value allows to load a scaled down version of the original image
    options.inSampleSize = calculateInSampleSize(options, actualWidth, actualHeight);
    // inJustDecodeBounds set to false to load the actual bitmap
    options.inJustDecodeBounds = false;
    // this options allow android to claim the bitmap memory if it runs low on memory
    options.inPurgeable = true;
    options.inInputShareable = true;
    options.inTempStorage = new byte[16 * 1024];
    try {
        // load the bitmap from its path
        bmp = BitmapFactory.decodeFile(imagePath, options);
    } catch (OutOfMemoryError exception) {
        exception.printStackTrace();
    }
    try {
        scaledBitmap = Bitmap.createBitmap(actualWidth, actualHeight, Bitmap.Config.ARGB_8888);
    } catch (OutOfMemoryError exception) {
        exception.printStackTrace();
    }
    // Scale the decoded (sub-sampled) bitmap onto the exact target canvas,
    // centered, with bilinear filtering.
    float ratioX = actualWidth / (float) options.outWidth;
    float ratioY = actualHeight / (float) options.outHeight;
    float middleX = actualWidth / 2.0f;
    float middleY = actualHeight / 2.0f;
    Matrix scaleMatrix = new Matrix();
    scaleMatrix.setScale(ratioX, ratioY, middleX, middleY);
    Canvas canvas = new Canvas(scaledBitmap);
    canvas.setMatrix(scaleMatrix);
    canvas.drawBitmap(bmp, middleX - bmp.getWidth() / 2, middleY - bmp.getHeight() / 2, new Paint(Paint.FILTER_BITMAP_FLAG));
    // check the rotation of the image and display it properly
    ExifInterface exif;
    try {
        exif = new ExifInterface(imagePath);
        int orientation = exif.getAttributeInt(
            ExifInterface.TAG_ORIENTATION, 0);
        Log.d("EXIF", "Exif: " + orientation);
        Matrix matrix = new Matrix();
        // EXIF orientation values 6/3/8 correspond to 90/180/270 degrees.
        if (orientation == 6) {
            matrix.postRotate(90);
            Log.d("EXIF", "Exif: " + orientation);
        } else if (orientation == 3) {
            matrix.postRotate(180);
            Log.d("EXIF", "Exif: " + orientation);
        } else if (orientation == 8) {
            matrix.postRotate(270);
            Log.d("EXIF", "Exif: " + orientation);
        }
        scaledBitmap = Bitmap.createBitmap(scaledBitmap, 0, 0,
            scaledBitmap.getWidth(), scaledBitmap.getHeight(), matrix,
            true);
    } catch (IOException e) {
        e.printStackTrace();
    }
    FileOutputStream out = null;
    String resultFilePath = getFilename(imagePath, destDirectory);
    try {
        out = new FileOutputStream(resultFilePath);
        // write the compressed bitmap at the destination specified by filename.
        scaledBitmap.compress(Bitmap.CompressFormat.JPEG, 80, out);
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    }
    return resultFilePath;
}
|
python
|
def _generate_object(cls, soup, game, players):
    """
    get box_score data

    :param soup: Beautifulsoup object
    :param game: MLBAM Game object
    :param players: MLBAM Players object
    :return: pitchpx.box_score.box_score.BoxScore object
    """
    def _rows(section, tag, team_flag):
        # Locate the batting/pitching section for the given team and
        # return its child rows (empty list when the section is absent).
        node = soup.find(section, attrs={'team_flag': team_flag})
        return node.find_all(tag) if node else []

    box_score = BoxScore(game, players)
    box_score.retro_game_id = game.retro_game_id
    box_score.home_team_id = game.home_team_id
    box_score.away_team_id = game.away_team_id
    box_score.home_batting = [box_score._get_batter(row) for row in _rows('batting', 'batter', 'home')]
    box_score.away_batting = [box_score._get_batter(row) for row in _rows('batting', 'batter', 'away')]
    box_score.home_pitching = [box_score._get_pitcher(row) for row in _rows('pitching', 'pitcher', 'home')]
    box_score.away_pitching = [box_score._get_pitcher(row) for row in _rows('pitching', 'pitcher', 'away')]
    return box_score
|
python
|
def write_context_error_report(self, file, context_type):
    """Write a context error report relative to the target or query into the specified filename

    :param file: The name of a file to write the report to
    :param context_type: The type of profile, target or query based
    :type file: string
    :type context_type: string
    """
    if context_type == 'target':
        r = self.get_target_context_error_report()
    elif context_type == 'query':
        r = self.get_query_context_error_report()
    else:
        sys.stderr.write("ERROR invalid type must be target or query\n")
        sys.exit()
    # Fixed: use a context manager so the handle is flushed and closed
    # even when a write fails (the original never closed the file).
    with open(file, 'w') as of:
        of.write("\t".join(r['header']) + "\n")
        for row in r['data']:
            of.write("\t".join([str(x) for x in row]) + "\n")
    return
|
java
|
/**
 * Declares that every key of the wrapped object matches like exampleKey,
 * with values matching the given root value matcher. Returns this for
 * fluent chaining.
 */
public LambdaDslObject eachKeyLike(String exampleKey, PactDslJsonRootValue value) {
    object.eachKeyLike(exampleKey, value);
    return this;
}
|
python
|
def transformWith(self, func, other, keepSerializer=False):
    """
    Return a new DStream in which each RDD is generated by applying a function
    on each RDD of this DStream and 'other' DStream.

    `func` can have two arguments of (`rdd_a`, `rdd_b`) or have three
    arguments of (`time`, `rdd_a`, `rdd_b`)

    :param func: transformation applied to each pair of RDDs.
    :param other: second DStream whose RDDs are paired with this one's.
    :param keepSerializer: when True, the result keeps this stream's RDD
        deserializer instead of the SparkContext's default serializer.
    """
    if func.__code__.co_argcount == 2:
        # Adapt a two-argument function to the (time, rdd_a, rdd_b) form.
        oldfunc = func
        func = lambda t, a, b: oldfunc(a, b)
    assert func.__code__.co_argcount == 3, "func should take two or three arguments"
    # Wrap for the JVM side and build the transformed JVM DStream.
    jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer, other._jrdd_deserializer)
    dstream = self._sc._jvm.PythonTransformed2DStream(self._jdstream.dstream(),
        other._jdstream.dstream(), jfunc)
    jrdd_serializer = self._jrdd_deserializer if keepSerializer else self._sc.serializer
    return DStream(dstream.asJavaDStream(), self._ssc, jrdd_serializer)
|
python
|
def rnormal(mu, tau, size=None):
    """
    Random normal variates.

    Draws from N(mu, sigma) with sigma = 1/sqrt(tau); ``tau`` is the
    precision (inverse variance) of the distribution.
    """
    sigma = 1. / np.sqrt(tau)
    return np.random.normal(mu, sigma, size)
|
java
|
/**
 * Resolves the namespace URI of the current element from its prefix
 * using the accumulated prefix mappings; returns the empty string when
 * no mapping can be found.
 */
private String getElementURI() {
    String uri = null;
    // At this point in processing we have received all the
    // namespace mappings
    // As we still don't know the elements namespace,
    // we now figure it out.
    String prefix = getPrefixPart(m_elemContext.m_elementName);
    if (prefix == null) {
        // no prefix so lookup the URI of the default namespace
        uri = m_prefixMap.lookupNamespace("");
    } else {
        uri = m_prefixMap.lookupNamespace(prefix);
    }
    if (uri == null) {
        // We didn't find the namespace for the
        // prefix ... ouch, that shouldn't happen.
        // This is a hack, we really don't know
        // the namespace
        uri = EMPTYSTRING;
    }
    return uri;
}
|
python
|
def updateLayoutParameters(self, algorithmName, body, verbose=None):
    """
    Updates the Layout parameters for the Layout algorithm specified by the `algorithmName` parameter.

    :param algorithmName: Name of the layout algorithm
    :param body: A list of Layout Parameters with Values.
    :param verbose: print more
    :returns: default: successful operation
    """
    # PUT the parameter list to the algorithm-specific endpoint.
    endpoint = self.___url + 'apply/layouts/' + str(algorithmName) + '/parameters'
    return api(url=endpoint, method="PUT", body=body, verbose=verbose)
|
java
|
/**
 * Lifts an Iso over values into a simple lens between maps: viewing maps
 * every entry's value forward through the iso; setting rebuilds the map
 * by viewing the updated map through the mirrored iso.
 * NOTE(review): the setter ignores the original map 's' entirely --
 * confirm that is the intended behavior for this lens.
 */
public static <K, V, V2> Lens.Simple<Map<K, V>, Map<K, V2>> mappingValues(Iso<V, V, V2, V2> iso) {
    return simpleLens(m -> toMap(HashMap::new, map(t -> t.biMapR(view(iso)), map(Tuple2::fromEntry, m.entrySet()))),
        (s, b) -> view(mappingValues(iso.mirror()), b));
}
|
python
|
def extract_lookups_from_string(value):
    """Extract any lookups within a string.

    Args:
        value (str): string value we're extracting lookups from

    Returns:
        set: set of :class:`stacker.lookups.Lookup` if any
    """
    lookups = set()
    for match in LOOKUP_REGEX.finditer(value):
        groupdict = match.groupdict()
        # groups()[0] is the first capture group of LOOKUP_REGEX --
        # presumably the raw lookup text; confirm against the pattern.
        raw = match.groups()[0]
        lookup_type = groupdict["type"]
        lookup_input = groupdict["input"]
        lookups.add(Lookup(lookup_type, lookup_input, raw))
    return lookups
|
java
|
/**
 * Parses a raw configuration value with this property's parser.
 *
 * @param value the raw string value; must not be null
 * @param configFilePath the file the value came from (for error messages)
 * @return the parsed value
 * @throws RuntimeException when the property is absent from the file
 */
public final T parse(String value, Path configFilePath) {
    if (value == null) {
        throw new RuntimeException(ErrorMessage.UNAVAILABLE_PROPERTY.getMessage(name(), configFilePath));
    }
    return parser().read(value);
}
|
java
|
/**
 * Logs an error message and, when the metrics subsystem is initialized,
 * bumps the error counter metric.
 */
static void reportError(final String errorMessage) {
    if (MetricsManager.instance != null) {
        MetricsManager.rootMetricsLogger.incCounter(METRIC_ERRORS, 1);
    }
    LOG.error(errorMessage);
}
|
java
|
/**
 * Declarative-services bind method for the non-deferrable scheduled
 * executor: records the service reference (trace-logging the bind when
 * debug tracing is enabled).
 */
protected void setNonDeferrableScheduledExecutor(ServiceReference<ScheduledExecutorService> ref) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
        Tr.debug(tc, "setNonDeferrableScheduledExecutor", ref);
    nonDeferrableSchedXSvcRef.setReference(ref);
}
|
java
|
/**
 * Formats an ISO-8601 style zone offset for the 'X'/'x' pattern letters.
 * Pattern letter 'X' emits a literal 'Z' for exactly UTC; widths 1-5
 * select among the sign+hours(+minutes) variants, with a ':' delimiter
 * for widths 3 and 5.
 */
void formatTimeZone_X(StringBuilder b, ZonedDateTime d, int width, char ch) {
    int[] tz = getTzComponents(d);
    // Emit a 'Z' by itself for X if time is exactly GMT
    if (ch == 'X' && tz[TZOFFSET] == 0) {
        b.append('Z');
        return;
    }
    switch (width) {
    case 5:
    case 4:
    case 3:
    case 2:
    case 1:
        // Sign, then zero-padded hours.
        if (tz[TZNEG] == -1) {
            b.append('-');
        } else {
            b.append('+');
        }
        zeroPad2(b, tz[TZHOURS], 2);
        // Delimiter is omitted for X, XX and XXXX
        if (width == 3 || width == 5) {
            b.append(':');
        }
        int mins = tz[TZMINS];
        // Minutes are optional for X
        if (width != 1 || mins > 0) {
            zeroPad2(b, mins, 2);
        }
        break;
    }
}
|
java
|
/**
 * Scans the given package tree for classes annotated with @Entity and
 * registers each one as persistent.
 */
protected final void scan(Package tree) {
    Set<Class<?>> classes = Classes.matching(annotatedWith(Entity.class)).in(tree);
    for (Class<?> clazz : classes) {
        addPersistent(clazz);
    }
}
|
java
|
/**
 * Escapes the literal newline, tab and carriage-return characters in a
 * string with their two-character backslash forms.
 */
private static String escapeString(String value) {
    // Map each control character to its escaped representation, then
    // substitute them all in a single pass.
    Map<String, String> replacements = new HashMap<String, String>();
    replacements.put("\r", "\\r");
    replacements.put("\n", "\\n");
    replacements.put("\t", "\\t");
    return CmsStringUtil.substitute(value, replacements);
}
|
java
|
/**
 * Builds the logical conjunction of the given filters: the result accepts
 * an element only when every underlying filter accepts it (evaluated in
 * order with short-circuiting).
 */
@SafeVarargs
public static<T> Filter<T> and(final Filter<T>... f) {
    return new Filter<T>() {
        @Override
        public boolean apply(final T x) {
            // Fail fast on the first rejecting filter.
            for (int i = 0; i < f.length; i++) {
                if (!f[i].apply(x)) {
                    return false;
                }
            }
            return true;
        }

        @Override
        public String toString() {
            return "(" + StringUtils.join(f, " and ") + ")";
        }

        @Override
        public Filter<T> copy() {
            return Filters.and(Filters.copy(f));
        }
    };
}
|
python
|
def _get_udev_rules(self, channel_read, channel_write, channel_data):
"""construct udev rules info."""
sub_str = '%(read)s %%k %(read)s %(write)s %(data)s qeth' % {
'read': channel_read,
'read': channel_read,
'write': channel_write,
'data': channel_data}
rules_str = '# Configure qeth device at'
rules_str += ' %(read)s/%(write)s/%(data)s\n' % {
'read': channel_read,
'write': channel_write,
'data': channel_data}
rules_str += ('ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL=='
'\"qeth\", IMPORT{program}=\"collect %s\"\n') % sub_str
rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"'
'%(read)s\", IMPORT{program}="collect %(channel)s\"\n') % {
'read': channel_read, 'channel': sub_str}
rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"'
'%(write)s\", IMPORT{program}=\"collect %(channel)s\"\n') % {
'write': channel_write, 'channel': sub_str}
rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"'
'%(data)s\", IMPORT{program}=\"collect %(channel)s\"\n') % {
'data': channel_data, 'channel': sub_str}
rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"drivers\", KERNEL==\"'
'qeth\", IMPORT{program}=\"collect --remove %s\"\n') % sub_str
rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"'
'%(read)s\", IMPORT{program}=\"collect --remove %(channel)s\"\n'
) % {'read': channel_read, 'channel': sub_str}
rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"'
'%(write)s\", IMPORT{program}=\"collect --remove %(channel)s\"\n'
) % {'write': channel_write, 'channel': sub_str}
rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"'
'%(data)s\", IMPORT{program}=\"collect --remove %(channel)s\"\n'
) % {'data': channel_data, 'channel': sub_str}
rules_str += ('TEST==\"[ccwgroup/%(read)s]\", GOTO=\"qeth-%(read)s'
'-end\"\n') % {'read': channel_read, 'read': channel_read}
rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", ENV{COLLECT_'
'%(read)s}==\"0\", ATTR{[drivers/ccwgroup:qeth]group}=\"'
'%(read)s,%(write)s,%(data)s\"\n') % {
'read': channel_read, 'read': channel_read,
'write': channel_write, 'data': channel_data}
rules_str += ('ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL==\"qeth'
'\", ENV{COLLECT_%(read)s}==\"0\", ATTR{[drivers/'
'ccwgroup:qeth]group}=\"%(read)s,%(write)s,%(data)s\"\n'
'LABEL=\"qeth-%(read)s-end\"\n') % {
'read': channel_read, 'read': channel_read, 'write': channel_write,
'data': channel_data, 'read': channel_read}
rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccwgroup\", KERNEL=='
'\"%s\", ATTR{layer2}=\"1\"\n') % channel_read
rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccwgroup\", KERNEL=='
'\"%s\", ATTR{online}=\"1\"\n') % channel_read
return rules_str
|
python
|
def _collapse_to_cwl_record_single(data, want_attrs, input_files):
    """Build a CWL record for one sample, keeping only the requested attributes.

    Attribute names may use ``__`` to address nested keys in ``data``; each
    resolved value is converted with ``_to_cwl`` before being stored.
    """
    return {attr: _to_cwl(tz.get_in(attr.split("__"), data), input_files)
            for attr in want_attrs}
|
java
|
/**
 * Deserializes the given JSON string into an instance of the given class.
 *
 * @param json  the JSON payload; must not be null
 * @param clazz the target class; must not be null
 * @return the deserialized object, or null when parsing fails
 */
public static <T> T fromJson(String json, Class<T> clazz) {
    Objects.requireNonNull(json, Required.JSON.toString());
    Objects.requireNonNull(clazz, Required.CLASS.toString());
    try {
        return mapper.readValue(json, clazz);
    } catch (IOException e) {
        LOG.error("Failed to convert json to object class", e);
        return null;
    }
}
|
python
|
def get_texts_and_labels(sentence_chunk):
    """Extract token texts and labels from one newline/tab separated chunk.

    Each non-empty line holds tab-separated columns; the first column is the
    token text and the last column is its label.
    """
    texts, labels = [], []
    for line in sentence_chunk.split('\n'):
        line = line.strip()
        if not line:
            continue
        columns = line.split('\t')
        texts.append(columns[0].strip())
        labels.append(columns[-1].strip())
    return texts, labels
|
python
|
def remover(self, id_tipo_acesso, id_equipamento):
    """Removes relationship between equipment and access type.
    :param id_equipamento: Equipment identifier.
    :param id_tipo_acesso: Access type identifier.
    :return: None
    :raise EquipamentoNaoExisteError: Equipment doesn't exist.
    :raise EquipamentoAcessoNaoExisteError: Relationship between equipment and access type doesn't exist.
    :raise InvalidParameterError: Equipment and/or access type id is/are invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Validate both ids before issuing the request.
    if not is_valid_int_param(id_tipo_acesso):
        raise InvalidParameterError(u'Access type id is invalid.')
    if not is_valid_int_param(id_equipamento):
        raise InvalidParameterError(u'Equipment id is invalid.')
    url = 'equipamentoacesso/' + \
        str(id_equipamento) + '/' + str(id_tipo_acesso) + '/'
    code, xml = self.submit(None, 'DELETE', url)
    # NOTE(review): the docstring advertises None, but this returns whatever
    # self.response() produces from the XML — confirm which is intended.
    return self.response(code, xml)
|
java
|
/**
 * Checks whether the sequence is a valid XML NCName: a non-empty string
 * whose first code point is an NCName start character and whose remaining
 * code points are NCName characters. Handles supplementary (surrogate-pair)
 * code points correctly.
 */
public static boolean isNCName(CharSequence s) {
    if (isNullOrEmpty(s)) {
        return false;
    }
    int index = 0;
    while (index < s.length()) {
        int codePoint = Character.codePointAt(s, index);
        boolean valid = (index == 0)
                ? isNCNameStartChar(codePoint)
                : isNCNameChar(codePoint);
        if (!valid) {
            return false;
        }
        index += Character.charCount(codePoint);
    }
    return true;
}
|
java
|
/**
 * Writes this document to the given file using UTF-8 encoding.
 *
 * Uses try-with-resources instead of the original try/finally: if both the
 * body and close() throw, the body's exception now propagates (close's is
 * added as suppressed) rather than being masked by the close failure.
 *
 * @param file             the destination file
 * @param outputProperties transformer output properties to apply
 * @throws TransformerException if the transformation fails
 * @throws IOException          if the file cannot be written
 */
public void write(File file, Map<String, String> outputProperties) throws TransformerException, IOException {
    try (Writer writer = new Utf8Writer(file)) {
        write(writer, outputProperties);
    }
}
|
python
|
def from_file(filename, password='', keytype=None):
    """
    Returns a new PrivateKey instance with the given attributes.
    If keytype is None, we attempt to automatically detect the type
    by trying to parse the file first as RSA, then as DSS.
    :type filename: string
    :param filename: The key file name.
    :type password: string
    :param password: The key password.
    :type keytype: string
    :param keytype: The key type ('rsa' or 'dss').
    :rtype: PrivateKey
    :return: The new key.
    :raise ValueError: if the key type cannot be detected.
    """
    if keytype is None:
        # Probe the file with each parser purely for type detection; the
        # parsed key object itself is not needed (the original code bound
        # it to an unused local).
        try:
            RSAKey.from_private_key_file(filename)
            keytype = 'rsa'
        except SSHException:
            try:
                DSSKey.from_private_key_file(filename)
                keytype = 'dss'
            except SSHException as err:
                msg = 'not a recognized private key: ' + repr(filename)
                # Chain the original parse failure for easier debugging.
                raise ValueError(msg) from err
    key = PrivateKey(keytype)
    key.filename = filename
    key.password = password
    return key
|
python
|
def load_cli(subparsers):
    """Given a parser, load the CLI subcommands"""
    for name in available_commands():
        loader, description = _import_loader('{}.{}'.format(__package__, name))
        sub_parser = subparsers.add_parser(name, description=description)
        command = loader(sub_parser)
        if command is None:
            raise RuntimeError('Failed to load "{}".'.format(name))
        sub_parser.set_defaults(cmmd=command)
|
java
|
/**
 * Returns the next element without consuming it, fetching it lazily on
 * first access.
 *
 * @return the next element
 * @throws NoSuchElementException if no further element is available
 */
public T peek() {
    if (nextToken == null) {
        nextToken = getNext();
        if (nextToken == null) {
            throw new NoSuchElementException();
        }
    }
    return nextToken;
}
|
java
|
/**
 * Builds a GET request for a single custom field.
 *
 * @param customField the custom field identifier
 * @return the prepared item request
 */
public ItemRequest<CustomField> findById(String customField) {
    return new ItemRequest<CustomField>(this, CustomField.class,
            String.format("/custom_fields/%s", customField), "GET");
}
|
java
|
/**
 * Advances the stream past the end of the current record by consuming any
 * trailing LINE_SEPARATOR characters.
 *
 * If a non-separator byte is read: when mark/reset is supported the byte is
 * pushed back (we have over-read into the next record); otherwise an
 * IOException is thrown because the over-read cannot be undone.
 *
 * @param record the record whose trailer is consumed; used only for error
 *               reporting
 * @throws IOException on an unexpected character that cannot be pushed back
 */
protected void gotoEOR(ArchiveRecord record) throws IOException {
    // Nothing buffered — already at (or past) end of input.
    if (getIn().available() <= 0) {
        return;
    }
    // Remove any trailing LINE_SEPARATOR
    int c = -1;
    while (getIn().available() > 0) {
        if (getIn().markSupported()) {
            // Remember position so a single over-read can be undone.
            getIn().mark(1);
        }
        c = getIn().read();
        if (c != -1) {
            if (c == LINE_SEPARATOR) {
                continue;
            }
            if (getIn().markSupported()) {
                // We've overread. We're probably in next record. There is
                // no way of telling for sure. It may be dross at end of
                // current record. Backup.
                getIn().reset();
                break;
            }
            // No way to back up: report the stray character with as much
            // context (current header fields) as is available.
            ArchiveRecordHeader h = (getCurrentRecord() != null)?
                record.getHeader(): null;
            throw new IOException("Read " + (char)c +
                " when only " + LINE_SEPARATOR + " expected. " +
                getReaderIdentifier() + ((h != null)?
                    h.getHeaderFields().toString(): ""));
        }
    }
}
|
java
|
/**
 * Convenience overload that delegates to the three-argument
 * {@code unmarshalHelper} with validation disabled.
 *
 * @param source   the XML input source to parse
 * @param resolver the entity resolver to use
 * @return the parsed document
 * @throws CmsXmlException if unmarshalling fails
 */
public static Document unmarshalHelper(InputSource source, EntityResolver resolver) throws CmsXmlException {
    return unmarshalHelper(source, resolver, false);
}
|
python
|
def triplify_object(binding):
    """ Create bi-directional bindings for object relationships. """
    triples = []
    # Emit a type assertion when a class URI is known for this node.
    if binding.uri:
        triples.append((binding.subject, RDF.type, binding.uri))
    if binding.parent is not None:
        parent = binding.parent.subject
        # Arrays are transparent containers: link from the array's own
        # parent instead of the array node itself.
        if binding.parent.is_array:
            parent = binding.parent.parent.subject
        triples.append((parent, binding.predicate, binding.subject))
    # NOTE(review): `parent` is only bound inside the branch above. If
    # `binding.reverse` is set while `binding.parent` is None this raises
    # NameError — presumably reverse implies a parent; confirm.
    if binding.reverse is not None:
        triples.append((binding.subject, binding.reverse, parent))
    # Recurse into child properties and collect their triples.
    for prop in binding.properties:
        _, prop_triples = triplify(prop)
        triples.extend(prop_triples)
    return binding.subject, triples
|
java
|
/**
 * Renders a period as an ISO-8601 week string ("P&lt;n&gt;W") where possible:
 * years are approximated as 52 weeks, months as 4 weeks, and day counts
 * divisible by 7 are converted exactly; anything else falls back to
 * {@link Period#toString()}.
 *
 * The years/months branches are guarded by {@code > 0}, so the counts there
 * can never be negative — the original Math.abs/sign-flip dance in those two
 * branches was dead code and has been removed. The days branch keeps its
 * sign handling via plain truncating division (identical result for any
 * value where {@code days % 7 == 0}).
 */
private String periodToString(Period period) {
    String retVal;
    if (period.getYears() > 0) {
        retVal = String.format("P%dW", period.getYears() * 52);
    } else if (period.getMonths() > 0) {
        retVal = String.format("P%dW", period.getMonths() * 4);
    } else if (period.getDays() % 7 == 0) {
        retVal = String.format("P%dW", period.getDays() / 7);
    } else {
        retVal = period.toString();
    }
    return retVal;
}
|
java
|
// Binds the given service to the canonical form of the address in this
// manager's registry, returning the same service for call chaining.
@Override
public ServiceRefAmp bind(ServiceRefAmp service, String address)
{
    // Trace-level logging only; guarded to avoid building the message.
    if (log.isLoggable(Level.FINEST)) {
        log.finest(L.l("bind {0} for {1} in {2}",
                       address, service.api().getType(), this));
    }
    // Normalize the address before registering.
    address = toCanonical(address);
    registry().bind(address, service);
    return service;
}
|
java
|
// Handles a heartbeat-stop notification for this peer.
//
// External peers: the socket pool is atomically detached (set to null) so
// it will not be reused. Internal peers: the pool is read but left in
// place. In either case the pool's factory is notified. Always returns
// true (the commented-out state check below is intentionally disabled).
boolean onHeartbeatStop()
{
    SocketPool clusterSocketPool;
    if (isExternal()) {
        clusterSocketPool = _clusterSocketPool.getAndSet(null);
    }
    else {
        clusterSocketPool = _clusterSocketPool.get();
    }
    if (clusterSocketPool != null) {
        clusterSocketPool.getFactory().notifyHeartbeatStop();
    }
    /*
    if (! _heartbeatState.notifyHeartbeatStop()) {
      return false;
    }
    */
    log.fine("notify-heartbeat-stop " + this);
    return true;
}
|
python
|
def list_variables(self, page_size=None, page_token=None, client=None):
    """API call: list variables for this config.
    This only lists variable names, not the values.
    See
    https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.variables/list
    :type page_size: int
    :param page_size:
        Optional. The maximum number of variables in each page of results
        from this request. Non-positive values are ignored. Defaults
        to a sensible value set by the API.
    :type page_token: str
    :param page_token:
        Optional. If present, return the next batch of variables, using
        the value, which must correspond to the ``nextPageToken`` value
        returned in the previous response. Deprecated: use the ``pages``
        property of the returned iterator instead of manually passing
        the token.
    :type client: :class:`~google.cloud.runtimeconfig.client.Client`
    :param client:
        (Optional) The client to use. If not passed, falls back to the
        ``client`` stored on the current config.
    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns:
        Iterator of :class:`~google.cloud.runtimeconfig.variable.Variable`
        belonging to this project.
    """
    path = "%s/variables" % (self.path,)
    client = self._require_client(client)
    iterator = page_iterator.HTTPIterator(
        client=client,
        api_request=client._connection.api_request,
        path=path,
        item_to_value=_item_to_variable,
        items_key="variables",
        page_token=page_token,
        max_results=page_size,
    )
    # NOTE(review): overrides a private attribute so the page-size query
    # parameter is sent as "pageSize" (this API's name) instead of the
    # iterator's default — confirm this stays valid across library versions.
    iterator._MAX_RESULTS = "pageSize"
    # Expose the owning config on the iterator for downstream consumers.
    iterator.config = self
    return iterator
|
java
|
/**
 * Asynchronously destroys this form's editing session on the server.
 *
 * NOTE(review): onFailure rethrows as RuntimeException from within the
 * async callback — presumably caught by the global uncaught-exception
 * handler; confirm.
 */
public void destroy() {
    CmsXmlContentUgcApi.SERVICE.destroySession(m_content.getSessionId(), new AsyncCallback<Void>() {
        public void onFailure(Throwable caught) {
            throw new RuntimeException(caught);
        }
        public void onSuccess(Void result) {
            // do nothing
        }
    });
}
|
python
|
def ring2nest(nside, ipix):
    """Drop-in replacement for healpy `~healpy.pixelfunc.ring2nest`."""
    # Accept scalars or arrays; normalize to a 1-D int64 array without
    # copying when the input already matches.
    pixels = np.atleast_1d(ipix).astype(np.int64, copy=False)
    return ring_to_nested(pixels, nside)
|
python
|
def add(cls, pid, connection):
    """Register *connection* with the pool identified by *pid*.

    The pool is created first if it does not exist yet; the whole
    operation runs under the class-level lock.

    :param str pid: The pool id
    :type connection: psycopg2.extensions.connection
    :param connection: The connection to add to the pool
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        cls._pools[pid].add(connection)
|
python
|
def create_subfield_layer(aspect, ip):
    '''Build the subfield layer from the appropriate SUBFIELD pgn file.

    The full subfield template is used for 'PANTS' aspects; all other
    aspects get the minimal template.'''
    source = FILE_SUBFIELD if 'PANTS' in aspect else FILE_MIN_SUBFIELD
    return pgnreader.parse_pagan_file(source, ip, invert=False, sym=True)
|
python
|
def _parse_field_list(fieldnames, include_parents=False):
    """
    Parse a list of field names into an internal ParsedFieldList.

    Dot-separated names (``"a.b"``) are interpreted as subform fields and
    split into their components before being handed to ``_collect_fields``.

    :param fieldnames: list of field name strings.
    :param include_parents: optional boolean, defaults to False. When True,
        subform fields implicitly include their parent fields in the
        parsed list.
    """
    split_names = [name.split('.') for name in fieldnames]
    return _collect_fields(split_names, include_parents)
|
java
|
/**
 * Inserts the sub-sequence {@code csq[start, end)} at {@code index}.
 *
 * Null sequences are treated as the literal "null"; out-of-range start/end
 * are clamped to the sequence bounds, a negative index is clamped to 0,
 * and an empty effective range is a no-op.
 *
 * @param index position in this builder to insert at (clamped to >= 0)
 * @param csq   the source sequence, may be null
 * @param start start offset in csq (clamped to [0, csq.length()])
 * @param end   end offset in csq, exclusive (clamped to csq.length())
 * @return this builder, for chaining
 */
public StrBuilder insert(int index, CharSequence csq, int start, int end) {
    if (csq == null) {
        csq = "null";
    }
    final int csqLen = csq.length();
    // A start beyond the sequence means nothing to insert.
    if (start > csqLen) {
        return this;
    }
    if (start < 0) {
        start = 0;
    }
    if (end > csqLen) {
        end = csqLen;
    }
    // Empty (or inverted) range: no-op.
    if (start >= end) {
        return this;
    }
    if (index < 0) {
        index = 0;
    }
    final int length = end - start;
    // Shift existing content to open a gap of `length` chars at `index`.
    moveDataAfterIndex(index, length);
    // Copy the sub-sequence into the gap.
    for (int i = start, j = this.position; i < end; i++, j++) {
        value[j] = csq.charAt(i);
    }
    // Advance the write position past the inserted region.
    this.position = Math.max(this.position, index) + length;
    return this;
}
|
python
|
def logfile_generator(self):
    """Yield each line of the file, or the next line if several files."""
    if not self.args['exclude']:
        # ask all filters for a start_limit and fast-forward to the maximum
        start_limits = [f.start_limit for f in self.filters
                        if hasattr(f, 'start_limit')]
        if start_limits:
            for logfile in self.args['logfile']:
                logfile.fast_forward(max(start_limits))
    if len(self.args['logfile']) > 1:
        # merge log files by time
        for logevent in self._merge_logfiles():
            yield logevent
    else:
        # only one file
        for logevent in self.args['logfile'][0]:
            # Apply the requested timezone offset (hours) to each event;
            # the merged path presumably handles this itself — confirm.
            if self.args['timezone'][0] != 0 and logevent.datetime:
                logevent._datetime = (logevent.datetime +
                                      timedelta(hours=self
                                                .args['timezone'][0]))
            yield logevent
|
python
|
def setup(cls,
          opts=type('opts', (), {
              'background': None,
              'logdir': None,
              'logging_conf_file': None,
              'log_level': 'DEBUG'
          })):
    """Setup logging via CLI params and config.

    Tries each configuration strategy in priority order — CLI special
    settings, a *.conf file, a config section, then built-in defaults —
    and stops at the first one that succeeds.

    :param opts: options object exposing ``background``, ``logdir``,
        ``logging_conf_file`` and ``log_level`` attributes.
    :return: True-ish when logging was configured, False when
        configuration was skipped, None when no strategy succeeded.
    """
    # One logger instance suffices; the original re-fetched the same
    # 'luigi' logger before every message, which was redundant.
    logger = logging.getLogger('luigi')
    if cls._configured:
        logger.info('logging already configured')
        return False
    cls._configured = True
    if cls.config.getboolean('core', 'no_configure_logging', False):
        logger.info('logging disabled in settings')
        return False
    if cls._cli(opts):
        logger.info('logging configured via special settings')
        return True
    if cls._conf(opts):
        logger.info('logging configured via *.conf file')
        return True
    if cls._section(opts):
        logger.info('logging configured via config section')
        return True
    configured = cls._default(opts)
    if configured:
        logger.info('logging configured by default settings')
        return configured
|
python
|
def update(self, τ: float = 1.0, update_indicators=True, dampen=False):
    """ Advance the model by one time step.

    :param τ: damping time constant used when ``dampen`` is True.
    :param update_indicators: when True, resample each node's indicator
        values around the node's current dataset.
    :param dampen: when True, decay the partial-derivative entries of
        ``s0`` exponentially in ``t``.
    """
    # Phase 1: compute every node's next state from its update function
    # before any state is committed, so updates see a consistent snapshot.
    for n in self.nodes(data=True):
        n[1]["next_state"] = n[1]["update_function"](n)
    # Phase 2: commit the computed states.
    for n in self.nodes(data=True):
        n[1]["rv"].dataset = n[1]["next_state"]
    # Phase 3: propagate states into s0 samples, optionally damping the
    # derivative terms and resampling indicators.
    for n in self.nodes(data=True):
        for i in range(self.res):
            self.s0[i][n[0]] = n[1]["rv"].dataset[i]
            if dampen:
                # Exponential decay of ∂/∂t toward zero with constant τ.
                self.s0[i][f"∂({n[0]})/∂t"] = self.s0_original[
                    f"∂({n[0]})/∂t"
                ] * exp(-τ * self.t)
        if update_indicators:
            for indicator in n[1]["indicators"].values():
                # Resample indicator values with small fixed noise around
                # the scaled node dataset.
                indicator.samples = np.random.normal(
                    indicator.mean * np.array(n[1]["rv"].dataset),
                    scale=0.01,
                )
    self.t += self.Δt
|
java
|
/**
 * Deletes the given path recursively.
 *
 * @param src the path to delete
 * @return true if the delete succeeded
 * @throws IOException if the client is closed or the namenode call fails
 * @deprecated retained for compatibility; note this always deletes
 *             recursively (second argument to namenode.delete is true)
 */
@Deprecated
public boolean delete(String src) throws IOException {
    // Fail fast if the client has been closed.
    checkOpen();
    // The cached file status may be stale after a delete.
    clearFileStatusCache();
    return namenode.delete(src, true);
}
|
python
|
def set_base_prompt(
    self, pri_prompt_terminator=">", alt_prompt_terminator="]", delay_factor=1
):
    """
    Sets self.base_prompt
    Used as delimiter for stripping of trailing prompt in output.
    Should be set to something that is general and applies in multiple contexts. For Comware
    this will be the router prompt with < > or [ ] stripped off.
    This will be set on logging in, but not when entering system-view
    """
    raw_prompt = super(HPComwareBase, self).set_base_prompt(
        pri_prompt_terminator=pri_prompt_terminator,
        alt_prompt_terminator=alt_prompt_terminator,
        delay_factor=delay_factor,
    )
    # Drop the leading "<" / "[" character, then trim whitespace.
    self.base_prompt = raw_prompt[1:].strip()
    return self.base_prompt
|
java
|
/**
 * Returns a factory configured with the given API key: this instance when
 * the key is unchanged, otherwise a new factory sharing the same client.
 */
public ReplicationClientFactory usingApiKey(String apiKey) {
    if (!Objects.equal(_apiKey, apiKey)) {
        return new ReplicationClientFactory(_jerseyClient, apiKey);
    }
    return this;
}
|
java
|
/**
 * Rebuilds {@code currReplacementNodes} from a translated message: raw text
 * parts become new RawTextNodes, and placeholder parts are expanded back
 * into the content of the original message's representative placeholder
 * node (with MsgHtmlTagNodes flattened into their children).
 *
 * @param msg         the original message node supplying placeholder content
 * @param translation the translated message whose parts drive the rebuild
 */
private void buildReplacementNodesFromTranslation(MsgNode msg, SoyMsg translation) {
    currReplacementNodes = Lists.newArrayList();
    for (SoyMsgPart msgPart : translation.getParts()) {
        if (msgPart instanceof SoyMsgRawTextPart) {
            // Append a new RawTextNode to the currReplacementNodes list.
            String rawText = ((SoyMsgRawTextPart) msgPart).getRawText();
            currReplacementNodes.add(
                new RawTextNode(nodeIdGen.genId(), rawText, msg.getSourceLocation()));
        } else if (msgPart instanceof SoyMsgPlaceholderPart) {
            // Get the representative placeholder node and iterate through its contents.
            String placeholderName = ((SoyMsgPlaceholderPart) msgPart).getPlaceholderName();
            MsgPlaceholderNode placeholderNode = msg.getRepPlaceholderNode(placeholderName);
            for (StandaloneNode contentNode : placeholderNode.getChildren()) {
                // If the content node is a MsgHtmlTagNode, it needs to be replaced by a number of
                // consecutive siblings. This is done by visiting the MsgHtmlTagNode. Otherwise, we
                // simply add the content node to the currReplacementNodes list being built.
                if (contentNode instanceof MsgHtmlTagNode) {
                    currReplacementNodes.addAll(((MsgHtmlTagNode) contentNode).getChildren());
                } else {
                    currReplacementNodes.add(contentNode);
                }
            }
        } else {
            // Only raw-text and placeholder parts are expected here.
            throw new AssertionError();
        }
    }
}
|
python
|
def argsort(self, axis=-1, kind="quicksort", order=None):
    """
    Return the indices that would sort the array.

    Delegates to the plain-ndarray view so unit information is ignored
    during the sort. See ``numpy.ndarray.argsort`` for the meaning of
    ``axis``, ``kind`` and ``order``.

    Example
    -------
    >>> from unyt import km
    >>> data = [3, 8, 7]*km
    >>> print(data.argsort())
    [0 2 1]
    """
    as_ndarray = self.view(np.ndarray)
    return as_ndarray.argsort(axis, kind, order)
|
java
|
/**
 * Records checkpoint stats for one subtask of the given job vertex.
 *
 * On success the aggregate counters (acknowledged subtasks, state size,
 * alignment-buffered bytes) are updated and the subtask becomes the latest
 * acknowledged one.
 *
 * @param jobVertexId vertex the subtask belongs to
 * @param subtask     the subtask stats to record
 * @return true if the vertex is tracked and the stats were accepted,
 *         false otherwise
 */
boolean reportSubtaskStats(JobVertexID jobVertexId, SubtaskStateStats subtask) {
    TaskStateStats taskStateStats = taskStats.get(jobVertexId);
    if (taskStateStats != null && taskStateStats.reportSubtaskStats(subtask)) {
        currentNumAcknowledgedSubtasks++;
        latestAcknowledgedSubtask = subtask;
        currentStateSize += subtask.getStateSize();
        long alignmentBuffered = subtask.getAlignmentBuffered();
        // Negative values signal "unknown" and are not accumulated.
        if (alignmentBuffered > 0) {
            currentAlignmentBuffered += alignmentBuffered;
        }
        return true;
    } else {
        return false;
    }
}
|
java
|
/**
 * Decodes a protobuf message from the given byte array.
 *
 * @param bytes the serialized message; must not be null
 * @return the decoded message
 * @throws IOException if the input is null or cannot be parsed
 */
@Override
public T decode(byte[] bytes) throws IOException {
    if (bytes == null) {
        throw new IOException("byte array is null.");
    }
    return readFrom(CodedInputStream.newInstance(bytes, 0, bytes.length));
}
|
python
|
def guess(system):
    """
    Guess the input (and optional addfile) format of a case.

    First collects candidate formats by file extension, then confirms by
    asking each candidate parser's ``testlines`` to inspect the file.
    Stores the result on ``system.files`` and returns it ('' on failure).
    """
    files = system.files
    maybe = []
    if files.input_format:
        maybe.append(files.input_format)
    # first, guess by extension
    ext = files.ext.strip('.').lower()
    for key, val in input_formats.items():
        if isinstance(val, list):
            if ext in val:
                maybe.append(key)
        elif ext == val:
            maybe.append(key)
    # second, confirm by inspecting file contents. The with-block ensures
    # the handle is closed even if a testlines() call raises (the original
    # leaked the handle in that case).
    true_format = ''
    with open(files.case, 'r') as fid:
        # NOTE(review): successive testlines() calls share the file
        # position; presumably each parser rewinds as needed — confirm.
        for item in maybe:
            try:
                parser = importlib.import_module('.' + item, __name__)
                testlines = getattr(parser, 'testlines')
                if testlines(fid):
                    true_format = item
                    break
            except ImportError:
                logger.debug(
                    'Parser for {:s} format is not found. '
                    'Format guess will continue.'.
                    format(item))
    if true_format:
        logger.debug('Input format guessed as {:s}.'.format(true_format))
    else:
        logger.error('Unable to determine case format.')
    files.input_format = true_format
    # guess addfile format by extension only
    if files.addfile:
        _, add_ext = os.path.splitext(files.addfile)
        for key, val in input_formats.items():
            if isinstance(val, list):
                if add_ext[1:] in val:
                    files.add_format = key
            elif add_ext[1:] == val:
                files.add_format = key
    return true_format
|
python
|
def coalescence_waiting_times(self, backward=True):
    '''Generator over the waiting times between successive coalescence events.

    Coalescence events are the root-distances of internal nodes with more
    than one child, plus the distance of the deepest leaf; the gaps between
    consecutive (sorted) event times are yielded.

    Args:
        ``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False``
    '''
    if not isinstance(backward, bool):
        raise TypeError("backward must be a bool")
    event_times = []
    deepest_leaf = float('-inf')
    for node, dist in self.distances_from_root():
        if len(node.children) > 1:
            event_times.append(dist)
        elif len(node.children) == 0 and dist > deepest_leaf:
            deepest_leaf = dist
    event_times.append(deepest_leaf)
    event_times.sort(reverse=backward)
    for earlier, later in zip(event_times, event_times[1:]):
        yield abs(earlier - later)
|
python
|
def group_membership_delete(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/group_memberships#delete-membership"
    path = "/api/v2/group_memberships/{id}.json".format(id=id)
    return self.call(path, method="DELETE", **kwargs)
|
python
|
def job_status_show(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/job_statuses#show-job-status"
    path = "/api/v2/job_statuses/{id}.json".format(id=id)
    return self.call(path, **kwargs)
|
python
|
def cleanup(self):
    """
    This function is called when the service has finished running
    regardless of intentionally or not.

    Stops the event broker (if any), closes the HTTP server handler,
    and runs the aiohttp app's shutdown/cleanup coroutines to completion.
    """
    # if an event broker has been created for this service
    if self.event_broker:
        # stop the event broker
        self.event_broker.stop()
    # attempt
    try:
        # close the http server
        self._server_handler.close()
        self.loop.run_until_complete(self._server_handler.wait_closed())
        # NOTE(review): `shutdown_timeout` is not defined in this method —
        # presumably a module-level constant; confirm.
        self.loop.run_until_complete(self._http_handler.finish_connections(shutdown_timeout))
    # if there was no handler
    except AttributeError:
        # keep going
        pass
    # more cleanup
    self.loop.run_until_complete(self.app.shutdown())
    self.loop.run_until_complete(self.app.cleanup())
|
python
|
def _set_intf_isis(self, v, load=False):
    """
    Setter method for intf_isis, mapped from YANG variable /routing_system/interface/ve/intf_isis (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_intf_isis is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_intf_isis() directly.
    """
    # NOTE(review): pyangbind-style generated setter — presumably emitted
    # from the YANG model; manual edits may be lost on regeneration.
    if hasattr(v, "_utype"):
        # Unwrap typed values to their underlying representation first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=intf_isis.intf_isis, is_container='container', presence=True, yang_name="intf-isis", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IsisVeInterface', u'sort-priority': u'131'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """intf_isis must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=intf_isis.intf_isis, is_container='container', presence=True, yang_name="intf-isis", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IsisVeInterface', u'sort-priority': u'131'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
        })
    self.__intf_isis = t
    if hasattr(self, '_set'):
        self._set()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.