language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
|---|---|
python
|
def _fill_cache(self):
    """Populate the cache with one entry per row of the underlying
    `astropy.table.Table`, keyed by each job's full key."""
    n_rows = len(self._table)
    for row_index in range(n_rows):
        details = self.make_job_details(row_index)
        self._cache[details.fullkey] = details
|
java
|
/**
 * Maps an HTTP status code to a localized error message.
 *
 * For 4xx codes, 402, 418-420 and anything above 424 fall back to the
 * generic 400 message (presumably codes with no dedicated text -- confirm
 * against ErrorMessage's resource bundle); other codes below 500 get their
 * own message. For 5xx, only 501 has a dedicated message. Anything still
 * unresolved falls back to the default localized message.
 */
public static String parseMessage(int status) {
    String msg = null;
    if (status < 500) {
        // 402, 418-420 and >424: use the generic 400 text.
        if (status == 402
                || (status > 417 && status < 421)
                || status > 424) {
            msg = ErrorMessage.getLocaleMessage(400);
        } else {
            msg = ErrorMessage.getLocaleMessage(status);
        }
    } else {
        switch (status) {
            case 501:
                msg = ErrorMessage.getLocaleMessage(status);
                break;
        }
    }
    // Fall back to the default message when nothing matched above (or the
    // lookup itself returned null).
    if (msg == null) {
        msg = ErrorMessage.getLocaleMessage();
    }
    return msg;
}
|
python
|
def update_currency_by_id(cls, currency_id, currency, **kwargs):
    """Update attributes of an existing Currency.

    Synchronous by default. To make an asynchronous HTTP request, pass
    ``async=True`` through ``**kwargs``:

    >>> thread = api.update_currency_by_id(currency_id, currency, async=True)
    >>> result = thread.get()

    :param async bool
    :param str currency_id: ID of currency to update. (required)
    :param Currency currency: Attributes of currency to update. (required)
    :return: Currency, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same helper; when
    # async is requested the helper returns the request thread directly.
    return cls._update_currency_by_id_with_http_info(
        currency_id, currency, **kwargs)
|
java
|
private Object getScriptResult(String script, ScriptEngine engine) throws ScriptException {
    // Everything after the first ':' is the actual script body.
    final String body = script.substring(script.indexOf(":") + 1);
    return engine.eval(body);
}
|
java
|
/**
 * Removes duplicate tautomers, keeping the first occurrence of each.
 *
 * Canonical SMILES is used as the identity of a structure: two tautomers
 * with the same canonical SMILES are considered duplicates.
 *
 * @param tautomers candidate tautomers, possibly with duplicates
 * @return the tautomers in original order with duplicates dropped
 * @throws CDKException if canonical SMILES generation fails
 */
private List<IAtomContainer> removeDuplicates(List<IAtomContainer> tautomers) throws CDKException {
    Set<String> cansmis = new HashSet<>();
    List<IAtomContainer> result = new ArrayList<>();
    for (IAtomContainer tautomer : tautomers) {
        // Set.add returns false for an already-seen SMILES, so each
        // distinct structure is kept exactly once.
        if (cansmis.add(CANSMI.create(tautomer)))
            result.add(tautomer);
    }
    // Bug fix: the message says "after clean up" but previously logged the
    // *input* size; report the deduplicated count instead.
    LOGGER.debug("# tautomers after clean up : ", result.size());
    return result;
}
|
java
|
/**
 * Parses a class declaration whose modifiers have already been consumed:
 *
 *   "class" Ident TypeParametersOpt ["extends" Type]
 *   ["implements" TypeList] ClassBody
 *
 * @param mods the already-parsed modifiers (and annotations)
 * @param dc   the documentation comment to attach, or null
 * @return the completed class-declaration AST node
 */
protected JCClassDecl classDeclaration(JCModifiers mods, Comment dc) {
    // Remember the start position before consuming the CLASS token.
    int pos = token.pos;
    accept(CLASS);
    Name name = ident();
    List<JCTypeParameter> typarams = typeParametersOpt();
    JCExpression extending = null;
    if (token.kind == EXTENDS) {
        nextToken();
        extending = parseType();
    }
    List<JCExpression> implementing = List.nil();
    if (token.kind == IMPLEMENTS) {
        nextToken();
        implementing = typeList();
    }
    List<JCTree> defs = classOrInterfaceBody(name, false);
    JCClassDecl result = toP(F.at(pos).ClassDef(
        mods, name, typarams, extending, implementing, defs));
    // Attach the doc comment to the finished node.
    attach(result, dc);
    return result;
}
|
java
|
/**
 * Inserts {@code element} at {@code index}, shifting subsequent elements
 * one slot to the right.
 *
 * @param index   insertion position (validated by checkIndexForAdd)
 * @param element the value to insert
 */
public void addInt(
    @DoNotSub final int index,
    final int element)
{
    checkIndexForAdd(index);
    // Grow the backing array first if the extra slot would overflow it.
    @DoNotSub final int requiredSize = size + 1;
    ensureCapacityPrivate(requiredSize);
    if (index < size)
    {
        // Open a gap at `index` by shifting the tail right by one.
        System.arraycopy(elements, index, elements, index + 1, size - index);
    }
    elements[index] = element;
    size++;
}
|
java
|
/**
 * Computes a p-value for paired observations from two groups.
 *
 * The procedure -- rank the absolute pairwise differences, sum the ranks
 * of the positive differences into W, convert W to a p-value -- matches
 * the Wilcoxon signed-rank test (NOTE(review): confirm against
 * scoreToPvalue's definition).
 *
 * @param transposeDataList exactly two equally-sized groups of paired
 *                          observations
 * @return the p-value
 * @throws IllegalArgumentException if there are not exactly two groups,
 *         or the groups are empty or differ in size
 */
public static double getPvalue(TransposeDataList transposeDataList) {
    Object[] keys = transposeDataList.keySet().toArray();
    if(keys.length!=2) {
        throw new IllegalArgumentException("The collection must contain observations from 2 groups.");
    }
    Object keyX = keys[0];
    Object keyY = keys[1];
    FlatDataList flatDataListX = transposeDataList.get(keyX);
    FlatDataList flatDataListY = transposeDataList.get(keyY);
    int n = flatDataListX.size();
    if(n<=0 || n!=flatDataListY.size()) {
        throw new IllegalArgumentException("The number of observations in each group must be equal and larger than 0.");
    }
    AssociativeArray Di = new AssociativeArray();
    for(int j=0;j<n;++j) {
        double delta= flatDataListX.getDouble(j) - flatDataListY.getDouble(j);
        if(delta==0) {
            continue; //don't count it at all
        }
        // Key encodes the sign of the difference plus the pair index.
        String key="+";
        if(delta<0) {
            key="-";
        }
        Di.put(key+Integer.toString(j), Math.abs(delta));
    }
    //converts the values of the table with its Ranks
    Ranks.getRanksFromValues(Di);
    // W = sum of the ranks belonging to positive differences.
    double W=0;
    for(Map.Entry<Object, Object> entry : Di.entrySet()) {
        if(entry.getKey().toString().charAt(0)=='+') {
            W+=TypeInference.toDouble(entry.getValue());
        }
    }
    // NOTE(review): n still counts the zero-difference pairs skipped above;
    // confirm scoreToPvalue expects the unadjusted sample size.
    double pvalue = scoreToPvalue(W, n);
    return pvalue;
}
|
java
|
/**
 * Reblogs a post and returns the freshly created reblog post.
 *
 * @param blogName  the blog to reblog into
 * @param postId    the id of the post being reblogged (must be non-null)
 * @param reblogKey the post's reblog key
 * @param options   optional request parameters; may be null
 * @return the new post, fetched back from the blog by its id
 */
public Post postReblog(String blogName, Long postId, String reblogKey, Map<String, ?> options) {
    if (options == null) {
        options = new HashMap<String, String>();
    }
    // Copy into a sanitized, mutable map before adding the required fields.
    Map<String, Object> soptions = JumblrClient.safeOptionMap(options);
    soptions.put("id", postId.toString());
    soptions.put("reblog_key", reblogKey);
    final Long reblogId = requestBuilder.post(JumblrClient.blogPath(blogName, "/post/reblog"), soptions).getId();
    // Re-fetch so the caller gets a fully populated Post object.
    return this.blogPost(blogName, reblogId);
}
|
java
|
/**
 * Waits for the start signal, then performs the download (optionally in
 * parallel) and returns the destination file. When a timeout is
 * configured, a watchdog task aborts the transfer if it has not completed
 * when the timeout elapses.
 *
 * @return the destination file of the download
 * @throws Exception any failure; partial state is cleaned up first
 */
@Override
public File call() throws Exception {
    try {
        // Block until the caller signals that the download may begin.
        latch.await();
        if (isTimeoutEnabled()) {
            timedExecutor.schedule(new Runnable() {
                public void run() {
                    try {
                        if (abortableDownload.getState() != Transfer.TransferState.Completed) {
                            abortableDownload.abort();
                        }
                    } catch (Exception e) {
                        // NOTE(review): thrown inside the scheduled task, so
                        // this reaches the executor, not the caller of call().
                        throw new SdkClientException(
                            "Unable to abort download after timeout", e);
                    }
                }
            }, timeout, TimeUnit.MILLISECONDS);
        }
        setState(Transfer.TransferState.InProgress);
        createParentDirectoryIfNecessary(dstfile);
        if (isDownloadParallel) {
            downloadInParallel();
        } else {
            downloadAsSingleObject();
        }
        return dstfile;
    } catch (Throwable t) {
        // Remove partial artifacts before propagating.
        cleanupAfterException();
        // Rethrow preserving the original type; anything that is not an
        // Exception must be an Error.
        if (t instanceof Exception) {
            throw (Exception) t;
        } else {
            throw (Error) t;
        }
    }
}
|
python
|
def _convert(self, datetime=False, numeric=False, timedelta=False,
             coerce=False, copy=True):
    """
    Attempt to infer better dtype for object columns.

    Parameters
    ----------
    datetime : boolean, default False
        If True, convert to date where possible.
    numeric : boolean, default False
        If True, attempt to convert to numbers (including strings), with
        unconvertible values becoming NaN.
    timedelta : boolean, default False
        If True, convert to timedelta where possible.
    coerce : boolean, default False
        If True, force conversion with unconvertible values converted to
        nulls (NaN or NaT).
    copy : boolean, default True
        If True, return a copy even if no copy is necessary (e.g. no
        conversion was done). Note: This is meant for internal use, and
        should not be confused with inplace.

    Returns
    -------
    converted : same as input object
    """
    # Delegate the dtype inference to the block manager, then rebuild an
    # object of the same type carrying over this object's metadata.
    converted = self._data.convert(
        datetime=datetime,
        numeric=numeric,
        timedelta=timedelta,
        coerce=coerce,
        copy=copy,
    )
    return self._constructor(converted).__finalize__(self)
|
java
|
/**
 * Replaces the element at {@code index}, resolving the new element's
 * state first and delegating to the three-argument overload.
 */
@Override
public Object set(int index, Object obj) {
    return set(index, obj, isState(obj));
}
|
python
|
def load_nddata(self, ndd, naxispath=None):
    """Load image data and metadata from an astropy.nddata.NDData object."""
    self.clear_metadata()
    # Fold any NDData metadata into a fresh header.
    header = self.get_header()
    header.update(ndd.meta)
    self.setup_data(ndd.data, naxispath=naxispath)
    if ndd.wcs is not None:
        # The NDData already carries a WCS; wrap it with the
        # astropy-compatible wrapper class.
        wcsinfo = wcsmod.get_wcs_class('astropy')
        self.wcs = wcsinfo.wrapper_class(logger=self.logger)
        self.wcs.load_nddata(ndd)
    else:
        # No WCS on the NDData -- try to derive one from the header.
        self.wcs = wcsmod.WCS(logger=self.logger)
        self.wcs.load_header(header)
|
python
|
def wait_for_setting_value(self, section, setting, value, wait_time=5.0):
    """Poll for up to `wait_time` seconds until the most recent
    SBP_MSG_SETTINGS_READ_RESP entry for (section, setting) carries the
    requested value. Returns True on a match, False on timeout.
    """
    deadline = time.time() + wait_time
    matched = False
    while not matched and time.time() < deadline:
        candidates = [entry for entry in self.settings
                      if (entry[0], entry[1]) == (section, setting)]
        if candidates:
            # Only the most recent reading counts.
            matched = candidates[-1][2] == value
        time.sleep(0.1)
    return matched
|
python
|
def display_bounding_boxes(img, blocks, alternatecolors=False, color=Color("blue")):
    """
    Draw one rectangle per block onto `img` and display the result.

    Each block's last four entries are interpreted as (top, left, bottom,
    right) coordinates. When `alternatecolors` is set, every rectangle
    gets its own random stroke color instead of the single `color`.
    """
    drawing = Drawing()
    drawing.fill_color = Color("rgba(0, 0, 0, 0)")
    drawing.stroke_color = color
    for block in blocks:
        top, left, bottom, right = block[-4:]
        if alternatecolors:
            channels = [str(np.random.randint(255)) for _ in range(3)]
            drawing.stroke_color = Color(
                "rgba({},{},{}, 1)".format(*channels)
            )
        drawing.rectangle(
            left=float(left), top=float(top),
            right=float(right), bottom=float(bottom),
        )
    drawing(img)
    display(img)
|
python
|
def sanitize_ssl(self):
    """Replace sslverify=True with a concrete certificate bundle path.

    Preference order: system certificate file, then certifi's bundle,
    then the bundled cacert.pem. If none is available, the value stays
    True (library default verification)."""
    if self["sslverify"] is not True:
        return
    try:
        self["sslverify"] = get_system_cert_file()
        return
    except ValueError:
        pass
    try:
        self["sslverify"] = get_certifi_file()
        return
    except (ValueError, ImportError):
        pass
    try:
        self["sslverify"] = get_share_file('cacert.pem')
    except ValueError:
        pass
|
java
|
/**
 * Applies a "look along" rotation for the given view direction and up
 * vector, delegating to the overload that writes into a destination
 * matrix (thisOrNew() -- presumably this matrix or a freshly allocated
 * one, per the class's convention; confirm).
 */
public Matrix4f lookAlong(float dirX, float dirY, float dirZ,
    float upX, float upY, float upZ) {
    return lookAlong(dirX, dirY, dirZ, upX, upY, upZ, thisOrNew());
}
|
java
|
/**
 * Builds an iterator over the occurrence dates of this recurrence.
 *
 * @param startDate the date the recurrence starts from
 * @param timezone  the timezone used to resolve the occurrences
 * @return an iterator over the computed dates
 */
public DateIterator getDateIterator(ICalDate startDate, TimeZone timezone) {
    RecurrenceIterator iterator = Google2445Utils.createRecurrenceIterator(this, startDate, timezone);
    return DateIteratorFactory.createDateIterator(iterator);
}
|
python
|
def delete_resource(self, resource_id):
    """Delete the resource with the given ID and persist the change.

    NOTE(review): the previous docstring ("Link a resource to an
    individual.") did not match the behavior -- the method deletes.
    """
    resource_obj = self.resource(resource_id)
    logger.debug("Deleting resource {0}".format(resource_obj.name))
    self.session.delete(resource_obj)
    self.save()
|
python
|
def all_units_idle(self):
    """Return True if every unit's current agent-status is 'idle'."""
    return all(
        unit.data['agent-status']['current'] == 'idle'
        for unit in self.units.values()
    )
|
java
|
/**
 * Shuts the writer down: clears the running flag, wakes any thread
 * blocked on {@code lock}, then closes the output stream and -- always,
 * even if that close throws -- the client.
 *
 * @throws Exception if closing the stream or the client fails
 */
@Override
public void close() throws Exception {
    // flag this as not running any more
    isRunning = false;
    // clean up in locked scope, so there is no concurrent change to the stream and client
    synchronized (lock) {
        // we notify first (this statement cannot fail). The notified thread will not continue
        // anyways before it can re-acquire the lock
        lock.notifyAll();
        try {
            if (outputStream != null) {
                outputStream.close();
            }
        }
        finally {
            // Close the client even when closing the stream threw.
            if (client != null) {
                client.close();
            }
        }
    }
}
|
java
|
/**
 * Returns the number of accumulators registered for the given map, or
 * zero when the map has no accumulator registry at all.
 */
public int accumulatorInfoCountOfMap(String mapName) {
    ConcurrentMap<String, AccumulatorInfo> infoByName = cacheInfoPerMap.get(mapName);
    return infoByName == null ? 0 : infoByName.size();
}
|
java
|
/**
 * Returns the largest int less than or equal to the algebraic quotient
 * x / y (floor division, as opposed to Java's truncating division).
 */
public static int floorDiv(int x, int y) {
    // Truncating division first, then correct toward negative infinity
    // when the operands have opposite signs and the division was inexact.
    final int quotient = x / y;
    final boolean oppositeSigns = (x ^ y) < 0;
    final boolean inexact = quotient * y != x;
    return (oppositeSigns && inexact) ? quotient - 1 : quotient;
}
|
java
|
/**
 * Sets the component data while tracking whether the supplied value is a
 * syntactically valid partial date string.
 *
 * Null and empty values are treated as "no date" and stored as valid.
 * Invalid strings are kept verbatim in the model (so the user's raw input
 * is not lost) and the model is flagged invalid.
 *
 * @param data the new data; converted via toString() when non-null
 */
@Override
public void setData(final Object data) {
    // This override is necessary to maintain other internal state
    String value = data == null ? null : data.toString();
    // Empty date is treated as null
    if (Util.empty(value)) {
        value = null;
    }
    // Check valid format
    PartialDateFieldModel model = getOrCreateComponentModel();
    if (value == null || isValidPartialDateStringFormat(value, getPaddingChar())) {
        // Valid: no raw text to preserve, store the normalized value.
        model.text = null;
        model.validDate = true;
        super.setData(value);
    } else {
        // Invalid: remember the raw text and store the original object.
        model.text = value;
        model.validDate = false;
        super.setData(data);
    }
}
|
python
|
def masters(self):
    """Get an iterator over master UFOs that match the given family_name.

    Yields the cached source fonts when they have already been built;
    otherwise builds the UFOs (master layers first, then supplementary
    brace/bracket/background layers), applies anchors, lib keys, the
    skip-export list, features, groups and kerning, and finally yields
    each built font.
    """
    # Fast path: sources were already built on a previous call.
    if self._sources:
        for source in self._sources.values():
            yield source.font
        return
    # Store set of actually existing master (layer) ids. This helps with
    # catching dangling layer data that Glyphs may ignore, e.g. when
    # copying glyphs from other fonts with, naturally, different master
    # ids. Note: Masters have unique ids according to the Glyphs
    # documentation and can therefore be stored in a set.
    master_layer_ids = {m.id for m in self.font.masters}
    # stores background data from "associated layers"
    supplementary_layer_data = []
    # TODO(jamesgk) maybe create one font at a time to reduce memory usage
    # TODO: (jany) in the future, return a lazy iterator that builds UFOs
    # on demand.
    self.to_ufo_font_attributes(self.family_name)
    # Generate the main (master) layers first.
    for glyph in self.font.glyphs:
        for layer in glyph.layers.values():
            if layer.associatedMasterId != layer.layerId:
                # The layer is not the main layer of a master
                # Store all layers, even the invalid ones, and just skip
                # them and print a warning below.
                supplementary_layer_data.append((glyph, layer))
                continue
            ufo_layer = self.to_ufo_layer(glyph, layer)
            ufo_glyph = ufo_layer.newGlyph(glyph.name)
            self.to_ufo_glyph(ufo_glyph, layer, glyph)
    # And sublayers (brace, bracket, ...) second.
    for glyph, layer in supplementary_layer_data:
        # Skip layers that reference no existing master at all (dangling
        # data left behind by cross-font glyph copies).
        if (
            layer.layerId not in master_layer_ids
            and layer.associatedMasterId not in master_layer_ids
        ):
            if self.minimize_glyphs_diffs:
                self.logger.warning(
                    '{}, glyph "{}": Layer "{}" is dangling and will be '
                    "skipped. Did you copy a glyph from a different font?"
                    " If so, you should clean up any phantom layers not "
                    "associated with an actual master.".format(
                        self.font.familyName, glyph.name, layer.layerId
                    )
                )
            continue
        if not layer.name:
            # Empty layer names are invalid according to the UFO spec.
            if self.minimize_glyphs_diffs:
                self.logger.warning(
                    '{}, glyph "{}": Contains layer without a name which '
                    "will be skipped.".format(self.font.familyName, glyph.name)
                )
            continue
        # Save processing bracket layers for when designspace() is called, as we
        # have to extract them to free-standing glyphs.
        if (
            "[" in layer.name
            and "]" in layer.name
            and ".background" not in layer.name
        ):
            self.bracket_layers.append(layer)
        else:
            ufo_layer = self.to_ufo_layer(glyph, layer)
            ufo_glyph = ufo_layer.newGlyph(glyph.name)
            self.to_ufo_glyph(ufo_glyph, layer, layer.parent)
    # Post-process each built UFO: anchors and per-layer lib data.
    for source in self._sources.values():
        ufo = source.font
        if self.propagate_anchors:
            self.to_ufo_propagate_font_anchors(ufo)
        for layer in ufo.layers:
            self.to_ufo_layer_lib(layer)
    # Sanitize skip list and write it to both Designspace- and UFO-level lib keys.
    # The latter is unnecessary when using e.g. the `ufo2ft.compile*FromDS`
    # functions, but the data may take a different path. Writing it everywhere can
    # save on surprises/logic in other software.
    skip_export_glyphs = self._designspace.lib.get("public.skipExportGlyphs")
    if skip_export_glyphs is not None:
        skip_export_glyphs = sorted(set(skip_export_glyphs))
        self._designspace.lib["public.skipExportGlyphs"] = skip_export_glyphs
        for source in self._sources.values():
            source.font.lib["public.skipExportGlyphs"] = skip_export_glyphs
    self.to_ufo_features()  # This depends on the glyphOrder key
    self.to_ufo_groups()
    self.to_ufo_kerning()
    for source in self._sources.values():
        yield source.font
|
python
|
def entry(argv):
    '''
    Command entry point: parse -h / -i options and dispatch to the
    matching command handler.
    '''
    command_dic = {
        'init': run_init,
    }
    try:
        # "-h" takes no argument; "-i" requires one (the trailing ':').
        opts, args = getopt.getopt(argv, "hi:")
    except getopt.GetoptError:
        print('Error: helper.py -i cmd')
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print('helper.py -i cmd')
            print('cmd list ----------------------')
            print('    init: ')
            sys.exit()
        elif opt == "-i":
            handler = command_dic.get(arg)
            if handler is not None:
                handler(args)
                print('QED!')
            else:
                print('Wrong Command.')
|
java
|
/**
 * Copies every entry of the given properties map into this one,
 * overwriting existing keys.
 *
 * @param properties the entries to merge in (must be non-null)
 * @throws PropertiesException when this map is locked
 */
public void merge(Properties properties) throws PropertiesException {
    if(isLocked()) {
        throw new PropertiesException("properties map is locked, its contents cannot be altered.");
    }
    // NOTE(review): assert only fires with -ea enabled; consider an explicit
    // null check if null input is actually possible here.
    assert properties != null;
    for(Entry<String, String> entry : properties.entrySet()) {
        this.put(entry.getKey(), entry.getValue());
    }
}
|
python
|
def resolve_data_objects(objects, project=None, folder=None, batchsize=1000):
    """
    :param objects: Data object specifications, each with fields "name"
                    (required), "folder", and "project"
    :type objects: list of dictionaries
    :param project: ID of project context; a data object's project defaults
                    to this if not specified for that object
    :type project: string
    :param folder: Folder path within the project; a data object's folder
                   path defaults to this if not specified for that object
    :type folder: string
    :param batchsize: Number of objects to resolve in each batch call to
                      system_resolve_data_objects; defaults to 1000 and is
                      only used for testing (must be a positive integer not
                      exceeding 1000)
    :type batchsize: int
    :returns: List of results parallel to input objects, where each
              entry is a list containing 0 or more dicts, each corresponding
              to a resolved object
    :rtype: List of lists of dictionaries

    Each returned element is a list of dictionaries with keys "project" and
    "id". The number of dictionaries for each element may be 0, 1, or more.
    """
    if not isinstance(batchsize, int) or batchsize <= 0 or batchsize > 1000:
        raise ValueError("batchsize for resolve_data_objects must be a positive integer not exceeding 1000")
    args = {}
    if project:
        args.update({'project': project})
    if folder:
        args.update({'folder': folder})
    results = []
    # Call API method /system/resolveDataObjects in groups of size batchsize.
    # Note: `args` is reused across iterations; only the 'objects' slice is
    # replaced on each batch.
    for i in range(0, len(objects), batchsize):
        args.update({'objects': objects[i:(i+batchsize)]})
        results.extend(dxpy.api.system_resolve_data_objects(args)['results'])
    return results
|
java
|
/**
 * Prints this throwable's stack trace to the given stream.
 *
 * On pre-1.4 JVMs there is no built-in exception chaining, so when a
 * cause was recorded manually we print it ourselves via printStackTrace0;
 * otherwise we defer to the standard implementation.
 */
public void printStackTrace(PrintStream s) {
    if (!isJDK14OrAbove && causeOnJDK13OrBelow != null) {
        printStackTrace0(new PrintWriter(s));
    }
    else {
        super.printStackTrace(s);
    }
}
|
java
|
/**
 * Builds the annotation-type summary table for the package documentation.
 *
 * Collects the package's annotation types (from the package itself when
 * it is included in the documented set, otherwise from the class-doc
 * catalog), filters out private ones, and -- when any remain -- adds a
 * summary table to the given content tree.
 *
 * @param node the XML element (not referenced in this method's body)
 * @param summaryContentTree the tree to which the summary is added
 */
public void buildAnnotationTypeSummary(XMLNode node, Content summaryContentTree) {
    String annotationtypeTableSummary =
        configuration.getText("doclet.Member_Table_Summary",
            configuration.getText("doclet.Annotation_Types_Summary"),
            configuration.getText("doclet.annotationtypes"));
    String[] annotationtypeTableHeader = new String[] {
        configuration.getText("doclet.AnnotationType"),
        configuration.getText("doclet.Description")
    };
    // Included packages expose their annotation types directly; otherwise
    // fall back to the configuration's class-doc catalog.
    ClassDoc[] annotationTypes =
        packageDoc.isIncluded()
            ? packageDoc.annotationTypes()
            : configuration.classDocCatalog.annotationTypes(
                Util.getPackageName(packageDoc));
    annotationTypes = Util.filterOutPrivateClasses(annotationTypes, configuration.javafx);
    if (annotationTypes.length > 0) {
        packageWriter.addClassesSummary(
            annotationTypes,
            configuration.getText("doclet.Annotation_Types_Summary"),
            annotationtypeTableSummary, annotationtypeTableHeader,
            summaryContentTree);
    }
}
|
python
|
def unlocked(self):
    """``True`` if achievement is unlocked.

    :rtype: bool
    """
    flag = CRef.cbool()
    # The interface writes the achieved state into `flag` and reports
    # whether the query itself succeeded.
    if not self._iface.get_ach(self.name, flag):
        return False
    return bool(flag)
|
java
|
/**
 * Creates the way-node join table and returns a prepared INSERT for it.
 *
 * Schema: (ID_WAY BIGINT, ID_NODE BIGINT, NODE_ORDER INT).
 *
 * @param connection       the database connection
 * @param wayNodeTableName the table to create; interpolated unescaped into
 *                         the SQL, so it must be a trusted identifier
 * @return a prepared statement inserting (id_way, id_node, node_order)
 * @throws SQLException on DDL or prepare failure
 */
public static PreparedStatement createWayNodeTable(Connection connection, String wayNodeTableName) throws SQLException{
    try (Statement stmt = connection.createStatement()) {
        StringBuilder sb = new StringBuilder("CREATE TABLE ");
        sb.append(wayNodeTableName);
        sb.append("(ID_WAY BIGINT, ID_NODE BIGINT, NODE_ORDER INT);");
        stmt.execute(sb.toString());
    }
    return connection.prepareStatement("INSERT INTO " + wayNodeTableName + " VALUES ( ?, ?,?);");
}
|
python
|
def _make_ip_subnet_lookup(vpc_info):
    """
    Updates the vpc-info object with a lookup for IP -> subnet.
    """
    # Reverse lookup from each EC2 instance's private IP addresses to the
    # subnet its interface lives in. Later code uses this to decide whether
    # a route table's subnet is associated with ANY of the IPs in the route
    # spec; see make_cluster_node_subnet_list() for how the cluster-only
    # subset is derived from this full-VPC mapping.
    vpc_info['ip_subnet_lookup'] = {
        addr.private_ip_address: interface.subnet_id
        for instance in vpc_info['instances']
        for interface in instance.interfaces
        for addr in interface.private_ip_addresses
    }
|
java
|
/**
 * Parses the response content as the given type using the request's
 * configured parser.
 *
 * @param dataType the target type to deserialize into
 * @return the parsed object, or null when the response has no body
 * @throws IOException on read or parse failure
 */
public Object parseAs(Type dataType) throws IOException {
    if (!hasMessageBody()) {
        return null;
    }
    // parseAndClose presumably closes the content stream when done (per
    // its name) -- confirm against the parser's contract.
    return request.getParser().parseAndClose(getContent(), getContentCharset(), dataType);
}
|
java
|
/**
 * Installs a new code buffer, retiring the attribute tables derived from
 * the previous bytecode.
 *
 * The old line-number, local-variable and stack-map tables are kept in
 * the mOld* fields (presumably so later code can translate or rebuild
 * them -- confirm), removed from the attribute list, and the current
 * references are cleared so fresh tables can be created lazily.
 */
public void setCodeBuffer(CodeBuffer code) {
    mCodeBuffer = code;
    mOldLineNumberTable = mLineNumberTable;
    mOldLocalVariableTable = mLocalVariableTable;
    mOldStackMapTable = mStackMapTable;
    mAttributes.remove(mLineNumberTable);
    mAttributes.remove(mLocalVariableTable);
    mAttributes.remove(mStackMapTable);
    mLineNumberTable = null;
    mLocalVariableTable = null;
    mStackMapTable = null;
}
|
python
|
def _get_linear_lookup_table_and_weight(nbits, wp):
    """
    Generate a linear lookup table.

    :param nbits: int
        Number of bits to represent a quantized weight value.
    :param wp: numpy.array
        Weight blob to be quantized.

    Returns
    -------
    lookup_table: numpy.array
        Lookup table of shape (2^nbits, ).
    qw: numpy.array
        Quantized values returned by _quantize_channelwise_linear.
        NOTE(review): the previous docstring described this as a decomposed
        bit stream of length len(arr) * 8 -- that text did not match this
        function; confirm against the helper's contract.
    """
    # Flatten to a single row so the whole blob is quantized as one channel.
    w = wp.reshape(1, -1)
    qw, scales, biases = _quantize_channelwise_linear(w, nbits, axis=0)
    # Map each of the 2**nbits quantized indices back to a real value using
    # the single channel's scale/bias. (np.arange replaces the previous
    # np.array(range(...)) round-trip through a Python list.)
    indices = _np.arange(2 ** nbits)
    lookup_table = indices * scales[0] + biases[0]
    return lookup_table, qw
|
python
|
def get_tcntobj(go2obj, **kws):
    """Return a TermCounts object if the user provides an annotation file, otherwise None."""
    # Recognized kwargs select the annotation source (e.g. gaf, gene2go).
    annots = read_annotations(**kws)
    return TermCounts(go2obj, annots) if annots else None
|
python
|
def fit_freq_min_max(self, training_signal):
    """Define a spectral mask from training data using the min and max
    observed magnitude of each frequency component.

    Args:
        training_signal: Training data
    """
    win_len = len(self.window)
    win_weight = sum(self.window)
    n_bins = int(win_len / 2) + 1
    max_mask = np.zeros(n_bins)
    min_mask = np.zeros(n_bins)
    # Slide the window one sample at a time over the training signal and
    # track the per-bin extrema of the normalized magnitude spectrum.
    for start in range(0, len(training_signal) - win_len - 1):
        spectrum = np.fft.rfft(training_signal[start:start + win_len] * self.window)
        magnitude = np.abs(spectrum) / win_weight
        max_mask = np.maximum(max_mask, magnitude)
        min_mask = np.minimum(min_mask, magnitude)
    # Widen the envelope by the configured gain.
    self.mask_top = self.gain * max_mask
    self.mask_bottom = min_mask / self.gain
|
java
|
/**
 * Fetches account usage metrics, validating required parameters first.
 *
 * @param resourceGroupName the resource group of the account (required)
 * @param accountName       the account name (required)
 * @param filter            optional filter passed through to the service
 * @return an observable emitting the deserialized service response
 * @throws IllegalArgumentException when any required parameter (including
 *         the client's subscription id and api version) is null
 */
public Observable<ServiceResponse<UsagesResultInner>> getUsagesWithServiceResponseAsync(String resourceGroupName, String accountName, String filter) {
    if (resourceGroupName == null) {
        throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
    }
    if (accountName == null) {
        throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
    }
    if (this.client.subscriptionId() == null) {
        throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
    }
    if (this.client.apiVersion() == null) {
        throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
    }
    // Convert the raw REST response into a typed ServiceResponse, turning
    // deserialization failures into an error emission.
    return service.getUsages(resourceGroupName, accountName, this.client.subscriptionId(), this.client.apiVersion(), filter, this.client.acceptLanguage(), this.client.userAgent())
        .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<UsagesResultInner>>>() {
            @Override
            public Observable<ServiceResponse<UsagesResultInner>> call(Response<ResponseBody> response) {
                try {
                    ServiceResponse<UsagesResultInner> clientResponse = getUsagesDelegate(response);
                    return Observable.just(clientResponse);
                } catch (Throwable t) {
                    return Observable.error(t);
                }
            }
        });
}
|
python
|
def _create_figure(self):
    """
    Create Matplotlib figure and axes.

    Returns the (figure, axs) pair and also stores both on self.
    """
    # Good for development
    if get_option('close_all_figures'):
        plt.close('all')
    figure = plt.figure()
    # The facet decides how many axes the layout needs and where they go.
    axs = self.facet.make_axes(
        figure,
        self.layout.layout,
        self.coordinates)
    # Dictionary to collect matplotlib objects that will
    # be targeted for theming by the themeables
    figure._themeable = {}
    self.figure = figure
    self.axs = axs
    return figure, axs
|
python
|
def legal_node_coords():
    """
    Return all legal node coordinates on the grid
    """
    # Every node that touches any legal tile is a legal node.
    nodes = {
        node
        for tile_id in legal_tile_ids()
        for node in nodes_touching_tile(tile_id)
    }
    logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
    return nodes
|
python
|
def register_dependency(self, data_src, data_sink):
    """Register a dependency data_src -> data_sink by updating the
    sink task's depends_on list and the source task's provides_for
    list (skipping entries already present)."""
    pdebug("registering dependency %s -> %s" % (data_src, data_sink))
    sink_task = self._gettask(data_sink)
    src_task = self._gettask(data_src)
    if data_src not in sink_task.depends_on:
        sink_task.depends_on.append(data_src)
    if data_sink not in src_task.provides_for:
        src_task.provides_for.append(data_sink)
|
python
|
def set_flashing(self, duration, hsv1, hsv2):
    """Turn the bulb on, flashing with two colors.

    Alternates between hsv1 and hsv2 once per second for roughly
    `duration` seconds; each loop iteration covers a two-second pair.
    """
    self.set_transition_time(100)
    num_cycles = int(duration / 2)
    for _ in range(num_cycles):
        self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
        time.sleep(1)
        self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
        time.sleep(1)
|
java
|
/**
 * Asynchronously resolves a link for the given path via the UGC RPC
 * service.
 *
 * @param path     the path to resolve
 * @param callback invoked with the resolved link on success; RPC failures
 *                 are routed to the API root's error handler instead
 */
public void getLink(String path, final I_CmsStringCallback callback) {
    m_apiRoot.getRpcHelper().executeRpc(CmsXmlContentUgcApi.SERVICE.getLink(path, new AsyncCallback<String>() {
        @SuppressWarnings("synthetic-access")
        public void onFailure(Throwable caught) {
            m_apiRoot.handleError(caught, null);
        }
        public void onSuccess(String result) {
            callback.call(result);
        }
    }));
}
|
python
|
def set_name(self, name):
    """Set a client name."""
    # Normalize falsy values (None, '') to an empty string before storing.
    name = name or ''
    self._client['config']['name'] = name
    yield from self._server.client_name(self.identifier, name)
|
java
|
/**
 * Finds the accessor method for a field, trying the JavaBean naming
 * conventions in order: getX(), x(), isX().
 *
 * @param clazz the class to search
 * @param field the property name
 * @return the matching zero-argument method, or null if none exists
 */
public static Method getAccessor(Class<?> clazz, String field) {
    LOG.trace( "getAccessor({}, {})", clazz, field );
    final String[] candidates = {
        "get" + ucFirst( field ), field, "is" + ucFirst( field )
    };
    for ( String name : candidates ) {
        try {
            return clazz.getMethod( name );
        } catch ( NoSuchMethodException ignored ) {
            // fall through to the next naming convention
        }
    }
    return null;
}
|
python
|
def find_reactions_with_identical_genes(model):
    """
    Return reactions that have identical genes.

    Identify duplicate reactions globally by checking if any
    two reactions have the same genes.

    This can be useful to curate merged models or to clean-up bulk model
    modifications, but also to identify promiscuous enzymes.
    Reactions with missing genes are skipped.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    dict
        A mapping from comma-joined, sorted gene identifiers to the list
        of IDs of all reactions sharing exactly that gene set.
    int
        The total number of unique reactions that appear duplicates based
        on their gene-protein-reaction associations.
    """
    # Group reactions by their gene set in a single pass instead of comparing
    # every pair -- O(n) instead of O(n**2), with identical results. This
    # works because `genes` is a frozenset and therefore hashable.
    by_genes = {}
    for rxn in model.reactions:
        if not rxn.genes:
            # No gene association; duplication cannot be judged.
            continue
        by_genes.setdefault(rxn.genes, set()).add(rxn.id)
    # Transform the result for JSON compatibility: object keys must be
    # strings, so join the sorted gene identifiers.
    duplicated = {}
    num_duplicated = set()
    for genes, rxn_ids in by_genes.items():
        if len(rxn_ids) < 2:
            # Unique gene set -- not a duplicate.
            continue
        new_key = ",".join(sorted(g.id for g in genes))
        duplicated[new_key] = list(rxn_ids)
        num_duplicated.update(rxn_ids)
    return duplicated, len(num_duplicated)
|
python
|
def context_by_id(self, context_id, via_id=None, create=True, name=None):
    """
    Messy factory/lookup function to find a context by its ID, or construct
    it. This will eventually be replaced by a more sensible interface.
    """
    # Fast path: no lock needed for a plain read that hits.
    context = self._context_by_id.get(context_id)
    if context:
        return context
    # Resolve the "via" context before taking the lock; this may recurse.
    if create and via_id is not None:
        via = self.context_by_id(via_id)
    else:
        via = None
    self._write_lock.acquire()
    try:
        # Double-checked locking: another thread may have created the
        # context between the unlocked read above and acquiring the lock.
        context = self._context_by_id.get(context_id)
        if create and not context:
            context = self.context_class(self, context_id, name=name)
            context.via = via
            self._context_by_id[context_id] = context
    finally:
        self._write_lock.release()
    return context
|
java
|
/**
 * Builds a JVM method descriptor: '(' followed by each argument type's
 * descriptor, then ')' and the return type's descriptor.
 */
public static String getMethodDescriptor(final Type returnType, final Type... argumentTypes) {
    final StringBuilder descriptor = new StringBuilder("(");
    for (final Type argumentType : argumentTypes) {
        argumentType.appendDescriptor(descriptor);
    }
    descriptor.append(')');
    returnType.appendDescriptor(descriptor);
    return descriptor.toString();
}
|
java
|
/**
 * Deactivates the placement with the given id.
 *
 * Pages through all placements matching the id (printing each one), then
 * -- if any were found -- performs the DeactivatePlacements action with
 * the paging clauses removed and reports the number of changes.
 *
 * @param adManagerServices the services factory
 * @param session the authenticated Ad Manager session
 * @param placementId the id of the placement to deactivate
 * @throws RemoteException on API errors
 */
public static void runExample(
    AdManagerServices adManagerServices, AdManagerSession session, long placementId)
    throws RemoteException {
    // Get the PlacementService.
    PlacementServiceInterface placementService =
        adManagerServices.get(session, PlacementServiceInterface.class);
    // Create a statement to select a placement.
    StatementBuilder statementBuilder =
        new StatementBuilder()
            .where("WHERE id = :id")
            .orderBy("id ASC")
            .limit(StatementBuilder.SUGGESTED_PAGE_LIMIT)
            .withBindVariableValue("id", placementId);
    // Default for total result set size.
    int totalResultSetSize = 0;
    do {
        // Get placements by statement.
        PlacementPage page =
            placementService.getPlacementsByStatement(statementBuilder.toStatement());
        if (page.getResults() != null) {
            totalResultSetSize = page.getTotalResultSetSize();
            int i = page.getStartIndex();
            for (Placement placement : page.getResults()) {
                System.out.printf(
                    "%d) Placement with ID %d will be deactivated.%n", i++, placement.getId());
            }
        }
        statementBuilder.increaseOffsetBy(StatementBuilder.SUGGESTED_PAGE_LIMIT);
    } while (statementBuilder.getOffset() < totalResultSetSize);
    System.out.printf("Number of placements to be deactivated: %d%n", totalResultSetSize);
    if (totalResultSetSize > 0) {
        // Remove limit and offset from statement.
        statementBuilder.removeLimitAndOffset();
        // Create action.
        com.google.api.ads.admanager.axis.v201902.DeactivatePlacements action =
            new com.google.api.ads.admanager.axis.v201902.DeactivatePlacements();
        // Perform action.
        UpdateResult result =
            placementService.performPlacementAction(action, statementBuilder.toStatement());
        if (result != null && result.getNumChanges() > 0) {
            System.out.printf("Number of placements deactivated: %d%n", result.getNumChanges());
        } else {
            System.out.println("No placements were deactivated.");
        }
    }
}
|
java
|
/**
 * Replaces this object's tag list with a defensive copy of the given
 * collection; a null argument clears the tags entirely.
 */
public void setTags(java.util.Collection<TagRef> tags) {
    this.tags = (tags == null) ? null : new java.util.ArrayList<TagRef>(tags);
}
|
java
|
/**
 * Checks whether every one of the given addresses is a member.
 *
 * @param mbrs the addresses to test
 * @return true only if all addresses are contained; false when the
 *         argument array or the member list is null
 */
public boolean containsMembers(Address ... mbrs) {
    if(mbrs == null || members == null)
        return false;
    for(Address mbr: mbrs) {
        if(!containsMember(mbr))
            return false;
    }
    return true;
}
|
java
|
/**
 * Consumes the CRLF pair that terminates a chunk of a chunked stream.
 *
 * @throws IOException when the next two bytes read are not CR and LF;
 *         the message reports the two byte values actually read
 */
private void readCrlf() throws IOException {
    final int crsymbol = this.origin.read();
    final int lfsymbol = this.origin.read();
    if (crsymbol != '\r' || lfsymbol != '\n') {
        throw new IOException(
            String.format(
                "%s %d%s%d",
                "CRLF expected at end of chunk: ",
                crsymbol,
                "/",
                lfsymbol
            )
        );
    }
}
|
java
|
/**
 * Lists detector responses for a site deployment slot, driving the paged
 * API until exhaustion and reporting progress through the given callback.
 *
 * @param resourceGroupName the resource group of the site
 * @param siteName          the site name
 * @param slot              the deployment slot name
 * @param serviceCallback   receives each page of results as it arrives
 * @return a future tracking the whole paged listing
 */
public ServiceFuture<List<DetectorResponseInner>> listSiteDetectorResponsesSlotAsync(final String resourceGroupName, final String siteName, final String slot, final ListOperationCallback<DetectorResponseInner> serviceCallback) {
    return AzureServiceFuture.fromPageResponse(
        listSiteDetectorResponsesSlotSinglePageAsync(resourceGroupName, siteName, slot),
        // Given a next-page link, fetch the following page.
        new Func1<String, Observable<ServiceResponse<Page<DetectorResponseInner>>>>() {
            @Override
            public Observable<ServiceResponse<Page<DetectorResponseInner>>> call(String nextPageLink) {
                return listSiteDetectorResponsesSlotNextSinglePageAsync(nextPageLink);
            }
        },
        serviceCallback);
}
|
java
|
/**
 * Converts a boolean pattern into the equivalent boolean constant
 * expression, cloning the pattern's value token and propagating the
 * possible type information onto the freshly built expression.
 *
 * @param node the boolean pattern to convert
 * @return a new {@code ABooleanConstExp} mirroring the pattern
 * @throws AnalysisException propagated from the analysis framework
 */
public PExp caseABooleanPattern(ABooleanPattern node) throws AnalysisException
{
    ABooleanConstExp constant = new ABooleanConstExp();
    constant.setValue(node.getValue().clone());
    addPossibleType(constant, node);
    return constant;
}
|
java
|
/**
 * Resolves all endpoint references registered for the given service,
 * optionally filtered by the supplied matcher data.
 *
 * @param serviceName qualified name of the service to look up
 * @param matcherData optional property matcher data; may select a subset
 * @return one endpoint reference per registered address
 * @throws ServiceLocatorFault if the lookup fails or no endpoint is known
 * @throws InterruptedExceptionFault if the locator call is interrupted
 */
List<W3CEndpointReference> lookupEndpoints(QName serviceName,
        MatcherDataType matcherData) throws ServiceLocatorFault,
        InterruptedExceptionFault {
    SLPropertiesMatcher matcher = createMatcher(matcherData);
    List<W3CEndpointReference> endpoints = new ArrayList<W3CEndpointReference>();
    List<String> names;
    try {
        initLocator();
        names = (matcher == null)
                ? locatorClient.lookup(serviceName)
                : locatorClient.lookup(serviceName, matcher);
    } catch (ServiceLocatorException e) {
        ServiceLocatorFaultDetail detail = new ServiceLocatorFaultDetail();
        detail.setLocatorFaultDetail(serviceName.toString()
                + "throws ServiceLocatorFault");
        throw new ServiceLocatorFault(e.getMessage(), detail);
    } catch (InterruptedException e) {
        InterruptionFaultDetail detail = new InterruptionFaultDetail();
        detail.setInterruptionDetail(serviceName.toString()
                + "throws InterruptionFault");
        throw new InterruptedExceptionFault(e.getMessage(), detail);
    }
    if (names == null || names.isEmpty()) {
        if (LOG.isLoggable(Level.WARNING)) {
            LOG.log(Level.WARNING, "lookup Endpoints for " + serviceName
                    + " failed, service is not known.");
        }
        ServiceLocatorFaultDetail detail = new ServiceLocatorFaultDetail();
        detail.setLocatorFaultDetail("lookup Endpoint for "
                + serviceName + " failed, service is not known.");
        throw new ServiceLocatorFault("Can not find Endpoint", detail);
    }
    for (String address : names) {
        endpoints.add(buildEndpoint(serviceName, address));
    }
    return endpoints;
}
|
python
|
def _eval_wrapper(self, fit_key, q, chiA, chiB, **kwargs):
    """Evaluates the surfinBH7dq2 model.

    fit_key selects the quantity: 'mf' (remnant mass), 'chif' (remnant
    spin), 'vf' (kick velocity) or 'all'.  q is the mass ratio and
    chiA/chiB are 3-vector spins.  When the 'omega0' kwarg is None the
    spins are taken to be coorbital-frame spins at t=-100M from the peak;
    otherwise they are treated as spins at orbital frequency omega0 and
    PN-evolved to t=-100M first, and vector results are rotated back to
    the inertial frame.  Returns (value, error) for a single key, or
    (mf, chif, vf, mf_err, chif_err, vf_err) for 'all'.
    """
    chiA = np.array(chiA)
    chiB = np.array(chiB)
    # Warn/Exit if extrapolating
    allow_extrap = kwargs.pop('allow_extrap', False)
    self._check_param_limits(q, chiA, chiB, allow_extrap)
    # PN-evolution options; only used when omega0 is not None.
    omega0 = kwargs.pop('omega0', None)
    PN_approximant = kwargs.pop('PN_approximant', 'SpinTaylorT4')
    PN_dt = kwargs.pop('PN_dt', 0.1)
    PN_spin_order = kwargs.pop('PN_spin_order', 7)
    PN_phase_order = kwargs.pop('PN_phase_order', 7)
    omega_switch = kwargs.pop('omega_switch', 0.018)
    self._check_unused_kwargs(kwargs)
    if omega0 is None:
        # If omega0 is None, assume chiA, chiB are the coorbital frame
        # spins at t=-100 M.
        x = np.concatenate(([q], chiA, chiB))
    else:
        # If omega0 is given, evolve the spins from omega0
        # to t = -100 M from the peak.
        chiA_coorb_fitnode, chiB_coorb_fitnode, quat_fitnode, \
            orbphase_fitnode \
            = self._evolve_spins(q, chiA, chiB, omega0,
                PN_approximant, PN_dt, PN_spin_order,
                PN_phase_order, omega_switch)
        # x should contain coorbital frame spins at t=-100M
        x = np.concatenate(([q], chiA_coorb_fitnode, chiB_coorb_fitnode))
    def eval_vector_fit(x, fit_key):
        # Vector fits return (value, error) stacked along the last axis.
        res = self._evaluate_fits(x, fit_key)
        fit_val = res.T[0]
        fit_err = res.T[1]
        if omega0 is not None:
            # If spins were given in inertial frame at omega0,
            # transform vectors and errors back to the same frame.
            fit_val = utils.transform_vector_coorb_to_inertial(fit_val,
                orbphase_fitnode, quat_fitnode)
            fit_err = utils.transform_error_coorb_to_inertial(fit_val,
                fit_err, orbphase_fitnode, quat_fitnode)
        return fit_val, fit_err
    if fit_key == 'mf' or fit_key == 'all':
        mf, mf_err = self._evaluate_fits(x, 'mf')
        if fit_key == 'mf':
            return mf, mf_err
    if fit_key == 'chif' or fit_key == 'all':
        chif, chif_err = eval_vector_fit(x, 'chif')
        if fit_key == 'chif':
            return chif, chif_err
    if fit_key == 'vf' or fit_key == 'all':
        vf, vf_err = eval_vector_fit(x, 'vf')
        if fit_key == 'vf':
            return vf, vf_err
    if fit_key == 'all':
        return mf, chif, vf, mf_err, chif_err, vf_err
|
java
|
/**
 * Sets the connection URI on the underlying factory and returns this
 * options object for fluent chaining.
 *
 * @param uri connection URI, must not be null
 * @throws URISyntaxException if the URI is malformed
 * @throws NoSuchAlgorithmException if a TLS algorithm is unavailable
 * @throws KeyManagementException if TLS key setup fails
 */
public ConnectionOptions withUri(URI uri)
    throws URISyntaxException, NoSuchAlgorithmException, KeyManagementException
{
    factory.setUri(Assert.notNull(uri, "URI"));
    return this;
}
|
java
|
/**
 * Serializes the parts of an activation shared between the supported
 * configuration formats: bean-validation groups, bootstrap context,
 * config properties, transaction support, work manager, connection
 * definitions and admin objects.  Elements are written in schema order;
 * absent or empty sections are skipped entirely.
 *
 * @param a the activation to serialize
 * @param writer the XML stream writer to emit elements on
 * @throws Exception if writing any element fails
 */
protected void storeCommon(Activation a, XMLStreamWriter writer) throws Exception
{
   if (a.getBeanValidationGroups() != null && !a.getBeanValidationGroups().isEmpty())
   {
      writer.writeStartElement(CommonXML.ELEMENT_BEAN_VALIDATION_GROUPS);
      for (int i = 0; i < a.getBeanValidationGroups().size(); i++)
      {
         writer.writeStartElement(CommonXML.ELEMENT_BEAN_VALIDATION_GROUP);
         writer.writeCharacters(a.getValue(CommonXML.ELEMENT_BEAN_VALIDATION_GROUP, Integer.toString(i),
                                           a.getBeanValidationGroups().get(i)));
         writer.writeEndElement();
      }
      writer.writeEndElement();
   }
   if (a.getBootstrapContext() != null)
   {
      writer.writeStartElement(CommonXML.ELEMENT_BOOTSTRAP_CONTEXT);
      writer.writeCharacters(a.getValue(CommonXML.ELEMENT_BOOTSTRAP_CONTEXT, a.getBootstrapContext()));
      writer.writeEndElement();
   }
   if (a.getConfigProperties() != null && !a.getConfigProperties().isEmpty())
   {
      Iterator<Map.Entry<String, String>> it = a.getConfigProperties().entrySet().iterator();
      while (it.hasNext())
      {
         Map.Entry<String, String> entry = it.next();
         writer.writeStartElement(CommonXML.ELEMENT_CONFIG_PROPERTY);
         writer.writeAttribute(CommonXML.ATTRIBUTE_NAME, entry.getKey());
         writer.writeCharacters(a.getValue(CommonXML.ELEMENT_CONFIG_PROPERTY, entry.getKey(), entry.getValue()));
         writer.writeEndElement();
      }
   }
   if (a.getTransactionSupport() != null)
   {
      writer.writeStartElement(CommonXML.ELEMENT_TRANSACTION_SUPPORT);
      writer.writeCharacters(a.getValue(CommonXML.ELEMENT_TRANSACTION_SUPPORT,
                                        a.getTransactionSupport().toString()));
      writer.writeEndElement();
   }
   if (a.getWorkManager() != null)
      storeWorkManager(a.getWorkManager(), writer);
   if (a.getConnectionDefinitions() != null && !a.getConnectionDefinitions().isEmpty())
   {
      writer.writeStartElement(CommonXML.ELEMENT_CONNECTION_DEFINITIONS);
      for (ConnectionDefinition cd : a.getConnectionDefinitions())
      {
         storeConnectionDefinition(cd, writer);
      }
      writer.writeEndElement();
   }
   if (a.getAdminObjects() != null && !a.getAdminObjects().isEmpty())
   {
      writer.writeStartElement(CommonXML.ELEMENT_ADMIN_OBJECTS);
      for (AdminObject ao : a.getAdminObjects())
      {
         storeAdminObject(ao, writer);
      }
      writer.writeEndElement();
   }
}
|
java
|
/**
 * Creates a reader over a single SequenceFile shard.  The key and value
 * serialization class names are collected into a set first so that a
 * duplicate (same class used for both key and value) is passed only once.
 */
@Override
protected FileBasedReader<KV<K, V>> createSingleFileReader(PipelineOptions options) {
    Set<String> serializationNames = Sets.newHashSet(
        keySerializationClass.getName(),
        valueSerializationClass.getName()
    );
    return new SeqFileReader<>(this, keyClass, valueClass,
        serializationNames.toArray(new String[serializationNames.size()]));
}
|
python
|
def run(self):
    """Execute the process.

    Launches ``rope.base.oi.runmod`` in a child interpreter with
    PYTHONPATH set to the project's source and python-path folders, wired
    to this object's stdin/stdout.  When ``self.analyze_data`` is None the
    runmod bootstrap arguments are stripped so the target file runs
    directly.  Stores the Popen handle on ``self.process``.
    """
    env = dict(os.environ)
    file_path = self.file.real_path
    path_folders = self.pycore.project.get_source_folders() + \
        self.pycore.project.get_python_path_folders()
    env['PYTHONPATH'] = os.pathsep.join(folder.real_path
                                        for folder in path_folders)
    runmod_path = self.pycore.project.find_module('rope.base.oi.runmod').real_path
    self.receiver = None
    self._init_data_receiving()
    send_info = '-'
    if self.receiver:
        send_info = self.receiver.get_send_info()
    args = [sys.executable, runmod_path, send_info,
            self.pycore.project.address, self.file.real_path]
    if self.analyze_data is None:
        # No dynamic-analysis data wanted: drop runmod_path, send_info and
        # the project address so only [sys.executable, file] remain.
        del args[1:4]
    if self.args is not None:
        args.extend(self.args)
    # NOTE: stderr is deliberately merged into stdout.
    self.process = subprocess.Popen(
        executable=sys.executable, args=args, env=env,
        cwd=os.path.split(file_path)[0], stdin=self.stdin,
        stdout=self.stdout, stderr=self.stdout, close_fds=os.name != 'nt')
|
python
|
def get_page(pno, zoom = False, max_size = None, first = False):
    """Return a PNG image for a document page number.

    Reads the module-level globals ``doc`` (the open document) and
    ``dlist_tab`` (per-page display-list cache).

    :param pno: page number into ``doc``
    :param zoom: False to render the whole page; otherwise an indexable
        value (top-left point, x-step, y-step) selecting a half-size clip
        to render at 2x — assumed from usage, confirm against the caller
    :param max_size: optional (width, height) the image must fit into
    :param first: True on the very first call, when tkinter is not yet
        active, so raw PNG bytes are returned instead of a PhotoImage
    :return: tuple (image, top-left point of the rendered clip)
    """
    dlist = dlist_tab[pno]  # get display list of page number
    if not dlist:  # create if not yet there
        dlist_tab[pno] = doc[pno].getDisplayList()
        dlist = dlist_tab[pno]
    r = dlist.rect  # the page rectangle
    clip = r
    # ensure image fits screen:
    # exploit, but do not exceed width or height
    zoom_0 = 1
    if max_size:
        zoom_0 = min(1, max_size[0] / r.width, max_size[1] / r.height)
        # NOTE(review): when the page already fits (factor 1), this re-picks
        # the unclamped factor, allowing upscaling — confirm intended.
        if zoom_0 == 1:
            zoom_0 = min(max_size[0] / r.width, max_size[1] / r.height)
    mat_0 = fitz.Matrix(zoom_0, zoom_0)
    if not zoom:  # show total page
        pix = dlist.getPixmap(matrix = mat_0, alpha=False)
    else:
        mp = r.tl + (r.br - r.tl) * 0.5  # page rect center
        w2 = r.width / 2
        h2 = r.height / 2
        clip = r * 0.5
        tl = zoom[0]  # old top-left
        # NOTE(review): the caller's point object is mutated in place below —
        # confirm that is intended.
        tl.x += zoom[1] * (w2 / 2)
        tl.x = max(0, tl.x)
        tl.x = min(w2, tl.x)
        tl.y += zoom[2] * (h2 / 2)
        tl.y = max(0, tl.y)
        tl.y = min(h2, tl.y)
        clip = fitz.Rect(tl, tl.x + w2, tl.y + h2)
        mat = mat_0 * fitz.Matrix(2, 2)  # zoom matrix
        pix = dlist.getPixmap(alpha=False, matrix=mat, clip=clip)
    if first:  # first call: tkinter still inactive
        img = pix.getPNGData()  # so use fitz png output
    else:  # else take tk photo image
        pilimg = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
        img = ImageTk.PhotoImage(pilimg)
    return img, clip.tl
|
python
|
def balance(self, account):
    """Return the ether and ocean-token balance of an account.

    :param account: Account instance whose address is queried
    :return: Balance tuple of (eth, ocn)
    """
    eth_balance = self._keeper.get_ether_balance(account.address)
    ocn_balance = self._keeper.token.get_token_balance(account.address)
    return Balance(eth_balance, ocn_balance)
|
java
|
/**
 * GWT custom field serializer hook: delegates to the shared
 * {@code deserialize} helper for this axiom implementation.
 */
@Override
public void deserializeInstance(SerializationStreamReader streamReader, OWLEquivalentDataPropertiesAxiomImpl instance) throws SerializationException {
    deserialize(streamReader, instance);
}
|
java
|
/**
 * Marshalls the given renewal summary onto the protocol marshaller,
 * emitting the renewal status and domain validation options bindings.
 *
 * @param loadBalancerTlsCertificateRenewalSummary the object to marshall, must not be null
 * @param protocolMarshaller target marshaller
 * @throws SdkClientException on a null argument or any marshalling failure
 */
public void marshall(LoadBalancerTlsCertificateRenewalSummary loadBalancerTlsCertificateRenewalSummary, ProtocolMarshaller protocolMarshaller) {
    if (loadBalancerTlsCertificateRenewalSummary == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(loadBalancerTlsCertificateRenewalSummary.getRenewalStatus(), RENEWALSTATUS_BINDING);
        protocolMarshaller.marshall(loadBalancerTlsCertificateRenewalSummary.getDomainValidationOptions(), DOMAINVALIDATIONOPTIONS_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
|
java
|
/**
 * Builds a prepared SELECT statement for the given query and class
 * descriptor, logging the generated SQL when debug logging is enabled.
 *
 * @param query the query to translate
 * @param cld descriptor of the queried class
 * @return the prepared select statement
 */
public SelectStatement getPreparedSelectStatement(Query query, ClassDescriptor cld) {
    SelectStatement stmt = new SqlSelectStatement(m_platform, cld, query, logger);
    if (logger.isDebugEnabled()) {
        logger.debug("SQL:" + stmt.getStatement());
    }
    return stmt;
}
|
python
|
def labels(self):
    """Retrieve or set labels assigned to this bucket.

    See
    https://cloud.google.com/storage/docs/json_api/v1/buckets#labels

    .. note::
       The getter returns a *copy* of the bucket's labels; mutating the
       returned dict has no effect unless you re-assign it through the
       setter and call ``bucket.update()``.

    :setter: Set labels for this bucket.
    :getter: Gets the labels for this bucket.
    :rtype: :class:`dict`
    :returns: Name-value pairs (string->string) labelling the bucket.
    """
    stored = self._properties.get("labels")
    return {} if stored is None else copy.deepcopy(stored)
|
python
|
def run(config_file):
    """load the config, create a population, evolve and show the result

    Relies on module-level ``eval_genome``, ``xor_inputs``/``xor_outputs``
    and the optional ``visualize`` module (may be None).

    :param config_file: path to the NEAT configuration file
    """
    # Load configuration.
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_file)
    # Create the population, which is the top-level object for a NEAT run.
    p = neat.Population(config)
    # Add a stdout reporter to show progress in the terminal.
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    # Run for up to 300 generations, evaluating genomes on 4 worker threads.
    pe = neat.ThreadedEvaluator(4, eval_genome)
    winner = p.run(pe.evaluate, 300)
    pe.stop()
    # Display the winning genome.
    print('\nBest genome:\n{!s}'.format(winner))
    # Show output of the most fit genome against training data.
    print('\nOutput:')
    winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
    for xi, xo in zip(xor_inputs, xor_outputs):
        output = winner_net.activate(xi)
        print(
            "input {!r}, expected output {!r}, got {!r}".format(xi, xo, output)
        )
    if visualize is not None:
        node_names = {-1: 'A', -2: 'B', 0: 'A XOR B'}
        visualize.draw_net(config, winner, True, node_names=node_names)
        visualize.plot_stats(stats, ylog=False, view=True)
        visualize.plot_species(stats, view=True)
|
python
|
def connect_success(self, connection_id):
    """
    Check to see if the successful connection is meant to be peered with.
    If not, it should be used to get the peers from the endpoint.

    Dispatches on the recorded endpoint status: PEERING connections are
    offered peering, TOPOLOGY connections are queried for their peers.
    The temporary endpoint bookkeeping entry is removed in all cases.
    """
    endpoint = self._network.connection_id_to_endpoint(connection_id)
    endpoint_info = self._temp_endpoints.get(endpoint)
    LOGGER.debug("Endpoint has completed authorization: %s (id: %s)",
                 endpoint,
                 connection_id)
    if endpoint_info is None:
        LOGGER.debug("Received unknown endpoint: %s", endpoint)
    elif endpoint_info.status == EndpointStatus.PEERING:
        self._connect_success_peering(connection_id, endpoint)
    elif endpoint_info.status == EndpointStatus.TOPOLOGY:
        self._connect_success_topology(connection_id)
    else:
        LOGGER.debug("Endpoint has unknown status: %s", endpoint)
    # Clean up the temporary record under the lock regardless of outcome.
    with self._lock:
        if endpoint in self._temp_endpoints:
            del self._temp_endpoints[endpoint]
|
java
|
/**
 * Returns {@code true} when the named annotation is present in either the
 * runtime-invisible or the runtime-visible annotation attribute.  Either
 * attribute may be {@code null}.
 *
 * @param invisible the runtime-invisible annotations attribute, may be null
 * @param visible the runtime-visible annotations attribute, may be null
 * @param annotationName fully qualified annotation name to look for
 * @return whether the annotation exists in at least one attribute
 */
private static boolean checkAnnotation(AnnotationsAttribute invisible, AnnotationsAttribute visible,
        String annotationName) {
    // Both attributes are probed unconditionally, mirroring the original
    // evaluation order.
    boolean inInvisible = invisible != null && invisible.getAnnotation(annotationName) != null;
    boolean inVisible = visible != null && visible.getAnnotation(annotationName) != null;
    return inInvisible || inVisible;
}
|
java
|
/**
 * Removes the given deferred reference data from this component's pending
 * list, if such a list exists.  Safe to call when no deferrals were ever
 * recorded.
 *
 * @param refData the deferred reference data to remove
 */
public synchronized void removeDeferredReferenceData(DeferredReferenceData refData) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.entry(tc, "removeDeferredReferenceData", "this=" + this, refData);
    }
    if (deferredReferenceDatas != null) {
        deferredReferenceDatas.remove(refData);
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.exit(tc, "removeDeferredReferenceData");
    }
}
|
python
|
def get_quote_or_rt_text(tweet):
    """Return the quoted or retweeted text carried inside a Tweet.

    This is not the text entered by the posting user:
    - tweet: empty string (there is no quoted or retweeted text)
    - quote: only the text of the quoted Tweet
    - retweet: the text of the retweet

    Args:
        tweet (Tweet or dict): A Tweet object or dictionary
    Returns:
        str: text of the retweeted-tweet or the quoted-tweet
        (empty string if this is an original Tweet)

    Example:
        >>> from tweet_parser.getter_methods.tweet_text import get_quote_or_rt_text
        >>> quote = {"created_at": "Wed May 24 20:17:19 +0000 2017",
        ...          "text": "adding my own commentary",
        ...          "truncated": False,
        ...          "quoted_status": {
        ...              "created_at": "Mon May 01 05:00:05 +0000 2017",
        ...              "truncated": False,
        ...              "text": "an interesting Tweet"
        ...          }}
        >>> get_quote_or_rt_text(quote)
        'an interesting Tweet'
    """
    kind = get_tweet_type(tweet)
    if kind == "tweet":
        return ""
    if kind == "quote":
        # Key differs between original and activity-streams payloads.
        key = "quoted_status" if is_original_format(tweet) else "twitter_quoted_status"
        return get_full_text(tweet[key])
    if kind == "retweet":
        key = "retweeted_status" if is_original_format(tweet) else "object"
        return get_full_text(tweet[key])
|
python
|
def precmd(self, line):
    """Handle alias expansion and the ';;' command separator.

    Aliases are expanded repeatedly (an alias may map onto another
    alias), with ``%1``..``%n`` replaced by the corresponding argument
    and ``%*`` by all arguments.  Anything after a ``;;`` marker is
    queued on ``self.cmdqueue`` for later execution, unless the command
    being entered is the ``alias`` command itself.

    :param line: raw input line
    :return: the (possibly rewritten) line to execute now
    """
    if not line.strip():
        return line
    args = line.split()
    while args[0] in self.aliases:
        line = self.aliases[args[0]]
        # Substitute positional placeholders %1..%n, then %* for all args.
        for position, arg in enumerate(args[1:], start=1):
            line = line.replace("%" + str(position), arg)
        line = line.replace("%*", ' '.join(args[1:]))
        args = line.split()
    # split into ';;' separated commands
    # unless it's an alias command
    if args[0] != 'alias':
        marker = line.find(';;')
        if marker >= 0:
            # queue up everything after marker
            # (renamed from `next`, which shadowed the builtin)
            next_cmd = line[marker + 2:].lstrip()
            self.cmdqueue.append(next_cmd)
            line = line[:marker].rstrip()
    return line
|
python
|
def _nemo_accpars(self,vo,ro):
"""
NAME:
_nemo_accpars
PURPOSE:
return the accpars potential parameters for use of this potential with NEMO
INPUT:
vo - velocity unit in km/s
ro - length unit in kpc
OUTPUT:
accpars string
HISTORY:
2014-12-18 - Written - Bovy (IAS)
"""
ampl= self._amp*vo**2.*ro**(self.alpha-2.)
return "0,%s,%s,%s" % (ampl,self.alpha,self.rc*ro)
|
java
|
/**
 * Reads up to {@code len} bytes, transparently rolling over to the next
 * file in {@code filenames} when the current stream is exhausted.
 * Returns -1 only after all files have been consumed.
 *
 * @param b destination buffer
 * @param off offset into the buffer
 * @param len maximum number of bytes to read
 * @return number of bytes read, or -1 at end of the last file
 * @throws IOException if reading or cueing the next stream fails
 */
public int read(byte[] b, int off, int len) throws IOException {
    int c = super.read(b, off, len);
    if( c == -1 && filenames.hasNext() ) {
        // Current file exhausted: switch to the next one and retry.
        cueStream();
        return read(b,off,len);
    }
    return c;
}
|
python
|
def correctly_signed_message(self, decoded_xml, msgtype, must=False, origdoc=None, only_valid_cert=False):
    """Check if a request is correctly signed, if we have metadata for
    the entity that sent the info use that, if not use the key that are in
    the message if any.

    :param decoded_xml: The SAML message as an XML infoset (a string)
    :param msgtype: SAML protocol message type; used to pick the
        ``<msgtype>_from_string`` parser from saml/samlp
    :param must: Whether there must be a signature
    :param origdoc: original document, forwarded to the signature check
    :param only_valid_cert: forwarded to the signature check
    :return: the parsed message (unsigned messages are returned unchecked
        when ``must`` is False)
    :raise TypeError: if the XML does not parse as ``msgtype``
    :raise SignatureError: if ``must`` is True and no signature is present
    """
    # Resolve the parser function; samlp takes precedence over saml.
    attr = '{type}_from_string'.format(type=msgtype)
    _func = getattr(saml, attr, None)
    _func = getattr(samlp, attr, _func)
    msg = _func(decoded_xml)
    if not msg:
        raise TypeError('Not a {type}'.format(type=msgtype))
    if not msg.signature:
        if must:
            err_msg = 'Required signature missing on {type}'
            err_msg = err_msg.format(type=msgtype)
            raise SignatureError(err_msg)
        else:
            return msg
    return self._check_signature(
        decoded_xml,
        msg,
        class_name(msg),
        origdoc,
        must=must,
        only_valid_cert=only_valid_cert)
|
java
|
/**
 * Waits up to {@code timeout} ms until at least {@code index + 1} distinct
 * views carrying the given tag have been seen, returning the view that
 * crossed the threshold; optionally scrolls down between polling rounds.
 *
 * @param tag the tag to match; {@code null} returns {@code null} immediately
 * @param index zero-based index among distinct matching views
 * @param timeout maximum time to wait, in milliseconds
 * @param scroll whether to scroll down after each unsuccessful pass
 * @return the matching view, or {@code null} on timeout or null tag
 */
public View waitForView(Object tag, int index, int timeout, boolean scroll){
    //Because https://github.com/android/platform_frameworks_base/blob/master/core/java/android/view/View.java#L17005-L17007
    if(tag == null) {
        return null;
    }
    // Set keeps matches distinct so repeated polling passes don't double-count.
    Set<View> uniqueViewsMatchingId = new HashSet<View>();
    long endTime = SystemClock.uptimeMillis() + timeout;
    while (SystemClock.uptimeMillis() <= endTime) {
        sleeper.sleep();
        for (View view : viewFetcher.getAllViews(false)) {
            if (tag.equals(view.getTag())) {
                uniqueViewsMatchingId.add(view);
                if(uniqueViewsMatchingId.size() > index) {
                    return view;
                }
            }
        }
        if(scroll) {
            scroller.scrollDown();
        }
    }
    return null;
}
|
python
|
def get_editor_widget(self, request, plugins, plugin):
    """
    Returns the Django form Widget to be used for
    the text area.

    Builds the admin URLs the editor needs (cancel and render-plugin),
    creates an action token for the request/plugin pair, and decides
    whether the text plugin should be deleted when the user cancels
    (only for plugins that have no saved instance yet).

    :param request: current HttpRequest
    :param plugins: installed CMS plugins available inside the editor
    :param plugin: the text plugin being edited
    :return: a configured TextEditorWidget
    """
    cancel_url_name = self.get_admin_url_name('delete_on_cancel')
    cancel_url = reverse('admin:%s' % cancel_url_name)
    render_plugin_url_name = self.get_admin_url_name('render_plugin')
    render_plugin_url = reverse('admin:%s' % render_plugin_url_name)
    action_token = self.get_action_token(request, plugin)
    # should we delete the text plugin when
    # the user cancels?
    delete_text_on_cancel = (
        'delete-on-cancel' in request.GET and  # noqa
        not plugin.get_plugin_instance()[0]
    )
    widget = TextEditorWidget(
        installed_plugins=plugins, pk=plugin.pk,
        placeholder=plugin.placeholder,
        plugin_language=plugin.language,
        configuration=self.ckeditor_configuration,
        render_plugin_url=render_plugin_url,
        cancel_url=cancel_url,
        action_token=action_token,
        delete_on_cancel=delete_text_on_cancel,
    )
    return widget
|
java
|
/**
 * Decrements this node's reference count and removes it from the intern
 * table once the count reaches zero.
 *
 * @param table the intern table this node was interned in
 * @throws IllegalStateException if the count goes negative, or the node
 *         was expected in the table but not found
 */
public void unintern(InternTable table)
{
    // This implementation is inherited by Identifier and Literal which have no children,
    // but is overridden in Operator, which does.
    refCount--;
    if (refCount < 0)
        throw new IllegalStateException();
    if (refCount == 0)
    {
        Object res = table.remove(this);
        if (res == null)
            throw new IllegalStateException();
    }
}
|
python
|
def scenario(ctx, dependency_name, driver_name, lint_name, provisioner_name,
             role_name, scenario_name, verifier_name):  # pragma: no cover
    """ Initialize a new scenario for use with Molecule. """
    command_args = {
        'dependency_name': dependency_name,
        'driver_name': driver_name,
        'lint_name': lint_name,
        'provisioner_name': provisioner_name,
        'role_name': role_name,
        'scenario_name': scenario_name,
        'subcommand': __name__,
        'verifier_name': verifier_name,
    }
    # Each verifier ships with a matching lint tool.
    verifier_lint_map = {
        'inspec': 'rubocop',
        'goss': 'yamllint',
        'ansible': 'ansible-lint',
    }
    if verifier_name in verifier_lint_map:
        command_args['verifier_lint_name'] = verifier_lint_map[verifier_name]
    Scenario(command_args).execute()
|
java
|
/**
 * Stores the key/value pair in the local cache and pushes the update to
 * the client-side storage via a global "JWWF-storageSet" message.  Key
 * and value are JSON-escaped before being embedded in the payload.
 *
 * @param key storage key
 * @param value value to store
 */
public void set(String key, String value) {
    cache.put(key, value);
    user.sendGlobal("JWWF-storageSet", "{\"key\":" + Json.escapeString(key) + ",\"value\":" + Json.escapeString(value) + "}");
}
|
python
|
def dict_value_hint(key, mapper=None):
    """Build a hinter that extracts ``key`` from a dict, optionally mapped.

    The returned callable takes a dictionary and returns the value stored
    under *key* (``None`` when absent), passed through *mapper* when one
    is supplied.  Intended as a type hint for :class:`OneOf`.
    """
    transform = identity if mapper is None else mapper

    def hinter(data):
        """Return the (mapped) value stored under *key* in *data*."""
        return transform(data.get(key))

    return hinter
|
python
|
def create_wiki(self, wiki_create_params, project=None):
    """CreateWiki.
    Creates the wiki resource.
    :param :class:`<WikiCreateParametersV2> <azure.devops.v5_0.wiki.models.WikiCreateParametersV2>` wiki_create_params: Parameters for the wiki creation.
    :param str project: Project ID or project name (optional; scopes the
        request to that project when given)
    :rtype: :class:`<WikiV2> <azure.devops.v5_0.wiki.models.WikiV2>`
    """
    # The project route value is only included when explicitly supplied.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    content = self._serialize.body(wiki_create_params, 'WikiCreateParametersV2')
    response = self._send(http_method='POST',
                          location_id='288d122c-dbd4-451d-aa5f-7dbbba070728',
                          version='5.0',
                          route_values=route_values,
                          content=content)
    return self._deserialize('WikiV2', response)
|
java
|
/**
 * Creates a prefetcher for the named relationship (collection or single
 * object reference) of the given owner class.
 *
 * @param anOwnerCld class descriptor of the owning class
 * @param aRelationshipName name of the relationship to prefetch
 * @return a prefetcher for the resolved relationship descriptor
 * @throws PersistenceBrokerException if no relationship with that name exists
 */
public RelationshipPrefetcher createRelationshipPrefetcher(ClassDescriptor anOwnerCld, String aRelationshipName)
{
    ObjectReferenceDescriptor ord;
    // Collections are tried first, then single object references.
    ord = anOwnerCld.getCollectionDescriptorByName(aRelationshipName);
    if (ord == null)
    {
        ord = anOwnerCld.getObjectReferenceDescriptorByName(aRelationshipName);
        if (ord == null)
        {
            // NOTE(review): anOwnerCld was already dereferenced above, so the
            // null check inside this message can never take the null branch.
            throw new PersistenceBrokerException("Relationship named '" + aRelationshipName
                + "' not found in owner class " + (anOwnerCld != null ? anOwnerCld.getClassNameOfObject() : null));
        }
    }
    return createRelationshipPrefetcher(ord);
}
|
python
|
def iteritems(self):
    """Yield each distinct key with its first associated value.

    Later pairs whose key was already produced are skipped, so only the
    first occurrence of a duplicated key appears in the output.
    """
    seen = set()
    for key, value in self._pairs:
        if key in seen:
            continue
        seen.add(key)
        yield key, value
|
python
|
def resnet_imagenet_34_td_unit_no_drop():
    """Set of hyperparameters.

    ResNet-34 ImageNet hparams with unit-level targeted dropout configured
    but effectively disabled: targeting_rate=0 and keep_prob=1 mean no
    units are ever dropped.
    """
    hp = resnet_imagenet_34()
    hp.use_td = "unit"
    hp.targeting_rate = 0.0
    hp.keep_prob = 1.0
    return hp
|
java
|
/**
 * Copies every attribute from the given store into the wrapped scope.
 *
 * @param attributes source attribute store
 * @return {@code true} only if the scope accepted every attribute
 */
public boolean setAttributes(IAttributeStore attributes) {
    int applied = 0;
    for (Map.Entry<String, Object> attr : attributes.getAttributes().entrySet()) {
        boolean accepted = scope.setAttribute(attr.getKey(), attr.getValue());
        if (accepted) {
            applied++;
        }
    }
    // expect every value to have been added
    return applied == attributes.size();
}
|
python
|
def rank_velocity_genes(data, vkey='velocity', n_genes=10, groupby=None, match_with=None, resolution=None,
                        min_counts=None, min_r2=None, min_dispersion=None, copy=False):
    """Rank genes for velocity characterizing groups.

    A t-test-like score (group mean vs. rest, scaled by pooled variance)
    is computed per gene and group; the top ``n_genes`` per group are
    reported.

    Arguments
    ----------
    data : :class:`~anndata.AnnData`
        Annotated data matrix.
    vkey: `str` (default: `'velocity'`)
        Key of velocities computed in `tl.velocity`
    n_genes : `int`, optional (default: 10)
        The number of genes that appear in the returned tables.
    groupby: `str`, `list` or `np.ndarray` (default: `None`)
        Key of observations grouping to consider.
    min_counts: `float` (default: None)
        Minimum count of genes for consideration.
    min_r2: `float` (default: None)
        Minimum r2 value of genes for consideration.
    min_dispersion: `float` (default: None)
        Minimum dispersion norm value of genes for consideration.
    copy: `bool` (default: `False`)
        Return a copy instead of writing to data.
    Returns
    -------
    Returns or updates `data` with the attributes
    rank_velocity_genes : `.uns`
        Structured array to be indexed by group id storing the gene
        names. Ordered according to scores.
    velocity_score : `.var`
        Storing the score for each gene for each group. Ordered according to scores.
    """
    adata = data.copy() if copy else data
    if groupby is None or groupby == 'velocity_clusters':
        velocity_clusters(adata, vkey=vkey, match_with=match_with, resolution=resolution)
        groupby = 'velocity_clusters'
    logg.info('ranking velocity genes', r=True)
    # Build a boolean gene filter combining all requested thresholds.
    tmp_filter = np.ones(adata.n_vars, dtype=bool)
    if vkey + '_genes' in adata.var.keys():
        tmp_filter &= adata.var[vkey + '_genes']
    if 'unspliced' in adata.layers.keys():
        n_counts = (adata.layers['unspliced'] > 0).sum(0)
        n_counts = n_counts.A1 if issparse(adata.layers['unspliced']) else n_counts
        min_counts = min(50, np.percentile(n_counts, 50)) if min_counts is None else min_counts
        tmp_filter &= (n_counts > min_counts)
    if 'r2' in adata.var.keys():
        r2 = adata.var.velocity_r2
        min_r2 = .1 if min_r2 is None else min_r2  # np.percentile(r2[r2 > 0], 50)
        tmp_filter &= (r2 > min_r2)
    if 'dispersions_norm' in adata.var.keys():
        dispersions = adata.var.dispersions_norm
        min_dispersion = 0 if min_dispersion is None else min_dispersion  # np.percentile(dispersions, 20)
        tmp_filter &= (dispersions > min_dispersion)
    X = adata[:, tmp_filter].layers[vkey]
    groups, groups_masks = select_groups(adata[:, tmp_filter], key=groupby)
    n_groups = groups_masks.shape[0]
    sizes = groups_masks.sum(1)
    mean, var = np.zeros((n_groups, X.shape[1])), np.zeros((n_groups, X.shape[1]))
    for i, mask in enumerate(groups_masks): mean[i], var[i] = get_mean_var(X[mask])
    # test each against the union of all other groups
    rankings_gene_names, rankings_gene_scores, indices = [], [], []
    for i in range(n_groups):
        mask_rest = ~groups_masks[i]
        mean_rest, var_rest = get_mean_var(X[mask_rest])
        size_rest = sizes[i]  # else mask_rest.sum() if method == 't-test'
        scores = (mean[i] - mean_rest) / np.sqrt(var[i] / sizes[i] + var_rest / size_rest)
        scores = np.nan_to_num(scores)
        # equivalent to but much faster than np.argsort(scores)[-10:]
        if n_genes > X.shape[1]: n_genes = X.shape[1]
        idx = np.argpartition(scores, -n_genes)[-n_genes:]
        idx = idx[np.argsort(scores[idx])[::-1]]
        rankings_gene_names.append(adata[:, tmp_filter].var_names[idx].values)
        rankings_gene_scores.append(scores[idx])
    rankings_gene_names = np.array([list(n) for n in rankings_gene_names])
    rankings_gene_scores = np.array([list(n) for n in rankings_gene_scores])
    all_names = rankings_gene_names.T.flatten()
    all_scores = rankings_gene_scores.T.flatten()
    # np.int was removed in NumPy 1.24; the builtin int is the drop-in
    # replacement (it was always an alias for it).
    vscore = np.zeros(adata.n_vars, dtype=int)
    for i, name in enumerate(adata.var_names):
        if name in all_names: vscore[i] = all_scores[np.where(name == all_names)[0][0]]
    adata.var['velocity_score'] = vscore
    key = 'rank_velocity_genes'
    if key not in adata.uns.keys(): adata.uns[key] = {}
    #adata.uns[key] = {'groups': groups, 'names': rankings_gene_names, 'scores': rankings_gene_scores.round(0)}
    adata.uns[key] = \
        {'names': np.rec.fromarrays([n for n in rankings_gene_names], dtype=[(rn, 'U50') for rn in groups]),
         'scores': np.rec.fromarrays([n.round(2) for n in rankings_gene_scores], dtype=[(rn, 'float32') for rn in groups]),
         'params': {'groupby': groupby, 'reference': 'rest', 'method': 't-test_overestim_var', 'use_raw': True}}
    logg.info('    finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
    logg.hint(
        'added \n'
        '    \'' + key + '\', sorted scores by group ids (adata.uns)')
    return adata if copy else None
|
python
|
def not_query(expression):
    """Apply logical not operator to expression.

    Returns a query function that, given an index, yields the store keys
    of all documents that do NOT satisfy *expression*.
    """
    compiled_expression = compile_query(expression)

    def _not(index, expression=compiled_expression):
        """Return store keys for documents that do NOT satisfy expression."""
        all_keys = index.get_all_keys()
        # Materialize as a set for O(1) membership tests instead of
        # scanning the matched-key list once per candidate key.
        excluded = set(expression(index))
        return [key for key in all_keys if key not in excluded]

    return _not
|
java
|
/**
 * Decides whether the supplied arguments can be bound to the given
 * parameter list via a var-args invocation.  When the last argument is
 * non-null, its (unwrapped) type must be assignable to the var-args
 * component type; a null last argument is accepted as-is.  Finally the
 * full argument list is re-checked against the parameter types.
 *
 * @param isVarArgs whether the candidate method is declared var-args
 * @param parameterTypes declared parameter types of the candidate
 * @param arguments actual invocation arguments (may contain nulls)
 * @return {@code true} when a var-args invocation is type-compatible
 */
static boolean doesParameterTypesMatchForVarArgsInvocation(boolean isVarArgs, Class<?>[] parameterTypes,
        Object[] arguments) {
    if (isVarArgs && arguments != null && arguments.length >= 1 && parameterTypes != null
            && parameterTypes.length >= 1) {
        final Class<?> componentType = parameterTypes[parameterTypes.length - 1].getComponentType();
        final Object lastArgument = arguments[arguments.length - 1];
        if (lastArgument != null) {
            // Compare primitives rather than their wrappers so e.g. Integer
            // matches an int... parameter.
            final Class<?> lastArgumentTypeAsPrimitive = getTypeAsPrimitiveIfWrapped(lastArgument);
            final Class<?> varArgsParameterTypeAsPrimitive = getTypeAsPrimitiveIfWrapped(componentType);
            isVarArgs = varArgsParameterTypeAsPrimitive.isAssignableFrom(lastArgumentTypeAsPrimitive);
        }
    }
    return isVarArgs && checkArgumentTypesMatchParameterTypes(isVarArgs, parameterTypes, arguments);
}
|
python
|
def get_next_objective(self):
    """Return the next ``Objective`` in this list.

    return: (osid.learning.Objective) - the next Objective in this
            list.  Use ``has_next()`` first to check that one is
            available before calling this method.
    raise: IllegalState - no more elements available in this list
    raise: OperationFailed - unable to complete request
    compliance: mandatory - This method must be implemented.
    """
    try:
        return next(self)
    except StopIteration:
        raise IllegalState('no more elements available in this list')
    except Exception:  # Need to specify exceptions here!
        raise OperationFailed()
|
python
|
def add_profile_point(self,
                      value,
                      source='',
                      reference='',
                      method='',
                      ticket='',
                      campaign=None,
                      confidence=None,
                      bucket_list=None):
    """
    Add a profile point to CRITs.

    Args:
        value: The profile point itself
        source: Source of the information
        reference: A reference where more information can be found
        method: The method for adding this profile point
        ticket: A ticket associated with this profile point
        campaign: If the profile point has a campaign, add it here
        confidence: The confidence this profile point belongs to the
            given campaign
        bucket_list: Bucket list items for this profile point
    Returns:
        JSON object for the profile point or None if it failed.
    """
    # Sentinel default avoids the shared-mutable-default pitfall.
    if bucket_list is None:
        bucket_list = []
    # Time to upload these indicators
    data = {
        'api_key': self.api_key,
        'username': self.username,
        'source': source,
        'reference': reference,
        # Bug fix: this was hard-coded to '' so the caller's ``method``
        # argument was silently dropped.
        'method': method,
        'campaign': campaign,
        'confidence': confidence,
        'bucket_list': ','.join(bucket_list),
        'ticket': ticket,
        'value': value,
    }
    r = requests.post("{0}/profile_points/".format(self.url), data=data,
                      verify=self.verify, proxies=self.proxies)
    if r.status_code == 200:
        log.debug("Profile Point uploaded successfully - {}".format(value))
        pp = json.loads(r.text)
        return pp
    return None
|
java
|
/**
 * Writes this report's stack-trace MD5 hash into the spreadsheet row's
 * custom elements and pushes the change to the remote list entry.
 *
 * @throws IOException if the update fails at the transport level
 * @throws ServiceException if the spreadsheet service rejects the update
 */
public void commitStacktraceMD5() throws IOException, ServiceException {
    listEntry.getCustomElements().setValueLocal(AcraReportHeader.STACK_TRACE_MD5.tagName(),
            getStacktraceMD5());
    listEntry.update();
}
|
python
|
def items(self):
    """Return result values.

    Serves the cached result set when one exists; otherwise evaluates
    the full query via ``self.all()`` and returns its items.
    """
    cached = self._result_cache
    if cached:
        return cached.items
    return self.all().items
|
python
|
def find_rt_jar(javahome=None):
    """Find the path to the Java standard library jar.

    The jar is expected to exist at the path 'jre/lib/rt.jar' inside a
    standard Java installation directory. The directory is found using
    the following procedure:
    1. If the javahome argument is provided, use the value as the
    directory.
    2. If the JAVA_HOME environment variable is set, use the value as
    the directory.
    3. Find the location of the ``java`` binary in the current PATH and
    compute the installation directory from this location (on OS X a
    dedicated lookup is used instead, see below).

    Args:
        javahome: A path to a Java installation directory (optional).
    Returns:
        The absolute path of rt.jar.
    Raises:
        ExtensionError: if rt.jar does not exist at the computed path.
    """
    if not javahome:
        if 'JAVA_HOME' in os.environ:
            javahome = os.environ['JAVA_HOME']
        elif sys.platform == 'darwin':
            # The default java binary on OS X is not part of a standard Oracle
            # install, so building paths relative to it does not work like it
            # does on other platforms.
            javahome = _find_osx_javahome()
        else:
            javahome = _get_javahome_from_java(_find_java_binary())
    rtpath = os.path.join(javahome, 'jre', 'lib', 'rt.jar')
    if not os.path.isfile(rtpath):
        msg = 'Could not find rt.jar: {} is not a file'.format(rtpath)
        raise ExtensionError(msg)
    return rtpath
|
java
|
/**
 * Re-queues blocks whose received/deleted notification failed, so they
 * are retried on the next report, and restores the matching count of
 * pending "received" requests.
 *
 * @param failed blocks to put back at the head of the pending list
 * @param failedPendingRequests number of pending received-requests to restore
 */
private void processFailedBlocks(Block []failed,
    int failedPendingRequests) {
    synchronized (receivedAndDeletedBlockList) {
        // We are adding to the front of a linked list and hence to preserve
        // order we should add the blocks in the reverse order.
        for (int i = failed.length - 1; i >= 0; i--) {
            receivedAndDeletedBlockList.add(0, failed[i]);
        }
        pendingReceivedRequests += failedPendingRequests;
    }
}
|
java
|
/**
 * Checks whether this interval abuts the other, i.e. they touch at
 * exactly one endpoint without overlapping.  The XOR makes equal
 * intervals (both endpoint tests true) and fully separate intervals
 * (both false) non-abutting.
 *
 * @param other the interval to compare with, not null
 * @return true if this end equals the other's start, or this start equals
 *         the other's end — but not both
 * @throws NullPointerException if {@code other} is null
 */
public boolean abuts(Interval other) {
    Objects.requireNonNull(other, "other");
    return end.equals(other.start) ^ start.equals(other.end);
}
|
java
|
/**
 * Returns the first commerce price list for the given company, ordered by
 * the supplied comparator, or {@code null} if none match.  Delegates to
 * the persistence layer.
 *
 * @param companyId the company ID to filter by
 * @param orderByComparator the comparator determining which entry is first
 * @return the first matching price list, or {@code null}
 */
public static CommercePriceList fetchByCompanyId_First(long companyId,
    OrderByComparator<CommercePriceList> orderByComparator) {
    return getPersistence()
               .fetchByCompanyId_First(companyId, orderByComparator);
}
|
java
|
/**
 * Counts the lines of {@code file} by scanning for '\n' bytes in 1&nbsp;KB
 * chunks.  Returns 0 for an empty file; otherwise returns the number of
 * newline bytes plus one (so a trailing newline is counted as starting an
 * extra line).
 *
 * @return the line count of {@code file}
 */
private int countLines() {
    InputStream is = IO.buffered(IO.inputStream(file));
    try {
        byte[] c = new byte[1024];
        int readChars = is.read(c);
        if (readChars == -1) {
            // bail out if nothing to read
            return 0;
        }
        // make it easy for the optimizer to tune this loop
        int count = 1;
        while (readChars == 1024) {
            for (int i = 0; i < 1024; ) {
                if (c[i++] == '\n') {
                    ++count;
                }
            }
            readChars = is.read(c);
        }
        // count remaining characters
        while (readChars != -1) {
            for (int i = 0; i < readChars; ++i) {
                if (c[i] == '\n') {
                    ++count;
                }
            }
            readChars = is.read(c);
        }
        return count;
    } catch (IOException e) {
        throw E.ioException(e);
    } finally {
        IO.close(is);
    }
}
|
java
|
/**
 * Starts creating or updating a managed disk, returning an observable that
 * unwraps the service response to the resulting {@link DiskInner} body.
 *
 * @param resourceGroupName name of the resource group
 * @param diskName name of the managed disk
 * @param disk disk definition to create or update
 * @return an observable emitting the created/updated disk
 */
public Observable<DiskInner> beginCreateOrUpdateAsync(String resourceGroupName, String diskName, DiskInner disk) {
    return beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, diskName, disk).map(new Func1<ServiceResponse<DiskInner>, DiskInner>() {
        @Override
        public DiskInner call(ServiceResponse<DiskInner> response) {
            return response.body();
        }
    });
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.