language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
|---|---|
java
|
/**
 * Builds the form for the scheduled-publish dialog: a date field for the
 * publish date and, when the selection contains folders, a check box for
 * including sub-resources.
 *
 * @return the initialized form layout
 */
private FormLayout initForm() {

    final FormLayout layout = new FormLayout();
    layout.setWidth("100%");

    m_dateField = new CmsDateField();
    m_dateField.setCaption(
        CmsVaadinUtils.getMessageText(org.opencms.workplace.commons.Messages.GUI_LABEL_DATE_PUBLISH_SCHEDULED_0));
    layout.addComponent(m_dateField);

    m_includeSubResources = new CheckBox(
        CmsVaadinUtils.getMessageText(org.opencms.workplace.commons.Messages.GUI_PUBLISH_MULTI_SUBRESOURCES_0));
    // The check box only makes sense when at least one folder is selected.
    if (hasFolders()) {
        layout.addComponent(m_includeSubResources);
    }
    return layout;
}
|
python
|
def load_sgems_exp_var(filename):
    """Read an SGeMS experimental variogram XML file.

    Parameters
    ----------
    filename : str
        path to an SGEMS experimental variogram XML file

    Returns
    -------
    dfs : dict
        dict keyed by variogram title, mapping to a pandas.DataFrame with
        columns "x", "y" and "pairs" for each division in the experimental
        variogram.  Negative "y" values are masked to NaN.

    Notes
    -----
    Fixes over the previous version: the docstring claimed a list was
    returned (a dict always was), the leftover debug prints were removed,
    and the removed-in-NumPy-2.0 alias ``np.NaN`` was replaced by
    ``np.nan``.
    """
    assert os.path.exists(filename), "file not found: " + filename
    import xml.etree.ElementTree as etree
    tree = etree.parse(filename)
    root = tree.getroot()
    dfs = {}
    for variogram in root:
        for attrib in variogram:
            if attrib.tag == "title":
                # title text looks like "...=<name>,..." -> keep the part
                # after '=' of the first comma-separated token
                title = attrib.text.split(',')[0].split('=')[-1]
            elif attrib.tag == "x":
                x = [float(i) for i in attrib.text.split()]
            elif attrib.tag == "y":
                y = [float(i) for i in attrib.text.split()]
            elif attrib.tag == "pairs":
                pairs = [int(i) for i in attrib.text.split()]
        df = pd.DataFrame({"x": x, "y": y, "pairs": pairs})
        # negative variogram values are "no data" placeholders
        df.loc[df.y < 0.0, "y"] = np.nan
        dfs[title] = df
    return dfs
|
python
|
def gen_df(cls, options, width, spread_type="call", spread_kind="buy"):
    """
    Generate Pandas Dataframe of Vertical
    :param options: python dict of options.
    :param width: offset for spread. Must be integer.
    :param spread_type: call or put. defaults to "call".
    :param spread_kind: buy or sell. defaults to "buy".
    :return: DataFrame with one row per option leg, each joined (via the
        ``*_shifted`` columns) with the leg ``width`` rows away inside the
        same expiration date; ``margin`` and ``premium_adjusted_mark_price``
        are filled in according to ``spread_kind``.
    """
    assert type(width) is int
    assert spread_type in ["call", "put"]
    assert spread_kind in ["buy", "sell"]
    # get CALLs or PUTs
    options = list(filter(lambda x: x["type"] == spread_type, options))
    # coef flips the shift direction: puts pair with the row `width`
    # positions one way, calls the other (rows are sorted by strike below).
    coef = (1 if spread_type == "put" else -1)
    shift = width * coef
    df = pd.DataFrame.from_dict(options)
    df['expiration_date'] = pd.to_datetime(
        df['expiration_date'], format="%Y-%m-%d")
    df['adjusted_mark_price'] = pd.to_numeric(df['adjusted_mark_price'])
    df['strike_price'] = pd.to_numeric(df['strike_price'])
    df.sort_values(["expiration_date", "strike_price"], inplace=True)
    # Pair each leg with its offset sibling; shifting per expiration group
    # prevents pairing across different expiration dates.
    for k, v in df.groupby("expiration_date"):
        sdf = v.shift(shift)
        df.loc[v.index, "strike_price_shifted"] = sdf["strike_price"]
        df.loc[v.index, "delta_shifted"] = sdf["delta"]
        df.loc[v.index, "volume_shifted"] = sdf["volume"]
        df.loc[v.index, "open_interest_shifted"] = sdf["open_interest"]
        df.loc[v.index, "instrument_shifted"] = sdf["instrument"]
        df.loc[v.index, "adjusted_mark_price_shift"] = \
            sdf["adjusted_mark_price"]
        if spread_kind == "sell":
            # For short spreads the margin is the distance between strikes.
            df.loc[v.index, "margin"] = \
                abs(sdf["strike_price"] - v["strike_price"])
        else:
            # Long spreads require no margin beyond the premium paid.
            df.loc[v.index, "margin"] = 0.0
        if spread_kind == "buy":
            # Net debit: price paid for the near leg minus received on the far leg.
            df.loc[v.index, "premium_adjusted_mark_price"] = (
                v["adjusted_mark_price"] - sdf["adjusted_mark_price"])
        elif spread_kind == "sell":
            # Net credit: reverse of the buy case.
            df.loc[v.index, "premium_adjusted_mark_price"] = (
                sdf["adjusted_mark_price"] - v["adjusted_mark_price"])
    return df
|
python
|
def _populate_common_request(self, request):
    '''Fill in the fields shared by all outgoing requests.

    Adds a Referer header derived from the parent URL (unless one is
    already present) and applies HTTP credentials from the fetch rule.
    '''
    url_record = self._item_session.url_record
    # Note that referrer may have already been set by the --referer option
    referrer_missing = not request.fields.get('Referer')
    if url_record.parent_url and referrer_missing:
        self._add_referrer(request, url_record)
    login = self._fetch_rule.http_login
    if login:
        request.username, request.password = login
|
python
|
def addStreamHandler(self, lvl=20):
    """Attach a stdout stream handler to this logger.

    Args:
        lvl (int): The severity level of messages printed to the screen with
            the stream handler, default = 20 (logging.INFO).
    """
    sh = logging.StreamHandler(sys.stdout)
    sh.setLevel(lvl)
    # Message-only output; for richer output use e.g.
    # logging.Formatter('%(name)s - %(levelname)s - %(message)s').
    # (The previous `if False:` dead-code branch holding that example
    # was removed.)
    sFrmt = logging.Formatter('%(message)s')
    sh.setFormatter(sFrmt)
    self.addHandler(sh)
|
java
|
/**
 * Serializes typed dependencies into an XML fragment rooted at
 * {@code <dependencies style="typed">}.  Each dependency becomes a
 * {@code <dep>} element carrying the relation name, with governor and
 * dependent child elements holding the token index; all text content is
 * XML-escaped.
 *
 * @param dependencies the typed dependencies to render
 * @return the XML string (no XML declaration, no trailing newline)
 */
private static String toXMLString(Collection<TypedDependency> dependencies) {
    StringBuilder buf = new StringBuilder("<dependencies style=\"typed\">\n");
    for (TypedDependency td : dependencies) {
        String reln = td.reln().toString();
        String gov = td.gov().value();
        int govIdx = td.gov().index();
        String dep = td.dep().value();
        int depIdx = td.dep().index();
        boolean extra = td.extra();
        // add an attribute if the node is a copy
        // (this happens in collapsing when different prepositions are conjuncts)
        String govCopy = "";
        Integer copyGov = td.gov().label.get(CopyAnnotation.class);
        if (copyGov != null) {
            govCopy = " copy=\"" + copyGov + "\"";
        }
        String depCopy = "";
        Integer copyDep = td.dep().label.get(CopyAnnotation.class);
        if (copyDep != null) {
            depCopy = " copy=\"" + copyDep + "\"";
        }
        // add an attribute if the typed dependency is an extra relation (do not preserve the tree structure)
        String extraAttr = "";
        if (extra) {
            extraAttr = " extra=\"yes\"";
        }
        buf.append(" <dep type=\"").append(XMLUtils.escapeXML(reln)).append('\"').append(extraAttr).append(">\n");
        buf.append(" <governor idx=\"").append(govIdx).append('\"').append(govCopy).append('>').append(XMLUtils.escapeXML(gov)).append("</governor>\n");
        buf.append(" <dependent idx=\"").append(depIdx).append('\"').append(depCopy).append('>').append(XMLUtils.escapeXML(dep)).append("</dependent>\n");
        buf.append(" </dep>\n");
    }
    buf.append("</dependencies>");
    return buf.toString();
}
|
java
|
/**
 * Removes the given observer from every notification observer list known
 * to this object.  Safe to call for observers that were never registered.
 */
public synchronized void removeObserver(final ApptentiveNotificationObserver observer) {
    for (final ApptentiveNotificationObserverList list : observerListLookup.values()) {
        list.removeObserver(observer);
    }
}
|
python
|
def as_json_range(self, name):
    """Serialize this parameter range for a SageMaker tuning-job request.

    The deep learning framework images expect hyperparameter values as
    JSON, so every value is JSON-encoded.

    Args:
        name (str): The name of the hyperparameter.

    Returns:
        dict[str, list[str]]: ``{'Name': name, 'Values': [...]}`` where each
        value has been serialized with ``json.dumps``.
    """
    serialized_values = [json.dumps(value) for value in self.values]
    return {'Name': name, 'Values': serialized_values}
|
python
|
def Runtime_compileScript(self, expression, sourceURL, persistScript, **kwargs):
    """Compiles an expression (Chrome DevTools ``Runtime.compileScript``).

    Required arguments:
        'expression' (str) -> Expression to compile.
        'sourceURL' (str) -> Source url to be set for the script.
        'persistScript' (bool) -> Whether the compiled script is persisted.
    Optional keyword arguments:
        'executionContextId' (ExecutionContextId) -> Execution context to
            compile in; defaults to the inspected page's context.
    Returns:
        'scriptId' (ScriptId) -> Id of the script.
        'exceptionDetails' (ExceptionDetails) -> Exception details.
    """
    assert isinstance(expression, str), (
        "Argument 'expression' must be of type '['str']'. Received type: '%s'"
        % type(expression))
    assert isinstance(sourceURL, str), (
        "Argument 'sourceURL' must be of type '['str']'. Received type: '%s'"
        % type(sourceURL))
    assert isinstance(persistScript, bool), (
        "Argument 'persistScript' must be of type '['bool']'. Received type: '%s'"
        % type(persistScript))
    passed_keys = list(kwargs.keys())
    allowed = ['executionContextId']
    assert all(key in allowed for key in passed_keys), (
        "Allowed kwargs are ['executionContextId']. Passed kwargs: %s"
        % passed_keys)
    return self.synchronous_command(
        'Runtime.compileScript',
        expression=expression,
        sourceURL=sourceURL,
        persistScript=persistScript,
        **kwargs)
|
java
|
/**
 * Prepares the outgoing request payload.  If a compression format is
 * configured, the serialized data (plus the optional upload file) is
 * compressed with the matching {@code ICompressor}; otherwise the
 * serialized bytes and the upload file bytes are concatenated unchanged.
 * Does nothing when there is no serialized data.
 *
 * @param intuitMessage message whose request elements are read and updated
 * @throws FMSException propagated from the compression step
 */
@Override
public void execute(IntuitMessage intuitMessage) throws FMSException {
    LOG.debug("Enter CompressionInterceptor...");
    String serializedData = intuitMessage.getRequestElements().getSerializedData();
    if (StringUtils.hasText(serializedData)) {
        byte[] compressedData = null;
        String compressFormat = Config.getProperty(Config.COMPRESSION_REQUEST_FORMAT);
        LOG.info("compression format : " + compressFormat);
        byte[] uploadFile = intuitMessage.getRequestElements().getUploadFile();
        if (StringUtils.hasText(compressFormat)) {
            // compress the body data
            ICompressor compressor = CompressorFactory.getCompressor(compressFormat);
            compressedData = compressor.compress(serializedData, uploadFile);
        } else {
            if (uploadFile != null) {
                // NOTE(review): getBytes() uses the platform default charset —
                // confirm UTF-8 is intended for the wire format.
                byte[] serializedDateByte = serializedData.getBytes();
                compressedData = new byte[serializedDateByte.length + uploadFile.length];
                // construct the new byte[] by concatenating serializedData and uploadFile data
                System.arraycopy(serializedDateByte, 0, compressedData, 0, serializedDateByte.length);
                System.arraycopy(uploadFile, 0, compressedData, serializedDateByte.length, uploadFile.length);
            } else {
                // No compression and no upload file: pass the bytes through.
                compressedData = serializedData.getBytes();
            }
        }
        // set the compressed data
        intuitMessage.getRequestElements().setCompressedData(compressedData);
    }
    LOG.debug("Exit CompressionInterceptor.");
}
|
python
|
def iterGrid(self, minZoom, maxZoom):
    "Yields the tileBounds, zoom, tileCol and tileRow"
    # Zoom levels index into self.RESOLUTIONS, so both bounds must be valid
    # indices and the range must be non-empty.
    assert minZoom in range(0, len(self.RESOLUTIONS))
    assert maxZoom in range(0, len(self.RESOLUTIONS))
    assert minZoom <= maxZoom
    # NOTE(review): xrange implies this module targets Python 2 — confirm
    # before porting (xrange is a NameError on Python 3).
    for zoom in xrange(minZoom, maxZoom + 1):
        # presumably the row/col tile-address range covering the layer
        # extent at this zoom — confirm against getExtentAddress.
        [minRow, minCol, maxRow, maxCol] = self.getExtentAddress(zoom)
        for row in xrange(minRow, maxRow + 1):
            for col in xrange(minCol, maxCol + 1):
                tileBounds = self.tileBounds(zoom, col, row)
                yield (tileBounds, zoom, col, row)
|
python
|
def get_size(self, value=None):
    """Return the size in bytes.

    Args:
        value (bytes): In structs, the user can assign other value instead
            of this class' instance. Here, in such cases, ``self`` is a
            class attribute of the struct.

    Returns:
        int: The address size in bytes.
    """
    target = self._value if value is None else value
    # Delegate when the value knows its own size; otherwise measure the
    # packed representation.
    getter = getattr(target, 'get_size', None)
    if getter is not None:
        return getter()
    return len(self.pack(target))
|
java
|
/**
 * Writes an XML document to the AMF3 output stream.  Documents already
 * written are emitted as a back-reference instead of being re-encoded:
 * the low bit of the length/reference integer distinguishes the two cases
 * (0 = reference id via {@code << 1}, 1 = inline byte length via
 * {@code << 1 | 1}).
 *
 * @param xml the document to serialize
 */
@Override
public void writeXML(Document xml) {
    writeAMF3();
    buf.put(AMF3.TYPE_XML);
    if (hasReference(xml)) {
        // Previously-written document: emit its reference id (low bit 0).
        putInteger(getReferenceId(xml) << 1);
        return;
    }
    final byte[] encoded = encodeString(XMLUtils.docToString(xml));
    // Inline payload: length with the low "inline" bit set.
    putInteger(encoded.length << 1 | 1);
    buf.put(encoded);
    storeReference(xml);
}
|
java
|
/**
 * Case-insensitive {@code lastIndexOf} for a single character, lower-casing
 * both sides with the supplied locale.
 *
 * @param sText the text to search; may be {@code null}
 * @param cSearch the character to find
 * @param aSortLocale locale used for lower-casing; may not be {@code null}
 * @return the last index of the character, or {@code STRING_NOT_FOUND} if
 *         the text is {@code null}, empty, or does not contain it
 */
public static int getLastIndexOfIgnoreCase (@Nullable final String sText,
                                            final char cSearch,
                                            @Nonnull final Locale aSortLocale)
{
  if (sText == null || sText.length () < 1)
    return STRING_NOT_FOUND;
  final String sLowerText = sText.toLowerCase (aSortLocale);
  return sLowerText.lastIndexOf (Character.toLowerCase (cSearch));
}
|
java
|
/**
 * Runs agglomerative hierarchical (AGNES-style) clustering on the relation
 * using a full pairwise distance matrix stored as a lower-triangular array.
 * Merging repeats until only {@code numclusters} active clusters remain;
 * a flat clustering of the surviving clusters is returned.
 *
 * Memory is quadratic in the data set size, hence the hard 0x10000 cap
 * below (which also guards the triangle index against integer overflow).
 *
 * @param db database (used to obtain the distance query)
 * @param relation the data relation to cluster
 * @return flat clustering with the requested number of clusters
 */
public Result run(Database db, Relation<O> relation) {
  DistanceQuery<O> dq = db.getDistanceQuery(relation, getDistanceFunction());
  ArrayDBIDs ids = DBIDUtil.ensureArray(relation.getDBIDs());
  final int size = ids.size();
  if(size > 0x10000) {
    throw new AbortException("This implementation does not scale to data sets larger than " + 0x10000 + " instances (~17 GB RAM), which results in an integer overflow.");
  }
  if(Linkage.SINGLE.equals(linkage)) {
    LOG.verbose("Notice: SLINK is a much faster algorithm for single-linkage clustering!");
  }
  // Compute the initial (lower triangular) distance matrix.
  double[] scratch = new double[triangleSize(size)];
  DBIDArrayIter ix = ids.iter(), iy = ids.iter();
  // Position counter - must agree with computeOffset!
  int pos = 0;
  boolean square = Linkage.WARD.equals(linkage) && !getDistanceFunction().isSquared();
  for(int x = 0; ix.valid(); x++, ix.advance()) {
    iy.seek(0);
    for(int y = 0; y < x; y++, iy.advance()) {
      scratch[pos] = dq.distance(ix, iy);
      // Ward uses variances -- i.e. squared values
      if(square) {
        scratch[pos] *= scratch[pos];
      }
      pos++;
    }
  }
  // Initialize space for result:
  // height[x] < +infinity later marks x as merged away (inactive).
  double[] height = new double[size];
  Arrays.fill(height, Double.POSITIVE_INFINITY);
  // Parent node, to track merges
  // have every object point to itself initially
  ArrayModifiableDBIDs parent = DBIDUtil.newArray(ids);
  // Active clusters, when not trivial.
  Int2ReferenceMap<ModifiableDBIDs> clusters = new Int2ReferenceOpenHashMap<>();
  // Repeat until everything merged, except the desired number of clusters:
  final int stop = size - numclusters;
  FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("Agglomerative clustering", stop, LOG) : null;
  for(int i = 0; i < stop; i++) {
    // Scan the triangle for the closest pair of still-active clusters.
    double min = Double.POSITIVE_INFINITY;
    int minx = -1, miny = -1;
    for(int x = 0; x < size; x++) {
      if(height[x] < Double.POSITIVE_INFINITY) {
        continue;
      }
      final int xbase = triangleSize(x);
      for(int y = 0; y < x; y++) {
        if(height[y] < Double.POSITIVE_INFINITY) {
          continue;
        }
        final int idx = xbase + y;
        if(scratch[idx] < min) {
          min = scratch[idx];
          minx = x;
          miny = y;
        }
      }
    }
    assert (minx >= 0 && miny >= 0);
    // Avoid allocating memory, by reusing existing iterators:
    ix.seek(minx);
    iy.seek(miny);
    // Perform merge in data structure: x -> y
    // Since y < x, prefer keeping y, dropping x.
    height[minx] = min;
    parent.set(minx, iy);
    // Merge into cluster
    ModifiableDBIDs cx = clusters.get(minx);
    ModifiableDBIDs cy = clusters.get(miny);
    int sizex = 1, sizey = 1; // cluster sizes, for averaging
    if(cy == null) {
      cy = DBIDUtil.newHashSet();
      cy.add(iy);
    }
    else {
      sizey = cy.size();
    }
    if(cx == null) {
      cy.add(ix);
    }
    else {
      sizex = cx.size();
      cy.addDBIDs(cx);
      clusters.remove(minx);
    }
    clusters.put(miny, cy);
    // Update distance matrix. Note: miny < minx
    // Implementation note: most will not need sizej, and could save the
    // hashmap lookup.
    final int xbase = triangleSize(minx), ybase = triangleSize(miny);
    // Write to (y, j), with j < y
    for(int j = 0; j < miny; j++) {
      if(height[j] < Double.POSITIVE_INFINITY) {
        continue;
      }
      final DBIDs idsj = clusters.get(j);
      final int sizej = (idsj == null) ? 1 : idsj.size();
      scratch[ybase + j] = linkage.combine(sizex, scratch[xbase + j], sizey, scratch[ybase + j], sizej, min);
    }
    // Write to (j, y), with y < j < x
    for(int j = miny + 1; j < minx; j++) {
      if(height[j] < Double.POSITIVE_INFINITY) {
        continue;
      }
      final int jbase = triangleSize(j);
      final DBIDs idsj = clusters.get(j);
      final int sizej = (idsj == null) ? 1 : idsj.size();
      scratch[jbase + miny] = linkage.combine(sizex, scratch[xbase + j], sizey, scratch[jbase + miny], sizej, min);
    }
    // Write to (j, y), with y < x < j
    for(int j = minx + 1; j < size; j++) {
      if(height[j] < Double.POSITIVE_INFINITY) {
        continue;
      }
      final DBIDs idsj = clusters.get(j);
      final int sizej = (idsj == null) ? 1 : idsj.size();
      final int jbase = triangleSize(j);
      scratch[jbase + miny] = linkage.combine(sizex, scratch[jbase + minx], sizey, scratch[jbase + miny], sizej, min);
    }
    LOG.incrementProcessed(prog);
  }
  LOG.ensureCompleted(prog);
  // Build the clustering result
  final Clustering<Model> dendrogram = new Clustering<>("Hierarchical-Clustering", "hierarchical-clustering");
  for(int x = 0; x < size; x++) {
    if(height[x] < Double.POSITIVE_INFINITY) {
      DBIDs cids = clusters.get(x);
      if(cids == null) {
        // No stored member set: x never absorbed anything, so it is a
        // singleton cluster.
        ix.seek(x);
        cids = DBIDUtil.deref(ix);
      }
      Cluster<Model> cluster = new Cluster<>("Cluster", cids);
      dendrogram.addToplevelCluster(cluster);
    }
  }
  return dendrogram;
}
|
python
|
def _set_process_list(self, v, load=False):
    """
    Setter method for process_list, mapped from YANG variable /cpu_state/process_list (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_process_list is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_process_list() directly.
    YANG Description: CPU utilization summary and list of all the process
    """
    # Generated (pyangbind-style) setter: coerce the value into the typed
    # container class and store it, raising ValueError on a type mismatch.
    if hasattr(v, "_utype"):
        # Unwrap a user-typed value into its base representation first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=process_list.process_list, is_container='container', presence=False, yang_name="process-list", rest_name="process-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'RAS-process-cpu', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """process_list must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=process_list.process_list, is_container='container', presence=False, yang_name="process-list", rest_name="process-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'RAS-process-cpu', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='container', is_config=False)""",
        })
    self.__process_list = t
    # Some generated classes expose a _set() hook to propagate the change.
    if hasattr(self, '_set'):
        self._set()
|
python
|
def get_bond(iface):
    '''
    Return the content of a bond script

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_bond bond0
    '''
    conf_name = '{0}.conf'.format(iface)
    return _read_file(os.path.join(_DEB_NETWORK_CONF_FILES, conf_name))
|
java
|
/**
 * Moves every reader positioned before {@code index} forward to that index.
 */
void resetHead(long index) {
    for (final SegmentedJournalReader journalReader : readers) {
        final boolean behindHead = journalReader.getNextIndex() < index;
        if (behindHead) {
            journalReader.reset(index);
        }
    }
}
|
java
|
/**
 * Looks up the encryption key configured as a registered-service property.
 *
 * @param registeredService the service to inspect
 * @return the key value, or empty if the property is not assigned
 */
public Optional<String> getEncryptionKey(final RegisteredService registeredService) {
    val property = getEncryptionKeyRegisteredServiceProperty();
    if (!property.isAssignedTo(registeredService)) {
        return Optional.empty();
    }
    return Optional.of(property.getPropertyValue(registeredService).getValue());
}
|
java
|
/**
 * Maps a {@link SetAclAction} onto its protobuf counterpart.
 *
 * @param aclAction the action to convert
 * @return the matching {@code File.PSetAclAction}
 * @throws IllegalStateException if the action has no known mapping
 */
public static File.PSetAclAction toProto(SetAclAction aclAction) {
    final File.PSetAclAction proto;
    switch (aclAction) {
        case REPLACE:
            proto = File.PSetAclAction.REPLACE;
            break;
        case MODIFY:
            proto = File.PSetAclAction.MODIFY;
            break;
        case REMOVE:
            proto = File.PSetAclAction.REMOVE;
            break;
        case REMOVE_ALL:
            proto = File.PSetAclAction.REMOVE_ALL;
            break;
        case REMOVE_DEFAULT:
            proto = File.PSetAclAction.REMOVE_DEFAULT;
            break;
        default:
            throw new IllegalStateException("Unrecognized set acl action: " + aclAction);
    }
    return proto;
}
|
java
|
/**
 * Creates a {@code Builder} pre-populated from an existing resource: the
 * interaction model, container, member relation, membership resource,
 * member-of relation and inserted-content relation are copied over, with
 * absent {@code Optional} values mapped to {@code null}.
 *
 * @param r the resource to copy from
 * @return a builder initialized with the resource's properties
 */
public static Builder builder(final Resource r) {
    return builder(r.getIdentifier()).interactionModel(r.getInteractionModel())
            .container(r.getContainer().orElse(null))
            .memberRelation(r.getMemberRelation().orElse(null))
            .membershipResource(r.getMembershipResource().orElse(null))
            .memberOfRelation(r.getMemberOfRelation().orElse(null))
            .insertedContentRelation(r.getInsertedContentRelation().orElse(null));
}
|
java
|
/**
 * Sets the "select all" label attribute, or removes the attribute entirely
 * when the given text is {@code null}.
 *
 * @param selectAllText the label to set, or {@code null} to clear it
 */
public void setSelectAllText(final String selectAllText) {
    if (selectAllText == null) {
        attrMixin.removeAttribute(SELECT_ALL_TEXT);
    } else {
        attrMixin.setAttribute(SELECT_ALL_TEXT, selectAllText);
    }
}
|
java
|
/**
 * Resolves an enumerator of {@code FullyQualifiedNameFQNFormat} from its
 * string literal.
 *
 * @param eDataType the EMF data type (used only for the error message)
 * @param initialValue the literal to resolve
 * @return the matching enumerator, never {@code null}
 * @throws IllegalArgumentException if the literal is not a valid enumerator
 */
public FullyQualifiedNameFQNFormat createFullyQualifiedNameFQNFormatFromString(EDataType eDataType, String initialValue) {
    final FullyQualifiedNameFQNFormat format = FullyQualifiedNameFQNFormat.get(initialValue);
    if (format == null) {
        throw new IllegalArgumentException("The value '" + initialValue + "' is not a valid enumerator of '" + eDataType.getName() + "'");
    }
    return format;
}
|
python
|
def set_eps(self, eps):
    """
    If :obj:`eps` is True,
    the PostScript surface will output Encapsulated PostScript.
    This method should only be called
    before any drawing operations have been performed on the current page.
    The simplest way to do this is to call this method
    immediately after creating the surface.
    An Encapsulated PostScript file should never contain
    more than one page.
    """
    # Delegate to the cairo C API; bool() normalizes any truthy value.
    cairo.cairo_ps_surface_set_eps(self._pointer, bool(eps))
    # Raise if the underlying cairo surface entered an error state.
    self._check_status()
|
java
|
/**
 * Selects the named child of the tree node addressed by {@code address}.
 *
 * @param address model address identifying the parent tree node
 * @param childName display text of the child to select
 * @throws IllegalArgumentException if the address resolves to no tree node,
 *         or the node has no child with the given text
 */
@Override
public void onViewChild(ModelNode address, String childName) {
    TreeItem rootNode = findTreeItem(tree, address);
    // Fix: previously a missing node caused an NPE on getChildCount();
    // fail with a descriptive exception instead.
    if (rootNode == null) {
        throw new IllegalArgumentException("No tree item found for " + address.toString());
    }
    TreeItem childNode = null;
    for (int i = 0; i < rootNode.getChildCount(); i++) {
        TreeItem candidate = rootNode.getChild(i);
        if (childName.equals(candidate.getText())) {
            childNode = candidate;
            break;
        }
    }
    if (null == childNode) {
        throw new IllegalArgumentException("No such child "+ childName + " on "+ address.toString());
    }
    // deselect previous
    tree.setSelectedItem(null, false);
    // select next
    tree.setSelectedItem(childNode, false);
    tree.ensureSelectedItemVisible();
    onItemSelected(childNode);
}
|
java
|
/**
 * Normalizes user input: returns {@code null} for {@code null} or
 * effectively-empty input (per {@code emptyRegex}); otherwise returns the
 * trimmed string.
 *
 * @param input raw input, possibly {@code null}
 * @return trimmed input, or {@code null} when the input is bad
 */
public static String nullifyBadInput(String input)
{
    if (input == null || input.matches(emptyRegex))
    {
        return null;
    }
    return input.trim();
}
|
python
|
def _all_queue_names(self):
"""
Return a list of all unique queue names in our config.
:return: list of all queue names (str)
:rtype: :std:term:`list`
"""
queues = set()
endpoints = self.config.get('endpoints')
for e in endpoints:
for q in endpoints[e]['queues']:
queues.add(q)
return sorted(queues)
|
python
|
def fetch(self):
    """Load the index from the associated Git repository.

    Clones the repository into the local cache when absent, pulls when the
    cached copy is stale, then parses the JSON index file into
    ``self.contents``.
    """
    os.makedirs(os.path.dirname(self.cached_repo), exist_ok=True)
    if os.path.exists(self.cached_repo):
        self._log.debug("Index is cached")
        if self._are_local_and_remote_heads_different():
            self._log.info("Cached index is not up to date, pulling %s", self.repo)
            git.pull(self.cached_repo, self.remote_url)
    else:
        self._log.warning("Index not found, caching %s in %s", self.repo, self.cached_repo)
        git.clone(self.remote_url, self.cached_repo, checkout=True)
    index_path = os.path.join(self.cached_repo, self.INDEX_FILE)
    with open(index_path, encoding="utf-8") as index_file:
        self.contents = json.load(index_file)
|
python
|
def retrieve_file_handles_of_same_dataset(self, **args):
    '''Retrieve the file handles of a dataset from solr.

    Tries two query strategies in order; the second is only attempted if
    the first raised or returned no handles.

    :return: List of handles, or empty list. Should never return None.
    :raise: SolrSwitchedOff
    :raise SolrError: If both strategies to find file handles failed.
    '''
    mandatory_args = ['drs_id', 'version_number', 'data_node', 'prefix']
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
    self.__reset_error_messages()
    # Try plan A
    file_handles = None
    try:
        file_handles = self.__strategy1(args) # can raise SolrError or SolrSwitchedOff, but can't return None
    except esgfpid.exceptions.SolrError as e:
        # NOTE(review): e.message is a Python 2 idiom — confirm this module
        # still targets Python 2 before porting.
        self.__error_messages.append('Error during first query: '+e.message)
    if file_handles is not None and len(file_handles)>0:
        LOGGER.debug('Retrieved file handles from solr in first query.')
        return file_handles
    # Try plan B
    try:
        file_handles = self.__strategy2(args) # can raise SolrError or SolrSwitchedOff, but can't return None
    except esgfpid.exceptions.SolrError as e:
        self.__error_messages.append('Error during second query: '+e.message)
        # Bug fix: was "'/n'.join(...)", which joined with the literal
        # two characters "/n" instead of newlines.
        msg = '\n'.join(self.__error_messages)
        raise esgfpid.exceptions.SolrError('Failure in both queries. Messages:\n'+msg)
    return file_handles
|
python
|
def from_networkx_graph(cls, G, vartype=None, node_attribute_name='bias',
                        edge_attribute_name='bias'):
    """Create a binary quadratic model from a NetworkX graph.

    Args:
        G (:obj:`networkx.Graph`):
            A NetworkX graph with biases stored as node/edge attributes.
        vartype (:class:`.Vartype`/str/set, optional):
            Variable type for the binary quadratic model
            (:class:`.Vartype.SPIN`/``'SPIN'``/``{-1, 1}`` or
            :class:`.Vartype.BINARY`/``'BINARY'``/``{0, 1}``).  When
            omitted, ``G.vartype`` is used; when both exist, the argument
            wins.
        node_attribute_name (hashable, optional, default='bias'):
            Attribute name for linear biases (missing attribute -> 0).
        edge_attribute_name (hashable, optional, default='bias'):
            Attribute name for quadratic biases (missing attribute -> 0).

    Returns:
        :obj:`.BinaryQuadraticModel`

    Examples:
        >>> import networkx as nx
        ...
        >>> G = nx.Graph()
        >>> G.add_node('a', bias=.5)
        >>> G.add_edge('a', 'b', bias=-1)
        >>> bqm = dimod.BinaryQuadraticModel.from_networkx_graph(G, 'SPIN')
        >>> bqm.adj['a']['b']
        -1
    """
    if vartype is None:
        # Fall back to the graph-level vartype attribute.
        if not hasattr(G, 'vartype'):
            raise ValueError(
                "either 'vartype' argument must be provided or "
                "the given graph should have a vartype attribute.")
        vartype = G.vartype

    linear = G.nodes(data=node_attribute_name, default=0)
    quadratic = G.edges(data=edge_attribute_name, default=0)
    offset = getattr(G, 'offset', 0)

    return cls(linear, quadratic, offset, vartype)
|
python
|
def separators(self, reordered = True):
    """
    Returns a list of separator sets

    reordered -- when True the indices are in the internal (permuted)
    ordering; when False they are mapped back through self.__p.
    """
    # Each separator k appears to be the tail of supernode k's column
    # structure: the rows of snrowidx past the supernode's own
    # snptr[k+1]-snptr[k] vertices, up to sncolptr[k+1] — TODO confirm
    # against the chordal-decomposition construction of these arrays.
    if reordered:
        return [list(self.snrowidx[self.sncolptr[k]+self.snptr[k+1]-self.snptr[k]:self.sncolptr[k+1]]) for k in range(self.Nsn)]
    else:
        return [list(self.__p[self.snrowidx[self.sncolptr[k]+self.snptr[k+1]-self.snptr[k]:self.sncolptr[k+1]]]) for k in range(self.Nsn)]
|
python
|
def _get_key_info(self):
    """EscapedKeyAction doesn't send it as Unicode and the vk and
    scan code are generated differently"""
    # Low byte of VkKeyScan is the virtual-key code for self.key.
    virtual_key = LoByte(VkKeyScan(self.key))
    scan_code = MapVirtualKey(virtual_key, 0)
    return (virtual_key, scan_code, 0)
|
java
|
/**
 * Strips trailing {@code Glue} objects from the output stream, scanning
 * backwards and stopping at the first {@code ControlCommand}
 * (e.g. BeginString).  Marks the output stream dirty afterwards.
 */
void removeExistingGlue() {
    int i = outputStream.size() - 1;
    while (i >= 0) {
        final RTObject obj = outputStream.get(i);
        if (obj instanceof ControlCommand) {
            break;
        }
        if (obj instanceof Glue) {
            outputStream.remove(i);
        }
        i--;
    }
    outputStreamDirty();
}
|
java
|
/**
 * Generated EMF reflective getter: returns the feature value for the given
 * feature id, delegating unknown ids to the superclass.
 *
 * @param featureID feature identifier from {@code AfplibPackage}
 * @param resolve whether proxies should be resolved (passed through)
 * @param coreType whether the core type is requested (passed through)
 * @return the feature value
 */
@Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
    switch (featureID) {
        case AfplibPackage.BMO__OVLY_NAME:
            return getOvlyName();
        case AfplibPackage.BMO__TRIPLETS:
            return getTriplets();
    }
    return super.eGet(featureID, resolve, coreType);
}
|
python
|
def find_visible_elements(self, selector, by=By.CSS_SELECTOR, limit=0):
    """Return the visible WebElements matching the selector.

    A positive ``limit`` truncates the result to at most that many
    elements.  XPath and link-text selectors are auto-detected and the
    ``by`` strategy adjusted accordingly.
    """
    self.wait_for_ready_state_complete()
    if page_utils.is_xpath_selector(selector):
        by = By.XPATH
    if page_utils.is_link_text_selector(selector):
        selector = page_utils.get_link_text_from_selector(selector)
        by = By.LINK_TEXT
    visible = page_actions.find_visible_elements(self.driver, selector, by)
    if limit and limit > 0:
        # Slicing past the end is a no-op, so no length check is needed.
        visible = visible[:limit]
    return visible
|
python
|
def p_program(p):
    """ program : line
    """
    # NOTE: the docstring above is the PLY grammar rule — do not edit it.
    # Fix: replaced a list comprehension used purely for its side effects
    # with an explicit loop.
    if p[1] is not None:
        for item in p[1]:
            if isinstance(item, Asm):
                MEMORY.add_instruction(item)
|
python
|
def _get_ANSI_colored_font( color ):
''' Returns an ANSI escape code (a string) corresponding to switching the font
to given color, or None, if the given color could not be associated with
the available colors.
See also:
https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
'''
color = (color.replace('-','')).lower()
#
# Bright colors:
#
if color == 'white':
return '\033[97m'
elif color in ['cyan', 'aqua']:
return '\033[96m'
elif color in ['purple', 'magneta', 'fuchsia']:
return '\033[95m'
elif color == 'blue':
return '\033[94m'
elif color in ['yellow', 'gold']:
return '\033[93m'
elif color in ['green', 'lime']:
return '\033[92m'
elif color == 'red':
return '\033[91m'
#
# Dark colors:
#
elif color in ['grey', 'gray', 'silver']:
return '\033[37m'
elif color in ['darkcyan', 'teal']:
return '\033[36m'
elif color in ['darkpurple', 'darkmagneta']:
return '\033[35m'
elif color in ['darkblue', 'navy']:
return '\033[34m'
elif color in ['darkyellow', 'olive']:
return '\033[33m'
elif color == 'darkgreen':
return '\033[32m'
elif color in ['darkred', 'maroon']:
return '\033[31m'
return None
|
java
|
/**
 * Accumulates read bytes and, once the unnotified count reaches the
 * notification threshold, fires {@code onNotifyBytesRead()} and folds the
 * batch into the notified total.
 *
 * @param bytesRead number of bytes read since the previous call
 */
private void onBytesRead(int bytesRead) {
    unnotifiedByteCount += bytesRead;
    if (unnotifiedByteCount >= notifyThresHold) {
        onNotifyBytesRead();
        notifiedByteCount += unnotifiedByteCount;
        unnotifiedByteCount = 0;
    }
}
|
python
|
async def send_notification(self, method, args=()):
    '''Send an RPC notification over the network.

    method -- RPC method name.
    args   -- positional arguments for the notification (default ()).
    The connection frames the notification; the frame is then written out.
    '''
    message = self.connection.send_notification(Notification(method, args))
    await self._send_message(message)
|
python
|
def refresh_content(self, order=None, name=None):
    """
    Re-download all submissions and reset the page index.

    order -- sort order override; falls back to the current content order.
    name  -- subreddit name; None refreshes the current page.
    """
    order = order or self.content.order

    # Preserve the query if staying on the current page
    if name is None:
        query = self.content.query
    else:
        query = None
    name = name or self.content.name

    # Hack to allow an order specified in the name by prompt_subreddit() to
    # override the current default
    if order == 'ignore':
        order = None

    with self.term.loader('Refreshing page'):
        self.content = SubredditContent.from_name(
            self.reddit, name, self.term.loader, order=order, query=query)
    # Only rebuild the navigator when loading succeeded.
    if not self.term.loader.exception:
        self.nav = Navigator(self.content.get)
|
java
|
/**
 * Reads the element beans of a group container resource, initializing each
 * element's resource and skipping elements that exist in memory only.
 *
 * @param cms the CMS context
 * @param resource the group container resource to unmarshal
 * @return the initialized container element beans
 * @throws CmsException if unmarshalling or resource initialization fails
 */
protected static List<CmsContainerElementBean> loadGroupContainerElements(CmsObject cms, CmsResource resource)
throws CmsException {

    final CmsXmlGroupContainer xmlGroupContainer = CmsXmlGroupContainerFactory.unmarshal(cms, resource);
    final CmsGroupContainerBean groupContainer = xmlGroupContainer.getGroupContainer(cms);
    final List<CmsContainerElementBean> result = new ArrayList<CmsContainerElementBean>();
    for (final CmsContainerElementBean element : groupContainer.getElements()) {
        if (element.isInMemoryOnly()) {
            continue;
        }
        element.initResource(cms);
        result.add(element);
    }
    return result;
}
|
python
|
def delete_authoring_nodes(self, editor):
    """
    Deletes the Model authoring Nodes associated with given editor.

    :param editor: Editor.
    :type editor: Editor
    :return: Method success.
    :rtype: bool
    """
    editor_node = foundations.common.get_first_item(self.get_editor_nodes(editor))
    # The file node is the editor node's parent in the authoring model.
    file_node = editor_node.parent
    self.unregister_editor(editor_node)
    # Best effort: the file node may already have been unregistered.
    self.unregister_file(file_node, raise_exception=False)
    return True
|
python
|
def copy_with_new_relations(self, new_relations):
    """Create a new match object extended with new relations"""
    # Rebuild a fresh instance from the forward mapping, then merge in the
    # additional relations.
    result = self.__class__(self.forward.items())
    result.add_relations(new_relations.items())
    # Track only the end positions contributed by the new relations.
    result.previous_ends1 = set(new_relations.values())
    return result
|
python
|
def resolve_orm_path(model, orm_path):
    """
    Walks the queryset-style query path ``orm_path`` starting from the
    ``model`` class and returns the final field.  A bad field name raises
    ``django.db.models.fields.FieldDoesNotExist``.
    """
    bits = orm_path.split('__')
    # Follow every relation segment except the last, which names a field.
    endpoint_model = model
    for bit in bits[:-1]:
        endpoint_model = get_model_at_related_field(endpoint_model, bit)
    leaf = bits[-1]
    if leaf == 'pk':
        return endpoint_model._meta.pk
    return endpoint_model._meta.get_field(leaf)
|
java
|
/**
 * Opens one input format per file: a single file yields a singleton list;
 * a directory yields one opened format per directory entry.
 *
 * NOTE(review): the directory listing is neither recursed nor filtered —
 * confirm nested directories cannot occur here.
 *
 * @param inputFormatClass the input format class to instantiate
 * @param path file or directory path
 * @param configuration configuration handed to each opened format
 * @return the opened input formats
 * @throws IOException if the file system or file status cannot be accessed
 */
@SuppressWarnings("unchecked")
public static <T, F extends FileInputFormat<T>> List<F> openAllInputs(
        Class<F> inputFormatClass, String path, Configuration configuration) throws IOException {
    Path nephelePath = new Path(path);
    FileSystem fs = nephelePath.getFileSystem();
    FileStatus fileStatus = fs.getFileStatus(nephelePath);
    if (!fileStatus.isDir()) {
        return Arrays.asList(openInput(inputFormatClass, path, configuration));
    }
    FileStatus[] list = fs.listStatus(nephelePath);
    List<F> formats = new ArrayList<F>();
    for (int index = 0; index < list.length; index++) {
        formats.add(openInput(inputFormatClass, list[index].getPath().toString(), configuration));
    }
    return formats;
}
|
python
|
def from_request(cls, http_method, http_url, headers=None, parameters=None,
                 query_string=None):
    """Combines multiple parameter sources.

    OAuth parameters are gathered, in order, from the Authorization header,
    the explicit query string, and the URL's own query component; later
    sources update earlier ones.  Returns an instance of ``cls`` when any
    parameters were found, otherwise None.
    """
    if parameters is None:
        parameters = {}

    # Headers
    if headers:
        auth_header = None
        for k, v in headers.items():
            if k.lower() == 'authorization' or \
                    k.upper() == 'HTTP_AUTHORIZATION':
                auth_header = v
        # Check that the authorization header is OAuth.
        if auth_header and auth_header[:6] == 'OAuth ':
            auth_header = auth_header[6:]
            try:
                # Get the parameters from the header.
                header_params = cls._split_header(auth_header)
                parameters.update(header_params)
            except Exception:
                # Fix: was a bare 'except:', which also swallowed
                # KeyboardInterrupt and SystemExit.
                raise Error('Unable to parse OAuth parameters from '
                            'Authorization header.')

    # GET or POST query string.
    if query_string:
        query_params = cls._split_url_string(query_string)
        parameters.update(query_params)

    # URL parameters.
    param_str = urlparse(http_url)[4]  # query
    url_params = cls._split_url_string(param_str)
    parameters.update(url_params)

    if parameters:
        return cls(http_method, http_url, parameters)
    return None
|
python
|
def convert_uv(pinyin):
    """Restore the original "ü" final in a pinyin string.

    When "ü" follows j, q or x it is conventionally spelled "u"
    (ju, qu, xu) with the umlaut dropped; after n and l it keeps the
    umlaut (nü, lü).  This reverses that spelling shortcut using
    UV_RE/UV_MAP.
    """
    def _restore(match):
        return ''.join((match.group(1), UV_MAP[match.group(2)], match.group(3)))

    return UV_RE.sub(_restore, pinyin)
|
java
|
/**
 * Calculates basic molecular properties for a HELM notation string and
 * returns them as strings in a fixed order: molecular formula, molecular
 * weight, exact mass, extinction coefficient.
 *
 * @param notation HELM notation to validate and analyse
 * @return list of the four property values, in the order above
 * @throws ValidationException if the notation does not validate
 * @throws ExtinctionCoefficientException if the coefficient cannot be computed
 */
public List<String> getMolecularProperties(String notation) throws BuilderMoleculeException, CTKException,
        ExtinctionCoefficientException, ValidationException, MonomerLoadingException, ChemistryException {
    // validate(...) parses and checks the notation before any calculation.
    MoleculeProperty result = MoleculePropertyCalculator.getMoleculeProperties(validate(notation));
    // NOTE(review): presumably restores a monomer factory that validate()
    // may have switched — confirm against setMonomerFactoryToDefault.
    setMonomerFactoryToDefault(notation);
    return new LinkedList<String>(
            Arrays.asList(result.getMolecularFormula(), Double.toString(result.getMolecularWeight()),
                    Double.toString(result.getExactMass()), Double.toString(result.getExtinctionCoefficient())));
}
|
python
|
def _checkRelatesTo(self, value):
    '''Check the WS-Address RelatesTo header returned by the server.

    :param value: RelatesTo value extracted from the server response.
    :raises WSActionException: if it does not match the message ID of the
        request we sent (``self._messageID``).
    '''
    if value != self._messageID:
        # Call-form raise works on both Python 2 and 3; the old
        # "raise Exc, msg" statement is a syntax error on Python 3.
        raise WSActionException(
            'wrong WS-Address RelatesTo(%s), expecting %s'
            % (value, self._messageID))
|
python
|
def down_alpha_beta(returns, factor_returns, **kwargs):
    """Compute alpha and beta restricted to periods where the benchmark
    return is negative.

    Parameters
    ----------
    returns, factor_returns, **kwargs
        See the documentation for `alpha_beta`.

    Returns
    -------
    alpha : float
    beta : float
    """
    # Delegate to the generic downside wrapper with the aligned
    # alpha/beta computation as the evaluated function.
    result = down(returns, factor_returns, function=alpha_beta_aligned, **kwargs)
    return result
|
python
|
def _AddStopTimeObjectUnordered(self, stoptime, schedule):
    """Insert a StopTime row for this trip directly into the schedule DB.

    The stop_time is not checked for duplicate sequence numbers, so the
    trip must be validated later.

    Args:
        stoptime: StopTime-like object providing GetSqlValuesTuple().
        schedule: Schedule whose SQLite connection receives the row.
    """
    stop_time_class = self.GetGtfsFactory().StopTime
    insert_query = "INSERT INTO stop_times (%s) VALUES (%s);" % (
        ','.join(stop_time_class._SQL_FIELD_NAMES),
        ','.join(['?'] * len(stop_time_class._SQL_FIELD_NAMES)))
    # One cursor suffices; the original created a second, unused cursor.
    cursor = schedule._connection.cursor()
    cursor.execute(
        insert_query, stoptime.GetSqlValuesTuple(self.trip_id))
|
python
|
def main(self, *args, **kwargs):
    """Catch all exceptions raised by the wrapped CLI entry point.

    On success the result of ``super().main`` is returned unchanged.  On
    any exception the error is optionally reported to Sentry, re-raised
    when not attached to an interactive terminal (so scripts and CI still
    fail loudly), and otherwise routed to the GitHub issue helper.
    """
    try:
        result = super().main(*args, **kwargs)
        return result
    except Exception:
        # Report to Sentry first when the SDK is available.
        if HAS_SENTRY:
            self._handle_sentry()
        # Non-interactive runs should surface the original traceback.
        if not (sys.stdin.isatty() and sys.stdout.isatty()):
            raise
        # Interactive session: route the failure to the GitHub helper.
        # NOTE(review): the exception is swallowed after this call —
        # confirm _handle_github is expected to report or terminate.
        self._handle_github()
|
python
|
def isoratio_init(self,isos):
    '''
    Return the abundance ratio of two isotopes from ``self.habu``.

    The isotopes may be given either as ['Fe', 56, 'Fe', 58] or in the
    compact form ['Fe-56', 'Fe-58'] (kept for compatibility).

    Parameters
    ----------
    isos : list
        Two- or four-element isotope specification as described above.

    Returns
    -------
    Ratio habu[iso1] / habu[iso2] computed via old_div (Python-2-style
    division semantics).
    '''
    if len(isos) == 2:
        # Compact 'El-A' form: expand to the [el1, A1, el2, A2] form.
        dumb = []
        dumb = isos[0].split('-')
        dumb.append(isos[1].split('-')[0])
        dumb.append(isos[1].split('-')[1])
        isos = dumb
    # habu keys are built as: element symbol lower-cased, left-justified
    # to 2 chars, followed by the mass number right-justified to 3 chars.
    ssratio = old_div(self.habu[isos[0].ljust(2).lower() + str(int(isos[1])).rjust(3)], self.habu[isos[2].ljust(2).lower() + str(int(isos[3])).rjust(3)])
    return ssratio
|
java
|
/**
 * Converts a spherical-Mercator y coordinate (metres) back to a latitude
 * in degrees via the inverse Gudermannian function.
 *
 * @param y vertical Mercator coordinate in metres
 * @return the corresponding latitude in degrees
 */
public static double metersYToLatitude( double y ) {
    final double gudermannian = Math.atan(Math.sinh(y / EQUATORIALRADIUS));
    return Math.toDegrees(gudermannian);
}
|
python
|
def main():
    """
    Run an associator from the command-line; starts and stops the JVM
    automatically.  Use -h to see all options.
    """
    parser = argparse.ArgumentParser(
        description='Executes an associator from the command-line. Calls JVM start/stop automatically.')
    parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
    parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
    parser.add_argument("-t", metavar="train", dest="train", required=True, help="training set file")
    parser.add_argument("associator", help="associator classname, e.g., weka.associations.Apriori")
    parser.add_argument("option", nargs=argparse.REMAINDER, help="additional associator options")
    parsed = parser.parse_args()
    jars = []
    if parsed.classpath is not None:
        jars = parsed.classpath.split(os.pathsep)
    jvm.start(jars, max_heap_size=parsed.heap, packages=True)
    logger.debug("Commandline: " + join_options(sys.argv[1:]))
    try:
        associator = Associator(classname=parsed.associator)
        if len(parsed.option) > 0:
            associator.options = parsed.option
        loader = converters.loader_for_file(parsed.train)
        data = loader.load_file(parsed.train)
        associator.build_associations(data)
        print(str(associator))
    # "except Exception, e" is Python-2-only syntax; the "as" form works
    # on both Python 2.6+ and Python 3.
    except Exception as e:
        print(e)
    finally:
        # Always shut the JVM down, even when building associations failed.
        jvm.stop()
|
java
|
/**
 * Marshalls the given {@link ZoneAwarenessConfig} into the wire format
 * via the supplied protocol marshaller.
 *
 * @param zoneAwarenessConfig the object to marshall; must not be null
 * @param protocolMarshaller marshaller receiving the bound fields
 * @throws SdkClientException if the argument is null or marshalling fails
 */
public void marshall(ZoneAwarenessConfig zoneAwarenessConfig, ProtocolMarshaller protocolMarshaller) {
    if (zoneAwarenessConfig == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(zoneAwarenessConfig.getAvailabilityZoneCount(), AVAILABILITYZONECOUNT_BINDING);
    } catch (Exception e) {
        // Wrap any marshalling failure, preserving the original cause.
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
|
python
|
def _selectTransition(self, allocentricLocation, objectDict, visitCounts):
"""
Choose the transition that lands us in the location we've touched the least
often. Break ties randomly, i.e. choose the first candidate in a shuffled
list.
"""
candidates = list(transition
for transition in self.transitions.keys()
if (allocentricLocation[0] + transition[0],
allocentricLocation[1] + transition[1]) in objectDict)
random.shuffle(candidates)
selectedVisitCount = None
selectedTransition = None
selectedAllocentricLocation = None
for transition in candidates:
candidateLocation = (allocentricLocation[0] + transition[0],
allocentricLocation[1] + transition[1])
if (selectedVisitCount is None or
visitCounts[candidateLocation] < selectedVisitCount):
selectedVisitCount = visitCounts[candidateLocation]
selectedTransition = transition
selectedAllocentricLocation = candidateLocation
return selectedAllocentricLocation, selectedTransition
|
java
|
/**
 * Computes a combined hash code over a varargs list of objects using the
 * classic 31-multiplier accumulation.  A null array yields the seed value
 * 1; element hashing is delegated to {@link ObjectUtils#hashCode(Object)},
 * which tolerates null elements.
 *
 * @param objects the objects to hash; may be null or contain nulls
 * @return the combined hash code
 * @deprecated retained for backwards compatibility
 */
@Deprecated
public static int hashCodeMulti(final Object... objects) {
    int hash = 1;
    if (objects != null) {
        for (final Object object : objects) {
            final int tmpHash = ObjectUtils.hashCode(object);
            hash = hash * 31 + tmpHash;
        }
    }
    return hash;
}
|
python
|
def square_off(samples, run_parallel):
    """Perform joint calling at all variants within a batch.

    Samples that carry batch metadata and have a compatible joint caller
    are grouped and processed in parallel per callable region; every other
    sample is passed through untouched in ``extras``.

    :param samples: list of (possibly wrapped) sample data dicts.
    :param run_parallel: parallel runner used for the split/combine steps.
    :return: joint-called samples followed by the untouched extras.
    """
    to_process = []
    extras = []
    for data in [utils.to_single_data(x) for x in samples]:
        added = False
        # Only samples with batch metadata participate in joint calling.
        if tz.get_in(("metadata", "batch"), data):
            for add in genotype.handle_multiple_callers(data, "jointcaller", require_bam=False):
                if _is_jointcaller_compatible(add):
                    added = True
                    to_process.append([add])
        if not added:
            extras.append([data])
    # Split by callable region, joint-call each region, then concatenate
    # the per-region variant files back into one vrn_file per batch.
    processed = grouped_parallel_split_combine(to_process, _split_by_callable_region,
                                               multi.group_batches_joint, run_parallel,
                                               "square_batch_region", "concat_variant_files",
                                               "vrn_file", ["region", "sam_ref", "config"])
    return _combine_to_jointcaller(processed) + extras
|
java
|
/**
 * Marshalls the given {@link RegisterThingRequest} into the wire format
 * via the supplied protocol marshaller.
 *
 * @param registerThingRequest the request to marshall; must not be null
 * @param protocolMarshaller marshaller receiving the bound fields
 * @throws SdkClientException if the argument is null or marshalling fails
 */
public void marshall(RegisterThingRequest registerThingRequest, ProtocolMarshaller protocolMarshaller) {
    if (registerThingRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(registerThingRequest.getTemplateBody(), TEMPLATEBODY_BINDING);
        protocolMarshaller.marshall(registerThingRequest.getParameters(), PARAMETERS_BINDING);
    } catch (Exception e) {
        // Wrap any marshalling failure, preserving the original cause.
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
|
python
|
def CheckPreviousBarline(self, staff):
    """Adjust the right barline of the second-to-last measure.

    When the penultimate measure's right barline carries an "ending"
    (volta bracket) and the final measure's left barline does not open a
    new one, the bracket type is set to "discontinue" so it does not
    continue into the last bar.
    """
    measure_before_last = self.getMeasureAtPosition(-2, staff)
    last_measure = self.getMeasureAtPosition(-1, staff)
    if last_measure is None or measure_before_last is None:
        return
    previous_right = measure_before_last.GetBarline("right")
    final_left = last_measure.GetBarline("left")
    if previous_right is None or not hasattr(previous_right, "ending"):
        return
    if final_left is None or not hasattr(final_left, "ending"):
        previous_right.ending.type = "discontinue"
|
python
|
def CreatedField(name='created', tz_aware=False, **kwargs):
    ''' A shortcut field for creation time.  It sets the current date and
        time when the document first enters the database and is never
        updated on subsequent saves — the equivalent of Django's
        ``auto_now_add``.

        :param name: attribute name assigned to the generated field.
        :param tz_aware: if True, the returned value is a UTC-aware
            datetime (requires ``pytz``).  The value is always saved
            in UTC either way.
        :param kwargs: passed through to :func:`computed_field`.
    '''
    # one_time=True makes the computed value stick after the first save.
    @computed_field(DateTimeField(), one_time=True, **kwargs)
    def created(obj):
        if tz_aware:
            import pytz
            return pytz.utc.localize(datetime.utcnow())
        return datetime.utcnow()
    # Expose the field under the caller-chosen name.
    created.__name__ = name
    return created
|
python
|
def adjust_for_triggers(self):
    """Remove trigger-related plugins when needed

    If there are no triggers defined, it's assumed the
    feature is disabled and all trigger-related plugins
    are removed.

    If there are triggers defined, and this is a custom
    base image, some trigger-related plugins do not apply.

    Additionally, this method ensures that custom base
    images never have triggers since triggering a base
    image rebuild is not a valid scenario.
    """
    triggers = self.template['spec'].get('triggers', [])
    # Plugins that only make sense when autorebuild triggers exist.
    remove_plugins = [
        ("prebuild_plugins", "check_and_set_rebuild"),
        ("prebuild_plugins", "stop_autorebuild_if_disabled"),
    ]
    should_remove = False
    if triggers and (self.is_custom_base_image() or self.is_from_scratch_image()):
        # Triggers are invalid for custom/scratch bases: drop them
        # entirely and remove the dependent plugins.
        if self.is_custom_base_image():
            msg = "removing %s from request because custom base image"
        elif self.is_from_scratch_image():
            msg = 'removing %s from request because FROM scratch image'
        del self.template['spec']['triggers']
        should_remove = True
    elif not triggers:
        msg = "removing %s from request because there are no triggers"
        should_remove = True
    # Note: when triggers exist and the image is a normal base image,
    # msg stays unset and nothing is removed.
    if should_remove:
        for when, which in remove_plugins:
            logger.info(msg, which)
            self.dj.remove_plugin(when, which)
|
java
|
/**
 * Attempts to make the given window non-opaque (transparent background).
 * Only acts when running on Java 6: non-Mac platforms go through the
 * Java 6u10 translucency facilities (best effort), while on the Mac the
 * look-and-feel's transparent colour is applied directly.
 *
 * @param window the window to make non-opaque
 */
public static void makeWindowNonOpaque(Window window) {
    if (PlatformUtils.isJava6()) {
        // on non-mac platforms, try to use the facilities of Java 6 update 10.
        if (!PlatformUtils.isMac()) {
            quietlyTryToMakeWindowNonOqaque(window);
        } else {
            // Mac: rely on the LAF's registered transparent colour.
            window.setBackground(UIManager.getColor("seaGlassTransparent"));
        }
    }
}
|
java
|
/**
 * Asynchronously requests the list of all recipe ids from the GW2 API and
 * delivers the result through the given callback.
 *
 * @param callback callback invoked with the resulting id list
 * @throws NullPointerException per the declared signature, when the
 *         callback is null (raised by the underlying enqueue call)
 */
public void getAllRecipeID(Callback<List<Integer>> callback) throws NullPointerException {
    gw2API.getAllRecipeIDs().enqueue(callback);
}
|
java
|
/**
 * Copies an Object array into a new {@code GraphRelationship[]}.
 * Every element must already be a GraphRelationship; otherwise
 * {@code System.arraycopy} throws an ArrayStoreException.
 *
 * @param o source array whose elements are GraphRelationship instances
 * @return a new array of the same length holding the copied references
 */
public static GraphRelationship[] toGRArray(Object[] o) {
    GraphRelationship[] result = new GraphRelationship[o.length];
    System.arraycopy(o, 0, result, 0, o.length);
    return result;
}
|
java
|
/**
 * Determines the device type (mobile, tablet or normal desktop) for the
 * given request by inspecting its {@code User-Agent} header.
 *
 * @param request the current servlet request
 * @return the detected {@link DeviceType}; {@code NORMAL} when no
 *         user-agent is present or nothing more specific matches
 */
public static DeviceType getDevice(final HttpServletRequest request) {
    // The parameter is already an HttpServletRequest; no cast is needed.
    String userAgent = request.getHeader("User-Agent");
    if (Util.empty(userAgent)) {
        LOG.warn("No User-Agent details in the request headers. Will assume normal device.");
        return DeviceType.NORMAL;
    }
    // Classify via the user-agent sniffing helper; mobile is checked first.
    UAgentInfo agentInfo = new UAgentInfo(userAgent, null);
    if (agentInfo.detectMobileQuick()) {
        return DeviceType.MOBILE;
    } else if (agentInfo.detectTierTablet()) {
        return DeviceType.TABLET;
    }
    return DeviceType.NORMAL;
}
|
python
|
def get_by(self, field, value):
    """
    Get all Users matching the given filter (case-insensitive).

    Args:
        field: Field to filter on; one of 'name', 'userName' or 'role'.
        value: Value to match.

    Returns:
        list: Matching Users.

    Raises:
        HPOneViewException: if ``field`` is not a supported filter.
    """
    if field in ('userName', 'name'):
        return self._client.get(self.URI + '/' + value)
    if field == 'role':
        # Role names may contain spaces; URL-escape them.
        escaped = value.replace(" ", "%20")
        response = self._client.get(self.URI + '/roles/users/' + escaped)
        return response['members']
    raise HPOneViewException(
        'Only userName, name and role can be queried for this resource.')
|
java
|
/**
 * Builds the list of renderer prototypes used for Video items: one
 * prototype per supported video flavour (liked, favorite and live).
 *
 * @return a new list containing one instance of each video renderer
 */
private List<Renderer<Video>> getRendererVideoPrototypes() {
    List<Renderer<Video>> prototypes = new LinkedList<Renderer<Video>>();
    prototypes.add(new LikeVideoRenderer());
    prototypes.add(new FavoriteVideoRenderer());
    prototypes.add(new LiveVideoRenderer());
    return prototypes;
}
|
java
|
/**
 * Builds the RESTful URL under which the given job artifact is served,
 * i.e. "/files/&lt;artifact-locator&gt;".
 *
 * @param jobIdentifier identifies the job owning the artifact
 * @param filePath path of the artifact within the job
 * @return the relative artifact URL
 */
public String getRestfulArtifactUrl(JobIdentifier jobIdentifier, String filePath) {
    return format("/%s/%s", "files", jobIdentifier.artifactLocator(filePath));
}
|
python
|
def _simplify_block(self, ail_block, stack_pointer_tracker=None):
    """
    Simplify a single AIL block by running the AILBlockSimplifier analysis.

    :param ailment.Block ail_block: The AIL block to simplify.
    :param stack_pointer_tracker: The stack-pointer tracking analysis
        instance (docstring previously called it RegisterDeltaTracker —
        NOTE(review): confirm the exact analysis type).
    :return: A simplified AIL block.
    """
    simp = self.project.analyses.AILBlockSimplifier(ail_block, stack_pointer_tracker=stack_pointer_tracker)
    return simp.result_block
|
python
|
def governor(self, Xgov, Pgov, Vgov):
    """ Governor model: returns state derivatives F for all governors.

    Based on Governor.m from MatDyn by Stijn Cole, developed at Katholieke
    Universiteit Leuven. See U{http://www.esat.kuleuven.be/electa/teaching/
    matdyn/} for more information.

    :param Xgov: governor state matrix (one row per generator).
    :param Pgov: governor parameter matrix (columns documented below).
    :param Vgov: governor input matrix; column 0 is rotor speed omega.
    :returns: matrix F of state derivatives, same shape as Xgov.
    """
    governors = self.governors
    # Synchronous (reference) angular speed.
    omegas = 2 * pi * self.freq
    F = zeros(Xgov.shape)
    # Generator indices partitioned by governor model type.
    typ1 = [g.generator._i for g in governors if g.model == CONST_POWER]
    typ2 = [g.generator._i for g in governors if g.model == GENERAL_IEEE]

    # Governor type 1: constant power -> mechanical power stays fixed.
    F[typ1, 0] = 0

    # Governor type 2: IEEE general speed-governing system.
    Pm = Xgov[typ2, 0]
    P = Xgov[typ2, 1]
    x = Xgov[typ2, 2]
    z = Xgov[typ2, 3]

    # Parameter columns: gain K, time constants T1-T3, ramp limits
    # Pup/Pdown, output limits Pmax/Pmin, setpoint P0.
    K = Pgov[typ2, 0]
    T1 = Pgov[typ2, 1]
    T2 = Pgov[typ2, 2]
    T3 = Pgov[typ2, 3]
    Pup = Pgov[typ2, 4]
    Pdown = Pgov[typ2, 5]
    Pmax = Pgov[typ2, 6]
    Pmin = Pgov[typ2, 7]
    P0 = Pgov[typ2, 8]

    omega = Vgov[typ2, 0]

    # Lead-lag filter on the speed deviation.
    dx = K * (-1 / T1 * x + (1 - T2 / T1) * (omega - omegas))
    dP = 1 / T1 * x + T2 / T1 * (omega - omegas)
    # Servo input limited by ramp rates Pup/Pdown below.
    y = 1 / T3 * (P0 - P - Pm)
    y2 = y

    # NOTE(review): flatnonzero() returns *indices*, yet these clamping
    # expressions use it arithmetically like a boolean mask — this looks
    # like a translation slip from the original MATLAB (which used logical
    # masks).  Confirm against Governor.m before relying on the limits.
    if sum(flatnonzero(y > Pup)) >= 1:
        y2 = (1 - flatnonzero(y > Pup)) * y2 + flatnonzero(y > Pup) * Pup
    if sum(flatnonzero(y < Pdown)) >= 1:
        y2 = (1 - flatnonzero(y<Pdown)) * y2 + flatnonzero(y<Pdown) * Pdown

    dz = y2
    dPm = y2

    # Same mask concern applies to the Pmax/Pmin output saturation.
    if sum(flatnonzero(z > Pmax)) >= 1:
        dPm = (1 - flatnonzero(z > Pmax)) * dPm + flatnonzero(z > Pmax) * 0
    if sum(flatnonzero(z < Pmin)) >= 1:
        dPm = (1 - flatnonzero(z < Pmin)) * dPm + flatnonzero(z < Pmin) * 0

    F[typ2, :] = c_[dPm, dP, dx, dz]

    # Governor type 3:
    # Governor type 4:
    return F
|
java
|
/**
 * Disables client-side caching on the given response by setting the
 * standard trio of anti-cache headers (HTTP/1.1 cache-control, HTTP/1.0
 * pragma and the legacy expires header).
 *
 * @param response the response to decorate
 * @return the same response instance, for call chaining
 */
public static HttpServletResponse noCache(HttpServletResponse response) {
    final String[][] headers = {
            {"cache-control", "no-cache"},
            {"pragma", "no-cache"},
            {"expires", "0"},
    };
    for (String[] header : headers) {
        response.setHeader(header[0], header[1]);
    }
    return response;
}
|
java
|
/**
 * DBSCAN cluster expansion: grows {@code cluster} from a core point by
 * repeatedly absorbing density-reachable neighbours.
 *
 * @param cluster the cluster being built; updated in place
 * @param point the core point that seeded the expansion
 * @param neighbors the initial neighbourhood of {@code point}
 * @param points KD-tree over all points, used for neighbour queries
 * @param visited per-point status map; updated in place
 * @return the same cluster instance, after expansion
 */
private Cluster expandCluster(final Cluster cluster,
                              final Point2D point,
                              final List<Point2D> neighbors,
                              final KDTree<Point2D> points,
                              final Map<Point2D, PointStatus> visited) {
    cluster.addPoint(point);
    visited.put(point, PointStatus.PART_OF_CLUSTER);
    // Work queue of candidate points; may grow while being traversed.
    List<Point2D> seeds = new ArrayList<Point2D>(neighbors);
    int index = 0;
    while (index < seeds.size()) {
        Point2D current = seeds.get(index);
        PointStatus pStatus = visited.get(current);
        // only check non-visited points
        if (pStatus == null) {
            final List<Point2D> currentNeighbors = getNeighbors(current, points);
            // Core point: its neighbourhood becomes reachable too.
            if (currentNeighbors.size() >= minPoints) {
                seeds = merge(seeds, currentNeighbors);
            }
        }
        // Claim the point unless some cluster already owns it.
        if (pStatus != PointStatus.PART_OF_CLUSTER) {
            visited.put(current, PointStatus.PART_OF_CLUSTER);
            cluster.addPoint(current);
        }
        index++;
    }
    return cluster;
}
|
python
|
def _assemble_modification(stmt):
    """Assemble a Modification statement into an English sentence.

    With an enzyme present the sentence is active ("X phosphorylates Y")
    or indirect ("X leads to the phosphorylation of Y"); without one it is
    a state description ("Y is phosphorylated").  Residue/position details
    are appended when available.
    """
    sub_str = _assemble_agent_str(stmt.sub)
    if stmt.enz is not None:
        enz_str = _assemble_agent_str(stmt.enz)
        # Direct interactions get an active verb; indirect ones a noun form.
        if _get_is_direct(stmt):
            mod_str = ' ' + _mod_process_verb(stmt) + ' '
        else:
            mod_str = ' leads to the ' + _mod_process_noun(stmt) + ' of '
        stmt_str = enz_str + mod_str + sub_str
    else:
        stmt_str = sub_str + ' is ' + _mod_state_stmt(stmt)
    if stmt.residue is not None:
        if stmt.position is None:
            # NOTE(review): `ist` is presumably the statements module
            # providing the amino_acids lookup — confirm the import.
            mod_str = 'on ' + ist.amino_acids[stmt.residue]['full_name']
        else:
            mod_str = 'on ' + stmt.residue + stmt.position
    else:
        mod_str = ''
    stmt_str += ' ' + mod_str
    return _make_sentence(stmt_str)
|
python
|
def get_path(num):
    """Build the bucketed directory path for a workitem number.

    For example, 31942 yields "30000-39999/31000-31999/31900-31999":
    one path segment per power of ten, from the widest bucket down to
    the hundreds bucket.  Numbers below 100 yield an empty string.
    """
    value = int(num)
    digits = len(str(value))
    segments = []
    # Walk the bucket widths from 10**(digits-1) down to 100.
    for exponent in range(digits - 1, 1, -1):
        step = 10 ** exponent
        low = (value // step) * step
        segments.append("{}-{}".format(low, low + step - 1))
    return "/".join(segments)
|
java
|
/**
 * Starts the MTP2 initial alignment procedure: optionally resets the
 * transmit-frame offset, resets the link state machine, moves to the
 * NOT_ALIGNED state and starts the T2 alignment timer.
 *
 * @param resetTxOffset whether to reset the tx frame offset to its
 *        initial value before aligning
 */
protected void startInitialAlignment(boolean resetTxOffset) {
    if (logger.isDebugEnabled()) {
        logger.debug(String.format("(%s) Starting initial alignment", name));
    }
    // Comment from Oleg: this is done initialy to setup correct spot in tx
    // buffer: dunno, I just believe, for now.
    if (resetTxOffset) {
        // txOffset = 3;
        // NOTE(review): the offset of 3 is unexplained in the original
        // code ("I really dont get this shift") — presumably it skips the
        // frame header bytes; confirm against the MTP2 frame layout.
        this.txFrame.offset = 3;
    }
    this.reset();
    // switch state
    // this.state = MTP2_NOT_ALIGNED;
    this.setState(MTP2_NOT_ALIGNED);
    // starting T2 timer
    start_T2();
}
|
python
|
def format(ctx):
    """Auto-format the package's source files: isort first, then black."""
    package_dir = ctx.package.directory
    report.info(ctx, "package.format", "sorting imports")
    ctx.run(f"isort -rc {package_dir!s}")
    report.info(ctx, "package.format", "formatting code")
    ctx.run(f"black {package_dir.parent!s}")
|
java
|
/**
 * Sets the language-tagged string value carried by this element.
 *
 * @param value the new value to store
 */
@Override
public void setValue(org.openprovenance.prov.model.LangString value) {
    this.value = value;
}
|
python
|
def _sub_latlon(self, other):
    '''
    Build a GeoVector from the pyproj inverse transform between this
    point and ``other``; called when a LatLon object is subtracted
    from self.  Uses the reverse heading and the distance reported by
    the inverse solution.
    '''
    inverse = self._pyproj_inv(other)
    return GeoVector(initial_heading=inverse['heading_reverse'],
                     distance=inverse['distance'])
|
java
|
/**
 * Parses the given file into a {@link UNode} tree, as JSON or XML
 * depending on the supplied content type; any other content type fails
 * the {@code Utils.require} check.
 *
 * NOTE(review): FileReader uses the platform default charset here; if the
 * files are expected to be UTF-8 regardless of platform, an explicit
 * charset should be supplied — confirm the intended encoding.
 *
 * @param file file to parse
 * @param contentType declared content type (JSON or XML)
 * @return the parsed root node
 * @throws Exception if reading or parsing fails, or the type is unsupported
 */
public static UNode parse(File file, ContentType contentType) throws Exception {
    try (Reader reader = new BufferedReader(new FileReader(file))) {
        UNode result = null;
        if (contentType.isJSON()) {
            result = parseJSON(reader);
        } else if (contentType.isXML()) {
            result = parseXML(reader);
        } else {
            Utils.require(false, "Unsupported content-type: " + contentType);
        }
        return result;
    }
}
|
python
|
def refresh_editor(self, color_scheme):
    """
    Refresh editor settings (background and highlight colors) when the
    color scheme changed.

    Updates the editor's background/foreground/whitespace colours, then
    refreshes the caret-line highlighter mode and the folding panel when
    they are installed (missing modes/panels are silently skipped).

    :param color_scheme: new color scheme.
    """
    self.editor.background = color_scheme.background
    self.editor.foreground = color_scheme.formats[
        'normal'].foreground().color()
    self.editor.whitespaces_foreground = color_scheme.formats[
        'whitespace'].foreground().color()
    # The caret-line highlighter is optional; skip if not installed.
    try:
        mode = self.editor.modes.get('CaretLineHighlighterMode')
    except KeyError:
        pass
    else:
        mode.background = color_scheme.highlight
        mode.refresh()
    # The folding panel is optional as well.
    try:
        mode = self.editor.panels.get('FoldingPanel')
    except KeyError:
        pass
    else:
        mode.refresh_decorations(force=True)
    # Re-apply the stylesheet so the new colours take effect.
    self.editor._reset_stylesheet()
|
python
|
def get_array_items_description(item: Array) -> str:
    """Returns a description for an array's items.

    :param item: The Array type whose items should be documented.
    :returns: A string documenting what type the array's items should be.
    """
    if isinstance(item.items, list):
        # Positional schema: each index of the array maps to its own type,
        # so document every position separately.
        desc = ''
        item_pos_template = (
            ' *Item {pos} must be*: {description}{enum}{ref}')
        # Use a distinct loop variable; the original shadowed the `item`
        # parameter here.
        for pos, sub_item in enumerate(item.items):
            _enum = ''
            ref = ''
            if issubclass(sub_item, Enum):
                _enum = ' Must be one of: `{}`'.format(sub_item.enum)
                if sub_item.case_insensitive:
                    _enum += ' (case-insensitive)'
                _enum += '.'
            elif issubclass(sub_item, Object):
                ref = get_object_reference(sub_item)
            desc += item_pos_template.format(
                pos=pos, description=sub_item.description, enum=_enum,
                ref=ref)
    else:
        # Single-type schema: all items share the one type in `items`.
        desc = item.items.description
        _enum = ''
        ref = ''
        if issubclass(item.items, Enum):
            _enum = ' Must be one of: `{}`'.format(
                item.items.enum)
            if item.items.case_insensitive:
                _enum += ' (case-insensitive)'
            _enum += '.'
        elif issubclass(item.items, Object):
            ref = get_object_reference(item.items)
        desc = ' *Items must be*: {description}{enum}{ref}'.format(
            description=desc, enum=_enum, ref=ref)
    return desc
|
python
|
def update(self, collection, selector, modifier, callback=None):
    """Modify documents in a collection.

    Arguments:
        collection: name of the collection to modify.
        selector: specifies which documents to modify.
        modifier: specifies how to modify the documents.

    Keyword Arguments:
        callback: optional; called with an error object as the first
            argument and, if no error, the number of affected documents
            as the second.
    """
    endpoint = "/" + collection + "/update"
    self.call(endpoint, [selector, modifier], callback=callback)
|
java
|
/**
 * Returns the EClass for IfcModulusOfSubgradeReactionSelect, resolving it
 * lazily from the registered Ifc4 package on first access (generated
 * classifier index 1144).
 *
 * @return the meta-model class object
 */
@Override
public EClass getIfcModulusOfSubgradeReactionSelect() {
    if (ifcModulusOfSubgradeReactionSelectEClass == null) {
        ifcModulusOfSubgradeReactionSelectEClass = (EClass) EPackage.Registry.INSTANCE
                .getEPackage(Ifc4Package.eNS_URI).getEClassifiers().get(1144);
    }
    return ifcModulusOfSubgradeReactionSelectEClass;
}
|
python
|
def lsumdiffsquared(x, y):
    """
    Sum of squared pairwise differences between lists x and y.

    Usage:   lsumdiffsquared(x,y)
    Returns: sum[(x[i]-y[i])**2]
    """
    # Index-based access preserved so that a shorter y still raises
    # IndexError, exactly like the original loop.
    return sum((x[i] - y[i]) ** 2 for i in range(len(x)))
|
java
|
/**
 * Creates a JAXB element wrapping a GML {@code coverageFunction} value
 * under the gml namespace qualified name.
 *
 * @param value the coverage function payload
 * @return a JAXBElement carrying the value
 */
@XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "coverageFunction")
public JAXBElement<CoverageFunctionType> createCoverageFunction(CoverageFunctionType value) {
    return new JAXBElement<CoverageFunctionType>(_CoverageFunction_QNAME, CoverageFunctionType.class, null, value);
}
|
python
|
def _combine_results(self, match_as_dict):
'''Combine results from different parsed parts:
we look for non-empty results in values like
'postal_code_b' or 'postal_code_c' and store
them as main value.
So 'postal_code_b':'123456'
becomes:
'postal_code' :'123456'
'''
keys = []
vals = []
for k, v in six.iteritems(match_as_dict):
if k[-2:] in '_a_b_c_d_e_f_g_h_i_j_k_l_m':
if v:
# strip last 2 chars: '..._b' -> '...'
keys.append(k[:-2])
vals.append(v)
else:
if k not in keys:
keys.append(k)
vals.append(v)
return dict(zip(keys, vals))
|
python
|
def end_element(self, name):
    """SAX end-tag handler: leave the in-tag state and, when the closing
    tag matches the tracked tag, emit the collected URL via add_url()."""
    self.in_tag = False
    if name != self.tag:
        return
    self.add_url()
|
java
|
/**
 * Tests whether two points lie within the configured distance precision
 * of each other, i.e. |distance(p1, p2)| &lt;= distancePrecision.
 *
 * @param p1 the first point
 * @param p2 the second point
 * @return {@code true} when the distance falls inside the epsilon band
 */
@Pure
public static boolean epsilonEqualsDistance(Point3D<?, ?> p1, Point3D<?, ?> p2) {
    final double separation = p1.getDistance(p2);
    return Math.abs(separation) <= distancePrecision;
}
|
java
|
/**
 * Updates the cached stroke colour and pushes it to the renderer, but
 * only when at least one RGB channel differs from the cached values —
 * identical colours are a no-op.
 *
 * @param color the requested stroke colour
 */
private void setStrokeColor(Color3f color) {
    final boolean unchanged = cacheStrokeR == color.x
            && cacheStrokeG == color.y
            && cacheStrokeB == color.z;
    if (!unchanged) {
        cacheStrokeR = color.x;
        cacheStrokeG = color.y;
        cacheStrokeB = color.z;
        setStrokeColorFromCache();
    }
}
|
python
|
def page_not_found(request, template_name='404.html'):
    """
    Default 404 handler.

    First tries to render the project's own 404 template via
    render_in_page(); if that yields nothing, falls back to a minimal
    inline "Not Found" page.

    Templates: :template:`404.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    # Prefer the site-provided 404 page when one renders successfully.
    response = render_in_page(request, template_name)
    if response:
        return response
    # Fallback: minimal built-in body with the requested path echoed back.
    template = Template(
        '<h1>Not Found</h1>'
        '<p>The requested URL {{ request_path }} was not found on this server.</p>')
    body = template.render(RequestContext(
        request, {'request_path': request.path}))
    return http.HttpResponseNotFound(body, content_type=CONTENT_TYPE)
|
java
|
/**
 * Builds a short log string identifying the SAML AuthnRequest of the
 * given profile context (request id and issuer entityID).  Any failure
 * while extracting the request is deliberately swallowed and an empty
 * string is returned so that logging never breaks request processing.
 *
 * @param context the current profile request context
 * @return the log string, or "" when the request cannot be extracted
 */
protected String getLogString(ProfileRequestContext<?, ?> context) {
    try {
        AuthnRequest authnRequest = this.getAuthnRequest(context);
        return String.format("request-id='%s',sp='%s'", authnRequest.getID(), authnRequest.getIssuer().getValue());
    }
    catch (Exception e) {
        // Best-effort logging helper: never propagate extraction errors.
        return "";
    }
}
|
python
|
def batch_fn(dataset, training, shapes, target_names,
             batch_size=32, eval_batch_size=32, bucket_batch_length=32,
             bucket_max_length=256, bucket_min_length=8,
             bucket_length_step=1.1, buckets=None):
    """Batching function.

    Pads-and-batches the dataset, optionally bucketing examples by target
    length.  When `buckets` is not given, bucketing is enabled
    heuristically whenever the target shape has any unknown (None)
    dimension, using a batching scheme derived from the token budget
    `batch_size * bucket_batch_length`.

    :param dataset: tf.data.Dataset of (input, target) pairs.
    :param training: whether this is the training split (affects batch
        size and dropping of over-long sequences).
    :param shapes: (input_shape, target_shape) used for padding and the
        variable-shape heuristic.
    :param target_names: unused; kept for interface compatibility.
    :param buckets: optional (boundaries, batch_sizes) pair overriding the
        heuristic.
    :return: the batched dataset.
    """
    del target_names
    # If bucketing is not specified, check if target shapes are variable.
    cur_batch_size = batch_size if training else eval_batch_size
    if buckets is None:
        variable_target_shapes = False
        target_shape = shapes[1]
        for dim in target_shape:
            if dim is None:
                variable_target_shapes = True
        tf.logging.info("Heuristically setting bucketing to %s based on shapes "
                        "of target tensors." % variable_target_shapes)
        if variable_target_shapes:
            # Token budget per batch drives the bucket boundaries/sizes.
            batch_size_per_token = cur_batch_size * bucket_batch_length
            scheme = data_reader.batching_scheme(batch_size_per_token,
                                                 bucket_max_length,
                                                 bucket_min_length,
                                                 bucket_length_step,
                                                 drop_long_sequences=training)
            buckets = (scheme["boundaries"], scheme["batch_sizes"])
    if buckets:
        tf.logging.info("Bucketing with buckets %s." % str(buckets))
        # Bucket key: length of the target sequence.
        def example_length(_, target):
            return tf.shape(target)[0]
        boundaries, batch_sizes = buckets
        dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length(
            example_length, boundaries, batch_sizes))
    else:
        # Fixed-shape targets: plain padded batching.
        dataset = dataset.padded_batch(cur_batch_size, shapes)
    return dataset
|
java
|
/**
 * Cancels the underlying stream with the given (non-OK) status and
 * reports the call as ended to the per-call tracer.
 *
 * @param internalError the error status to cancel the stream with
 */
private void internalClose(Status internalError) {
    log.log(Level.WARNING, "Cancelling the stream with status {0}", new Object[] {internalError});
    stream.cancel(internalError);
    // This path is only taken on errors, so isOk() is expected false.
    serverCallTracer.reportCallEnded(internalError.isOk()); // error so always false
}
|
python
|
def set_http_application_url_input_config_http_app_url_op_type(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF XML payload for the set-http-application-url RPC
    (input/config-http-app-url/op-type) and dispatches it via the
    configured callback.

    Keyword Args:
        op_type: required; text value placed in the op-type element.
        callback: optional override for the transport callback.
    """
    config = ET.Element("config")
    set_http_application_url = ET.Element("set_http_application_url")
    # The RPC element itself becomes the payload root (generated pattern).
    config = set_http_application_url
    input = ET.SubElement(set_http_application_url, "input")
    config_http_app_url = ET.SubElement(input, "config-http-app-url")
    op_type = ET.SubElement(config_http_app_url, "op-type")
    op_type.text = kwargs.pop('op_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
python
|
def snake_case_to_headless_camel_case(snake_string):
    """Convert snake_case to headlessCamelCase.

    Args:
        snake_string: The string to be converted.

    Returns:
        The input string converted to headlessCamelCase: the first
        underscore-separated part is kept as-is and every following part
        is capitalized.
    """
    head, *tail = snake_string.split('_')
    return head + ''.join(part.capitalize() for part in tail)
|
java
|
/**
 * Recursively accumulates the node count and total data byte size of the
 * subtree rooted at {@code path} into {@code counts}.  The child list and
 * data length are read under the node's monitor to obtain a consistent
 * snapshot before recursing.
 *
 * @param path path of the subtree root; missing nodes are ignored
 * @param counts accumulator updated in place
 */
private void getCounts(String path, Counts counts) {
    DataNode node = getNode(path);
    if (node == null) {
        return;
    }
    String[] children = null;
    int len = 0;
    // Snapshot children and data size atomically w.r.t. node mutations.
    synchronized (node) {
        Set<String> childs = node.getChildren();
        if (childs != null) {
            children = childs.toArray(new String[childs.size()]);
        }
        len = (node.data == null ? 0 : node.data.length);
    }
    // add itself
    counts.count += 1;
    counts.bytes += len;
    if (children == null || children.length == 0) {
        return;
    }
    // Recurse outside the monitor to avoid holding locks across subtrees.
    for (String child : children) {
        getCounts(path + "/" + child, counts);
    }
}
|
java
|
/**
 * Creates a JAXB element wrapping an XBEL {@code evidence} string under
 * the xbel namespace qualified name.
 *
 * @param value the evidence text
 * @return a JAXBElement carrying the value
 */
@XmlElementDecl(namespace = "http://belframework.org/schema/1.0/xbel", name = "evidence")
public
JAXBElement<String> createEvidence(String value) {
    return new JAXBElement<String>(_Evidence_QNAME, String.class, null,
        value);
}
|
java
|
/**
 * Renders a 2-D string table as an HTML table with row and column labels.
 * The header row holds an empty corner cell followed by the column labels;
 * each data row starts with its row label.  Null cells render as empty
 * data cells.  The table is assumed to be rectangular (the header width is
 * taken from the first row).
 *
 * @param table the cell contents, table[row][col]; null entries allowed
 * @param rowLabels one label per table row
 * @param colLabels one label per table column
 * @return the HTML markup (no trailing newline after the closing tag)
 */
public static String makeHTMLTable(String[][] table, String[] rowLabels, String[] colLabels) {
    StringBuilder html = new StringBuilder();
    html.append("<table class=\"auto\" border=\"1\" cellspacing=\"0\">\n");
    // Header row: empty corner cell, then one label cell per column.
    html.append("<tr>\n");
    html.append("<td></td>\n");
    int columns = table[0].length; // assume table is a rectangular matrix
    for (int j = 0; j < columns; j++) {
        html.append("<td class=\"label\">").append(colLabels[j]).append("</td>\n");
    }
    html.append("</tr>\n");
    // Data rows: row label followed by the row's data cells.
    for (int i = 0; i < table.length; i++) {
        html.append("<tr>\n");
        html.append("<td class=\"label\">").append(rowLabels[i]).append("</td>\n");
        for (int j = 0; j < table[i].length; j++) {
            String cell = table[i][j];
            html.append("<td class=\"data\">").append(cell != null ? cell : "").append("</td>\n");
        }
        html.append("</tr>\n");
    }
    html.append("</table>");
    return html.toString();
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.