| language (stringclasses, 2 values) | func_code_string (string, lengths 63–466k) |
|---|---|
java
|
/**
 * Deserializes a JSON payload of the form {"subscriptions": [...]} into a
 * collection of PartnerSubscription objects.
 *
 * @return the parsed subscriptions; empty when the array is absent
 * @throws JsonParseException if the payload cannot be parsed
 */
@Override
public Collection<PartnerSubscription> deserialize(JsonElement element, Type type, JsonDeserializationContext context)
    throws JsonParseException
{
    JsonObject root = element.getAsJsonObject();
    JsonArray subscriptionArray = root.getAsJsonArray("subscriptions");
    List<PartnerSubscription> result = new ArrayList<PartnerSubscription>();
    if (subscriptionArray != null && subscriptionArray.isJsonArray())
    {
        // Map each array entry onto a PartnerSubscription via the shared Gson instance.
        for (JsonElement entry : subscriptionArray)
        {
            result.add(gson.fromJson(entry, PartnerSubscription.class));
        }
    }
    return result;
}
|
java
|
/**
 * Sets the video position within its pod.
 *
 * @param videoPositionWithinPod the position of the video within the pod
 */
public void setVideoPositionWithinPod(com.google.api.ads.admanager.axis.v201811.VideoPositionWithinPod videoPositionWithinPod) {
    this.videoPositionWithinPod = videoPositionWithinPod;
}
|
java
|
/**
 * Returns the clock-skew offset (in seconds) to apply when signing the given
 * request. A non-zero SDK-wide global offset takes precedence over the
 * per-request value.
 *
 * @param request the request being signed
 * @return the effective time offset in seconds
 * @deprecated retained for backward compatibility; prefer the SDK-global time offset
 */
@Deprecated
protected int getTimeOffset(SignableRequest<?> request) {
    // Fixed typo: was "globleOffset".
    final int globalOffset = SDKGlobalTime.getGlobalTimeOffset();
    return globalOffset == 0 ? request.getTimeOffset() : globalOffset;
}
|
java
|
/**
 * Asynchronously retrieves a live output, invoking the callback on completion.
 *
 * @param resourceGroupName the resource group within the Azure subscription
 * @param accountName the Media Services account name
 * @param liveEventName the name of the live event
 * @param liveOutputName the name of the live output
 * @param serviceCallback the callback to invoke with the result
 * @return a {@link ServiceFuture} tracking the asynchronous call
 */
public ServiceFuture<LiveOutputInner> getAsync(String resourceGroupName, String accountName, String liveEventName, String liveOutputName, final ServiceCallback<LiveOutputInner> serviceCallback) {
    return ServiceFuture.fromResponse(getWithServiceResponseAsync(resourceGroupName, accountName, liveEventName, liveOutputName), serviceCallback);
}
|
java
|
/**
 * Runs the superclass validation, parses the configured dates, and asserts
 * that the parsed upper date lies strictly after the lower date.
 *
 * @throws Exception if parsing fails or any validation step fails
 */
@Override
public void validate() throws Exception {
    super.validate();
    this.parse();
    Assert.assertTrue("The upper date should be after the initial date",
        this.upper.after(this.lower));
}
|
python
|
def add_resource(self, resource):
    """Add a resource to the minimum needs table.

    :param resource: The resource to be added
    :type resource: dict
    """
    # Render the human-readable sentence with the resource's values substituted in.
    updated_sentence = NeedsProfile.format_sentence(
        resource['Readable sentence'], resource)
    if self.edit_item:
        # An existing list item is being edited: update its text in place.
        item = self.edit_item
        item.setText(updated_sentence)
        self.edit_item = None
    else:
        item = QtWidgets.QListWidgetItem(updated_sentence)
    # Keep the full resource dict on the item for later retrieval.
    item.resource_full = resource
    self.resources_list.addItem(item)
|
java
|
/**
 * Installs the npm version configured in {@code npmVersion} into the Node.js
 * installation described by {@code information}, using the npm binary that
 * ships bundled with Node.js.
 *
 * @param information location details of the Node.js installation to update
 * @throws MojoExecutionException if the npm install task fails
 */
private void updateNPMExecutable(NodeInstallationInformation information) throws MojoExecutionException {
    getLog().info("Installing specified npm version " + npmVersion);
    NpmInstallTask npmInstallTask = new NpmInstallTask();
    npmInstallTask.setLog(getLog());
    // Use the npm bundled with Node.js to bootstrap the requested version.
    npmInstallTask.setNpmBundledWithNodeJs(true);
    npmInstallTask.setArguments(new String[] {
        "--prefix", information.getNodeModulesRootPath(), "--global", "npm@" + npmVersion
    });
    npmInstallTask.execute(information);
}
|
python
|
def insert(self, space, t, *, replace=False, timeout=-1) -> _MethodRet:
    """
    Insert request coroutine.

    Examples:

    .. code-block:: pycon

        # Basic usage
        >>> await conn.insert('tester', [0, 'hello'])
        <Response sync=3 rowcount=1 data=[
            <TarantoolTuple id=0 name='hello'>
        ]>

        # Using dict as an argument tuple
        >>> await conn.insert('tester', {
        ...     'id': 0,
        ...     'name': 'hello'
        ... })
        <Response sync=3 rowcount=1 data=[
            <TarantoolTuple id=0 name='hello'>
        ]>

    :param space: space id or space name.
    :param t: tuple to insert (list object)
    :param replace: performs replace request instead of insert
    :param timeout: Request timeout
    :returns: :class:`asynctnt.Response` instance
    """
    # Delegate to the underlying database connection; replace=True switches
    # the request type from INSERT to REPLACE.
    return self._db.insert(space, t,
                           replace=replace,
                           timeout=timeout)
|
python
|
def _pre_flight(self):
    '''
    Run pre flight checks. If anything in this method fails then the master
    should not start up.

    Non-fatal problems accumulate in ``errors`` and fatal ones in
    ``critical_errors``; if either list is non-empty at the end, every entry
    is logged and the process exits with EX_GENERIC.
    '''
    errors = []
    critical_errors = []
    # Daemon convention: run from '/' so no mount point stays busy.
    try:
        os.chdir('/')
    except OSError as err:
        errors.append(
            'Cannot change to root directory ({0})'.format(err)
        )
    if self.opts.get('fileserver_verify_config', True):
        # Avoid circular import
        import salt.fileserver
        fileserver = salt.fileserver.Fileserver(self.opts)
        if not fileserver.servers:
            errors.append(
                'Failed to load fileserver backends, the configured backends '
                'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
            )
        else:
            # Run init() for all backends which support the function, to
            # double-check configuration
            try:
                fileserver.init()
            except salt.exceptions.FileserverConfigError as exc:
                critical_errors.append('{0}'.format(exc))
    if not self.opts['fileserver_backend']:
        errors.append('No fileserver backends are configured')
    # Check to see if we need to create a pillar cache dir
    if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
        try:
            # Restrictive umask: the cache can hold sensitive pillar data.
            with salt.utils.files.set_umask(0o077):
                os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
        except OSError:
            # Best-effort: failure to pre-create the cache dir is not fatal.
            pass
    if self.opts.get('git_pillar_verify_config', True):
        try:
            # Git ext_pillar entries are dicts keyed by 'git' whose value is
            # a list (a bare string means the legacy/invalid form).
            git_pillars = [
                x for x in self.opts.get('ext_pillar', [])
                if 'git' in x
                and not isinstance(x['git'], six.string_types)
            ]
        except TypeError:
            git_pillars = []
            critical_errors.append(
                'Invalid ext_pillar configuration. It is likely that the '
                'external pillar type was not specified for one or more '
                'external pillars.'
            )
        if git_pillars:
            try:
                new_opts = copy.deepcopy(self.opts)
                import salt.pillar.git_pillar
                for repo in git_pillars:
                    new_opts['ext_pillar'] = [repo]
                    # Instantiating GitPillar validates each repo's config.
                    try:
                        git_pillar = salt.utils.gitfs.GitPillar(
                            new_opts,
                            repo['git'],
                            per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                            per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
                            global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
                    except salt.exceptions.FileserverConfigError as exc:
                        critical_errors.append(exc.strerror)
            finally:
                del new_opts
    if errors or critical_errors:
        for error in errors:
            log.error(error)
        for error in critical_errors:
            log.critical(error)
        log.critical('Master failed pre flight checks, exiting\n')
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
java
|
/**
 * Sends a game invitation from the inviter to the invitees through the
 * SmartFoxServer game API, with the given expiry and context parameters.
 *
 * @return always {@code Boolean.TRUE}
 */
@SuppressWarnings("unchecked")
@Override
public Boolean execute() {
    SmartFoxServer.getInstance()
        .getAPIManager()
        .getGameApi()
        .sendInvitation(CommandUtil.getSfsUser(inviter, api),
            CommandUtil.getSFSUserList(invitees, api),
            expirySeconds,
            createCallback(),
            // Convert the plain params map into an SFSObject payload.
            MapUtill.map2SFSObject(context, params));
    return Boolean.TRUE;
}
|
python
|
def docs(context: Context):
    """
    Generates static HTML documentation with Sphinx.
    """
    try:
        from sphinx.application import Sphinx
    except ImportError:
        # Sphinx not installed yet: install it, then retry the import.
        context.pip_command('install', 'Sphinx')
        from sphinx.application import Sphinx
    # Ship the project README alongside the docs sources.
    context.shell('cp', 'README.rst', 'docs/README.rst')
    app = Sphinx('docs', 'docs', 'docs/build', 'docs/build/.doctrees', buildername='html', parallel=True,
                 verbosity=context.verbosity)
    app.build()
|
java
|
/**
 * Renders the header row of a table by delegating to the shared
 * header/footer renderer with the header-specific cell tag.
 *
 * @param facesContext the current faces context
 * @param writer the response writer to render into
 * @param component the table component being rendered
 * @param headerFacet the facet holding the header content
 * @param headerStyleClass CSS class to apply to the header row
 * @param colspan number of columns the header cell spans
 * @throws IOException if writing to the response fails
 */
protected void renderTableHeaderRow(FacesContext facesContext, ResponseWriter writer, UIComponent component,
    UIComponent headerFacet, String headerStyleClass, int colspan) throws IOException
{
    renderTableHeaderOrFooterRow(facesContext, writer, component, headerFacet, headerStyleClass,
        determineHeaderCellTag(facesContext, component),
        colspan, true);
}
|
java
|
/**
 * Computes the inverse of the 4x4 matrix {@code m} by Gauss-Jordan
 * elimination with partial pivoting, writing the result into {@code out}.
 * The matrix is accessed through the MAT helper (defined elsewhere in this
 * file), which maps (row, col) onto the flat float array.
 *
 * Each working row holds 8 floats: columns 0-3 are the matrix being reduced
 * and columns 4-7 the corresponding row of the identity, which becomes the
 * inverse as the elimination proceeds.
 *
 * @param m input 4x4 matrix (not modified)
 * @param out receives the inverse on success
 * @return 1 on success, 0 if the matrix is singular
 */
static int glhInvertMatrixf2(float[] m, float[] out) {
    float[][] wtmp = new float[4][8];
    float m0, m1, m2, m3, s;
    float[] r0, r1, r2, r3;
    r0 = wtmp[0];
    r1 = wtmp[1];
    r2 = wtmp[2];
    r3 = wtmp[3];
    // Load [m | I] into the four augmented working rows.
    r0[0] = MAT(m, 0, 0);
    r0[1] = MAT(m, 0, 1);
    r0[2] = MAT(m, 0, 2);
    r0[3] = MAT(m, 0, 3);
    r0[4] = 1.0f;
    r0[5] = r0[6] = r0[7] = 0.0f;
    r1[0] = MAT(m, 1, 0);
    r1[1] = MAT(m, 1, 1);
    r1[2] = MAT(m, 1, 2);
    r1[3] = MAT(m, 1, 3);
    r1[5] = 1.0f;
    r1[4] = r1[6] = r1[7] = 0.0f;
    r2[0] = MAT(m, 2, 0);
    r2[1] = MAT(m, 2, 1);
    r2[2] = MAT(m, 2, 2);
    r2[3] = MAT(m, 2, 3);
    r2[6] = 1.0f;
    r2[4] = r2[5] = r2[7] = 0.0f;
    r3[0] = MAT(m, 3, 0);
    r3[1] = MAT(m, 3, 1);
    r3[2] = MAT(m, 3, 2);
    r3[3] = MAT(m, 3, 3);
    r3[7] = 1.0f;
    r3[4] = r3[5] = r3[6] = 0.0f;
    /* choose pivot - or die: swap rows so the largest |column 0| is on top */
    if (Math.abs(r3[0]) > Math.abs(r2[0])) {
        float[] r = r2;
        r2 = r3;
        r3 = r;
    }
    if (Math.abs(r2[0]) > Math.abs(r1[0])) {
        float[] r = r2;
        r2 = r1;
        r1 = r;
    }
    if (Math.abs(r1[0]) > Math.abs(r0[0])) {
        float[] r = r1;
        r1 = r0;
        r0 = r;
    }
    if (0.0 == r0[0])
        return 0;
    /* eliminate first variable */
    m1 = r1[0] / r0[0];
    m2 = r2[0] / r0[0];
    m3 = r3[0] / r0[0];
    s = r0[1];
    r1[1] -= m1 * s;
    r2[1] -= m2 * s;
    r3[1] -= m3 * s;
    s = r0[2];
    r1[2] -= m1 * s;
    r2[2] -= m2 * s;
    r3[2] -= m3 * s;
    s = r0[3];
    r1[3] -= m1 * s;
    r2[3] -= m2 * s;
    r3[3] -= m3 * s;
    // Identity-half columns are mostly zero; skip the work when s == 0.
    s = r0[4];
    if (s != 0.0) {
        r1[4] -= m1 * s;
        r2[4] -= m2 * s;
        r3[4] -= m3 * s;
    }
    s = r0[5];
    if (s != 0.0) {
        r1[5] -= m1 * s;
        r2[5] -= m2 * s;
        r3[5] -= m3 * s;
    }
    s = r0[6];
    if (s != 0.0) {
        r1[6] -= m1 * s;
        r2[6] -= m2 * s;
        r3[6] -= m3 * s;
    }
    s = r0[7];
    if (s != 0.0) {
        r1[7] -= m1 * s;
        r2[7] -= m2 * s;
        r3[7] -= m3 * s;
    }
    /* choose pivot - or die */
    if (Math.abs(r3[1]) > Math.abs(r2[1])) {
        float[] r = r2;
        r2 = r3;
        r3 = r;
    }
    if (Math.abs(r2[1]) > Math.abs(r1[1])) {
        float[] r = r2;
        r2 = r1;
        r1 = r;
    }
    if (0.0 == r1[1])
        return 0;
    /* eliminate second variable */
    m2 = r2[1] / r1[1];
    m3 = r3[1] / r1[1];
    r2[2] -= m2 * r1[2];
    r3[2] -= m3 * r1[2];
    r2[3] -= m2 * r1[3];
    r3[3] -= m3 * r1[3];
    s = r1[4];
    if (0.0 != s) {
        r2[4] -= m2 * s;
        r3[4] -= m3 * s;
    }
    s = r1[5];
    if (0.0 != s) {
        r2[5] -= m2 * s;
        r3[5] -= m3 * s;
    }
    s = r1[6];
    if (0.0 != s) {
        r2[6] -= m2 * s;
        r3[6] -= m3 * s;
    }
    s = r1[7];
    if (0.0 != s) {
        r2[7] -= m2 * s;
        r3[7] -= m3 * s;
    }
    /* choose pivot - or die */
    if (Math.abs(r3[2]) > Math.abs(r2[2])) {
        float[] r = r2;
        r2 = r3;
        r3 = r;
    }
    if (0.0 == r2[2])
        return 0;
    /* eliminate third variable */
    m3 = r3[2] / r2[2];
    r3[3] -= m3 * r2[3];
    r3[4] -= m3 * r2[4];
    r3[5] -= m3 * r2[5];
    r3[6] -= m3 * r2[6];
    r3[7] -= m3 * r2[7];
    /* last check */
    if (0.0 == r3[3])
        return 0;
    s = 1.0f / r3[3]; /* now back substitute row 3 */
    r3[4] *= s;
    r3[5] *= s;
    r3[6] *= s;
    r3[7] *= s;
    m2 = r2[3]; /* now back substitute row 2 */
    s = 1.0f / r2[2];
    r2[4] = s * (r2[4] - r3[4] * m2);
    r2[5] = s * (r2[5] - r3[5] * m2);
    r2[6] = s * (r2[6] - r3[6] * m2);
    r2[7] = s * (r2[7] - r3[7] * m2);
    m1 = r1[3];
    r1[4] -= r3[4] * m1;
    r1[5] -= r3[5] * m1;
    r1[6] -= r3[6] * m1;
    r1[7] -= r3[7] * m1;
    m0 = r0[3];
    r0[4] -= r3[4] * m0;
    r0[5] -= r3[5] * m0;
    r0[6] -= r3[6] * m0;
    r0[7] -= r3[7] * m0;
    m1 = r1[2]; /* now back substitute row 1 */
    s = 1.0f / r1[1];
    r1[4] = s * (r1[4] - r2[4] * m1);
    r1[5] = s * (r1[5] - r2[5] * m1);
    r1[6] = s * (r1[6] - r2[6] * m1);
    r1[7] = s * (r1[7] - r2[7] * m1);
    m0 = r0[2];
    r0[4] -= r2[4] * m0;
    r0[5] -= r2[5] * m0;
    r0[6] -= r2[6] * m0;
    r0[7] -= r2[7] * m0;
    m0 = r0[1]; /* now back substitute row 0 */
    s = 1.0f / r0[0];
    r0[4] = s * (r0[4] - r1[4] * m0);
    r0[5] = s * (r0[5] - r1[5] * m0);
    r0[6] = s * (r0[6] - r1[6] * m0);
    r0[7] = s * (r0[7] - r1[7] * m0);
    // Copy the identity half (now the inverse) back into out.
    MAT(out, 0, 0, r0[4]);
    MAT(out, 0, 1, r0[5]);
    MAT(out, 0, 2, r0[6]);
    MAT(out, 0, 3, r0[7]);
    MAT(out, 1, 0, r1[4]);
    MAT(out, 1, 1, r1[5]);
    MAT(out, 1, 2, r1[6]);
    MAT(out, 1, 3, r1[7]);
    MAT(out, 2, 0, r2[4]);
    MAT(out, 2, 1, r2[5]);
    MAT(out, 2, 2, r2[6]);
    MAT(out, 2, 3, r2[7]);
    MAT(out, 3, 0, r3[4]);
    MAT(out, 3, 1, r3[5]);
    MAT(out, 3, 2, r3[6]);
    MAT(out, 3, 3, r3[7]);
    return 1;
}
|
python
|
def generate_feature_matrix(self, mode='tfidf'):
    """
    Returns a feature matrix in the form of a list of lists which
    represents the terms and documents in this Inverted Index using
    the tf-idf weighting by default. The term counts in each
    document can alternatively be used by specifying scheme='count'.
    A custom weighting function can also be passed which receives a term
    and document as parameters.

    The size of the matrix is equal to m x n where m is
    the number of documents and n is the number of terms.

    The list-of-lists format returned by this function can be very easily
    converted to a numpy matrix if required using the `np.as_matrix`
    method.
    """
    # One row per document, each produced by the per-document vectorizer.
    return [
        self.generate_document_vector(document, mode)
        for document in self._documents
    ]
|
java
|
/**
 * Returns the IDP discovery response URL: the explicitly configured value
 * from extended metadata when present, otherwise a URL built from the SAML
 * entry point with the discovery-response parameter set.
 *
 * @param entityBaseURL base URL of this entity
 * @param entityAlias alias of this entity
 * @return the discovery response URL to advertise
 */
protected String getDiscoveryResponseURL(String entityBaseURL, String entityAlias) {
    if (extendedMetadata != null && extendedMetadata.getIdpDiscoveryResponseURL() != null
        && extendedMetadata.getIdpDiscoveryResponseURL().length() > 0) {
        return extendedMetadata.getIdpDiscoveryResponseURL();
    } else {
        // Fall back to the local SAML entry point, flagged as a discovery response.
        Map<String, String> params = new HashMap<String, String>();
        params.put(SAMLEntryPoint.DISCOVERY_RESPONSE_PARAMETER, "true");
        return getServerURL(entityBaseURL, entityAlias, getSAMLEntryPointPath(), params);
    }
}
|
java
|
/**
 * Writes an object-reference marker byte followed by the reference id
 * (encoded via {@code writeUnsigned}).
 *
 * @param ref id of the previously-serialized object being referenced
 */
@Override
public void writeRef(int ref)
{
    // Ensure room for the one-byte tag; writeUnsigned manages its own space.
    require(1);
    _buffer[_offset++] = (byte) ConstH3.REF;
    writeUnsigned(ref);
}
|
java
|
/**
 * Describes one or more matchmaking tickets, applying the standard
 * pre-execution request handlers before dispatching.
 *
 * @param request the describe-matchmaking request
 * @return the service result for the request
 */
@Override
public DescribeMatchmakingResult describeMatchmaking(DescribeMatchmakingRequest request) {
    request = beforeClientExecution(request);
    return executeDescribeMatchmaking(request);
}
|
python
|
def get_response(self):
    """Generate the response block of this request.

    Careful: it only sets the fields which can be set from the request.
    """
    res = IODControlRes()
    # Mirror the request's identifying fields into the response.
    for field in ["ARUUID", "SessionKey", "AlarmSequenceNumber"]:
        res.setfieldval(field, self.getfieldval(field))
    # Response block types are the request block type with 0x8000 added.
    res.block_type = self.block_type + 0x8000
    return res
|
java
|
/**
 * Collects every command definition registered for the given plug-in.
 *
 * @param pluginName name of the plug-in whose commands are requested
 * @return the matching command definitions (possibly empty, never null)
 */
public Set<CommandDefinition> getAllCommandDefinition(final String pluginName) {
    Set<CommandDefinition> matching = new HashSet<CommandDefinition>();
    for (CommandDefinition candidate : commandDefinitionsMap.values()) {
        if (!candidate.getPluginName().equals(pluginName)) {
            continue;
        }
        matching.add(candidate);
    }
    return matching;
}
|
java
|
/**
 * Reports all bug instances accumulated for the method just analyzed and
 * resets the per-method detector state.
 */
private void flush() {
    // Flush any Math.abs() bug still pending into its accumulator.
    if (pendingAbsoluteValueBug != null) {
        absoluteValueAccumulator.accumulateBug(pendingAbsoluteValueBug, pendingAbsoluteValueBugSourceLine);
        pendingAbsoluteValueBug = null;
        pendingAbsoluteValueBugSourceLine = null;
    }
    accumulator.reportAccumulatedBugs();
    // NOTE(review): when Integer/Long.MIN_VALUE was loaded, the accumulated
    // absolute-value bugs are discarded rather than reported - presumably
    // because they would be false positives in that case; confirm.
    if (sawLoadOfMinValue) {
        absoluteValueAccumulator.clearBugs();
    } else {
        absoluteValueAccumulator.reportAccumulatedBugs();
    }
    if (gcInvocationBugReport != null && !sawCurrentTimeMillis) {
        // Make sure the GC invocation is not in an exception handler
        // for OutOfMemoryError.
        boolean outOfMemoryHandler = false;
        for (CodeException handler : exceptionTable) {
            // Only handlers whose range is near the GC call are relevant.
            if (gcInvocationPC < handler.getHandlerPC() || gcInvocationPC > handler.getHandlerPC() + OOM_CATCH_LEN) {
                continue;
            }
            int catchTypeIndex = handler.getCatchType();
            if (catchTypeIndex > 0) {
                ConstantPool cp = getThisClass().getConstantPool();
                Constant constant = cp.getConstant(catchTypeIndex);
                if (constant instanceof ConstantClass) {
                    String exClassName = (String) ((ConstantClass) constant).getConstantValue(cp);
                    if ("java/lang/OutOfMemoryError".equals(exClassName)) {
                        outOfMemoryHandler = true;
                        break;
                    }
                }
            }
        }
        if (!outOfMemoryHandler) {
            bugReporter.reportBug(gcInvocationBugReport);
        }
    }
    // Reset state for the next method.
    sawCurrentTimeMillis = false;
    gcInvocationBugReport = null;
    exceptionTable = null;
}
|
java
|
/**
 * Marks the batch work associated with the given XID as rolling back.
 * Only valid for transactions in the PREPARED state; any other state
 * records a deferred exception instead. No-op if a deferred exception
 * already exists for this batch.
 *
 * @param xid the persistent transaction id being rolled back
 */
public void updateXIDToRolledback(PersistentTranId xid)
{
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(tc, "updateXIDToRolledback", "XID="+xid);
    if (_deferredException == null)
    {
        // We are rolling back a transaction. This should only be
        // applied to prepared transactions as single-phase rollbacks
        // should not get as far as the persistence layer.
        if (_state == STATE_PREPARED)
        {
            _state = STATE_ROLLINGBACK;
        }
        else
        {
            // Record the problem for later instead of throwing now.
            _deferredException = new PersistenceException("Cannot ROLLBACK batch as it not in the correct state! State="+_stateToString[_state]);
        }
    }
    else
    {
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(tc, "No work attempted as an exception has already been thrown during this batch!");
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "updateXIDToRolledback");
}
|
python
|
def p_iteration_statement_3(self, p):
    # The docstring below is a PLY grammar specification consumed by the
    # parser generator at runtime - do not reword it.
    """
    iteration_statement \
        : FOR LPAREN expr_noin_opt SEMI expr_opt SEMI expr_opt RPAREN \
            statement
        | FOR LPAREN VAR variable_declaration_list_noin SEMI expr_opt SEMI\
            expr_opt RPAREN statement
    """
    # len(p) == 10 -> first alternative: for (init; cond; count) statement
    if len(p) == 10:
        p[0] = ast.For(init=p[3], cond=p[5], count=p[7], statement=p[9])
    else:
        # Second alternative: the 'var' declarations become the init clause.
        init = ast.VarStatement(p[4])
        p[0] = ast.For(init=init, cond=p[6], count=p[8], statement=p[10])
|
java
|
/**
 * Looks up the {@link ApiZone} wrapper attached to the named SmartFox zone.
 *
 * @param name the zone name
 * @return the ApiZone stored under {@code APIKey.ZONE}, or null when the
 *         zone does not exist or carries no such property
 */
@Override
public ApiZone find(String name) {
    Zone zone = SmartFoxServer.getInstance()
        .getZoneManager()
        .getZoneByName(name);
    if(zone != null && zone.containsProperty(APIKey.ZONE))
        return (ApiZone) zone.getProperty(APIKey.ZONE);
    return null;
}
|
python
|
def group_entries_by_structure(entries, species_to_remove=None,
                               ltol=0.2, stol=.4, angle_tol=5,
                               primitive_cell=True, scale=True,
                               comparator=SpeciesComparator(),
                               ncpus=None):
    """
    Given a sequence of ComputedStructureEntries, use structure fitter to group
    them by structural similarity.

    Args:
        entries: Sequence of ComputedStructureEntries.
        species_to_remove: Sometimes you want to compare a host framework
            (e.g., in Li-ion battery analysis). This allows you to specify
            species to remove before structural comparison.
        ltol (float): Fractional length tolerance. Default is 0.2.
        stol (float): Site tolerance in Angstrom. Default is 0.4 Angstrom.
        angle_tol (float): Angle tolerance in degrees. Default is 5 degrees.
        primitive_cell (bool): If true: input structures will be reduced to
            primitive cells prior to matching. Defaults to True.
        scale: Input structures are scaled to equivalent volume if true;
            For exact matching, set to False.
        comparator: A comparator object implementing an equals method that
            declares equivalency of sites. Default is SpeciesComparator,
            which implies rigid species mapping.
        ncpus: Number of cpus to use. Use of multiple cpus can greatly improve
            fitting speed. Default of None means serial processing.

    Returns:
        Sequence of sequence of entries by structural similarity. e.g,
        [[ entry1, entry2], [entry3, entry4, entry5]]
    """
    start = datetime.datetime.now()
    logger.info("Started at {}".format(start))
    # Pair each entry with its (optionally species-stripped) host structure.
    entries_host = [(entry, _get_host(entry.structure, species_to_remove))
                    for entry in entries]
    if ncpus:
        # Pre-bucket entries by a cheap structure hash so each worker only
        # compares candidates that could possibly match.
        symm_entries = collections.defaultdict(list)
        for entry, host in entries_host:
            symm_entries[comparator.get_structure_hash(host)].append((entry,
                                                                     host))
        import multiprocessing as mp
        # FIX: use the module-level `logger` consistently (was `logging.info`,
        # which logged through the root logger and bypassed configuration).
        logger.info("Using {} cpus".format(ncpus))
        manager = mp.Manager()
        groups = manager.list()
        p = mp.Pool(ncpus)
        # Parallel processing only supports Python primitives and not objects,
        # so entries/hosts are shuttled to workers as JSON strings.
        p.map(_perform_grouping,
              [(json.dumps([e[0] for e in eh], cls=MontyEncoder),
                json.dumps([e[1] for e in eh], cls=MontyEncoder),
                ltol, stol, angle_tol, primitive_cell, scale,
                comparator, groups)
               for eh in symm_entries.values()])
    else:
        groups = []
        hosts = [host for entry, host in entries_host]
        _perform_grouping((json.dumps(entries, cls=MontyEncoder),
                           json.dumps(hosts, cls=MontyEncoder),
                           ltol, stol, angle_tol, primitive_cell, scale,
                           comparator, groups))
    # Decode each JSON-serialized group back into entry objects.
    entry_groups = []
    for g in groups:
        entry_groups.append(json.loads(g, cls=MontyDecoder))
    logger.info("Finished at {}".format(datetime.datetime.now()))
    logger.info("Took {}".format(datetime.datetime.now() - start))
    return entry_groups
|
python
|
def get_page(self, page_id):
    """Get short page info and body html code.

    :param page_id: id of the page to fetch.
    :returns: a ``TildaPage`` on success, or an empty list on network failure.
    """
    try:
        result = self._request('/getpage/',
                               {'pageid': page_id})
        return TildaPage(**result)
    except NetworkError:
        # NOTE(review): the failure path returns a list while the success
        # path returns a TildaPage - callers must handle both types.
        return []
|
java
|
/**
 * Returns a calendar set to the last millisecond of the day represented by
 * the given calendar. Only the year, month and day-of-month fields of the
 * input are used; all other fields are cleared.
 *
 * @param calendar the calendar whose day is extended to its last millisecond
 * @return a new calendar at 23:59:59.999 of the same day
 */
private static Calendar getEndOfDay(Calendar calendar) {
    // Copy only the date fields of the given calendar (fix: the old local
    // name "endOfMonth" and comment wrongly referred to the month).
    Calendar endOfDay = Calendar.getInstance();
    endOfDay.clear();
    endOfDay.set(Calendar.YEAR, calendar.get(Calendar.YEAR));
    endOfDay.set(Calendar.MONTH, calendar.get(Calendar.MONTH));
    endOfDay.set(Calendar.DAY_OF_MONTH, calendar.get(Calendar.DAY_OF_MONTH));
    // Advance to the last millisecond of the given day
    endOfDay.add(Calendar.DAY_OF_MONTH, 1);
    endOfDay.add(Calendar.MILLISECOND, -1);
    return endOfDay;
}
|
java
|
/**
 * Returns true when every destination slot has been resolved (non-null)
 * and at least one destination exists.
 */
boolean isReady() {
    final C[] ds = destinations.get();
    if (ds == null)
        return false;
    // Any unresolved (null) destination means not ready yet.
    for (final C d : ds)
        if (d == null)
            return false;
    final boolean ret = ds.length != 0; // this method is only called in tests and this needs to be true there.
    if (ret && LOGGER.isDebugEnabled())
        LOGGER.debug("at {} to {} is Ready " + shorthand(ds), thisNodeId, groupName);
    return ret;
}
|
python
|
def conjugate(self):
    """Complex conjugate of the indexed sum."""
    # Conjugation distributes over the sum: conjugate the summand and keep
    # the same summation ranges.
    return self.__class__.create(self.term.conjugate(), *self.ranges)
|
python
|
def is_diff(self):
    """Return True if there are any differences logged."""
    # Anything other than a dict cannot carry logged differences.
    if not isinstance(self.details, dict):
        return False
    # True as soon as any difference section holds a truthy value.
    return any(
        self.details.get(section)
        for section in ('additions', 'updates', 'deletions')
    )
|
java
|
/**
 * Returns the indices of the names in {@code list} that do not match any
 * (eligible) field declared on {@code type}.
 *
 * @param list field names to look for
 * @param type the type whose fields are scanned
 * @param excludeStandard if true, static fields and fields starting with '$' never match
 * @param excludeTransient if true, transient fields never match
 * @return indices into {@code list} of names with no matching field
 */
public static List<Integer> createListOfNonExistentFields(List<String> list, JavacNode type, boolean excludeStandard, boolean excludeTransient) {
    boolean[] matched = new boolean[list.size()];
    for (JavacNode child : type.down()) {
        if (list.isEmpty()) break;
        if (child.getKind() != Kind.FIELD) continue;
        JCVariableDecl field = (JCVariableDecl)child.get();
        if (excludeStandard) {
            // Skip static fields and compiler-generated '$' fields.
            if ((field.mods.flags & Flags.STATIC) != 0) continue;
            if (field.name.toString().startsWith("$")) continue;
        }
        if (excludeTransient && (field.mods.flags & Flags.TRANSIENT) != 0) continue;
        int idx = list.indexOf(child.getName());
        if (idx > -1) matched[idx] = true;
    }
    // Every name that never matched is reported by its index.
    ListBuffer<Integer> problematic = new ListBuffer<Integer>();
    for (int i = 0 ; i < list.size() ; i++) {
        if (!matched[i]) problematic.append(i);
    }
    return problematic.toList();
}
|
java
|
/**
 * Persists the global calculator settings (rounding, scale, separators,
 * output format) and the registered converters, operators and functions
 * to the global properties file. The file is created when missing.
 *
 * @throws IOException if the file cannot be created or written
 */
public void saveGlobalProperties() throws IOException {
    File propFile = new File(getGlobalPropertiesFile());
    if (propFile.createNewFile() || propFile.isFile()) {
        java.util.Properties prop = new java.util.Properties();
        if (getRoundingMode() != null)
            prop.put("roundingMode", getRoundingMode().name());
        if (getScale() != null)
            prop.put("scale", getScale());
        prop.put("stripTrailingZeros", Boolean.toString(hasStripTrailingZeros()));
        // Separators are stored quoted so surrounding whitespace survives reload.
        prop.put("decimalSeparator.in", "'" + getInputDecimalSeparator() + "'");
        prop.put("decimalSeparator.out", "'" + getOutputDecimalSeparator() + "'");
        if (getGroupingSeparator() != null)
            prop.put("groupingSeparator", getGroupingSeparator());
        if (getOutputFormat() != null)
            prop.put("outputFormat", getOutputFormat());
        //
        // Global NumConverter
        //
        HashMap<Class, NumConverter> cncs = CacheExtension.getAllNumConverter();
        int count = 0;
        for (Entry<Class, NumConverter> cnc : cncs.entrySet()) {
            prop.put("numconverter[" + count++ + "]", cnc.getKey().getName() + " > " + cnc.getValue().getClass().getName());
        }
        //
        // Global Operator
        //
        HashMap<Class<? extends Operator>, Operator> cops = CacheExtension.getOperators();
        count = 0;
        for (Entry<Class<? extends Operator>, Operator> cop : cops.entrySet()) {
            prop.put("operator[" + count++ + "]", cop.getKey().getName());
        }
        //
        // Global Function
        //
        HashMap<Class<? extends Function>, Function> cfns = CacheExtension.getFunctions();
        count = 0;
        for (Entry<Class<? extends Function>, Function> cfn : cfns.entrySet()) {
            prop.put("function[" + count++ + "]", cfn.getKey().getName());
        }
        // FIX: the original called close() before flush() (flushing a closed
        // stream) and leaked the descriptor if store() threw. Flush while
        // open, close in a finally block.
        FileOutputStream fos = new FileOutputStream(propFile);
        try {
            prop.store(fos, "Global properties for jCalc");
            fos.flush();
        } finally {
            fos.close();
        }
    }
}
|
python
|
def register_cmaps(category, provider, source, bg, names):
    """
    Maintain descriptions of colormaps that include the following information:

    name     - string name for the colormap
    category - intended use or purpose, mostly following matplotlib
    provider - package providing the colormap directly
    source   - original source or creator of the colormaps
    bg       - base/background color expected for the map
               ('light','dark','medium','any' (unknown or N/A))
    """
    for name in names:
        # insort keeps the module-level cmap_info registry sorted as entries
        # are added.
        bisect.insort(cmap_info, CMapInfo(name=name, provider=provider,
                                          category=category, source=source,
                                          bg=bg))
|
java
|
/**
 * Compares two URIs for equality after normalizing their port numbers
 * (e.g. default ports). Two null URIs are considered equal; a single null
 * operand is not.
 *
 * @param uri1 first URI, may be null
 * @param uri2 second URI, may be null
 * @return true when both URIs are equal after port normalization
 */
public static boolean urisEqualAfterPortNormalization(final URI uri1, final URI uri2) {
    if (uri1 == null || uri2 == null) {
        // Equal only when both are null.
        return uri1 == uri2;
    }
    try {
        final URI normalized1 = normalizePortNumbersInUri(uri1);
        final URI normalized2 = normalizePortNumbersInUri(uri2);
        return normalized1.equals(normalized2);
    } catch (URISyntaxException use) {
        logger.error("Cannot compare 2 URIs.", use);
        return false;
    }
}
|
java
|
/**
 * Resolves AWS credentials into {@code accessKey}/{@code secretKey}.
 * JVM system properties take precedence; environment variables are the
 * fallback. Both halves of a pair must be present for it to be used;
 * if neither source provides a complete pair, the fields are left unchanged.
 */
protected void findCreds() {
    if (System.getProperty(ACCESS_KEY) != null && System.getProperty(ACCESS_SECRET) != null) {
        accessKey = System.getProperty(ACCESS_KEY);
        secretKey = System.getProperty(ACCESS_SECRET);
    }
    else if (System.getenv(AWS_ACCESS_KEY) != null && System.getenv(AWS_SECRET_KEY) != null) {
        accessKey = System.getenv(AWS_ACCESS_KEY);
        secretKey = System.getenv(AWS_SECRET_KEY);
    }
}
|
python
|
def _get_addresses(self, text):
    '''Return a list of addresses found in *text* (empty when none match).'''
    # Apply the configured address rules; VERBOSE allows commented patterns,
    # U enables unicode-aware matching.
    found = utils.findall(
        self.rules,
        text,
        flags=re.VERBOSE | re.U)
    if not found:
        return []
    return [match[0].strip() for match in found]
|
python
|
def del_properties(elt, keys=None, ctx=None):
    """Delete elt property.

    :param elt: properties elt to del. Not None methods.
    :param keys: property keys to delete from elt. If empty, delete all
        properties.
    :param ctx: context holding the properties; resolved via ``find_ctx``
        when not supplied.
    """
    # get the best context
    if ctx is None:
        ctx = find_ctx(elt=elt)
    elt_properties = _ctx_elt_properties(elt=elt, ctx=ctx, create=False)
    # if elt properties exist
    if elt_properties is not None:
        if keys is None:
            # No keys given: delete every property.
            keys = list(elt_properties.keys())
        else:
            keys = ensureiterable(keys, iterable=tuple, exclude=str)
        for key in keys:
            if key in elt_properties:
                del elt_properties[key]
        # delete property component if empty
        if not elt_properties:
            # case of dynamic object: properties live on ctx.__dict__
            if isinstance(getattr(ctx, '__dict__', None), dict):
                try:
                    if elt in ctx.__dict__[__B3J0F__PROPERTIES__]:
                        del ctx.__dict__[__B3J0F__PROPERTIES__][elt]
                except TypeError:  # if elt is unhashable, fall back to its id
                    elt = id(elt)
                    if elt in ctx.__dict__[__B3J0F__PROPERTIES__]:
                        del ctx.__dict__[__B3J0F__PROPERTIES__][elt]
                # if ctx_properties is empty, delete it
                if not ctx.__dict__[__B3J0F__PROPERTIES__]:
                    del ctx.__dict__[__B3J0F__PROPERTIES__]
            # case of static object and hashable: properties live in module caches
            else:
                if isinstance(ctx, Hashable):
                    cache = __STATIC_ELEMENTS_CACHE__
                else:  # case of static and unhashable object
                    cache = __UNHASHABLE_ELTS_CACHE__
                    ctx = id(ctx)
                if not isinstance(elt, Hashable):
                    elt = id(elt)
                # in case of static object, prune the cache entry
                if ctx in cache:
                    del cache[ctx][elt]
                    if not cache[ctx]:
                        del cache[ctx]
|
python
|
def _offset_of_next_ff_byte(self, start):
    """
    Return the offset of the next ``b'\\xFF'`` byte in *stream* starting with
    the byte at offset *start*. Returns *start* if the byte at that
    offset is a hex 255; it does not necessarily advance in the stream.
    """
    self._stream.seek(start)
    byte_ = self._read_byte()
    # Scan forward one byte at a time until an FF marker is read.
    # NOTE(review): assumes _read_byte signals end-of-stream (e.g. raises);
    # otherwise a stream with no FF byte would loop forever - confirm.
    while byte_ != b'\xFF':
        byte_ = self._read_byte()
    # tell() is one past the FF byte just consumed.
    offset_of_ff_byte = self._stream.tell() - 1
    return offset_of_ff_byte
|
python
|
def type_last(self, obj: JsonObj) -> JsonObj:
    """ Move the type identifiers to the end of the object for print purposes """
    def _tl_list(v: List) -> List:
        # Recurse into JsonObj elements and nested lists; None entries are dropped.
        return [self.type_last(e) if isinstance(e, JsonObj)
                else _tl_list(e) if isinstance(e, list) else e for e in v if e is not None]
    rval = JsonObj()
    # Copy every non-None key except the type markers, recursing into containers.
    for k in as_dict(obj).keys():
        v = obj[k]
        if v is not None and k not in ('type', '_context'):
            rval[k] = _tl_list(v) if isinstance(v, list) else self.type_last(v) if isinstance(v, JsonObj) else v
    # Re-append the type identifier last so it prints at the end.
    if 'type' in obj and obj.type:
        rval.type = obj.type
    return rval
|
python
|
def codemirror_field_js_bundle(field):
    """
    Filter to get CodeMirror Javascript bundle name needed for a single field.

    Example:
    ::
        {% load djangocodemirror_tags %}
        {{ form.myfield|codemirror_field_js_bundle }}

    Arguments:
        field (django.forms.fields.Field): A form field that contains a widget
            :class:`djangocodemirror.widget.CodeMirrorWidget`.

    Raises:
        CodeMirrorFieldBundleError: If Codemirror configuration form field
            does not have a bundle name.

    Returns:
        string: Bundle name to load with webassets.
    """
    manifesto = CodemirrorAssetTagRender()
    manifesto.register_from_fields(field)
    # The first registered bundle is the one belonging to this single field.
    try:
        bundle_name = manifesto.js_bundle_names()[0]
    except IndexError:
        msg = ("Given field with configuration name '{}' does not have a "
               "Javascript bundle name")
        raise CodeMirrorFieldBundleError(msg.format(field.config_name))
    return bundle_name
|
java
|
/**
 * Verifies that no two aggregates in the application share non-root domain
 * objects, i.e. that nothing references into an aggregate other than via
 * its root.
 *
 * @param app the application whose aggregates are checked
 * @return true when all aggregate groups are disjoint; false (with a
 *         warning logged) when any two groups intersect
 */
public static boolean checkAggregateReferences(Application app) {
    Map<DomainObject, Set<DomainObject>> aggregateGroups = getAggregateGroups(app);
    // Compare every pair of aggregate groups (each pair is visited twice,
    // but the first overlapping pair short-circuits with a failure).
    for (Set<DomainObject> group1 : aggregateGroups.values()) {
        for (Set<DomainObject> group2 : aggregateGroups.values()) {
            if (group1 == group2) {
                continue;
            }
            // find only the elements common to both sets, i.e. the
            // intersection
            Set<DomainObject> intersection = new HashSet<DomainObject>(
                group1);
            intersection.retainAll(group2);
            if (!intersection.isEmpty()) {
                // found two groups with some non-root objects in common,
                // i.e. reference directly to a non-root from outside the
                // aggregate boundary
                LOG.warn("checkAggregateReferences failed with intersection: "
                    + intersection);
                return false;
            }
        }
    }
    // everything alright
    return true;
}
|
python
|
def load_target(cls, scheme, path, fragment, username,
                password, hostname, port, query,
                load_method, **kwargs):
    """Override this method to use values from the parsed uri to initialize
    the expected target.

    :raises NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError("load_target must be overridden")
|
python
|
def check_config_mode(self, check_string="config", pattern=""):
    """Checks if the device is in configuration mode or not.

    :param check_string: substring that identifies the config-mode prompt.
    :param pattern: prompt pattern to wait for; defaults to the escaped
        base prompt when not supplied.
    """
    if not pattern:
        pattern = re.escape(self.base_prompt)
    return super(CiscoWlcSSH, self).check_config_mode(check_string, pattern)
|
java
|
/**
 * Reports whether this handler accepts the given command.
 *
 * @param command the raw command string
 * @return true when the command begins with this tree's root token
 */
@Override
public boolean handles(String command) {
    return command.startsWith(this.tree.getTopNode().getData());
}
|
python
|
def param_dict_to_list(dict, skeys=None):
    """Flatten the arrays stored in *dict* into a single 1-D array.

    :param dict: mapping of parameter name -> array-like value. (The name
        shadows the builtin ``dict``; kept for backward compatibility.)
    :param skeys: explicit ordering of keys to concatenate. Defaults to
        sorted key order so the result is deterministic (the original
        crashed on the default ``None``).
    :returns: concatenation of the flattened values in key order.
    """
    if skeys is None:
        # Sort keys for a deterministic layout when no order is given.
        skeys = sorted(dict)
    RV = SP.concatenate([dict[key].flatten() for key in skeys])
    return RV
|
python
|
def reply():
    """Fetch a reply from RiveScript.

    Parameters (JSON):
    * username
    * message
    * vars

    Returns a JSON response with status "ok" and the bot's reply plus the
    user's variables, or status "error" with an error message.
    """
    params = request.json
    if not params:
        return jsonify({
            "status": "error",
            "error": "Request must be of the application/json type!",
        })
    username = params.get("username")
    message = params.get("message")
    uservars = params.get("vars", dict())
    # Make sure the required params are present.
    if username is None or message is None:
        return jsonify({
            "status": "error",
            "error": "username and message are required keys",
        })
    # Copy and user vars from the post into RiveScript.
    if type(uservars) is dict:
        for key, value in uservars.items():
            bot.set_uservar(username, key, value)
    # Get a reply from the bot.
    reply = bot.reply(username, message)
    # Get all the user's vars back out of the bot to include in the response.
    uservars = bot.get_uservars(username)
    # Send the response.
    return jsonify({
        "status": "ok",
        "reply": reply,
        "vars": uservars,
    })
|
python
|
def napi_or(values, **kwargs):
    """Perform element-wise logical *or* operation on arrays.

    If *values* contains a non-array object with truth_ value **True**, the
    outcome will be an array of **True**\\s with suitable shape without arrays
    being evaluated. Non-array objects with truth value **False** are omitted.

    If array shapes do not match (after squeezing when enabled by user),
    :exc:`ValueError` is raised.

    This function uses :obj:`numpy.logical_or` or :obj:`numpy.any`."""
    arrays = []
    result = None
    shapes = set()
    # Partition inputs: arrays with a shape vs. truthy scalars (short-circuit).
    for value in values:
        if isinstance(value, ndarray) and value.shape:
            arrays.append(value)
            shapes.add(value.shape)
        elif value:
            result = value
    # Optional squeeze pass to reconcile trivially mismatched shapes.
    if len(shapes) > 1 and kwargs.get('squeeze', kwargs.get('sq', False)):
        shapes.clear()
        for i, a in enumerate(arrays):
            a = arrays[i] = a.squeeze()
            shapes.add(a.shape)
        if len(shapes) > 1:
            raise ValueError('array shape mismatch, even after squeezing')
    # NOTE(review): this second check repeats the one above for the
    # no-squeeze path; only the message differs.
    if len(shapes) > 1:
        raise ValueError('array shape mismatch')
    shape = shapes.pop() if shapes else None
    if result is not None:
        # A truthy scalar dominates: broadcast True over the common shape.
        if shape:
            return numpy.ones(shape, bool)
        else:
            return result
    elif arrays:
        sc = kwargs.get('sc', kwargs.get('shortcircuit', 0))
        if sc and numpy.prod(shape) >= sc:
            return short_circuit_or(arrays, shape)
        elif len(arrays) == 2:
            return numpy.logical_or(*arrays)
        else:
            return numpy.any(arrays, 0)
    else:
        # NOTE(review): falls through to the loop variable `value`; if
        # *values* is empty this raises NameError - confirm intended contract.
        return value
|
python
|
def decorate_event_js(js_code):
    """setup a method as an event, adding also javascript code to generate

    Args:
        js_code (str): javascript code to generate the event client-side.
            js_code is added to the widget html as
            widget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'}
    """
    def add_annotation(method):
        # Tag the method so the framework recognizes it as an event handler
        # and knows which client-side snippet to emit for it.
        method.__is_event = True
        method._js_code = js_code
        return method
    return add_annotation
|
java
|
/**
 * Notifies every registered listener that a message was sent on this
 * shared object.
 *
 * @param method name of the remote method invoked
 * @param params arguments passed along with the invocation
 */
protected void notifySendMessage(String method, List<?> params) {
    for (ISharedObjectListener listener : listeners) {
        listener.onSharedObjectSend(this, method, params);
    }
}
|
python
|
def _log_future_exception(future, logger):
"""Log any exception raised by future."""
if not future.done():
return
try:
future.result()
except: #pylint:disable=bare-except;This is a background logging helper
logger.warning("Exception in ignored future: %s", future, exc_info=True)
|
java
|
/**
 * Converts a CMYK {@link PlanarImage} to sRGB; non-CMYK images are returned
 * unchanged. Images with an embedded ICC profile are color-converted
 * directly; otherwise the pixel data is reinterpreted with a user-defined
 * CMYK color space, inverted, then converted to sRGB.
 */
public static PlanarImage convertCMYK2RGB(PlanarImage src) {
    ColorSpace srcColorSpace = src.getColorModel().getColorSpace();
    // check if BufferedImage is cmyk format
    if (srcColorSpace.getType() != ColorSpace.TYPE_CMYK) {
        return src;
    }
    /**
     * ICC_ColorSpace object mean jai read ColorSpace from image embed profile, we can not inverted cmyk color, and
     * can not repace BufferedImage's ColorSpace
     */
    if (srcColorSpace instanceof ICC_ColorSpace) {
        // -- Convert CMYK to RGB
        ColorSpace rgbColorSpace = ColorSpace.getInstance(ColorSpace.CS_sRGB);
        ColorModel rgbColorModel = RasterFactory.createComponentColorModel(DataBuffer.TYPE_BYTE, rgbColorSpace,
                false, true, Transparency.OPAQUE);
        ImageLayout rgbImageLayout = new ImageLayout();
        rgbImageLayout.setSampleModel(rgbColorModel.createCompatibleSampleModel(src.getWidth(), src.getHeight()));
        RenderingHints rgbHints = new RenderingHints(JAI.KEY_IMAGE_LAYOUT, rgbImageLayout);
        rgbHints.put(RenderingHints.KEY_RENDERING, RenderingHints.VALUE_RENDER_QUALITY);
        ParameterBlockJAI pb = new ParameterBlockJAI("colorconvert");
        pb.addSource(src);
        pb.setParameter("colormodel", rgbColorModel);
        return JAI.create("colorconvert", pb, rgbHints);
    } else {
        // get user defined color from ColorProfile data
        ColorSpace cmykColorSpace = CMMColorSpace.getInstance(src.getColorModel().getColorSpace().getType());
        ColorModel cmykColorModel = RasterFactory.createComponentColorModel(src.getSampleModel().getDataType(),
                cmykColorSpace, false, true,
                Transparency.OPAQUE);
        // replace ColorSpace by format convertor with CMYK ColorSpace
        ImageLayout cmykImageLayout = new ImageLayout();
        cmykImageLayout.setColorModel(cmykColorModel);
        RenderingHints cmykHints = new RenderingHints(JAI.KEY_IMAGE_LAYOUT, cmykImageLayout);
        cmykHints.put(RenderingHints.KEY_RENDERING, RenderingHints.VALUE_RENDER_QUALITY);
        ParameterBlockJAI pb = new ParameterBlockJAI("format");
        pb.addSource(src);
        pb.setParameter("datatype", src.getSampleModel().getDataType());
        PlanarImage op = JAI.create("format", pb, cmykHints);
        // invert CMYK pixel value
        // NOTE(review): the "invert" operation below reads `src` again, so
        // the "format" result assigned to `op` above is discarded -- confirm
        // whether `op` was intended as the invert source.
        pb = new ParameterBlockJAI("invert");
        pb.addSource(src);
        op = JAI.create("invert", pb, cmykHints);
        // -- Convert CMYK to RGB
        ColorSpace rgbColorSpace = ColorSpace.getInstance(ColorSpace.CS_sRGB);
        ColorModel rgbColorModel = RasterFactory.createComponentColorModel(DataBuffer.TYPE_BYTE, rgbColorSpace,
                false, true, Transparency.OPAQUE);
        ImageLayout rgbImageLayout = new ImageLayout();
        rgbImageLayout.setSampleModel(rgbColorModel.createCompatibleSampleModel(op.getWidth(), op.getHeight()));
        RenderingHints rgbHints = new RenderingHints(JAI.KEY_IMAGE_LAYOUT, rgbImageLayout);
        rgbHints.put(RenderingHints.KEY_RENDERING, RenderingHints.VALUE_RENDER_QUALITY);
        pb = new ParameterBlockJAI("colorconvert");
        pb.addSource(op);
        pb.setParameter("colormodel", rgbColorModel);
        return JAI.create("colorconvert", pb, rgbHints);
    }// endif
}
|
python
|
def unset_env():
    """Strip the COV_CORE_* coverage variables from the environment."""
    for name in ('COV_CORE_SOURCE', 'COV_CORE_DATA_FILE', 'COV_CORE_CONFIG'):
        os.environ.pop(name, None)
|
python
|
def get_as_integer_with_default(self, key, default_value):
    """
    Fetch the element stored under *key* and coerce it to an integer.

    :param key: an index of element to get.
    :param default_value: value returned when the element cannot be
        converted to an integer.
    :return: the element as an integer, or *default_value* when conversion
        is not supported.
    """
    return IntegerConverter.to_integer_with_default(self.get(key), default_value)
|
python
|
def form_to_json(form):
    """
    Takes the form from the POST request in the web interface, and generates
    the JSON config file (written to ``<project_name>.json`` in the current
    working directory).

    :param form: The form (dict-like) from the POST request
    :raises Exception: if the project name or URL is empty, or the selector
        type is not ``css``/``xpath``
    :return: None
    """
    config = dict()
    if form['project_name'] == "":
        raise Exception('Project name cannot be empty.')
    if form['selector_type'] not in ["css", "xpath"]:
        # Fixed grammar of the user-facing error message ("has to" -> "has to be").
        raise Exception('Selector type has to be css or xpath')
    config['project_name'] = form['project_name']
    config['selector_type'] = form['selector_type']
    config['scraping'] = dict()
    if form['url'] == "":
        raise Exception('URL cannot be empty')
    config['scraping']['url'] = form['url']
    config['scraping']['data'] = list()
    # Collect numbered field groups (field_1, selector_1, ...) until one of
    # the suffixed keys is missing.
    for i in itertools.count(start=1):
        try:
            data = {
                'field': form['field_' + str(i)],
                'selector': form['selector_' + str(i)],
                'attr': form['attribute_' + str(i)],
                'default': form['default_' + str(i)]
            }
            config['scraping']['data'].append(data)
        except KeyError:
            break
    # TODO : Crawler 'next' parameter handling
    with open(os.path.join(os.getcwd(), form['project_name'] + '.json'), 'w') as f:
        json.dump(config, f)
    return
|
python
|
def rank_targets(sample_frame, ref_targets, ref_sample):
    """Uses the geNorm algorithm to determine the most stably expressed
    genes from amongst ref_targets in your sample.

    See Vandesompele et al.'s 2002 Genome Biology paper for information about
    the algorithm: http://dx.doi.org/10.1186/gb-2002-3-7-research0034

    :param DataFrame sample_frame: A sample data frame.
    :param iterable ref_targets: A sequence of targets from the Target column
        of sample_frame to consider for ranking.
    :param string ref_sample: The name of a sample from the Sample
        column of sample_frame. It doesn't really matter what it is but it
        should exist for every target.
    :return: a sorted DataFrame with two columns, 'Target' and 'M' (the
        relative stability; lower means more stable).
    :rtype: DataFrame
    """
    table = collect_expression(sample_frame, ref_targets, ref_sample)
    all_samples = sample_frame['Sample'].unique()
    t = table.groupby(['Sample', 'Target']).mean()
    logt = log2(t)
    ref_targets = set(ref_targets)
    worst = []
    worst_m = []
    # Iteratively discard the least stable target until two remain.
    while len(ref_targets) - len(worst) > 1:
        M = []
        for test_target in ref_targets:
            if test_target in worst: continue
            Vs = []
            for ref_target in ref_targets:
                if ref_target == test_target or ref_target in worst: continue
                # FIX: `.ix` was removed in pandas >= 1.0; use `.loc` with a
                # materialized key list (zip is a one-shot iterator in py3).
                A = logt.loc[list(zip(all_samples, repeat(test_target))), ref_target]
                Vs.append(A.std())
            M.append( (sum(Vs)/(len(ref_targets)-len(worst)-1), test_target) )
        worst.append(max(M)[1])
        worst_m.append(max(M)[0])
    best = ref_targets - set(worst)
    worst.reverse()
    worst_m.reverse()
    # The two surviving (most stable) targets share the final M value.
    # NOTE(review): assumes len(ref_targets) > 2; with exactly two targets
    # worst_m is empty and this indexing fails -- pre-existing behavior.
    worst_m = [worst_m[0]] + worst_m
    return pd.DataFrame({'Target': list(best) + worst, 'M': worst_m}, columns=['Target', 'M'])
|
python
|
def _assert_ssl_exc_contains(exc, *msgs):
"""Check whether SSL exception contains either of messages provided."""
if len(msgs) < 1:
raise TypeError(
'_assert_ssl_exc_contains() requires '
'at least one message to be passed.',
)
err_msg_lower = str(exc).lower()
return any(m.lower() in err_msg_lower for m in msgs)
|
java
|
/**
 * Fluent variant of the setter: appends the given scheduled action names,
 * lazily creating the backing list on first use.
 *
 * @param scheduledActionNames the names to append
 * @return this request, for call chaining
 */
public DescribeScheduledActionsRequest withScheduledActionNames(String... scheduledActionNames) {
    if (this.scheduledActionNames == null) {
        setScheduledActionNames(new java.util.ArrayList<String>(scheduledActionNames.length));
    }
    java.util.Collections.addAll(this.scheduledActionNames, scheduledActionNames);
    return this;
}
|
python
|
def pair(self):
    """
    Build the sender/receiver couple for a PAIR socket.

    Returns a callable and an iterable respectively. Those can be used to
    both transmit a message and/or iterate over incoming messages, that were
    sent by a pair socket. The iterable yields as many parts as sent by a
    pair, and the sender function has a ``print``-like signature: each
    positional argument becomes one part of the complete message.

    :rtype: (function, generator)
    """
    sock = self.__sock(zmq.PAIR)
    sender = self.__send_function(sock)
    receiver = self.__recv_generator(sock)
    return sender, receiver
|
python
|
def _validate_columns(self):
"""Validate the options in the styles"""
geom_cols = {'the_geom', 'the_geom_webmercator', }
col_overlap = set(self.style_cols) & geom_cols
if col_overlap:
raise ValueError('Style columns cannot be geometry '
'columns. `{col}` was chosen.'.format(
col=','.join(col_overlap)))
|
python
|
def NotificationsGet(self, notification_id = -1):
    """
    Obtain either all notifications from CommonSense, or the details of a
    specific notification.

    If successful, the result can be obtained from getResponse(), and should
    be a json string.

    @param notification_id (int) (optional) - Notification id of the
        notification to obtain details from; -1 requests all notifications.
    @return (bool) - whether the api call succeeded.
    """
    url = ('/notifications.json' if notification_id == -1
           else '/notifications/{0}.json'.format(notification_id))
    if not self.__SenseApiCall__(url, 'GET'):
        self.__error__ = "api call unsuccessful"
        return False
    return True
|
java
|
/**
 * Returns a defensive copy of the values stored for the given property,
 * or {@code null} when the property is unknown.
 *
 * @param propertyName the property to look up; must not be null
 */
public List<String> getValues(final String propertyName) {
    Preconditions.checkNotNull(propertyName);
    final List<String> stored = properties.get(propertyName);
    // Hand back a shallow copy so callers cannot mutate our state.
    return stored == null ? null : new ArrayList<>(stored);
}
|
python
|
def update(self, fname):
    """
    Attach a FileHandler so log records are also persisted to *fname*.
    """
    handler = FileHandler(fname)
    self._log.addHandler(handler)
|
python
|
def get_new_access_token(refresh_token, client_id, client_secret,
                         scope=None, **kwargs):
    """Exchange a Refresh Token for a new Access Token.

    :param refresh_token: the Refresh Token used to renew the Access Token.
    :param client_id: the application's API Key.
    :param client_secret: the application's Secret Key.
    :param scope: space-separated permission list; when omitted, the same
        scope as the previous grant is requested. The requested scope must
        be a subset of the one originally granted.
    :return: the Response object; see Baidu's OAuth documentation for the
        meaning of the ``response.json()`` payload:

        http://developer.baidu.com/wiki/index.php?title=docs/oauth/refresh
    """
    payload = {
        'grant_type': 'refresh_token',
        'refresh_token': refresh_token,
        'client_id': client_id,
        'client_secret': client_secret,
    }
    if scope:
        payload['scope'] = scope
    return requests.post('https://openapi.baidu.com/oauth/2.0/token',
                         data=payload)
|
python
|
def sign(allocate_quota_request):
    """Obtains a signature for an operation in a `AllocateQuotaRequest`

    Args:
       allocate_quota_request
         (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaRequest`):
         the request whose ``allocateOperation`` is hashed

    Returns:
       string: a secure hash generated from the operation

    Raises:
       ValueError: if the request is not an ``AllocateQuotaRequest`` or its
         operation lacks ``methodName``/``consumerId``
    """
    if not isinstance(allocate_quota_request, sc_messages.AllocateQuotaRequest):
        raise ValueError(u'Invalid request')
    op = allocate_quota_request.allocateOperation
    if op is None or op.methodName is None or op.consumerId is None:
        logging.error(u'Bad %s: not initialized => not signed', allocate_quota_request)
        raise ValueError(u'allocate_quota request must be initialized with an operation')
    # NUL bytes separate the hashed components so field boundaries stay
    # unambiguous.
    md5 = hashlib.md5()
    md5.update(op.methodName.encode('utf-8'))
    md5.update(b'\x00')
    md5.update(op.consumerId.encode('utf-8'))
    if op.labels:
        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
    # Fold each quota metric (name + values) into the hash.
    for value_set in op.quotaMetrics:
        md5.update(b'\x00')
        md5.update(value_set.metricName.encode('utf-8'))
        for mv in value_set.metricValues:
            metric_value.update_hash(md5, mv)
    md5.update(b'\x00')
    return md5.digest()
|
python
|
def _get_searchable_regex(basic=None, hidden=None):
    """Return the compiled, searchable regexes for a single keyword.

    Hidden labels may themselves be regular expressions (detected via
    ``_is_regex``); basic labels matched by a hidden-label regex are
    superseded by it.
    """
    basic = basic or []
    hidden = hidden or []
    wrap = current_app.config["CLASSIFIER_WORD_WRAP"]
    # Hidden labels are used to store regular expressions.
    hidden_regex_dict = {}
    for hidden_label in hidden:
        if _is_regex(hidden_label):
            # Strip the surrounding regex delimiters before wrapping.
            hidden_regex_dict[hidden_label] = re.compile(wrap % hidden_label[1:-1])
        else:
            hidden_regex_dict[hidden_label] = re.compile(
                wrap % _get_regex_pattern(hidden_label))
    # Compile a regex for each basic (preferred or alternative) label,
    # then let the hidden-label regexes override duplicates.
    regex_dict = {}
    for label in basic:
        regex_dict[label] = re.compile(wrap % _get_regex_pattern(label))
    regex_dict.update(hidden_regex_dict)
    return list(regex_dict.values())
|
python
|
def _modem_sm(self):
    """Handle modem response state machine.

    Loops while ``self.ser`` is truthy, reading lines from the modem and
    tracking RING / caller-ID (DATE/NMBR/NAME) notifications. State changes
    are reported via ``self.incomingcallnotificationfunc``.
    """
    import datetime
    read_timeout = READ_IDLE_TIMEOUT
    while self.ser:
        try:
            resp = self.read(read_timeout)
        except (serial.SerialException, SystemExit, TypeError):
            _LOGGER.debug('Unable to read from port %s', self.port)
            break
        # An empty read while not idle means call activity timed out:
        # fall back to the idle state.
        if self.state != self.STATE_IDLE and len(resp) == 0:
            read_timeout = READ_IDLE_TIMEOUT
            self.set_state(self.STATE_IDLE)
            self.incomingcallnotificationfunc(self.state)
            continue
        resp = resp.decode()
        resp = resp.strip('\r\n')
        # Accumulate command response lines until OK/ERROR terminates them.
        if self.cmd_response == '':
            self.cmd_responselines.append(resp)
        _LOGGER.debug('mdm: %s', resp)
        if resp in ['OK', 'ERROR']:
            self.cmd_response = resp
            continue
        if resp in ['RING']:
            if self.state == self.STATE_IDLE:
                # First ring of a new call: reset the caller-ID fields.
                self.cid_name = ''
                self.cid_number = ''
            self.cid_time = datetime.datetime.now()
            self.set_state(self.STATE_RING)
            self.incomingcallnotificationfunc(self.state)
            read_timeout = READ_RING_TIMOUT
            continue
        # Anything else of interest is a caller-ID line "FIELD = data".
        if len(resp) <= 4 or resp.find('=') == -1:
            continue
        read_timeout = READ_RING_TIMOUT
        # NOTE(review): a payload containing an extra '=' would make this
        # unpacking raise ValueError -- confirm payloads never contain '='.
        cid_field, cid_data = resp.split('=')
        cid_field = cid_field.strip()
        cid_data = cid_data.strip()
        if cid_field in ['DATE']:
            self.cid_time = datetime.datetime.now()
            continue
        if cid_field in ['NMBR']:
            self.cid_number = cid_data
            continue
        if cid_field in ['NAME']:
            # NAME arrives last; the caller-ID record is now complete.
            self.cid_name = cid_data
            self.set_state(self.STATE_CALLERID)
            self.incomingcallnotificationfunc(self.state)
            _LOGGER.debug('CID: %s %s %s',
                          self.cid_time.strftime("%I:%M %p"),
                          self.cid_name,
                          self.cid_number)
            # Re-arm caller-ID reporting for the next call.
            try:
                self.write(self.cmd_callerid)
            except serial.SerialException:
                _LOGGER.error('Unable to write to port %s', self.port)
                break
            continue
    # Reaching here means the port became unusable.
    self.set_state(self.STATE_FAILED)
    _LOGGER.debug('Exiting modem state machine')
    return
|
java
|
/**
 * Returns an Objenesis instantiator able to create instances of
 * {@code clazz} without invoking a constructor.
 */
public static <T> ObjectInstantiator<T> getInstantiatorOf(Class<T> clazz) {
    return OBJENESIS_STD.getInstantiatorOf(clazz);
}
|
java
|
/**
 * Searches strictly after {@code startingLine} for the next line whose text
 * equals {@code textToMatch}.
 *
 * @return the index of the first matching line, or -1 when no line matches
 */
private int findNextMatchingLine(String[] textLines, int startingLine, String textToMatch)
{
    for (int line = startingLine + 1; line < textLines.length; line++)
    {
        if (getTextLine(textLines, line).equals(textToMatch))
        {
            return line;
        }
    }
    return -1;
}
|
python
|
def plate(self):
    """
    Serves up a delicious plate with your models.

    Builds vis.js-style node ('meatballs') and edge ('spaghetti') lists for
    every ContentType in the configured apps and renders them with
    ``plate_template_name``.
    """
    request = self.request
    # Settings may come from the instance, or from Django settings merged
    # with per-view overrides.
    if self.settings is None:
        graph_settings = deepcopy(getattr(settings, 'SPAGHETTI_SAUCE', {}))
        graph_settings.update(self.override_settings)
    else:
        graph_settings = self.settings
    apps = graph_settings.get('apps', [])
    # Excluded models are addressed as "app__model" ids.
    excludes = [
        "%s__%s" % (app, model)
        for app, models in graph_settings.get('exclude', {}).items()
        for model in models
    ]
    models = ContentType.objects.filter(app_label__in=apps)
    nodes = []
    edges = []
    for model in models:
        if (model.model_class() is None):
            continue
        model.is_proxy = model.model_class()._meta.proxy
        if (model.is_proxy and not graph_settings.get('show_proxy', False)):
            continue
        model.doc = model.model_class().__doc__
        _id = "%s__%s" % (model.app_label, model.model)
        if _id in excludes:
            continue
        label = self.get_node_label(model)
        fields = [f for f in model.model_class()._meta.fields]
        many = [f for f in model.model_class()._meta.many_to_many]
        if graph_settings.get('show_fields', True):
            # Append a separator plus one field name per line to the label.
            label += "\n%s\n" % ("-" * len(model.model))
            label += "\n".join([str(f.name) for f in fields])
        edge_color = {'inherit': 'from'}
        # One edge per relational field (FK / O2O / M2M / parent pointer).
        for f in fields + many:
            if f.remote_field is not None:
                m = f.remote_field.model._meta
                to_id = "%s__%s" % (m.app_label, m.model_name)
                if to_id in excludes:
                    pass
                elif _id == to_id and graph_settings.get('ignore_self_referential', False):
                    pass
                else:
                    if m.app_label != model.app_label:
                        edge_color = {'inherit': 'both'}
                    edge = {'from': _id, 'to': to_id, 'color': edge_color}
                    if str(f.name).endswith('_ptr'):
                        # fields that end in _ptr are pointing to a parent object
                        edge.update({
                            'arrows': {'to': {'scaleFactor': 0.75}},  # needed to draw from-to
                            'font': {'align': 'middle'},
                            'label': 'is a',
                            'dashes': True
                        })
                    elif type(f) == related.ForeignKey:
                        edge.update({
                            'arrows': {'to': {'scaleFactor': 0.75}}
                        })
                    elif type(f) == related.OneToOneField:
                        edge.update({
                            'font': {'align': 'middle'},
                            'label': '|'
                        })
                    elif type(f) == related.ManyToManyField:
                        edge.update({
                            'color': {'color': 'gray'},
                            'arrows': {'to': {'scaleFactor': 1}, 'from': {'scaleFactor': 1}},
                        })
                    edges.append(edge)
        # Proxy models also get an edge back to the model they proxy.
        if model.is_proxy:
            proxy = model.model_class()._meta.proxy_for_model._meta
            model.proxy = proxy
            edge = {
                'to': _id,
                'from': "%s__%s" % (proxy.app_label, proxy.model_name),
                'color': edge_color,
            }
            edges.append(edge)
        all_node_fields = fields
        if graph_settings.get('show_m2m_field_detail', False):
            all_node_fields = fields + many
        nodes.append(
            {
                'id': _id,
                'label': label,
                'shape': 'box',
                'group': model.app_label,
                'title': get_template(self.meatball_template_name).render(
                    {'model': model, 'fields': all_node_fields}
                )
            }
        )
    data = {
        'meatballs': json.dumps(nodes),
        'spaghetti': json.dumps(edges)
    }
    return render(request, self.plate_template_name, data)
|
java
|
/** Sets the day-part targeting criteria. */
public void setDayPartTargeting(com.google.api.ads.admanager.axis.v201805.DayPartTargeting dayPartTargeting) {
    this.dayPartTargeting = dayPartTargeting;
}
|
java
|
/**
 * Returns the month-of-year (1-12) of the given value after coercing it to
 * a date or date-time.
 */
public static int month(EvaluationContext ctx, Object date) {
    return Conversions.toDateOrDateTime(date, ctx).get(ChronoField.MONTH_OF_YEAR);
}
|
python
|
def get_example(cls) -> dict:
    """Return an example value for the Dict type.

    Uses the class-level ``example`` when one is defined; otherwise builds a
    dict from the example of each declared property.
    """
    explicit = cls.example
    if explicit is not None:
        return explicit
    return {name: prop.get_example() for name, prop in cls.properties.items()}
|
java
|
/**
 * Updates the storage account, blocking until the service call completes.
 *
 * @param resourceGroupName the resource group containing the account
 * @param accountName the name of the storage account
 * @param parameters the update payload
 * @return the updated storage account
 */
public StorageAccountInner update(String resourceGroupName, String accountName, StorageAccountUpdateParameters parameters) {
    return updateWithServiceResponseAsync(resourceGroupName, accountName, parameters).toBlocking().single().body();
}
|
python
|
def iter_module_paths(modules=None):
    """Yield the absolute source-file path of each given (or imported) module.

    Modules without a usable ``__file__`` are skipped silently.
    """
    modules = modules or list(sys.modules.values())
    for module in modules:
        try:
            filename = module.__file__
        except (AttributeError, ImportError):  # pragma: no cover
            continue
        if filename is None:
            continue
        path = os.path.abspath(filename)
        if os.path.isfile(path):
            yield path
|
python
|
def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):
"""A default set of length-bucket boundaries."""
assert length_bucket_step > 1.0
x = min_length
boundaries = []
while x < max_length:
boundaries.append(x)
x = max(x + 1, int(x * length_bucket_step))
return boundaries
|
python
|
async def build_get_cred_def_request(submitter_did: Optional[str],
                                     id_: str) -> str:
    """
    Builds a GET_CRED_DEF request. Request to get a credential definition (in particular, public key),
    that Issuer creates for a particular Credential Schema.

    :param submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
    :param id_: Credential Definition Id in ledger.
    :return: Request result as json.
    """
    logger = logging.getLogger(__name__)
    logger.debug("build_get_cred_def_request: >>> submitter_did: %r, id: %r",
                 submitter_did,
                 id_)

    # The native callback is created once and cached on the function object.
    if not hasattr(build_get_cred_def_request, "cb"):
        logger.debug("build_get_cred_def_request: Creating callback")
        build_get_cred_def_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    # Encode arguments as C strings for the libindy FFI call; None maps to NULL.
    c_submitter_did = c_char_p(submitter_did.encode('utf-8')) if submitter_did is not None else None
    c_id = c_char_p(id_.encode('utf-8'))

    request_json = await do_call('indy_build_get_cred_def_request',
                                 c_submitter_did,
                                 c_id,
                                 build_get_cred_def_request.cb)

    res = request_json.decode()
    logger.debug("build_get_cred_def_request: <<< res: %r", res)
    return res
|
python
|
def createNetwork(self, data, headers=None, query_params=None, content_type="application/json"):
    """
    Create a new network.

    Issues POST /network against the client's base URL.
    """
    endpoint = self.client.base_url + "/network"
    return self.client.post(endpoint, data, headers, query_params, content_type)
|
java
|
/**
 * Wraps a page that already exists (the trailing {@code false} flag
 * presumably marks it as not newly created -- inferred from the factory
 * name; confirm against the PageWrapper constructor).
 */
static PageWrapper wrapExisting(BTreePage page, PageWrapper parent, PagePointer pointer) {
    return new PageWrapper(page, parent, pointer, false);
}
|
java
|
/**
 * Creates a list operation that retrieves {@code AssetDeliveryPolicyInfo}
 * entries from the given link's href.
 */
public static DefaultListOperation<AssetDeliveryPolicyInfo> list(LinkInfo<AssetDeliveryPolicyInfo> link) {
    return new DefaultListOperation<AssetDeliveryPolicyInfo>(link.getHref(),
            new GenericType<ListResult<AssetDeliveryPolicyInfo>>() {
            });
}
|
python
|
def latex_quote(s):
    """Quote special characters for LaTeX.

    (Incomplete, currently only deals with underscores, dollar and hash.)
    """
    result = str(s)
    for char, escaped in {'_': r'\_', '$': r'\$', '#': r'\#'}.items():
        result = result.replace(char, escaped)
    return result
|
python
|
def MI_enumInstanceNames(self,
                         env,
                         ns,
                         cimClass):
    # pylint: disable=invalid-name
    """Return instance names of a given CIM class

    Implements the WBEM operation EnumerateInstanceNames in terms
    of the enum_instances method. A derived class will not normally
    override this method.

    :param env: provider environment (supplies the logger).
    :param ns: namespace the instance names belong to.
    :param cimClass: the CIM class whose instance names are enumerated.
    """
    logger = env.get_logger()
    logger.log_debug('CIMProvider MI_enumInstanceNames called...')
    provClass = False
    # Collect the key-qualified properties of the class (the list
    # comprehension is used purely for its __setitem__ side effect).
    keys = pywbem.NocaseDict()
    [keys.__setitem__(p.name, p) for p in cimClass.properties.values() \
            if 'key' in p.qualifiers]
    _strip_quals(keys)
    # Build a template instance carrying only the key properties.
    path = pywbem.CIMInstanceName(classname=cimClass.classname,
                                  namespace=ns)
    model = pywbem.CIMInstance(classname=cimClass.classname,
                               properties=keys,
                               path=path)
    gen = self.enum_instances(env=env,
                              model=model,
                              cim_class=cimClass,
                              keys_only=True)
    # If enum_instances returned something non-iterable (e.g. None),
    # yield nothing.
    try:
        iter(gen)
    except TypeError:
        logger.log_debug('CIMProvider MI_enumInstanceNames returning')
        return
    for inst in gen:
        rval = build_instance_name(inst)
        yield rval
    logger.log_debug('CIMProvider MI_enumInstanceNames returning')
|
java
|
/**
 * Joins the given strings, inserting {@code glue} between consecutive
 * elements.
 *
 * @param glue the separator placed between elements
 * @param what the parts to concatenate
 * @return the joined string (empty for an empty array)
 */
static String implode(final String glue, final String[] what) {
    // String.join performs exactly the manual StringBuilder loop this
    // method previously implemented (null elements render as "null").
    return String.join(glue, what);
}
|
java
|
/**
 * Returns the portal property stored under {@code name}.
 *
 * @param name the property key; must not be null (enforced via Validate)
 */
@Override
public String getProperty(String name) {
    Validate.notNull(name, "Property name can not be null");
    return this.portalProperties.getProperty(name);
}
|
python
|
def vector_unit_nonull(v):
    """Return unit vectors.

    NOTE(review): the original docstring claimed "Any null vectors raise an
    Exception", but under numpy's default error state dividing by a zero
    magnitude emits a warning and yields inf/nan rather than raising --
    confirm callers configure ``numpy.seterr`` accordingly.

    Parameters
    ----------
    v: array, shape (a1, a2, ..., d)
        Cartesian vectors, with last axis indexing the dimension.

    Returns
    -------
    v_new: array, shape of v
    """
    # Empty input passes through unchanged.
    if v.size == 0:
        return v
    return v / vector_mag(v)[..., np.newaxis]
|
python
|
def _onsuccess(self, result):
    """Record a successful execution and delegate to ``onsuccess``.

    :param cdumay_result.Result result: Execution result
    :return: Execution result
    :rtype: cdumay_result.Result
    """
    self._set_status("SUCCESS", result)
    kmsg = Message(
        self.uuid, entrypoint=self.__class__.path, params=self.params
    ).dump()
    kresult = ResultSchema().dump(result) if result else dict()
    summary = "{}.Success: {}[{}]: {}".format(
        self.__class__.__name__, self.__class__.path, self.uuid, result
    )
    logger.info(summary, extra=dict(kmsg=kmsg, kresult=kresult))
    return self.onsuccess(result)
|
java
|
/** Convenience overload: wraps the POI cell and formats it for the given locale. */
private CellFormatResult getCellValue(final Cell cell, final Locale locale) {
    return getCellValue(new POICell(cell), locale);
}
|
java
|
/**
 * Resolves a schema name to its HsqlName; a null name denotes the
 * session's current schema.
 */
HsqlName getSchemaHsqlName(String name) {
    return name == null ? currentSchema
                        : database.schemaManager.getSchemaHsqlName(name);
}
|
java
|
/** Evicts all cached execution graphs whose time-to-live has expired. */
public void cleanup() {
    final long now = System.currentTimeMillis();
    cachedExecutionGraphs.values().removeIf(entry -> now >= entry.getTTL());
}
|
java
|
// Builds suggestions purely from favorites and recently browsed components
// (no index search); within each qualifier, favorites are ranked first and
// the rest ordered by name.
private SuggestionsWsResponse loadSuggestionsWithoutSearch(int skip, int limit, Set<String> recentlyBrowsedKeys, List<String> qualifiers) {
    List<ComponentDto> favoriteDtos = favoriteFinder.list();
    if (favoriteDtos.isEmpty() && recentlyBrowsedKeys.isEmpty()) {
        return newBuilder().build();
    }
    try (DbSession dbSession = dbClient.openSession(false)) {
        // Candidate pool = favorites + recently browsed, filtered by USER permission.
        Set<ComponentDto> componentDtos = new HashSet<>(favoriteDtos);
        if (!recentlyBrowsedKeys.isEmpty()) {
            componentDtos.addAll(dbClient.componentDao().selectByKeys(dbSession, recentlyBrowsedKeys));
        }
        List<ComponentDto> authorizedComponents = userSession.keepAuthorizedComponents(USER, componentDtos);
        ListMultimap<String, ComponentDto> componentsPerQualifier = authorizedComponents.stream()
            .collect(MoreCollectors.index(ComponentDto::qualifier));
        if (componentsPerQualifier.isEmpty()) {
            return newBuilder().build();
        }

        // Favorites sort before non-favorites, then alphabetically by name.
        Set<String> favoriteUuids = favoriteDtos.stream().map(ComponentDto::uuid).collect(MoreCollectors.toSet(favoriteDtos.size()));
        Comparator<ComponentDto> favoriteComparator = Comparator.comparing(c -> favoriteUuids.contains(c.uuid()) ? -1 : +1);
        Comparator<ComponentDto> comparator = favoriteComparator.thenComparing(ComponentDto::name);

        // Page each qualifier's hits with skip/limit.
        ComponentIndexResults componentsPerQualifiers = ComponentIndexResults.newBuilder().setQualifiers(
            qualifiers.stream().map(q -> {
                List<ComponentDto> componentsOfThisQualifier = componentsPerQualifier.get(q);
                List<ComponentHit> hits = componentsOfThisQualifier
                    .stream()
                    .sorted(comparator)
                    .skip(skip)
                    .limit(limit)
                    .map(ComponentDto::uuid)
                    .map(ComponentHit::new)
                    .collect(MoreCollectors.toList(limit));
                int totalHits = componentsOfThisQualifier.size();
                return new ComponentHitsPerQualifier(q, hits, totalHits);
            })).build();
        return buildResponse(recentlyBrowsedKeys, favoriteUuids, componentsPerQualifiers, dbSession, authorizedComponents, skip + limit).build();
    }
}
|
python
|
def to_json(self):
    """
    Serialize the resource's ``sys`` metadata into a JSON-ready dict.

    Link-like entries are serialized via their own ``to_json``; timestamp
    entries are rendered in ISO-8601; keys are camel-cased.
    """
    sys_attrs = {}
    for key, value in self.sys.items():
        if key in ('space', 'content_type', 'created_by',
                   'updated_by', 'published_by'):
            value = value.to_json()
        if key in ('created_at', 'updated_at', 'deleted_at',
                   'first_published_at', 'published_at', 'expires_at'):
            value = value.isoformat()
        sys_attrs[camel_case(key)] = value
    return {'sys': sys_attrs}
|
python
|
def write_tree_from_cache(entries, odb, sl, si=0):
    """Create a tree from the given sorted list of entries and put the respective
    trees into the given object database

    :param entries: **sorted** list of IndexEntries
    :param odb: object database to store the trees in
    :param sl: slice indicating the range we should process on the entries list
    :param si: start index (character offset into each entry path) at which we
        should start creating subtrees
    :return: tuple(binsha, list(tree_entry, ...)) a tuple of a sha and a list of
        tree entries being a tuple of hexsha, mode, name"""
    tree_items = []
    tree_items_append = tree_items.append
    ci = sl.start
    end = sl.stop
    while ci < end:
        entry = entries[ci]
        if entry.stage != 0:
            raise UnmergedEntriesError(entry)
        # END abort on unmerged
        ci += 1
        # Look for a '/' past offset si: present means this path continues
        # into a subdirectory.
        rbound = entry.path.find('/', si)
        if rbound == -1:
            # its not a tree
            tree_items_append((entry.binsha, entry.mode, entry.path[si:]))
        else:
            # find common base range
            base = entry.path[si:rbound]
            xi = ci
            while xi < end:
                oentry = entries[xi]
                orbound = oentry.path.find('/', si)
                if orbound == -1 or oentry.path[si:orbound] != base:
                    break
                # END abort on base mismatch
                xi += 1
            # END find common base
            # enter recursion
            # ci - 1 as we want to count our current item as well
            sha, tree_entry_list = write_tree_from_cache(entries, odb, slice(ci - 1, xi), rbound + 1) # @UnusedVariable
            tree_items_append((sha, S_IFDIR, base))
            # skip ahead
            ci = xi
        # END handle bounds
    # END for each entry
    # finally create the tree
    sio = BytesIO()
    tree_to_stream(tree_items, sio.write)
    sio.seek(0)
    istream = odb.store(IStream(str_tree_type, len(sio.getvalue()), sio))
    return (istream.binsha, tree_items)
|
python
|
def indent(s, spaces=4):
    """
    Indent ``s`` by ``spaces`` spaces: the padding is inserted after every
    run of newlines and prepended to the (stripped) string.
    """
    pad = ' ' * spaces
    body = re.sub('(\n+)', '\\1' + pad, s)
    return pad + body.strip()
|
python
|
def external_session_url(self, email, chrome, url, integrator_id, client_id):
    """
    Get a URL which initiates a new external session for the user with the
    given email.

    Full details: http://www.campaignmonitor.com/api/account/#single_sign_on

    :param email: Email address of the Campaign Monitor user for whom the
        login session should be created.
    :param chrome: Which 'chrome' to display - must be "all", "tabs", or
        "none".
    :param url: URL to display once logged in, e.g. "/subscribers/".
    :param integrator_id: Integrator ID (obtained from Campaign Monitor
        support).
    :param client_id: Client ID of the client which should be active once
        logged in to the Campaign Monitor account.
    :returns: Object containing a single field SessionUrl which represents
        the URL to initiate the external Campaign Monitor session.
    """
    payload = {
        "Email": email,
        "Chrome": chrome,
        "Url": url,
        "IntegratorID": integrator_id,
        "ClientID": client_id,
    }
    response = self._put('/externalsession.json', json.dumps(payload))
    return json_to_py(response)
|
java
|
/**
 * Returns the mandatory string stored under {@code key}.
 *
 * @throws ObjectMissingException when no value is present for the key
 */
@Override
public final String getString(final String key) {
    final String value = optString(key);
    if (value != null) {
        return value;
    }
    throw new ObjectMissingException(this, key);
}
|
python
|
def shp_dict(shp_fn, fields=None, geom=True):
    """Get a dictionary for all features in a shapefile

    Optionally, specify fields

    :param shp_fn: path to the shapefile
    :param fields: field names to extract (default: all fields in the layer)
    :param geom: when True, include each feature's geometry under 'geom'
    :return: list of per-feature dicts
    """
    from pygeotools.lib import timelib
    ds = ogr.Open(shp_fn)
    lyr = ds.GetLayer()
    nfeat = lyr.GetFeatureCount()
    print('%i input features\n' % nfeat)
    if fields is None:
        fields = shp_fieldnames(lyr)
    d_list = []
    for feat in lyr:
        d = {}
        if geom:
            # FIX: the boolean parameter `geom` was previously overwritten
            # with the feature geometry, so a feature whose geometry was
            # falsy (e.g. None) silently disabled geometry extraction for
            # all subsequent features.
            d['geom'] = feat.GetGeometryRef()
        for f_name in fields:
            i = str(feat.GetField(f_name))
            if 'date' in f_name:
                # If the value is float-like, clear off the decimal part
                # before fuzzy date parsing.
                i = i.rsplit('.')[0]
                i = timelib.strptime_fuzzy(str(i))
            d[f_name] = i
        d_list.append(d)
    #d_list_sort = sorted(d_list, key=lambda k: k[date_f])
    return d_list
|
java
|
/**
 * Collects all annotations declared on {@code m}, transitively expanding
 * annotation types that are meta-annotated with the configured stereotype
 * annotation.
 *
 * @param m the method whose annotations are gathered
 * @return the directly present annotations plus all stereotyped ones
 */
public static Set<Annotation> getAllAnnotations(Method m) {
    Set<Annotation> annotationSet = new LinkedHashSet<Annotation>();
    Annotation[] annotations = m.getAnnotations();
    List<Class<?>> annotationTypes = new ArrayList<Class<?>>();
    // Iterate through all annotations of the current class
    for (Annotation a : annotations) {
        // Add the current annotation to the result and to the annotation types that needed to be examained for stereotype annotations
        annotationSet.add(a);
        annotationTypes.add(a.annotationType());
    }
    if (stereotypeAnnotationClass != null) {
        // Worklist scan: annotationTypes acts as a LIFO stack of annotation
        // types still to be examined.
        while (!annotationTypes.isEmpty()) {
            Class<?> annotationType = annotationTypes.remove(annotationTypes.size() - 1);
            if (annotationType.isAnnotationPresent(stereotypeAnnotationClass)) {
                // If the stereotype annotation is present examine the 'inherited' annotations
                for (Annotation annotation : annotationType.getAnnotations()) {
                    // add the 'inherited' annotations to be examined for further stereotype annotations
                    annotationTypes.add(annotation.annotationType());
                    if (!annotation.annotationType().equals(stereotypeAnnotationClass)) {
                        // add the stereotyped annotations to the set
                        annotationSet.add(annotation);
                    }
                }
            }
        }
    }
    return annotationSet;
}
|
python
|
def _add(self, uri, methods, handler, host=None):
    """Add a handler to the route list

    :param uri: path to match
    :param methods: sequence of accepted method names. If none are
        provided, any method is allowed
    :param handler: request handler function.
        When executed, it should provide a response object.
    :param host: optional hostname (or iterable of hostnames); when given,
        the host is prepended to the URI so routes are host-specific.
    :return: Nothing
    """
    if host is not None:
        if isinstance(host, str):
            uri = host + uri
            self.hosts.add(host)
        else:
            if not isinstance(host, Iterable):
                raise ValueError("Expected either string or Iterable of "
                                 "host strings, not {!r}".format(host))
            # Register the same route once per host, then stop.
            for host_ in host:
                self.add(uri, methods, handler, host_)
            return
    # Dict for faster lookups of if method allowed
    if methods:
        methods = frozenset(methods)
    parameters = []
    properties = {"unhashable": None}
    def add_parameter(match):
        # Substitution callback: record the typed parameter and replace it
        # with a capturing regex group.
        name = match.group(1)
        name, _type, pattern = self.parse_parameter_string(name)
        parameter = Parameter(
            name=name, cast=_type)
        parameters.append(parameter)
        # Mark the whole route as unhashable if it has the hash key in it
        if re.search(r'(^|[^^]){1}/', pattern):
            properties['unhashable'] = True
        # Mark the route as unhashable if it matches the hash key
        elif re.search(r'/', pattern):
            properties['unhashable'] = True
        return '({})'.format(pattern)
    pattern_string = re.sub(self.parameter_pattern, add_parameter, uri)
    pattern = re.compile(r'^{}$'.format(pattern_string))
    def merge_route(route, methods, handler):
        # merge to the existing route when possible.
        if not route.methods or not methods:
            # method-unspecified routes are not mergeable.
            raise RouteExists(
                "Route already registered: {}".format(uri))
        elif route.methods.intersection(methods):
            # already existing method is not overloadable.
            duplicated = methods.intersection(route.methods)
            raise RouteExists(
                "Route already registered: {} [{}]".format(
                    uri, ','.join(list(duplicated))))
        # Combine both handlers into a composition view dispatching by method.
        if isinstance(route.handler, self._composition_view_class):
            view = route.handler
        else:
            view = self._composition_view_class()
            view.add(route.methods, route.handler)
        view.add(methods, handler)
        route = route._replace(
            handler=view, methods=methods.union(route.methods))
        return route
    if parameters:
        # TODO: This is too complex, we need to reduce the complexity
        if properties['unhashable']:
            routes_to_check = self.routes_always_check
            ndx, route = self.check_dynamic_route_exists(
                pattern, routes_to_check)
        else:
            routes_to_check = self.routes_dynamic[url_hash(uri)]
            ndx, route = self.check_dynamic_route_exists(
                pattern, routes_to_check)
        if ndx != -1:
            # Pop the ndx of the route, no dups of the same route
            routes_to_check.pop(ndx)
    else:
        route = self.routes_all.get(uri)
    if route:
        route = merge_route(route, methods, handler)
    else:
        # prefix the handler name with the blueprint name
        # if available
        if hasattr(handler, '__blueprintname__'):
            handler_name = '{}.{}'.format(
                handler.__blueprintname__, handler.__name__)
        else:
            handler_name = getattr(handler, '__name__', None)
        route = Route(
            handler=handler, methods=methods, pattern=pattern,
            parameters=parameters, name=handler_name, uri=uri)
    # Index the route: always-check (unhashable), dynamic (parameterized),
    # or static (exact-match) -- plus the catch-all routes_all map.
    self.routes_all[uri] = route
    if properties['unhashable']:
        self.routes_always_check.append(route)
    elif parameters:
        self.routes_dynamic[url_hash(uri)].append(route)
    else:
        self.routes_static[uri] = route
|
python
|
def get_resource(url):
    """Fetch ``url`` from SWS via GET and decode the body as JSON.

    :param url: the SWS resource URL to request
    :returns: the parsed JSON payload of the response body
    :raises DataFailureException: when the response status is not 200
    """
    headers = {'Accept': 'application/json',
               'Connection': 'keep-alive'}
    resp = DAO.getURL(url, headers)
    if resp.status == 200:
        return json.loads(resp.data)
    raise DataFailureException(url, resp.status, resp.data)
|
python
|
def create_inputs(data):
    """Index input reads and prepare groups of reads to process concurrently.
    Allows parallelization of alignment beyond processors available on a single
    machine. Prepares a bgzip and grabix indexed file for retrieving sections
    of files.

    :param data: sample dictionary; reads ``files`` and
        ``config.algorithm`` keys (aligner, align_split_size, ...)
    :return: list of single-item sample lists, one per alignment split
        (or a single entry when no splitting applies)
    """
    from bcbio.pipeline import sample
    data = cwlutils.normalize_missing(data)
    aligner = tz.get_in(("config", "algorithm", "aligner"), data)
    # CRAM files must be converted to bgzipped fastq, unless not aligning.
    # Also need to prep and download remote files.
    if not ("files" in data and data["files"] and aligner and (_is_cram_input(data["files"]) or
                                                               objectstore.is_remote(data["files"][0]))):
        # skip indexing on samples without input files or not doing alignment
        if ("files" not in data or not data["files"] or data["files"][0] is None or not aligner):
            return [[data]]
        # Keep the untouched inputs around; downstream steps work on the
        # prepared (bgzipped/indexed) copies instead.
        data["files_orig"] = data["files"]
        data["files"] = prep_fastq_inputs(data["files"], data)
        # preparation converts illumina into sanger format
        data["config"]["algorithm"]["quality_format"] = "standard"
        # Handle any necessary trimming
        data = utils.to_single_data(sample.trim_sample(data)[0])
        _prep_grabix_indexes(data["files"], data)
    data = _set_align_split_size(data)
    out = []
    # When a split size is configured, emit one copy of the sample per
    # read-chunk so alignment can fan out across machines.
    if tz.get_in(["config", "algorithm", "align_split_size"], data):
        splits = _find_read_splits(data["files"][0], int(data["config"]["algorithm"]["align_split_size"]))
        for split in splits:
            cur_data = copy.deepcopy(data)
            cur_data["align_split"] = split
            out.append([cur_data])
    else:
        out.append([data])
    # CWL runs need record-shaped outputs for the listed keys.
    if "output_cwl_keys" in data:
        out = cwlutils.samples_to_records([utils.to_single_data(x) for x in out],
                                          ["files", "align_split", "config__algorithm__quality_format"])
    return out
|
python
|
def issamedoc(self):
    """Return :const:`True` if this is a same-document reference.

    A same-document reference has no scheme, no authority, an empty
    path, and no query component.
    """
    has_scheme = self.scheme is not None
    has_authority = self.authority is not None
    has_query = self.query is not None
    return not (has_scheme or has_authority or self.path or has_query)
|
java
|
/**
 * Determines the decimal exponent after a possible round-up at the
 * given digit position.
 *
 * <p>If every digit before {@code length} is '9' and the digit at
 * {@code length} would round up ({@code >= '5'}), the carry propagates
 * through the entire prefix and the exponent grows by one; otherwise
 * the exponent is unchanged.
 *
 * @param length number of leading digits that survive the round;
 *               out-of-range values leave the exponent untouched
 * @return the (possibly incremented) decimal exponent
 */
private int checkExponent(int length) {
    if (length >= nDigits || length < 0) {
        return decExponent;
    }
    int i = 0;
    while (i < length) {
        // Any digit below '9' absorbs the carry, so no exponent change.
        if (digits[i] != '9') {
            return decExponent;
        }
        i++;
    }
    boolean roundsUp = digits[length] >= '5';
    return roundsUp ? decExponent + 1 : decExponent;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.